[13 new files (mode 100644) are also added by this changeset; their contents were too large for the diff viewer and are not shown.]
--- a/contrib/automation/requirements.txt
+++ b/contrib/automation/requirements.txt
@@ -1,193 +1,193 @@
 #
 # This file is autogenerated by pip-compile
 # To update, run:
 #
 #    pip-compile --generate-hashes --output-file=contrib/automation/requirements.txt contrib/automation/requirements.txt.in
 #
 asn1crypto==1.0.1 \
     --hash=sha256:0b199f211ae690df3db4fd6c1c4ff976497fb1da689193e368eedbadc53d9292 \
     --hash=sha256:bca90060bd995c3f62c4433168eab407e44bdbdb567b3f3a396a676c1a4c4a3f \
     # via cryptography
 bcrypt==3.1.7 \
     --hash=sha256:0258f143f3de96b7c14f762c770f5fc56ccd72f8a1857a451c1cd9a655d9ac89 \
     --hash=sha256:0b0069c752ec14172c5f78208f1863d7ad6755a6fae6fe76ec2c80d13be41e42 \
     --hash=sha256:19a4b72a6ae5bb467fea018b825f0a7d917789bcfe893e53f15c92805d187294 \
     --hash=sha256:5432dd7b34107ae8ed6c10a71b4397f1c853bd39a4d6ffa7e35f40584cffd161 \
     --hash=sha256:69361315039878c0680be456640f8705d76cb4a3a3fe1e057e0f261b74be4b31 \
     --hash=sha256:6fe49a60b25b584e2f4ef175b29d3a83ba63b3a4df1b4c0605b826668d1b6be5 \
     --hash=sha256:74a015102e877d0ccd02cdeaa18b32aa7273746914a6c5d0456dd442cb65b99c \
     --hash=sha256:763669a367869786bb4c8fcf731f4175775a5b43f070f50f46f0b59da45375d0 \
     --hash=sha256:8b10acde4e1919d6015e1df86d4c217d3b5b01bb7744c36113ea43d529e1c3de \
     --hash=sha256:9fe92406c857409b70a38729dbdf6578caf9228de0aef5bc44f859ffe971a39e \
     --hash=sha256:a190f2a5dbbdbff4b74e3103cef44344bc30e61255beb27310e2aec407766052 \
     --hash=sha256:a595c12c618119255c90deb4b046e1ca3bcfad64667c43d1166f2b04bc72db09 \
     --hash=sha256:c9457fa5c121e94a58d6505cadca8bed1c64444b83b3204928a866ca2e599105 \
     --hash=sha256:cb93f6b2ab0f6853550b74e051d297c27a638719753eb9ff66d1e4072be67133 \
     --hash=sha256:d7bdc26475679dd073ba0ed2766445bb5b20ca4793ca0db32b399dccc6bc84b7 \
     --hash=sha256:ff032765bb8716d9387fd5376d987a937254b0619eff0972779515b5c98820bc \
     # via paramiko
 bleach==3.1.0 \
     --hash=sha256:213336e49e102af26d9cde77dd2d0397afabc5a6bf2fed985dc35b5d1e285a16 \
     --hash=sha256:3fdf7f77adcf649c9911387df51254b813185e32b2c6619f690b593a617e19fa \
     # via readme-renderer
 boto3==1.9.243 \
     --hash=sha256:404acbecef8f4912f18312fcfaffe7eba7f10b3b7adf7853bdba59cdf2275ebb \
     --hash=sha256:c6e5a7e4548ce7586c354ff633f2a66ba3c471d15a8ae6a30f873122ab04e1cf
 botocore==1.12.243 \
     --hash=sha256:397585a7881230274afb8d1877ef69a661b0a311745cd324f14a052fb2a2863a \
     --hash=sha256:4496f8da89cb496462a831897ad248e13e431d9fa7e41e06d426fd6658ab6e59 \
     # via boto3, s3transfer
-certifi==20
-    --hash=sha256:e4f3620cfea4f83eedc95b24abd9cd56f3c4b146dd0177e83a21b4eb49e21e50 \
-    --hash=sha256:fd7c7c74727ddcf00e9acd26bba8da604ffec95bf1c2144e67aff7a8b50e6cef \
+certifi==2021.5.30 \
+    --hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee \
+    --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 \
     # via requests
 cffi==1.12.3 \
     --hash=sha256:041c81822e9f84b1d9c401182e174996f0bae9991f33725d059b771744290774 \
     --hash=sha256:046ef9a22f5d3eed06334d01b1e836977eeef500d9b78e9ef693f9380ad0b83d \
     --hash=sha256:066bc4c7895c91812eff46f4b1c285220947d4aa46fa0a2651ff85f2afae9c90 \
     --hash=sha256:066c7ff148ae33040c01058662d6752fd73fbc8e64787229ea8498c7d7f4041b \
     --hash=sha256:2444d0c61f03dcd26dbf7600cf64354376ee579acad77aef459e34efcb438c63 \
     --hash=sha256:300832850b8f7967e278870c5d51e3819b9aad8f0a2c8dbe39ab11f119237f45 \
     --hash=sha256:34c77afe85b6b9e967bd8154e3855e847b70ca42043db6ad17f26899a3df1b25 \
     --hash=sha256:46de5fa00f7ac09f020729148ff632819649b3e05a007d286242c4882f7b1dc3 \
     --hash=sha256:4aa8ee7ba27c472d429b980c51e714a24f47ca296d53f4d7868075b175866f4b \
     --hash=sha256:4d0004eb4351e35ed950c14c11e734182591465a33e960a4ab5e8d4f04d72647 \
     --hash=sha256:4e3d3f31a1e202b0f5a35ba3bc4eb41e2fc2b11c1eff38b362de710bcffb5016 \
     --hash=sha256:50bec6d35e6b1aaeb17f7c4e2b9374ebf95a8975d57863546fa83e8d31bdb8c4 \
     --hash=sha256:55cad9a6df1e2a1d62063f79d0881a414a906a6962bc160ac968cc03ed3efcfb \
     --hash=sha256:5662ad4e4e84f1eaa8efce5da695c5d2e229c563f9d5ce5b0113f71321bcf753 \
     --hash=sha256:59b4dc008f98fc6ee2bb4fd7fc786a8d70000d058c2bbe2698275bc53a8d3fa7 \
     --hash=sha256:73e1ffefe05e4ccd7bcea61af76f36077b914f92b76f95ccf00b0c1b9186f3f9 \
     --hash=sha256:a1f0fd46eba2d71ce1589f7e50a9e2ffaeb739fb2c11e8192aa2b45d5f6cc41f \
     --hash=sha256:a2e85dc204556657661051ff4bab75a84e968669765c8a2cd425918699c3d0e8 \
     --hash=sha256:a5457d47dfff24882a21492e5815f891c0ca35fefae8aa742c6c263dac16ef1f \
     --hash=sha256:a8dccd61d52a8dae4a825cdbb7735da530179fea472903eb871a5513b5abbfdc \
     --hash=sha256:ae61af521ed676cf16ae94f30fe202781a38d7178b6b4ab622e4eec8cefaff42 \
     --hash=sha256:b012a5edb48288f77a63dba0840c92d0504aa215612da4541b7b42d849bc83a3 \
     --hash=sha256:d2c5cfa536227f57f97c92ac30c8109688ace8fa4ac086d19d0af47d134e2909 \
     --hash=sha256:d42b5796e20aacc9d15e66befb7a345454eef794fdb0737d1af593447c6c8f45 \
     --hash=sha256:dee54f5d30d775f525894d67b1495625dd9322945e7fee00731952e0368ff42d \
     --hash=sha256:e070535507bd6aa07124258171be2ee8dfc19119c28ca94c9dfb7efd23564512 \
     --hash=sha256:e1ff2748c84d97b065cc95429814cdba39bcbd77c9c85c89344b317dc0d9cbff \
     --hash=sha256:ed851c75d1e0e043cbf5ca9a8e1b13c4c90f3fbd863dacb01c0808e2b5204201 \
     # via bcrypt, cryptography, pynacl
 chardet==3.0.4 \
     --hash=sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae \
     --hash=sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691 \
     # via requests
 cryptography==2.7 \
     --hash=sha256:24b61e5fcb506424d3ec4e18bca995833839bf13c59fc43e530e488f28d46b8c \
     --hash=sha256:25dd1581a183e9e7a806fe0543f485103232f940fcfc301db65e630512cce643 \
     --hash=sha256:3452bba7c21c69f2df772762be0066c7ed5dc65df494a1d53a58b683a83e1216 \
     --hash=sha256:41a0be220dd1ed9e998f5891948306eb8c812b512dc398e5a01846d855050799 \
     --hash=sha256:5751d8a11b956fbfa314f6553d186b94aa70fdb03d8a4d4f1c82dcacf0cbe28a \
     --hash=sha256:5f61c7d749048fa6e3322258b4263463bfccefecb0dd731b6561cb617a1d9bb9 \
     --hash=sha256:72e24c521fa2106f19623a3851e9f89ddfdeb9ac63871c7643790f872a305dfc \
     --hash=sha256:7b97ae6ef5cba2e3bb14256625423413d5ce8d1abb91d4f29b6d1a081da765f8 \
     --hash=sha256:961e886d8a3590fd2c723cf07be14e2a91cf53c25f02435c04d39e90780e3b53 \
     --hash=sha256:96d8473848e984184b6728e2c9d391482008646276c3ff084a1bd89e15ff53a1 \
     --hash=sha256:ae536da50c7ad1e002c3eee101871d93abdc90d9c5f651818450a0d3af718609 \
     --hash=sha256:b0db0cecf396033abb4a93c95d1602f268b3a68bb0a9cc06a7cff587bb9a7292 \
     --hash=sha256:cfee9164954c186b191b91d4193989ca994703b2fff406f71cf454a2d3c7327e \
     --hash=sha256:e6347742ac8f35ded4a46ff835c60e68c22a536a8ae5c4422966d06946b6d4c6 \
     --hash=sha256:f27d93f0139a3c056172ebb5d4f9056e770fdf0206c2f422ff2ebbad142e09ed \
     --hash=sha256:f57b76e46a58b63d1c6375017f4564a28f19a5ca912691fd2e4261b3414b618d \
     # via paramiko, pypsrp
 docutils==0.15.2 \
     --hash=sha256:6c4f696463b79f1fb8ba0c594b63840ebd41f059e92b31957c46b74a4599b6d0 \
     --hash=sha256:9e4d7ecfc600058e07ba661411a2b7de2fd0fafa17d1a7f7361cd47b1175c827 \
     --hash=sha256:a2aeea129088da402665e92e0b25b04b073c04b2dce4ab65caaa38b7ce2e1a99 \
     # via botocore, readme-renderer
 idna==2.8 \
     --hash=sha256:c357b3f628cf53ae2c4c05627ecc484553142ca23264e593d327bcde5e9c3407 \
     --hash=sha256:ea8b7f6188e6fa117537c3df7da9fc686d485087abf6ac197f9c46432f7e4a3c \
     # via requests
 jmespath==0.9.4 \
     --hash=sha256:3720a4b1bd659dd2eecad0666459b9788813e032b83e7ba58578e48254e0a0e6 \
     --hash=sha256:bde2aef6f44302dfb30320115b17d030798de8c4110e28d5cf6cf91a7a31074c \
     # via boto3, botocore
 ntlm-auth==1.4.0 \
     --hash=sha256:11f7a3cec38155b7cecdd9bbc8c37cd738d8012f0523b3f98d8caefe394feb97 \
     --hash=sha256:350f2389c8ee5517f47db55a36ac2f8efc9742a60a678d6e2caa92385bdcaa9a \
     # via pypsrp
 paramiko==2.6.0 \
     --hash=sha256:99f0179bdc176281d21961a003ffdb2ec369daac1a1007241f53374e376576cf \
     --hash=sha256:f4b2edfa0d226b70bd4ca31ea7e389325990283da23465d572ed1f70a7583041
 pkginfo==1.5.0.1 \
     --hash=sha256:7424f2c8511c186cd5424bbf31045b77435b37a8d604990b79d4e70d741148bb \
     --hash=sha256:a6d9e40ca61ad3ebd0b72fbadd4fba16e4c0e4df0428c041e01e06eb6ee71f32 \
     # via twine
 pycparser==2.19 \
     --hash=sha256:a988718abfad80b6b157acce7bf130a30876d27603738ac39f140993246b25b3 \
     # via cffi
 pygments==2.4.2 \
     --hash=sha256:71e430bc85c88a430f000ac1d9b331d2407f681d6f6aec95e8bcfbc3df5b0127 \
     --hash=sha256:881c4c157e45f30af185c1ffe8d549d48ac9127433f2c380c24b84572ad66297 \
     # via readme-renderer
 pynacl==1.3.0 \
     --hash=sha256:05c26f93964373fc0abe332676cb6735f0ecad27711035b9472751faa8521255 \
     --hash=sha256:0c6100edd16fefd1557da078c7a31e7b7d7a52ce39fdca2bec29d4f7b6e7600c \
     --hash=sha256:0d0a8171a68edf51add1e73d2159c4bc19fc0718e79dec51166e940856c2f28e \
     --hash=sha256:1c780712b206317a746ace34c209b8c29dbfd841dfbc02aa27f2084dd3db77ae \
     --hash=sha256:2424c8b9f41aa65bbdbd7a64e73a7450ebb4aa9ddedc6a081e7afcc4c97f7621 \
     --hash=sha256:2d23c04e8d709444220557ae48ed01f3f1086439f12dbf11976e849a4926db56 \
     --hash=sha256:30f36a9c70450c7878053fa1344aca0145fd47d845270b43a7ee9192a051bf39 \
     --hash=sha256:37aa336a317209f1bb099ad177fef0da45be36a2aa664507c5d72015f956c310 \
     --hash=sha256:4943decfc5b905748f0756fdd99d4f9498d7064815c4cf3643820c9028b711d1 \
     --hash=sha256:57ef38a65056e7800859e5ba9e6091053cd06e1038983016effaffe0efcd594a \
     --hash=sha256:5bd61e9b44c543016ce1f6aef48606280e45f892a928ca7068fba30021e9b786 \
     --hash=sha256:6482d3017a0c0327a49dddc8bd1074cc730d45db2ccb09c3bac1f8f32d1eb61b \
     --hash=sha256:7d3ce02c0784b7cbcc771a2da6ea51f87e8716004512493a2b69016326301c3b \
     --hash=sha256:a14e499c0f5955dcc3991f785f3f8e2130ed504fa3a7f44009ff458ad6bdd17f \
     --hash=sha256:a39f54ccbcd2757d1d63b0ec00a00980c0b382c62865b61a505163943624ab20 \
     --hash=sha256:aabb0c5232910a20eec8563503c153a8e78bbf5459490c49ab31f6adf3f3a415 \
     --hash=sha256:bd4ecb473a96ad0f90c20acba4f0bf0df91a4e03a1f4dd6a4bdc9ca75aa3a715 \
     --hash=sha256:e2da3c13307eac601f3de04887624939aca8ee3c9488a0bb0eca4fb9401fc6b1 \
     --hash=sha256:f67814c38162f4deb31f68d590771a29d5ae3b1bd64b75cf232308e5c74777e0 \
     # via paramiko
 pypsrp==0.4.0 \
     --hash=sha256:64b5bdd725a9744c821483b05ecd266f6417f4c6e90ee961a08838480f7d025e \
     --hash=sha256:f42919247fb80f7dc24c552560d7c24e754d15326030c9e3b7b94f51cfa4dc69
 python-dateutil==2.8.0 \
     --hash=sha256:7e6584c74aeed623791615e26efd690f29817a27c73085b78e4bad02493df2fb \
     --hash=sha256:c89805f6f4d64db21ed966fda138f8a5ed7a4fdbc1a8ee329ce1b74e3c74da9e \
     # via botocore
 readme-renderer==24.0 \
     --hash=sha256:bb16f55b259f27f75f640acf5e00cf897845a8b3e4731b5c1a436e4b8529202f \
     --hash=sha256:c8532b79afc0375a85f10433eca157d6b50f7d6990f337fa498c96cd4bfc203d \
     # via twine
 requests-toolbelt==0.9.1 \
     --hash=sha256:380606e1d10dc85c3bd47bf5a6095f815ec007be7a8b69c878507068df059e6f \
     --hash=sha256:968089d4584ad4ad7c171454f0a5c6dac23971e9472521ea3b6d49d610aa6fc0 \
     # via twine
 requests==2.22.0 \
     --hash=sha256:11e007a8a2aa0323f5a921e9e6a2d7e4e67d9877e85773fba9ba6419025cbeb4 \
     --hash=sha256:9cf5292fcd0f598c671cfc1e0d7d1a7f13bb8085e9a590f48c010551dc6c4b31 \
     # via pypsrp, requests-toolbelt, twine
 s3transfer==0.2.1 \
     --hash=sha256:6efc926738a3cd576c2a79725fed9afde92378aa5c6a957e3af010cb019fac9d \
     --hash=sha256:b780f2411b824cb541dbcd2c713d0cb61c7d1bcadae204cdddda2b35cef493ba \
     # via boto3
 six==1.12.0 \
     --hash=sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c \
     --hash=sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73 \
     # via bcrypt, bleach, cryptography, pynacl, pypsrp, python-dateutil, readme-renderer
 tqdm==4.36.1 \
     --hash=sha256:abc25d0ce2397d070ef07d8c7e706aede7920da163c64997585d42d3537ece3d \
     --hash=sha256:dd3fcca8488bb1d416aa7469d2f277902f26260c45aa86b667b074cd44b3b115 \
     # via twine
 twine==2.0.0 \
     --hash=sha256:5319dd3e02ac73fcddcd94f035b9631589ab5d23e1f4699d57365199d85261e1 \
     --hash=sha256:9fe7091715c7576df166df8ef6654e61bada39571783f2fd415bdcba867c6993
 urllib3==1.25.6 \
     --hash=sha256:3de946ffbed6e6746608990594d08faac602528ac7015ac28d33cee6a45b7398 \
     --hash=sha256:9a107b99a5393caf59c7aa3c1249c16e6879447533d0887f4336dde834c7be86 \
     # via botocore, requests
 webencodings==0.5.1 \
     --hash=sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78 \
     --hash=sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923 \
     # via bleach

 # WARNING: The following packages were not pinned, but pip requires them to be
 # pinned when the requirements file includes hashes. Consider using the --allow-unsafe flag.
 # setuptools==41.2.0 # via twine
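The header comment in the lockfile above records the pip-compile invocation that regenerates it. As a hedged, self-contained sketch of the same round trip driven from Python (assuming pip-tools and pip are installed; nothing here is part of the diff itself), note that once hashes are present, pip's --require-hashes mode makes the pins above mandatory at install time:

import subprocess

# Regenerate the lockfile with pinned hashes (mirrors the header comment).
subprocess.run(
    ['pip-compile', '--generate-hashes',
     '--output-file=contrib/automation/requirements.txt',
     'contrib/automation/requirements.txt.in'],
    check=True,
)
# Install with hash checking enforced; any hash mismatch aborts the install.
subprocess.run(
    ['pip', 'install', '--require-hashes',
     '-r', 'contrib/automation/requirements.txt'],
    check=True,
)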
--- a/contrib/packaging/requirements-windows-py2.txt
+++ b/contrib/packaging/requirements-windows-py2.txt
@@ -1,59 +1,59 @@
 #
 # This file is autogenerated by pip-compile
 # To update, run:
 #
 #    pip-compile --generate-hashes --output-file=contrib/packaging/requirements-windows-py2.txt contrib/packaging/requirements-windows.txt.in
 #
-certifi==202
-    --hash=sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3 \
-    --hash=sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41 \
+certifi==2021.5.30 \
+    --hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee \
+    --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 \
     # via dulwich
 configparser==4.0.2 \
     --hash=sha256:254c1d9c79f60c45dfde850850883d5aaa7f19a23f13561243a050d5a7c3fe4c \
     --hash=sha256:c7d282687a5308319bf3d2e7706e575c635b0a470342641c93bea0ea3b5331df \
     # via entrypoints
 docutils==0.16 \
     --hash=sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af \
     --hash=sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc \
     # via -r contrib/packaging/requirements-windows.txt.in
 dulwich==0.19.16 ; python_version <= "2.7" \
     --hash=sha256:10699277c6268d0c16febe141a5b1c1a6e9744f3144c2d2de1706f4b1adafe63 \
     --hash=sha256:267160904e9a1cb6c248c5efc53597a35d038ecc6f60bdc4546b3053bed11982 \
     --hash=sha256:4e3aba5e4844e7c700721c1fc696987ea820ee3528a03604dc4e74eff4196826 \
     --hash=sha256:60bb2c2c92f5025c1b53a556304008f0f624c98ae36f22d870e056b2d4236c11 \
     --hash=sha256:dddae02d372fc3b5cfb0046d0f62246ef281fa0c088df7601ab5916607add94b \
     --hash=sha256:f00d132082b8fcc2eb0d722abc773d4aeb5558c1475d7edd1f0f571146c29db9 \
     --hash=sha256:f74561c448bfb6f04c07de731c1181ae4280017f759b0bb04fa5770aa84ca850 \
     # via -r contrib/packaging/requirements-windows.txt.in
 entrypoints==0.3 \
     --hash=sha256:589f874b313739ad35be6e0cd7efde2a4e9b6fea91edcc34e58ecbb8dbe56d19 \
     --hash=sha256:c70dd71abe5a8c85e55e12c19bd91ccfeec11a6e99044204511f9ed547d48451 \
     # via keyring
 keyring==18.0.1 \
     --hash=sha256:67d6cc0132bd77922725fae9f18366bb314fd8f95ff4d323a4df41890a96a838 \
     --hash=sha256:7b29ebfcf8678c4da531b2478a912eea01e80007e5ddca9ee0c7038cb3489ec6 \
     # via -r contrib/packaging/requirements-windows.txt.in
 pygments==2.5.2 \
     --hash=sha256:2a3fe295e54a20164a9df49c75fa58526d3be48e14aceba6d6b1e8ac0bfd6f1b \
     --hash=sha256:98c8aa5a9f778fcd1026a17361ddaf7330d1b7c62ae97c3bb0ae73e0b9b6b0fe \
     # via -r contrib/packaging/requirements-windows.txt.in
 pywin32-ctypes==0.2.0 \
     --hash=sha256:24ffc3b341d457d48e8922352130cf2644024a4ff09762a2261fd34c36ee5942 \
     --hash=sha256:9dc2d991b3479cc2df15930958b674a48a227d5361d413827a4cfd0b5876fc98 \
     # via -r contrib/packaging/requirements-windows.txt.in, keyring
 urllib3==1.25.11 \
     --hash=sha256:8d7eaa5a82a1cac232164990f04874c594c9453ec55eef02eab885aa02fc17a2 \
     --hash=sha256:f5321fbe4bf3fefa0efd0bfe7fb14e90909eb62a48ccda331726b4319897dd5e \
     # via dulwich
 windows-curses==2.1.0 \
     --hash=sha256:261fde5680d1ce4ce116908996b9a3cfb0ffb03ea68d42240f62b56a9fa6af2c \
     --hash=sha256:66034dc9a705d87308cc9ea90836f4ee60008a1d5e2c1d34ace627f60268158b \
     --hash=sha256:669caad3ae16faf2d201d7ab3b8af418a2fd074d8a39d60ca26f3acb34b6afe5 \
     --hash=sha256:73bd3eebccfda55330783f165151de115bfa238d1332f0b2e224b550d6187840 \
     --hash=sha256:89a6d973f88cfe49b41ea80164dcbec209d296e0cec34a02002578b0bf464a64 \
     --hash=sha256:8ba7c000d7ffa5452bbd0966b96e69261e4f117ebe510aeb8771a9650197b7f0 \
     --hash=sha256:97084c6b37b1534f6a28a514d521dfae402f77dcbad42b14ee32e8d5bdc13648 \
     --hash=sha256:9e474a181f96d60429a4766145628264e60b72e7715876f9135aeb2e842f9433 \
     --hash=sha256:cfe64c30807c146ef8d094412f90f2a2c81ad6aefff3ebfe8e37aabe2f801303 \
     --hash=sha256:ff8c67f74b88944d99fa9d22971c05c335bc74f149120f0a69340c2c3a595497 \
     # via -r contrib/packaging/requirements-windows.txt.in
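The two Windows lockfiles in this changeset are split by the environment markers on their dulwich pins: python_version <= "2.7" in the file above, python_version >= "3" in the file that follows. A minimal sketch of how pip-style markers are evaluated against the running interpreter, assuming the third-party packaging library (not itself pinned in these files):

from packaging.markers import Marker

# evaluate() tests a marker against the current interpreter's environment.
print(Marker('python_version <= "2.7"').evaluate())  # False on any Python 3
print(Marker('python_version >= "3"').evaluate())    # True on any Python 3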
--- a/contrib/packaging/requirements-windows-py3.txt
+++ b/contrib/packaging/requirements-windows-py3.txt
@@ -1,301 +1,301 @@
 #
 # This file is autogenerated by pip-compile
 # To update, run:
 #
 #    pip-compile --generate-hashes --output-file=contrib/packaging/requirements-windows-py3.txt contrib/packaging/requirements-windows.txt.in
 #
 atomicwrites==1.4.0 \
     --hash=sha256:6d1784dea7c0c8d4a5172b6c620f40b6e4cbfdf96d783691f2e1302a7b88e197 \
     --hash=sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a \
     # via pytest
 attrs==21.2.0 \
     --hash=sha256:149e90d6d8ac20db7a955ad60cf0e6881a3f20d37096140088356da6c716b0b1 \
     --hash=sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb \
     # via pytest
 cached-property==1.5.2 \
     --hash=sha256:9fa5755838eecbb2d234c3aa390bd80fbd3ac6b6869109bfc1b499f7bd89a130 \
     --hash=sha256:df4f613cf7ad9a588cc381aaf4a512d26265ecebd5eb9e1ba12f1319eb85a6a0 \
     # via pygit2
-certifi==202
-    --hash=sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3 \
-    --hash=sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41 \
+certifi==2021.5.30 \
+    --hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee \
+    --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 \
     # via dulwich
 cffi==1.14.4 \
     --hash=sha256:00a1ba5e2e95684448de9b89888ccd02c98d512064b4cb987d48f4b40aa0421e \
     --hash=sha256:00e28066507bfc3fe865a31f325c8391a1ac2916219340f87dfad602c3e48e5d \
     --hash=sha256:045d792900a75e8b1e1b0ab6787dd733a8190ffcf80e8c8ceb2fb10a29ff238a \
     --hash=sha256:0638c3ae1a0edfb77c6765d487fee624d2b1ee1bdfeffc1f0b58c64d149e7eec \
     --hash=sha256:105abaf8a6075dc96c1fe5ae7aae073f4696f2905fde6aeada4c9d2926752362 \
     --hash=sha256:155136b51fd733fa94e1c2ea5211dcd4c8879869008fc811648f16541bf99668 \
     --hash=sha256:1a465cbe98a7fd391d47dce4b8f7e5b921e6cd805ef421d04f5f66ba8f06086c \
     --hash=sha256:1d2c4994f515e5b485fd6d3a73d05526aa0fcf248eb135996b088d25dfa1865b \
     --hash=sha256:2c24d61263f511551f740d1a065eb0212db1dbbbbd241db758f5244281590c06 \
     --hash=sha256:51a8b381b16ddd370178a65360ebe15fbc1c71cf6f584613a7ea08bfad946698 \
     --hash=sha256:594234691ac0e9b770aee9fcdb8fa02c22e43e5c619456efd0d6c2bf276f3eb2 \
     --hash=sha256:5cf4be6c304ad0b6602f5c4e90e2f59b47653ac1ed9c662ed379fe48a8f26b0c \
     --hash=sha256:64081b3f8f6f3c3de6191ec89d7dc6c86a8a43911f7ecb422c60e90c70be41c7 \
     --hash=sha256:6bc25fc545a6b3d57b5f8618e59fc13d3a3a68431e8ca5fd4c13241cd70d0009 \
     --hash=sha256:798caa2a2384b1cbe8a2a139d80734c9db54f9cc155c99d7cc92441a23871c03 \
     --hash=sha256:7c6b1dece89874d9541fc974917b631406233ea0440d0bdfbb8e03bf39a49b3b \
     --hash=sha256:840793c68105fe031f34d6a086eaea153a0cd5c491cde82a74b420edd0a2b909 \
     --hash=sha256:8d6603078baf4e11edc4168a514c5ce5b3ba6e3e9c374298cb88437957960a53 \
     --hash=sha256:9cc46bc107224ff5b6d04369e7c595acb700c3613ad7bcf2e2012f62ece80c35 \
     --hash=sha256:9f7a31251289b2ab6d4012f6e83e58bc3b96bd151f5b5262467f4bb6b34a7c26 \
     --hash=sha256:9ffb888f19d54a4d4dfd4b3f29bc2c16aa4972f1c2ab9c4ab09b8ab8685b9c2b \
     --hash=sha256:a7711edca4dcef1a75257b50a2fbfe92a65187c47dab5a0f1b9b332c5919a3fb \
     --hash=sha256:af5c59122a011049aad5dd87424b8e65a80e4a6477419c0c1015f73fb5ea0293 \
     --hash=sha256:b18e0a9ef57d2b41f5c68beefa32317d286c3d6ac0484efd10d6e07491bb95dd \
     --hash=sha256:b4e248d1087abf9f4c10f3c398896c87ce82a9856494a7155823eb45a892395d \
     --hash=sha256:ba4e9e0ae13fc41c6b23299545e5ef73055213e466bd107953e4a013a5ddd7e3 \
     --hash=sha256:c6332685306b6417a91b1ff9fae889b3ba65c2292d64bd9245c093b1b284809d \
     --hash=sha256:d9efd8b7a3ef378dd61a1e77367f1924375befc2eba06168b6ebfa903a5e59ca \
     --hash=sha256:df5169c4396adc04f9b0a05f13c074df878b6052430e03f50e68adf3a57aa28d \
     --hash=sha256:ebb253464a5d0482b191274f1c8bf00e33f7e0b9c66405fbffc61ed2c839c775 \
     --hash=sha256:ec80dc47f54e6e9a78181ce05feb71a0353854cc26999db963695f950b5fb375 \
     --hash=sha256:f032b34669220030f905152045dfa27741ce1a6db3324a5bc0b96b6c7420c87b \
     --hash=sha256:f60567825f791c6f8a592f3c6e3bd93dd2934e3f9dac189308426bd76b00ef3b \
     --hash=sha256:f803eaa94c2fcda012c047e62bc7a51b0bdabda1cad7a92a522694ea2d76e49f \
     # via pygit2
 colorama==0.4.4 \
     --hash=sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b \
     --hash=sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2 \
     # via pytest
 docutils==0.16 \
     --hash=sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af \
     --hash=sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc \
     # via -r contrib/packaging/requirements-windows.txt.in
 dulwich==0.20.6 ; python_version >= "3" \
     --hash=sha256:1ccd55e38fa9f169290f93e027ab4508202f5bdd6ef534facac4edd3f6903f0d \
     --hash=sha256:2452a0379cc7bbbd7ab893ec104d18039f1ea98b0d6be6bca5646e5cf29e0ae9 \
     --hash=sha256:2f4aebc54ed2d37dcee737024421452375570a422eb682232e676aa7ebc9cb4b \
     --hash=sha256:304f52b10c49c3a6ddfbd73e2e93d8e979350225cfba9688e51110e74fa2f718 \
     --hash=sha256:49e747c72d9099e873bf6196260346d5996c3f28af788294d47a8accdc524de7 \
     --hash=sha256:4fee359928c59b53af153a582a7ed7595259a5a825df400301a29e17fd78dfd3 \
     --hash=sha256:50ef300a9fa4efd9f85009c2bd8b515266ec1529400f8834f85c04fa9f09b2c0 \
     --hash=sha256:5348310f21b2a23847342ce464461499b6652483fa42de03714d0f6421a99698 \
     --hash=sha256:7e7b5dea5178b6493fdb83adccbe81de9ddff55f79880185ed594c0e3a97209b \
     --hash=sha256:8f7a7f973be2beedfb10dd8d3eb6bdf9ec466c72ad555704897cbd6357fe5021 \
     --hash=sha256:bea6e6caffc6c73bfd1647714c5715ab96ac49deb8beb8b67511529afa25685a \
     --hash=sha256:e5871b86a079e9e290f52ab14559cea1b694a0b8ed2b9ebb898f6ced7f14a406 \
     --hash=sha256:e593f514b8ac740b4ceeb047745b4719bfc9f334904245c6edcb3a9d002f577b \
     # via -r contrib/packaging/requirements-windows.txt.in
 fuzzywuzzy==0.18.0 \
     --hash=sha256:45016e92264780e58972dca1b3d939ac864b78437422beecebb3095f8efd00e8 \
     # via -r contrib/packaging/requirements-windows.txt.in
 idna==3.2 \
     --hash=sha256:14475042e284991034cb48e06f6851428fb14c4dc953acd9be9a5e95c7b6dd7a \
     --hash=sha256:467fbad99067910785144ce333826c71fb0e63a425657295239737f7ecd125f3 \
     # via yarl
 importlib-metadata==3.1.0 \
     --hash=sha256:590690d61efdd716ff82c39ca9a9d4209252adfe288a4b5721181050acbd4175 \
     --hash=sha256:d9b8a46a0885337627a6430db287176970fff18ad421becec1d64cfc763c2099 \
     # via keyring, pluggy, pytest
 iniconfig==1.1.1 \
     --hash=sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3 \
     --hash=sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32 \
     # via pytest
 keyring==21.4.0 \
     --hash=sha256:4e34ea2fdec90c1c43d6610b5a5fafa1b9097db1802948e90caf5763974b8f8d \
     --hash=sha256:9aeadd006a852b78f4b4ef7c7556c2774d2432bbef8ee538a3e9089ac8b11466 \
     # via -r contrib/packaging/requirements-windows.txt.in
 multidict==5.1.0 \
     --hash=sha256:018132dbd8688c7a69ad89c4a3f39ea2f9f33302ebe567a879da8f4ca73f0d0a \
     --hash=sha256:051012ccee979b2b06be928a6150d237aec75dd6bf2d1eeeb190baf2b05abc93 \
     --hash=sha256:05c20b68e512166fddba59a918773ba002fdd77800cad9f55b59790030bab632 \
     --hash=sha256:07b42215124aedecc6083f1ce6b7e5ec5b50047afa701f3442054373a6deb656 \
     --hash=sha256:0e3c84e6c67eba89c2dbcee08504ba8644ab4284863452450520dad8f1e89b79 \
     --hash=sha256:0e929169f9c090dae0646a011c8b058e5e5fb391466016b39d21745b48817fd7 \
     --hash=sha256:1ab820665e67373de5802acae069a6a05567ae234ddb129f31d290fc3d1aa56d \
     --hash=sha256:25b4e5f22d3a37ddf3effc0710ba692cfc792c2b9edfb9c05aefe823256e84d5 \
     --hash=sha256:2e68965192c4ea61fff1b81c14ff712fc7dc15d2bd120602e4a3494ea6584224 \
     --hash=sha256:2f1a132f1c88724674271d636e6b7351477c27722f2ed789f719f9e3545a3d26 \
     --hash=sha256:37e5438e1c78931df5d3c0c78ae049092877e5e9c02dd1ff5abb9cf27a5914ea \
     --hash=sha256:3a041b76d13706b7fff23b9fc83117c7b8fe8d5fe9e6be45eee72b9baa75f348 \
     --hash=sha256:3a4f32116f8f72ecf2a29dabfb27b23ab7cdc0ba807e8459e59a93a9be9506f6 \
     --hash=sha256:46c73e09ad374a6d876c599f2328161bcd95e280f84d2060cf57991dec5cfe76 \
     --hash=sha256:46dd362c2f045095c920162e9307de5ffd0a1bfbba0a6e990b344366f55a30c1 \
     --hash=sha256:4b186eb7d6ae7c06eb4392411189469e6a820da81447f46c0072a41c748ab73f \
     --hash=sha256:54fd1e83a184e19c598d5e70ba508196fd0bbdd676ce159feb412a4a6664f952 \
     --hash=sha256:585fd452dd7782130d112f7ddf3473ffdd521414674c33876187e101b588738a \
     --hash=sha256:5cf3443199b83ed9e955f511b5b241fd3ae004e3cb81c58ec10f4fe47c7dce37 \
     --hash=sha256:6a4d5ce640e37b0efcc8441caeea8f43a06addace2335bd11151bc02d2ee31f9 \
     --hash=sha256:7df80d07818b385f3129180369079bd6934cf70469f99daaebfac89dca288359 \
     --hash=sha256:806068d4f86cb06af37cd65821554f98240a19ce646d3cd24e1c33587f313eb8 \
     --hash=sha256:830f57206cc96ed0ccf68304141fec9481a096c4d2e2831f311bde1c404401da \
     --hash=sha256:929006d3c2d923788ba153ad0de8ed2e5ed39fdbe8e7be21e2f22ed06c6783d3 \
     --hash=sha256:9436dc58c123f07b230383083855593550c4d301d2532045a17ccf6eca505f6d \
     --hash=sha256:9dd6e9b1a913d096ac95d0399bd737e00f2af1e1594a787e00f7975778c8b2bf \
     --hash=sha256:ace010325c787c378afd7f7c1ac66b26313b3344628652eacd149bdd23c68841 \
     --hash=sha256:b47a43177a5e65b771b80db71e7be76c0ba23cc8aa73eeeb089ed5219cdbe27d \
     --hash=sha256:b797515be8743b771aa868f83563f789bbd4b236659ba52243b735d80b29ed93 \
     --hash=sha256:b7993704f1a4b204e71debe6095150d43b2ee6150fa4f44d6d966ec356a8d61f \
     --hash=sha256:d5c65bdf4484872c4af3150aeebe101ba560dcfb34488d9a8ff8dbcd21079647 \
     --hash=sha256:d81eddcb12d608cc08081fa88d046c78afb1bf8107e6feab5d43503fea74a635 \
     --hash=sha256:dc862056f76443a0db4509116c5cd480fe1b6a2d45512a653f9a855cc0517456 \
     --hash=sha256:ecc771ab628ea281517e24fd2c52e8f31c41e66652d07599ad8818abaad38cda \
     --hash=sha256:f200755768dc19c6f4e2b672421e0ebb3dd54c38d5a4f262b872d8cfcc9e93b5 \
     --hash=sha256:f21756997ad8ef815d8ef3d34edd98804ab5ea337feedcd62fb52d22bf531281 \
     --hash=sha256:fc13a9524bc18b6fb6e0dbec3533ba0496bbed167c56d0aabefd965584557d80 \
     # via yarl
 packaging==21.0 \
     --hash=sha256:7dc96269f53a4ccec5c0670940a4281106dd0bb343f47b7471f779df49c2fbe7 \
     --hash=sha256:c86254f9220d55e31cc94d69bade760f0847da8000def4dfe1c6b872fd14ff14 \
     # via pytest
 pluggy==0.13.1 \
     --hash=sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0 \
     --hash=sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d \
     # via pytest
 py==1.10.0 \
     --hash=sha256:21b81bda15b66ef5e1a777a21c4dcd9c20ad3efd0b3f817e7a809035269e1bd3 \
     --hash=sha256:3b80836aa6d1feeaa108e046da6423ab8f6ceda6468545ae8d02d9d58d18818a \
     # via pytest
 pycparser==2.20 \
     --hash=sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0 \
     --hash=sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705 \
     # via cffi
 pygit2==1.4.0 ; python_version >= "3" \
     --hash=sha256:0d298098e286eeda000e49ca7e1b41f87300e10dd8b9d06b32b008bd61f50b83 \
     --hash=sha256:0ee135eb2cd8b07ce1374f3596cc5c3213472d6389bad6a4c5d87d8e267e93e9 \
     --hash=sha256:32eb863d6651d4890ced318505ea8dc229bd9637deaf29c898de1ab574d727a0 \
     --hash=sha256:37d6d7d6d7804c42a0fe23425c72e38093488525092fc5e51a05684e63503ce7 \
     --hash=sha256:41204b6f3406d9f53147710f3cc485d77181ba67f57c34d36b7c86de1c14a18c \
     --hash=sha256:818c91b582109d90580c5da74af783738838353f15eb12eeb734d80a974b05a3 \
     --hash=sha256:8306a302487dac67df7af6a064bb37e8a8eb4138958f9560ff49ff162e185dab \
     --hash=sha256:9c2f2d9ef59513007b66f6534b000792b614de3faf60313a0a68f6b8571aea85 \
     --hash=sha256:9c8d5881eb709e2e2e13000b507a131bd5fb91a879581030088d0ddffbcd19af \
     --hash=sha256:b422e417739def0a136a6355723dfe8a5ffc83db5098076f28a14f1d139779c1 \
     --hash=sha256:cbeb38ab1df9b5d8896548a11e63aae8a064763ab5f1eabe4475e6b8a78ee1c8 \
     --hash=sha256:cf00481ddf053e549a6edd0216bdc267b292d261eae02a67bb3737de920cbf88 \
     --hash=sha256:d0d889144e9487d926fecea947c3f39ce5f477e521d7d467d2e66907e4cd657d \
     --hash=sha256:ddb7a1f6d38063e8724abfa1cfdfb0f9b25014b8bca0546274b7a84b873a3888 \
     --hash=sha256:e9037a7d810750fe23c9f5641ef14a0af2525ff03e14752cd4f73e1870ecfcb0 \
     --hash=sha256:ec5c0365a9bdfcac1609d20868507b28685ec5ea7cc3a2c903c9b62ef2e0bbc0 \
     --hash=sha256:fdd8ba30cda277290e000322f505132f590cf89bd7d31829b45a3cb57447ec32 \
     # via -r contrib/packaging/requirements-windows.txt.in
 pygments==2.7.1 \
     --hash=sha256:307543fe65c0947b126e83dd5a61bd8acbd84abec11f43caebaf5534cbc17998 \
     --hash=sha256:926c3f319eda178d1bd90851e4317e6d8cdb5e292a3386aac9bd75eca29cf9c7 \
     # via -r contrib/packaging/requirements-windows.txt.in
 pyparsing==2.4.7 \
     --hash=sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1 \
     --hash=sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b \
     # via packaging
 pytest-vcr==1.0.2 \
     --hash=sha256:23ee51b75abbcc43d926272773aae4f39f93aceb75ed56852d0bf618f92e1896 \
     # via -r contrib/packaging/requirements-windows.txt.in
 pytest==6.2.4 \
     --hash=sha256:50bcad0a0b9c5a72c8e4e7c9855a3ad496ca6a881a3641b4260605450772c54b \
     --hash=sha256:91ef2131a9bd6be8f76f1f08eac5c5317221d6ad1e143ae03894b862e8976890 \
     # via pytest-vcr
 pywin32-ctypes==0.2.0 \
     --hash=sha256:24ffc3b341d457d48e8922352130cf2644024a4ff09762a2261fd34c36ee5942 \
     --hash=sha256:9dc2d991b3479cc2df15930958b674a48a227d5361d413827a4cfd0b5876fc98 \
     # via -r contrib/packaging/requirements-windows.txt.in, keyring
 pyyaml==5.4.1 \
     --hash=sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf \
     --hash=sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696 \
     --hash=sha256:129def1b7c1bf22faffd67b8f3724645203b79d8f4cc81f674654d9902cb4393 \
     --hash=sha256:294db365efa064d00b8d1ef65d8ea2c3426ac366c0c4368d930bf1c5fb497f77 \
     --hash=sha256:3b2b1824fe7112845700f815ff6a489360226a5609b96ec2190a45e62a9fc922 \
     --hash=sha256:3bd0e463264cf257d1ffd2e40223b197271046d09dadf73a0fe82b9c1fc385a5 \
     --hash=sha256:4465124ef1b18d9ace298060f4eccc64b0850899ac4ac53294547536533800c8 \
     --hash=sha256:49d4cdd9065b9b6e206d0595fee27a96b5dd22618e7520c33204a4a3239d5b10 \
     --hash=sha256:4e0583d24c881e14342eaf4ec5fbc97f934b999a6828693a99157fde912540cc \
     --hash=sha256:5accb17103e43963b80e6f837831f38d314a0495500067cb25afab2e8d7a4018 \
     --hash=sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e \
     --hash=sha256:6c78645d400265a062508ae399b60b8c167bf003db364ecb26dcab2bda048253 \
     --hash=sha256:72a01f726a9c7851ca9bfad6fd09ca4e090a023c00945ea05ba1638c09dc3347 \
     --hash=sha256:74c1485f7707cf707a7aef42ef6322b8f97921bd89be2ab6317fd782c2d53183 \
     --hash=sha256:895f61ef02e8fed38159bb70f7e100e00f471eae2bc838cd0f4ebb21e28f8541 \
     --hash=sha256:8c1be557ee92a20f184922c7b6424e8ab6691788e6d86137c5d93c1a6ec1b8fb \
     --hash=sha256:bb4191dfc9306777bc594117aee052446b3fa88737cd13b7188d0e7aa8162185 \
     --hash=sha256:bfb51918d4ff3d77c1c856a9699f8492c612cde32fd3bcd344af9be34999bfdc \
     --hash=sha256:c20cfa2d49991c8b4147af39859b167664f2ad4561704ee74c1de03318e898db \
     --hash=sha256:cb333c16912324fd5f769fff6bc5de372e9e7a202247b48870bc251ed40239aa \
     --hash=sha256:d2d9808ea7b4af864f35ea216be506ecec180628aced0704e34aca0b040ffe46 \
     --hash=sha256:d483ad4e639292c90170eb6f7783ad19490e7a8defb3e46f97dfe4bacae89122 \
     --hash=sha256:dd5de0646207f053eb0d6c74ae45ba98c3395a571a2891858e87df7c9b9bd51b \
     --hash=sha256:e1d4970ea66be07ae37a3c2e48b5ec63f7ba6804bdddfdbd3cfd954d25a82e63 \
     --hash=sha256:e4fac90784481d221a8e4b1162afa7c47ed953be40d31ab4629ae917510051df \
     --hash=sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc \
     --hash=sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247 \
     --hash=sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6 \
     --hash=sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0 \
     # via vcrpy
 six==1.16.0 \
     --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \
     --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 \
     # via vcrpy
 toml==0.10.2 \
     --hash=sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b \
     --hash=sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f \
     # via pytest
 typing-extensions==3.10.0.0 \
     --hash=sha256:0ac0f89795dd19de6b97debb0c6af1c70987fd80a2d62d1958f7e56fcc31b497 \
     --hash=sha256:50b6f157849174217d0656f99dc82fe932884fb250826c18350e159ec6cdf342 \
     --hash=sha256:779383f6086d90c99ae41cf0ff39aac8a7937a9283ce0a414e5dd782f4c94a84 \
     # via yarl
 urllib3==1.25.11 \
     --hash=sha256:8d7eaa5a82a1cac232164990f04874c594c9453ec55eef02eab885aa02fc17a2 \
     --hash=sha256:f5321fbe4bf3fefa0efd0bfe7fb14e90909eb62a48ccda331726b4319897dd5e \
     # via dulwich
 vcrpy==4.1.1 \
     --hash=sha256:12c3fcdae7b88ecf11fc0d3e6d77586549d4575a2ceee18e82eee75c1f626162 \
     --hash=sha256:57095bf22fc0a2d99ee9674cdafebed0f3ba763018582450706f7d3a74fff599 \
     # via pytest-vcr
 windows-curses==2.2.0 \
     --hash=sha256:1452d771ec6f9b3fef037da2b169196a9a12be4e86a6c27dd579adac70c42028 \
     --hash=sha256:267544e4f60c09af6505e50a69d7f01d7f8a281cf4bd4fc7efc3b32b9a4ef64e \
     --hash=sha256:389228a3df556102e72450f599283094168aa82eee189f501ad9f131a0fc92e1 \
     --hash=sha256:84336fe470fa07288daec5c684dec74c0766fec6b3511ccedb4c494804acfbb7 \
     --hash=sha256:9aa6ff60be76f5de696dc6dbf7897e3b1e6abcf4c0f741e9a0ee22cd6ef382f8 \
     --hash=sha256:c4a8ce00e82635f06648cc40d99f470be4e3ffeb84f9f7ae9d6a4f68ec6361e7 \
     --hash=sha256:c5cd032bc7d0f03224ab55c925059d98e81795098d59bbd10f7d05c7ea9677ce \
     --hash=sha256:fc0be372fe6da3c39d7093154ce029115a927bf287f34b4c615e2b3f8c23dfaa \
     # via -r contrib/packaging/requirements-windows.txt.in
 wrapt==1.12.1 \
     --hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7 \
     # via vcrpy
 yarl==1.6.3 \
     --hash=sha256:00d7ad91b6583602eb9c1d085a2cf281ada267e9a197e8b7cae487dadbfa293e \
     --hash=sha256:0355a701b3998dcd832d0dc47cc5dedf3874f966ac7f870e0f3a6788d802d434 \
     --hash=sha256:15263c3b0b47968c1d90daa89f21fcc889bb4b1aac5555580d74565de6836366 \
     --hash=sha256:2ce4c621d21326a4a5500c25031e102af589edb50c09b321049e388b3934eec3 \
     --hash=sha256:31ede6e8c4329fb81c86706ba8f6bf661a924b53ba191b27aa5fcee5714d18ec \
     --hash=sha256:324ba3d3c6fee56e2e0b0d09bf5c73824b9f08234339d2b788af65e60040c959 \
     --hash=sha256:329412812ecfc94a57cd37c9d547579510a9e83c516bc069470db5f75684629e \
     --hash=sha256:4736eaee5626db8d9cda9eb5282028cc834e2aeb194e0d8b50217d707e98bb5c \
     --hash=sha256:4953fb0b4fdb7e08b2f3b3be80a00d28c5c8a2056bb066169de00e6501b986b6 \
     --hash=sha256:4c5bcfc3ed226bf6419f7a33982fb4b8ec2e45785a0561eb99274ebbf09fdd6a \
     --hash=sha256:547f7665ad50fa8563150ed079f8e805e63dd85def6674c97efd78eed6c224a6 \
     --hash=sha256:5b883e458058f8d6099e4420f0cc2567989032b5f34b271c0827de9f1079a424 \
     --hash=sha256:63f90b20ca654b3ecc7a8d62c03ffa46999595f0167d6450fa8383bab252987e \
     --hash=sha256:68dc568889b1c13f1e4745c96b931cc94fdd0defe92a72c2b8ce01091b22e35f \
     --hash=sha256:69ee97c71fee1f63d04c945f56d5d726483c4762845400a6795a3b75d56b6c50 \
     --hash=sha256:6d6283d8e0631b617edf0fd726353cb76630b83a089a40933043894e7f6721e2 \
     --hash=sha256:72a660bdd24497e3e84f5519e57a9ee9220b6f3ac4d45056961bf22838ce20cc \
     --hash=sha256:73494d5b71099ae8cb8754f1df131c11d433b387efab7b51849e7e1e851f07a4 \
     --hash=sha256:7356644cbed76119d0b6bd32ffba704d30d747e0c217109d7979a7bc36c4d970 \
     --hash=sha256:8a9066529240171b68893d60dca86a763eae2139dd42f42106b03cf4b426bf10 \
     --hash=sha256:8aa3decd5e0e852dc68335abf5478a518b41bf2ab2f330fe44916399efedfae0 \
     --hash=sha256:97b5bdc450d63c3ba30a127d018b866ea94e65655efaf889ebeabc20f7d12406 \
     --hash=sha256:9ede61b0854e267fd565e7527e2f2eb3ef8858b301319be0604177690e1a3896 \
     --hash=sha256:b2e9a456c121e26d13c29251f8267541bd75e6a1ccf9e859179701c36a078643 \
     --hash=sha256:b5dfc9a40c198334f4f3f55880ecf910adebdcb2a0b9a9c23c9345faa9185721 \
     --hash=sha256:bafb450deef6861815ed579c7a6113a879a6ef58aed4c3a4be54400ae8871478 \
     --hash=sha256:c49ff66d479d38ab863c50f7bb27dee97c6627c5fe60697de15529da9c3de724 \
     --hash=sha256:ce3beb46a72d9f2190f9e1027886bfc513702d748047b548b05dab7dfb584d2e \
     --hash=sha256:d26608cf178efb8faa5ff0f2d2e77c208f471c5a3709e577a7b3fd0445703ac8 \
     --hash=sha256:d597767fcd2c3dc49d6eea360c458b65643d1e4dbed91361cf5e36e53c1f8c96 \
     --hash=sha256:d5c32c82990e4ac4d8150fd7652b972216b204de4e83a122546dce571c1bdf25 \
     --hash=sha256:d8d07d102f17b68966e2de0e07bfd6e139c7c02ef06d3a0f8d2f0f055e13bb76 \
     --hash=sha256:e46fba844f4895b36f4c398c5af062a9808d1f26b2999c58909517384d5deda2 \
     --hash=sha256:e6b5460dc5ad42ad2b36cca524491dfcaffbfd9c8df50508bddc354e787b8dc2 \
     --hash=sha256:f040bcc6725c821a4c0665f3aa96a4d0805a7aaf2caf266d256b8ed71b9f041c \
     --hash=sha256:f0b059678fd549c66b89bed03efcabb009075bd131c248ecdf087bdb6faba24a \
     --hash=sha256:fcbb48a93e8699eae920f8d92f7160c03567b421bc17362a9ffbbd706a816f71 \
     # via vcrpy
 zipp==3.4.0 \
     --hash=sha256:102c24ef8f171fd729d46599845e95c7ab894a4cf45f5de11a44cc7444fb1108 \
     --hash=sha256:ed5eee1974372595f9e416cc7bbeeb12335201d8081ca8a0743c954d4446e5cb \
     # via importlib-metadata
@@ -1,565 +1,565 b'' | |||
|
1 | 1 | # synthrepo.py - repo synthesis |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2012 Facebook |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | '''synthesize structurally interesting change history |
|
9 | 9 | |
|
10 | 10 | This extension is useful for creating a repository with properties |
|
11 | 11 | that are statistically similar to an existing repository. During |
|
12 | 12 | analysis, a simple probability table is constructed from the history |
|
13 | 13 | of an existing repository. During synthesis, these properties are |
|
14 | 14 | reconstructed. |
|
15 | 15 | |
|
16 | 16 | Properties that are analyzed and synthesized include the following: |
|
17 | 17 | |
|
18 | 18 | - Lines added or removed when an existing file is modified |
|
19 | 19 | - Number and sizes of files added |
|
20 | 20 | - Number of files removed |
|
21 | 21 | - Line lengths |
|
22 | 22 | - Topological distance to parent changeset(s) |
|
23 | 23 | - Probability of a commit being a merge |
|
24 | 24 | - Probability of a newly added file being added to a new directory |
|
25 | 25 | - Interarrival time, and time zone, of commits |
|
26 | 26 | - Number of files in each directory |
|
27 | 27 | |
|
28 | 28 | A few obvious properties that are not currently handled realistically: |
|
29 | 29 | |
|
30 | 30 | - Merges are treated as regular commits with two parents, which is not |
|
31 | 31 | realistic |
|
32 | 32 | - Modifications are not treated as operations on hunks of lines, but |
|
33 | 33 | as insertions and deletions of randomly chosen single lines |
|
34 | 34 | - Committer ID (always random) |
|
35 | 35 | - Executability of files |
|
36 | 36 | - Symlinks and binary files are ignored |
|
37 | 37 | ''' |
|
38 | 38 | |
|
39 | 39 | from __future__ import absolute_import |
|
40 | 40 | import bisect |
|
41 | 41 | import collections |
|
42 | 42 | import itertools |
|
43 | 43 | import json |
|
44 | 44 | import os |
|
45 | 45 | import random |
|
46 | 46 | import sys |
|
47 | 47 | import time |
|
48 | 48 | |
|
49 | 49 | from mercurial.i18n import _ |
|
50 | 50 | from mercurial.node import ( |
|
51 | 51 | nullid, |
|
52 | 52 | nullrev, |
|
53 | 53 | short, |
|
54 | 54 | ) |
|
55 | 55 | from mercurial import ( |
|
56 | 56 | context, |
|
57 | 57 | diffutil, |
|
58 | 58 | error, |
|
59 | 59 | hg, |
|
60 | logcmdutil, | |
|
60 | 61 | patch, |
|
61 | 62 | pycompat, |
|
62 | 63 | registrar, |
|
63 | scmutil, | |
|
64 | 64 | ) |
|
65 | 65 | from mercurial.utils import dateutil |
|
66 | 66 | |
|
67 | 67 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
68 | 68 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
69 | 69 | # be specifying the version(s) of Mercurial they are tested with, or |
|
70 | 70 | # leave the attribute unspecified. |
|
71 | 71 | testedwith = 'ships-with-hg-core' |
|
72 | 72 | |
|
73 | 73 | cmdtable = {} |
|
74 | 74 | command = registrar.command(cmdtable) |
|
75 | 75 | |
|
76 | 76 | newfile = {'new fi', 'rename', 'copy f', 'copy t'} |
|
77 | 77 | |
|
78 | 78 | |
|
79 | 79 | def zerodict(): |
|
80 | 80 | return collections.defaultdict(lambda: 0) |
|
81 | 81 | |
|
82 | 82 | |
|
83 | 83 | def roundto(x, k): |
|
84 | 84 | if x > k * 2: |
|
85 | 85 | return int(round(x / float(k)) * k) |
|
86 | 86 | return int(round(x)) |
|
87 | 87 | |
|
88 | 88 | |
|
89 | 89 | def parsegitdiff(lines): |
|
90 | 90 | filename, mar, lineadd, lineremove = None, None, zerodict(), 0 |
|
91 | 91 | binary = False |
|
92 | 92 | for line in lines: |
|
93 | 93 | start = line[:6] |
|
94 | 94 | if start == 'diff -': |
|
95 | 95 | if filename: |
|
96 | 96 | yield filename, mar, lineadd, lineremove, binary |
|
97 | 97 | mar, lineadd, lineremove, binary = 'm', zerodict(), 0, False |
|
98 | 98 | filename = patch.gitre.match(line).group(1) |
|
99 | 99 | elif start in newfile: |
|
100 | 100 | mar = 'a' |
|
101 | 101 | elif start == 'GIT bi': |
|
102 | 102 | binary = True |
|
103 | 103 | elif start == 'delete': |
|
104 | 104 | mar = 'r' |
|
105 | 105 | elif start: |
|
106 | 106 | s = start[0] |
|
107 | 107 | if s == '-' and not line.startswith('--- '): |
|
108 | 108 | lineremove += 1 |
|
109 | 109 | elif s == '+' and not line.startswith('+++ '): |
|
110 | 110 | lineadd[roundto(len(line) - 1, 5)] += 1 |
|
111 | 111 | if filename: |
|
112 | 112 | yield filename, mar, lineadd, lineremove, binary |
|
113 | 113 | |
|
114 | 114 | |
|
115 | 115 | @command( |
|
116 | 116 | 'analyze', |
|
117 | 117 | [ |
|
118 | 118 | ('o', 'output', '', _('write output to given file'), _('FILE')), |
|
119 | 119 | ('r', 'rev', [], _('analyze specified revisions'), _('REV')), |
|
120 | 120 | ], |
|
121 | 121 | _('hg analyze'), |
|
122 | 122 | optionalrepo=True, |
|
123 | 123 | ) |
|
124 | 124 | def analyze(ui, repo, *revs, **opts): |
|
125 | 125 | """create a simple model of a repository to use for later synthesis |
|
126 | 126 | |
|
127 | 127 | This command examines every changeset in the given range (or all |
|
128 | 128 | of history if none are specified) and creates a simple statistical |
|
129 | 129 | model of the history of the repository. It also measures the directory |
|
130 | 130 | structure of the repository as checked out. |
|
131 | 131 | |
|
132 | 132 | The model is written out to a JSON file, and can be used by |
|
133 | 133 | :hg:`synthesize` to create or augment a repository with synthetic |
|
134 | 134 | commits that have a structure that is statistically similar to the |
|
135 | 135 | analyzed repository. |
|
136 | 136 | """ |
|
137 | 137 | root = repo.root |
|
138 | 138 | if not root.endswith(os.path.sep): |
|
139 | 139 | root += os.path.sep |
|
140 | 140 | |
|
141 | 141 | revs = list(revs) |
|
142 | 142 | revs.extend(opts['rev']) |
|
143 | 143 | if not revs: |
|
144 | 144 | revs = [':'] |
|
145 | 145 | |
|
146 | 146 | output = opts['output'] |
|
147 | 147 | if not output: |
|
148 | 148 | output = os.path.basename(root) + '.json' |
|
149 | 149 | |
|
150 | 150 | if output == '-': |
|
151 | 151 | fp = sys.stdout |
|
152 | 152 | else: |
|
153 | 153 | fp = open(output, 'w') |
|
154 | 154 | |
|
155 | 155 | # Always obtain file counts of each directory in the given root directory. |
|
156 | 156 | def onerror(e): |
|
157 | 157 | ui.warn(_('error walking directory structure: %s\n') % e) |
|
158 | 158 | |
|
159 | 159 | dirs = {} |
|
160 | 160 | rootprefixlen = len(root) |
|
161 | 161 | for dirpath, dirnames, filenames in os.walk(root, onerror=onerror): |
|
162 | 162 | dirpathfromroot = dirpath[rootprefixlen:] |
|
163 | 163 | dirs[dirpathfromroot] = len(filenames) |
|
164 | 164 | if '.hg' in dirnames: |
|
165 | 165 | dirnames.remove('.hg') |
|
166 | 166 | |
|
167 | 167 | lineschanged = zerodict() |
|
168 | 168 | children = zerodict() |
|
169 | 169 | p1distance = zerodict() |
|
170 | 170 | p2distance = zerodict() |
|
171 | 171 | linesinfilesadded = zerodict() |
|
172 | 172 | fileschanged = zerodict() |
|
173 | 173 | filesadded = zerodict() |
|
174 | 174 | filesremoved = zerodict() |
|
175 | 175 | linelengths = zerodict() |
|
176 | 176 | interarrival = zerodict() |
|
177 | 177 | parents = zerodict() |
|
178 | 178 | dirsadded = zerodict() |
|
179 | 179 | tzoffset = zerodict() |
|
180 | 180 | |
|
181 | 181 | # If a mercurial repo is available, also model the commit history. |
|
182 | 182 | if repo: |
|
183 |
revs = |
|
|
183 | revs = logcmdutil.revrange(repo, revs) | |
|
184 | 184 | revs.sort() |
|
185 | 185 | |
|
186 | 186 | progress = ui.makeprogress( |
|
187 | 187 | _('analyzing'), unit=_('changesets'), total=len(revs) |
|
188 | 188 | ) |
|
189 | 189 | for i, rev in enumerate(revs): |
|
190 | 190 | progress.update(i) |
|
191 | 191 | ctx = repo[rev] |
|
192 | 192 | pl = ctx.parents() |
|
193 | 193 | pctx = pl[0] |
|
194 | 194 | prev = pctx.rev() |
|
195 | 195 | children[prev] += 1 |
|
196 | 196 | p1distance[rev - prev] += 1 |
|
197 | 197 | parents[len(pl)] += 1 |
|
198 | 198 | tzoffset[ctx.date()[1]] += 1 |
|
199 | 199 | if len(pl) > 1: |
|
200 | 200 | p2distance[rev - pl[1].rev()] += 1 |
|
201 | 201 | if prev == rev - 1: |
|
202 | 202 | lastctx = pctx |
|
203 | 203 | else: |
|
204 | 204 | lastctx = repo[rev - 1] |
|
205 | 205 | if lastctx.rev() != nullrev: |
|
206 | 206 | timedelta = ctx.date()[0] - lastctx.date()[0] |
|
207 | 207 | interarrival[roundto(timedelta, 300)] += 1 |
|
208 | 208 | diffopts = diffutil.diffallopts(ui, {'git': True}) |
|
209 | 209 | diff = sum( |
|
210 | 210 | (d.splitlines() for d in ctx.diff(pctx, opts=diffopts)), [] |
|
211 | 211 | ) |
|
212 | 212 | fileadds, diradds, fileremoves, filechanges = 0, 0, 0, 0 |
|
213 | 213 | for filename, mar, lineadd, lineremove, isbin in parsegitdiff(diff): |
|
214 | 214 | if isbin: |
|
215 | 215 | continue |
|
216 | 216 | added = sum(pycompat.itervalues(lineadd), 0) |
|
217 | 217 | if mar == 'm': |
|
218 | 218 | if added and lineremove: |
|
219 | 219 | lineschanged[ |
|
220 | 220 | roundto(added, 5), roundto(lineremove, 5) |
|
221 | 221 | ] += 1 |
|
222 | 222 | filechanges += 1 |
|
223 | 223 | elif mar == 'a': |
|
224 | 224 | fileadds += 1 |
|
225 | 225 | if '/' in filename: |
|
226 | 226 | filedir = filename.rsplit('/', 1)[0] |
|
227 | 227 | if filedir not in pctx.dirs(): |
|
228 | 228 | diradds += 1 |
|
229 | 229 | linesinfilesadded[roundto(added, 5)] += 1 |
|
230 | 230 | elif mar == 'r': |
|
231 | 231 | fileremoves += 1 |
|
232 | 232 | for length, count in lineadd.iteritems(): |
|
233 | 233 | linelengths[length] += count |
|
234 | 234 | fileschanged[filechanges] += 1 |
|
235 | 235 | filesadded[fileadds] += 1 |
|
236 | 236 | dirsadded[diradds] += 1 |
|
237 | 237 | filesremoved[fileremoves] += 1 |
|
238 | 238 | progress.complete() |
|
239 | 239 | |
|
240 | 240 | invchildren = zerodict() |
|
241 | 241 | |
|
242 | 242 | for rev, count in children.iteritems(): |
|
243 | 243 | invchildren[count] += 1 |
|
244 | 244 | |
|
245 | 245 | if output != '-': |
|
246 | 246 | ui.status(_('writing output to %s\n') % output) |
|
247 | 247 | |
|
248 | 248 | def pronk(d): |
|
249 | 249 | return sorted(d.iteritems(), key=lambda x: x[1], reverse=True) |
|
250 | 250 | |
|
251 | 251 | json.dump( |
|
252 | 252 | { |
|
253 | 253 | 'revs': len(revs), |
|
254 | 254 | 'initdirs': pronk(dirs), |
|
255 | 255 | 'lineschanged': pronk(lineschanged), |
|
256 | 256 | 'children': pronk(invchildren), |
|
257 | 257 | 'fileschanged': pronk(fileschanged), |
|
258 | 258 | 'filesadded': pronk(filesadded), |
|
259 | 259 | 'linesinfilesadded': pronk(linesinfilesadded), |
|
260 | 260 | 'dirsadded': pronk(dirsadded), |
|
261 | 261 | 'filesremoved': pronk(filesremoved), |
|
262 | 262 | 'linelengths': pronk(linelengths), |
|
263 | 263 | 'parents': pronk(parents), |
|
264 | 264 | 'p1distance': pronk(p1distance), |
|
265 | 265 | 'p2distance': pronk(p2distance), |
|
266 | 266 | 'interarrival': pronk(interarrival), |
|
267 | 267 | 'tzoffset': pronk(tzoffset), |
|
268 | 268 | }, |
|
269 | 269 | fp, |
|
270 | 270 | ) |
|
271 | 271 | fp.close() |
|
272 | 272 | |
|
273 | 273 | |
|
274 | 274 | @command( |
|
275 | 275 | 'synthesize', |
|
276 | 276 | [ |
|
277 | 277 | ('c', 'count', 0, _('create given number of commits'), _('COUNT')), |
|
278 | 278 | ('', 'dict', '', _('path to a dictionary of words'), _('FILE')), |
|
279 | 279 | ('', 'initfiles', 0, _('initial file count to create'), _('COUNT')), |
|
280 | 280 | ], |
|
281 | 281 | _('hg synthesize [OPTION].. DESCFILE'), |
|
282 | 282 | ) |
|
283 | 283 | def synthesize(ui, repo, descpath, **opts): |
|
284 | 284 | """synthesize commits based on a model of an existing repository |
|
285 | 285 | |
|
286 | 286 | The model must have been generated by :hg:`analyze`. Commits will |
|
287 | 287 | be generated randomly according to the probabilities described in |
|
288 | 288 | the model. If --initfiles is set, the repository will be seeded with |
|
289 | 289 | the given number of files following the modeled repository's directory |
|
290 | 290 | structure. |
|
291 | 291 | |
|
292 | 292 | When synthesizing new content, commit descriptions, and user |
|
293 | 293 | names, words will be chosen randomly from a dictionary that is |
|
294 | 294 | presumed to contain one word per line. Use --dict to specify the |
|
295 | 295 | path to an alternate dictionary to use. |
|
296 | 296 | """ |
|
297 | 297 | try: |
|
298 | 298 | fp = hg.openpath(ui, descpath) |
|
299 | 299 | except Exception as err: |
|
300 | 300 | raise error.Abort('%s: %s' % (descpath, err[0].strerror)) |
|
301 | 301 | desc = json.load(fp) |
|
302 | 302 | fp.close() |
|
303 | 303 | |
|
304 | 304 | def cdf(l): |
|
305 | 305 | if not l: |
|
306 | 306 | return [], [] |
|
307 | 307 | vals, probs = zip(*sorted(l, key=lambda x: x[1], reverse=True)) |
|
308 | 308 | t = float(sum(probs, 0)) |
|
309 | 309 | s, cdfs = 0, [] |
|
310 | 310 | for v in probs: |
|
311 | 311 | s += v |
|
312 | 312 | cdfs.append(s / t) |
|
313 | 313 | return vals, cdfs |
|
314 | 314 | |
|
315 | 315 | lineschanged = cdf(desc['lineschanged']) |
|
316 | 316 | fileschanged = cdf(desc['fileschanged']) |
|
317 | 317 | filesadded = cdf(desc['filesadded']) |
|
318 | 318 | dirsadded = cdf(desc['dirsadded']) |
|
319 | 319 | filesremoved = cdf(desc['filesremoved']) |
|
320 | 320 | linelengths = cdf(desc['linelengths']) |
|
321 | 321 | parents = cdf(desc['parents']) |
|
322 | 322 | p1distance = cdf(desc['p1distance']) |
|
323 | 323 | p2distance = cdf(desc['p2distance']) |
|
324 | 324 | interarrival = cdf(desc['interarrival']) |
|
325 | 325 | linesinfilesadded = cdf(desc['linesinfilesadded']) |
|
326 | 326 | tzoffset = cdf(desc['tzoffset']) |
|
327 | 327 | |
|
328 | 328 | dictfile = opts.get('dict') or '/usr/share/dict/words' |
|
329 | 329 | try: |
|
330 | 330 | fp = open(dictfile, 'rU') |
|
331 | 331 | except IOError as err: |
|
332 | 332 | raise error.Abort('%s: %s' % (dictfile, err.strerror)) |
|
333 | 333 | words = fp.read().splitlines() |
|
334 | 334 | fp.close() |
|
335 | 335 | |
|
336 | 336 | initdirs = {} |
|
337 | 337 | if desc['initdirs']: |
|
338 | 338 | for k, v in desc['initdirs']: |
|
339 | 339 | initdirs[k.encode('utf-8').replace('.hg', '_hg')] = v |
|
340 | 340 | initdirs = renamedirs(initdirs, words) |
|
341 | 341 | initdirscdf = cdf(initdirs) |
|
342 | 342 | |
|
343 | 343 | def pick(cdf): |
|
344 | 344 | return cdf[0][bisect.bisect_left(cdf[1], random.random())] |
|
345 | 345 | |
|
346 | 346 | def pickpath(): |
|
347 | 347 | return os.path.join(pick(initdirscdf), random.choice(words)) |
|
348 | 348 | |
|
349 | 349 | def makeline(minimum=0): |
|
350 | 350 | total = max(minimum, pick(linelengths)) |
|
351 | 351 | c, l = 0, [] |
|
352 | 352 | while c < total: |
|
353 | 353 | w = random.choice(words) |
|
354 | 354 | c += len(w) + 1 |
|
355 | 355 | l.append(w) |
|
356 | 356 | return ' '.join(l) |
|
357 | 357 | |
|
358 | 358 | wlock = repo.wlock() |
|
359 | 359 | lock = repo.lock() |
|
360 | 360 | |
|
361 | 361 | nevertouch = {'.hgsub', '.hgignore', '.hgtags'} |
|
362 | 362 | |
|
363 | 363 | _synthesizing = _('synthesizing') |
|
364 | 364 | _files = _('initial files') |
|
365 | 365 | _changesets = _('changesets') |
|
366 | 366 | |
|
367 | 367 | # Synthesize a single initial revision adding files to the repo according |
|
368 | 368 | # to the modeled directory structure. |
|
369 | 369 | initcount = int(opts['initfiles']) |
|
370 | 370 | if initcount and initdirs: |
|
371 | 371 | pctx = repo['.'] |
|
372 | 372 | dirs = set(pctx.dirs()) |
|
373 | 373 | files = {} |
|
374 | 374 | |
|
375 | 375 | def validpath(path): |
|
376 | 376 | # Don't pick filenames which are already directory names. |
|
377 | 377 | if path in dirs: |
|
378 | 378 | return False |
|
379 | 379 | # Don't pick directories which were used as file names. |
|
380 | 380 | while path: |
|
381 | 381 | if path in files: |
|
382 | 382 | return False |
|
383 | 383 | path = os.path.dirname(path) |
|
384 | 384 | return True |
|
385 | 385 | |
|
386 | 386 | progress = ui.makeprogress(_synthesizing, unit=_files, total=initcount) |
|
387 | 387 | for i in pycompat.xrange(0, initcount): |
|
388 | 388 | progress.update(i) |
|
389 | 389 | |
|
390 | 390 | path = pickpath() |
|
391 | 391 | while not validpath(path): |
|
392 | 392 | path = pickpath() |
|
393 | 393 | data = '%s contents\n' % path |
|
394 | 394 | files[path] = data |
|
395 | 395 | dir = os.path.dirname(path) |
|
396 | 396 | while dir and dir not in dirs: |
|
397 | 397 | dirs.add(dir) |
|
398 | 398 | dir = os.path.dirname(dir) |
|
399 | 399 | |
|
400 | 400 | def filectxfn(repo, memctx, path): |
|
401 | 401 | return context.memfilectx(repo, memctx, path, files[path]) |
|
402 | 402 | |
|
403 | 403 | progress.complete() |
|
404 | 404 | message = 'synthesized wide repo with %d files' % (len(files),) |
|
405 | 405 | mc = context.memctx( |
|
406 | 406 | repo, |
|
407 | 407 | [pctx.node(), nullid], |
|
408 | 408 | message, |
|
409 | 409 | files, |
|
410 | 410 | filectxfn, |
|
411 | 411 | ui.username(), |
|
412 | 412 | '%d %d' % dateutil.makedate(), |
|
413 | 413 | ) |
|
414 | 414 | initnode = mc.commit() |
|
415 | 415 | if ui.debugflag: |
|
416 | 416 | hexfn = hex |
|
417 | 417 | else: |
|
418 | 418 | hexfn = short |
|
419 | 419 | ui.status( |
|
420 | 420 | _('added commit %s with %d files\n') % (hexfn(initnode), len(files)) |
|
421 | 421 | ) |
|
422 | 422 | |
|
423 | 423 | # Synthesize incremental revisions to the repository, adding repo depth. |
|
424 | 424 | count = int(opts['count']) |
|
425 | 425 | heads = set(map(repo.changelog.rev, repo.heads())) |
|
426 | 426 | progress = ui.makeprogress(_synthesizing, unit=_changesets, total=count) |
|
427 | 427 | for i in pycompat.xrange(count): |
|
428 | 428 | progress.update(i) |
|
429 | 429 | |
|
430 | 430 | node = repo.changelog.node |
|
431 | 431 | revs = len(repo) |
|
432 | 432 | |
|
433 | 433 | def pickhead(heads, distance): |
|
434 | 434 | if heads: |
|
435 | 435 | lheads = sorted(heads) |
|
436 | 436 | rev = revs - min(pick(distance), revs) |
|
437 | 437 | if rev < lheads[-1]: |
|
438 | 438 | rev = lheads[bisect.bisect_left(lheads, rev)] |
|
439 | 439 | else: |
|
440 | 440 | rev = lheads[-1] |
|
441 | 441 | return rev, node(rev) |
|
442 | 442 | return nullrev, nullid |
|
443 | 443 | |
|
444 | 444 | r1 = revs - min(pick(p1distance), revs) |
|
445 | 445 | p1 = node(r1) |
|
446 | 446 | |
|
447 | 447 | # the number of heads will grow without bound if we use a pure |
|
448 | 448 | # model, so artificially constrain their proliferation |
|
449 | 449 | toomanyheads = len(heads) > random.randint(1, 20) |
|
450 | 450 | if p2distance[0] and (pick(parents) == 2 or toomanyheads): |
|
451 | 451 | r2, p2 = pickhead(heads.difference([r1]), p2distance) |
|
452 | 452 | else: |
|
453 | 453 | r2, p2 = nullrev, nullid |
|
454 | 454 | |
|
455 | 455 | pl = [p1, p2] |
|
456 | 456 | pctx = repo[r1] |
|
457 | 457 | mf = pctx.manifest() |
|
458 | 458 | mfk = mf.keys() |
|
459 | 459 | changes = {} |
|
460 | 460 | if mfk: |
|
461 | 461 | for __ in pycompat.xrange(pick(fileschanged)): |
|
462 | 462 | for __ in pycompat.xrange(10): |
|
463 | 463 | fctx = pctx.filectx(random.choice(mfk)) |
|
464 | 464 | path = fctx.path() |
|
465 | 465 | if not ( |
|
466 | 466 | path in nevertouch |
|
467 | 467 | or fctx.isbinary() |
|
468 | 468 | or 'l' in fctx.flags() |
|
469 | 469 | ): |
|
470 | 470 | break |
|
471 | 471 | lines = fctx.data().splitlines() |
|
472 | 472 | add, remove = pick(lineschanged) |
|
473 | 473 | for __ in pycompat.xrange(remove): |
|
474 | 474 | if not lines: |
|
475 | 475 | break |
|
476 | 476 | del lines[random.randrange(0, len(lines))] |
|
477 | 477 | for __ in pycompat.xrange(add): |
|
478 | 478 | lines.insert(random.randint(0, len(lines)), makeline()) |
|
479 | 479 | path = fctx.path() |
|
480 | 480 | changes[path] = '\n'.join(lines) + '\n' |
|
481 | 481 | for __ in pycompat.xrange(pick(filesremoved)): |
|
482 | 482 | for __ in pycompat.xrange(10): |
|
483 | 483 | path = random.choice(mfk) |
|
484 | 484 | if path not in changes: |
|
485 | 485 | break |
|
486 | 486 | if filesadded: |
|
487 | 487 | dirs = list(pctx.dirs()) |
|
488 | 488 | dirs.insert(0, '') |
|
489 | 489 | for __ in pycompat.xrange(pick(filesadded)): |
|
490 | 490 | pathstr = '' |
|
491 | 491 | while pathstr in dirs: |
|
492 | 492 | path = [random.choice(dirs)] |
|
493 | 493 | if pick(dirsadded): |
|
494 | 494 | path.append(random.choice(words)) |
|
495 | 495 | path.append(random.choice(words)) |
|
496 | 496 | pathstr = '/'.join(filter(None, path)) |
|
497 | 497 | data = ( |
|
498 | 498 | '\n'.join( |
|
499 | 499 | makeline() |
|
500 | 500 | for __ in pycompat.xrange(pick(linesinfilesadded)) |
|
501 | 501 | ) |
|
502 | 502 | + '\n' |
|
503 | 503 | ) |
|
504 | 504 | changes[pathstr] = data |
|
505 | 505 | |
|
506 | 506 | def filectxfn(repo, memctx, path): |
|
507 | 507 | if path not in changes: |
|
508 | 508 | return None |
|
509 | 509 | return context.memfilectx(repo, memctx, path, changes[path]) |
|
510 | 510 | |
|
511 | 511 | if not changes: |
|
512 | 512 | continue |
|
513 | 513 | if revs: |
|
514 | 514 | date = repo['tip'].date()[0] + pick(interarrival) |
|
515 | 515 | else: |
|
516 | 516 | date = time.time() - (86400 * count) |
|
517 | 517 | # dates in mercurial must be positive and fit in 32-bit signed integers. |
|
518 | 518 | date = min(0x7FFFFFFF, max(0, date)) |
|
519 | 519 | user = random.choice(words) + '@' + random.choice(words) |
|
520 | 520 | mc = context.memctx( |
|
521 | 521 | repo, |
|
522 | 522 | pl, |
|
523 | 523 | makeline(minimum=2), |
|
524 | 524 | sorted(changes), |
|
525 | 525 | filectxfn, |
|
526 | 526 | user, |
|
527 | 527 | '%d %d' % (date, pick(tzoffset)), |
|
528 | 528 | ) |
|
529 | 529 | newnode = mc.commit() |
|
530 | 530 | heads.add(repo.changelog.rev(newnode)) |
|
531 | 531 | heads.discard(r1) |
|
532 | 532 | heads.discard(r2) |
|
533 | 533 | progress.complete() |
|
534 | 534 | |
|
535 | 535 | lock.release() |
|
536 | 536 | wlock.release() |
|
537 | 537 | |
|
538 | 538 | |
|
539 | 539 | def renamedirs(dirs, words): |
|
540 | 540 | '''Randomly rename the directory names in the per-dir file count dict.''' |
|
541 | 541 | wordgen = itertools.cycle(words) |
|
542 | 542 | replacements = {'': ''} |
|
543 | 543 | |
|
544 | 544 | def rename(dirpath): |
|
545 | 545 | """Recursively rename the directory and all path prefixes. |
|
546 | 546 | |
|
547 | 547 | The mapping from path to renamed path is stored for all path prefixes |
|
548 | 548 | as in dynamic programming, ensuring linear runtime and consistent |
|
549 | 549 | renaming regardless of iteration order through the model. |
|
550 | 550 | """ |
|
551 | 551 | if dirpath in replacements: |
|
552 | 552 | return replacements[dirpath] |
|
553 | 553 | head, _ = os.path.split(dirpath) |
|
554 | 554 | if head: |
|
555 | 555 | head = rename(head) |
|
556 | 556 | else: |
|
557 | 557 | head = '' |
|
558 | 558 | renamed = os.path.join(head, next(wordgen)) |
|
559 | 559 | replacements[dirpath] = renamed |
|
560 | 560 | return renamed |
|
561 | 561 | |
|
562 | 562 | result = [] |
|
563 | 563 | for dirpath, count in dirs.iteritems(): |
|
564 | 564 | result.append([rename(dirpath.lstrip(os.sep)), count]) |
|
565 | 565 | return result |
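
Aside on the sampling machinery above: the cdf()/pick() pair implements inverse transform sampling over the empirical distributions that analyze collects, and synthesize draws every commit property from it. A minimal self-contained sketch of the same technique, using illustrative names that are not part of the extension:

    import bisect
    import random

    def cdf(counts):
        # counts: (value, frequency) pairs, e.g. [(1, 95), (2, 5)]
        if not counts:
            return [], []
        vals, freqs = zip(*sorted(counts, key=lambda x: x[1], reverse=True))
        total = float(sum(freqs))
        running, cdfs = 0, []
        for f in freqs:
            running += f
            cdfs.append(running / total)
        return vals, cdfs

    def pick(dist):
        # Draw a value with probability proportional to its frequency.
        vals, cdfs = dist
        return vals[bisect.bisect_left(cdfs, random.random())]

    # ~95% of draws yield 1 (single-parent commit), ~5% yield 2 (merge).
    parents = cdf([(1, 95), (2, 5)])
    print(pick(parents))
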
@@ -1,119 +1,120 b'' | |||
|
1 | 1 | # Copyright (C) 2015 - Mike Edgar <adgar@google.com> |
|
2 | 2 | # |
|
3 | 3 | # This extension enables removal of file content at a given revision, |
|
4 | 4 | # rewriting the data/metadata of successive revisions to preserve revision log |
|
5 | 5 | # integrity. |
|
6 | 6 | |
|
7 | 7 | """erase file content at a given revision |
|
8 | 8 | |
|
9 | 9 | The censor command instructs Mercurial to erase all content of a file at a given |
|
10 | 10 | revision *without updating the changeset hash.* This allows existing history to |
|
11 | 11 | remain valid while preventing future clones/pulls from receiving the erased |
|
12 | 12 | data. |
|
13 | 13 | |
|
14 | 14 | Typical uses for censor are due to security or legal requirements, including:: |
|
15 | 15 | |
|
16 | 16 | * Passwords, private keys, cryptographic material |
|
17 | 17 | * Licensed data/code/libraries for which the license has expired |
|
18 | 18 | * Personally Identifiable Information or other private data |
|
19 | 19 | |
|
20 | 20 | Censored nodes can interrupt mercurial's typical operation whenever the excised |
|
21 | 21 | data needs to be materialized. Some commands, like ``hg cat``/``hg revert``, |
|
22 | 22 | simply fail when asked to produce censored data. Others, like ``hg verify`` and |
|
23 | 23 | ``hg update``, must be capable of tolerating censored data to continue to |
|
24 | 24 | function in a meaningful way. Such commands only tolerate censored file |
|
25 | 25 | revisions if they are allowed by the "censor.policy=ignore" config option. |
|
26 | 26 | |
|
27 | 27 | A few informative commands such as ``hg grep`` will unconditionally |
|
28 | 28 | ignore censored data and merely report that it was encountered. |
|
29 | 29 | """ |
|
30 | 30 | |
|
31 | 31 | from __future__ import absolute_import |
|
32 | 32 | |
|
33 | 33 | from mercurial.i18n import _ |
|
34 | 34 | from mercurial.node import short |
|
35 | 35 | |
|
36 | 36 | from mercurial import ( |
|
37 | 37 | error, |
|
38 | logcmdutil, | |
|
38 | 39 | registrar, |
|
39 | 40 | scmutil, |
|
40 | 41 | ) |
|
41 | 42 | |
|
42 | 43 | cmdtable = {} |
|
43 | 44 | command = registrar.command(cmdtable) |
|
44 | 45 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
45 | 46 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
46 | 47 | # be specifying the version(s) of Mercurial they are tested with, or |
|
47 | 48 | # leave the attribute unspecified. |
|
48 | 49 | testedwith = b'ships-with-hg-core' |
|
49 | 50 | |
|
50 | 51 | |
|
51 | 52 | @command( |
|
52 | 53 | b'censor', |
|
53 | 54 | [ |
|
54 | 55 | ( |
|
55 | 56 | b'r', |
|
56 | 57 | b'rev', |
|
57 | 58 | b'', |
|
58 | 59 | _(b'censor file from specified revision'), |
|
59 | 60 | _(b'REV'), |
|
60 | 61 | ), |
|
61 | 62 | (b't', b'tombstone', b'', _(b'replacement tombstone data'), _(b'TEXT')), |
|
62 | 63 | ], |
|
63 | 64 | _(b'-r REV [-t TEXT] [FILE]'), |
|
64 | 65 | helpcategory=command.CATEGORY_MAINTENANCE, |
|
65 | 66 | ) |
|
66 | 67 | def censor(ui, repo, path, rev=b'', tombstone=b'', **opts): |
|
67 | 68 | with repo.wlock(), repo.lock(): |
|
68 | 69 | return _docensor(ui, repo, path, rev, tombstone, **opts) |
|
69 | 70 | |
|
70 | 71 | |
|
71 | 72 | def _docensor(ui, repo, path, rev=b'', tombstone=b'', **opts): |
|
72 | 73 | if not path: |
|
73 | 74 | raise error.Abort(_(b'must specify file path to censor')) |
|
74 | 75 | if not rev: |
|
75 | 76 | raise error.Abort(_(b'must specify revision to censor')) |
|
76 | 77 | |
|
77 | 78 | wctx = repo[None] |
|
78 | 79 | |
|
79 | 80 | m = scmutil.match(wctx, (path,)) |
|
80 | 81 | if m.anypats() or len(m.files()) != 1: |
|
81 | 82 | raise error.Abort(_(b'can only specify an explicit filename')) |
|
82 | 83 | path = m.files()[0] |
|
83 | 84 | flog = repo.file(path) |
|
84 | 85 | if not len(flog): |
|
85 | 86 | raise error.Abort(_(b'cannot censor file with no history')) |
|
86 | 87 | |
|
87 | rev = scmutil.revsingle(repo, rev, rev).rev() |

88 | rev = logcmdutil.revsingle(repo, rev, rev).rev() | |
|
88 | 89 | try: |
|
89 | 90 | ctx = repo[rev] |
|
90 | 91 | except KeyError: |
|
91 | 92 | raise error.Abort(_(b'invalid revision identifier %s') % rev) |
|
92 | 93 | |
|
93 | 94 | try: |
|
94 | 95 | fctx = ctx.filectx(path) |
|
95 | 96 | except error.LookupError: |
|
96 | 97 | raise error.Abort(_(b'file does not exist at revision %s') % rev) |
|
97 | 98 | |
|
98 | 99 | fnode = fctx.filenode() |
|
99 | 100 | heads = [] |
|
100 | 101 | for headnode in repo.heads(): |
|
101 | 102 | hc = repo[headnode] |
|
102 | 103 | if path in hc and hc.filenode(path) == fnode: |
|
103 | 104 | heads.append(hc) |
|
104 | 105 | if heads: |
|
105 | 106 | headlist = b', '.join([short(c.node()) for c in heads]) |
|
106 | 107 | raise error.Abort( |
|
107 | 108 | _(b'cannot censor file in heads (%s)') % headlist, |
|
108 | 109 | hint=_(b'clean/delete and commit first'), |
|
109 | 110 | ) |
|
110 | 111 | |
|
111 | 112 | wp = wctx.parents() |
|
112 | 113 | if ctx.node() in [p.node() for p in wp]: |
|
113 | 114 | raise error.Abort( |
|
114 | 115 | _(b'cannot censor working directory'), |
|
115 | 116 | hint=_(b'clean/delete/update first'), |
|
116 | 117 | ) |
|
117 | 118 | |
|
118 | 119 | with repo.transaction(b'censor') as tr: |
|
119 | 120 | flog.censorrevision(tr, fnode, tombstone=tombstone) |
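
Note the shape of the hunk above; it recurs throughout this changeset: the revision-resolution helpers revsingle() and revrange() are now reached through logcmdutil instead of scmutil, with unchanged call signatures. A hedged sketch of caller code after the migration (resolve() and its arguments are illustrative placeholders, not part of the change):

    from mercurial import logcmdutil

    def resolve(repo, specs):
        # revrange() turns a list of revset strings into a revision set;
        # revsingle() turns one spec into a single changectx.
        revs = logcmdutil.revrange(repo, specs)
        ctx = logcmdutil.revsingle(repo, specs[0])
        return revs, ctx
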
@@ -1,84 +1,83 b'' | |||
|
1 | 1 | # Mercurial extension to provide the 'hg children' command |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2007 by Intevation GmbH <intevation@intevation.de> |
|
4 | 4 | # |
|
5 | 5 | # Author(s): |
|
6 | 6 | # Thomas Arendsen Hein <thomas@intevation.de> |
|
7 | 7 | # |
|
8 | 8 | # This software may be used and distributed according to the terms of the |
|
9 | 9 | # GNU General Public License version 2 or any later version. |
|
10 | 10 | |
|
11 | 11 | '''command to display child changesets (DEPRECATED) |
|
12 | 12 | |
|
13 | 13 | This extension is deprecated. You should use :hg:`log -r |
|
14 | 14 | "children(REV)"` instead. |
|
15 | 15 | ''' |
|
16 | 16 | |
|
17 | 17 | from __future__ import absolute_import |
|
18 | 18 | |
|
19 | 19 | from mercurial.i18n import _ |
|
20 | 20 | from mercurial import ( |
|
21 | 21 | cmdutil, |
|
22 | 22 | logcmdutil, |
|
23 | 23 | pycompat, |
|
24 | 24 | registrar, |
|
25 | scmutil, | |
|
26 | 25 | ) |
|
27 | 26 | |
|
28 | 27 | templateopts = cmdutil.templateopts |
|
29 | 28 | |
|
30 | 29 | cmdtable = {} |
|
31 | 30 | command = registrar.command(cmdtable) |
|
32 | 31 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
33 | 32 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
34 | 33 | # be specifying the version(s) of Mercurial they are tested with, or |
|
35 | 34 | # leave the attribute unspecified. |
|
36 | 35 | testedwith = b'ships-with-hg-core' |
|
37 | 36 | |
|
38 | 37 | |
|
39 | 38 | @command( |
|
40 | 39 | b'children', |
|
41 | 40 | [ |
|
42 | 41 | ( |
|
43 | 42 | b'r', |
|
44 | 43 | b'rev', |
|
45 | 44 | b'.', |
|
46 | 45 | _(b'show children of the specified revision'), |
|
47 | 46 | _(b'REV'), |
|
48 | 47 | ), |
|
49 | 48 | ] |
|
50 | 49 | + templateopts, |
|
51 | 50 | _(b'hg children [-r REV] [FILE]'), |
|
52 | 51 | helpcategory=command.CATEGORY_CHANGE_NAVIGATION, |
|
53 | 52 | inferrepo=True, |
|
54 | 53 | ) |
|
55 | 54 | def children(ui, repo, file_=None, **opts): |
|
56 | 55 | """show the children of the given or working directory revision |
|
57 | 56 | |
|
58 | 57 | Print the children of the working directory's revisions. If a |
|
59 | 58 | revision is given via -r/--rev, the children of that revision will |
|
60 | 59 | be printed. If a file argument is given, the revision in which the |
|
61 | 60 | file was last changed (after the working directory revision or the |
|
62 | 61 | argument to --rev if given) is printed. |
|
63 | 62 | |
|
64 | 63 | Please use :hg:`log` instead:: |
|
65 | 64 | |
|
66 | 65 | hg children => hg log -r "children(.)" |
|
67 | 66 | hg children -r REV => hg log -r "children(REV)" |
|
68 | 67 | |
|
69 | 68 | See :hg:`help log` and :hg:`help revsets.children`. |
|
70 | 69 | |
|
71 | 70 | """ |
|
72 | 71 | opts = pycompat.byteskwargs(opts) |
|
73 | 72 | rev = opts.get(b'rev') |
|
74 | ctx = scmutil.revsingle(repo, rev) |

73 | ctx = logcmdutil.revsingle(repo, rev) | |
|
75 | 74 | if file_: |
|
76 | 75 | fctx = repo.filectx(file_, changeid=ctx.rev()) |
|
77 | 76 | childctxs = [fcctx.changectx() for fcctx in fctx.children()] |
|
78 | 77 | else: |
|
79 | 78 | childctxs = ctx.children() |
|
80 | 79 | |
|
81 | 80 | displayer = logcmdutil.changesetdisplayer(ui, repo, opts) |
|
82 | 81 | for cctx in childctxs: |
|
83 | 82 | displayer.show(cctx) |
|
84 | 83 | displayer.close() |
@@ -1,95 +1,95 b'' | |||
|
1 | 1 | # closehead.py - Close arbitrary heads without checking them out first |
|
2 | 2 | # |
|
3 | 3 | # This software may be used and distributed according to the terms of the |
|
4 | 4 | # GNU General Public License version 2 or any later version. |
|
5 | 5 | |
|
6 | 6 | '''close arbitrary heads without checking them out first''' |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | from mercurial.i18n import _ |
|
11 | 11 | from mercurial import ( |
|
12 | 12 | bookmarks, |
|
13 | 13 | cmdutil, |
|
14 | 14 | context, |
|
15 | 15 | error, |
|
16 | logcmdutil, | |
|
16 | 17 | pycompat, |
|
17 | 18 | registrar, |
|
18 | scmutil, | |
|
19 | 19 | ) |
|
20 | 20 | |
|
21 | 21 | cmdtable = {} |
|
22 | 22 | command = registrar.command(cmdtable) |
|
23 | 23 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
24 | 24 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
25 | 25 | # be specifying the version(s) of Mercurial they are tested with, or |
|
26 | 26 | # leave the attribute unspecified. |
|
27 | 27 | testedwith = b'ships-with-hg-core' |
|
28 | 28 | |
|
29 | 29 | commitopts = cmdutil.commitopts |
|
30 | 30 | commitopts2 = cmdutil.commitopts2 |
|
31 | 31 | commitopts3 = [(b'r', b'rev', [], _(b'revision to check'), _(b'REV'))] |
|
32 | 32 | |
|
33 | 33 | |
|
34 | 34 | @command( |
|
35 | 35 | b'close-head|close-heads', |
|
36 | 36 | commitopts + commitopts2 + commitopts3, |
|
37 | 37 | _(b'[OPTION]... [REV]...'), |
|
38 | 38 | helpcategory=command.CATEGORY_CHANGE_MANAGEMENT, |
|
39 | 39 | inferrepo=True, |
|
40 | 40 | ) |
|
41 | 41 | def close_branch(ui, repo, *revs, **opts): |
|
42 | 42 | """close the given head revisions |
|
43 | 43 | |
|
44 | 44 | This is equivalent to checking out each revision in a clean tree and running |
|
45 | 45 | ``hg commit --close-branch``, except that it doesn't change the working |
|
46 | 46 | directory. |
|
47 | 47 | |
|
48 | 48 | The commit message must be specified with -l or -m. |
|
49 | 49 | """ |
|
50 | 50 | |
|
51 | 51 | def docommit(rev): |
|
52 | 52 | cctx = context.memctx( |
|
53 | 53 | repo, |
|
54 | 54 | parents=[rev, None], |
|
55 | 55 | text=message, |
|
56 | 56 | files=[], |
|
57 | 57 | filectxfn=None, |
|
58 | 58 | user=opts.get(b'user'), |
|
59 | 59 | date=opts.get(b'date'), |
|
60 | 60 | extra=extra, |
|
61 | 61 | ) |
|
62 | 62 | tr = repo.transaction(b'commit') |
|
63 | 63 | ret = repo.commitctx(cctx, True) |
|
64 | 64 | bookmarks.update(repo, [rev, None], ret) |
|
65 | 65 | cctx.markcommitted(ret) |
|
66 | 66 | tr.close() |
|
67 | 67 | |
|
68 | 68 | opts = pycompat.byteskwargs(opts) |
|
69 | 69 | |
|
70 | 70 | revs += tuple(opts.get(b'rev', [])) |
|
71 | revs = scmutil.revrange(repo, revs) |

71 | revs = logcmdutil.revrange(repo, revs) | |
|
72 | 72 | |
|
73 | 73 | if not revs: |
|
74 | 74 | raise error.Abort(_(b'no revisions specified')) |
|
75 | 75 | |
|
76 | 76 | heads = [] |
|
77 | 77 | for branch in repo.branchmap(): |
|
78 | 78 | heads.extend(repo.branchheads(branch)) |
|
79 | 79 | heads = {repo[h].rev() for h in heads} |
|
80 | 80 | for rev in revs: |
|
81 | 81 | if rev not in heads: |
|
82 | 82 | raise error.Abort(_(b'revision is not an open head: %d') % rev) |
|
83 | 83 | |
|
84 | 84 | message = cmdutil.logmessage(ui, opts) |
|
85 | 85 | if not message: |
|
86 | 86 | raise error.Abort(_(b"no commit message specified with -l or -m")) |
|
87 | 87 | extra = {b'close': b'1'} |
|
88 | 88 | |
|
89 | 89 | with repo.wlock(), repo.lock(): |
|
90 | 90 | for rev in revs: |
|
91 | 91 | r = repo[rev] |
|
92 | 92 | branch = r.branch() |
|
93 | 93 | extra[b'branch'] = branch |
|
94 | 94 | docommit(r) |
|
95 | 95 | return 0 |
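
For orientation, the command defined above is driven purely from the command line, e.g. ``hg close-head -m 'close stale feature heads' -r 42 -r 57`` (revision numbers illustrative). Each listed revision must be an open branch head, a commit message is mandatory (-m or -l), and the working directory is never touched.
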
@@ -1,732 +1,732 b'' | |||
|
1 | 1 | # hg.py - hg backend for convert extension |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | # Notes for hg->hg conversion: |
|
9 | 9 | # |
|
10 | 10 | # * Old versions of Mercurial didn't trim the whitespace from the ends |
|
11 | 11 | # of commit messages, but new versions do. Changesets created by |
|
12 | 12 | # those older versions, then converted, may thus have different |
|
13 | 13 | # hashes for changesets that are otherwise identical. |
|
14 | 14 | # |
|
15 | 15 | # * Using "--config convert.hg.saverev=true" will make the source |
|
16 | 16 | # identifier to be stored in the converted revision. This will cause |
|
17 | 17 | # the converted revision to have a different identity than the |
|
18 | 18 | # source. |
|
19 | 19 | from __future__ import absolute_import |
|
20 | 20 | |
|
21 | 21 | import os |
|
22 | 22 | import re |
|
23 | 23 | import time |
|
24 | 24 | |
|
25 | 25 | from mercurial.i18n import _ |
|
26 | 26 | from mercurial.pycompat import open |
|
27 | 27 | from mercurial.node import ( |
|
28 | 28 | bin, |
|
29 | 29 | hex, |
|
30 | 30 | sha1nodeconstants, |
|
31 | 31 | ) |
|
32 | 32 | from mercurial import ( |
|
33 | 33 | bookmarks, |
|
34 | 34 | context, |
|
35 | 35 | error, |
|
36 | 36 | exchange, |
|
37 | 37 | hg, |
|
38 | 38 | lock as lockmod, |
|
39 | logcmdutil, | |
|
39 | 40 | merge as mergemod, |
|
40 | 41 | phases, |
|
41 | 42 | pycompat, |
|
42 | scmutil, | |
|
43 | 43 | util, |
|
44 | 44 | ) |
|
45 | 45 | from mercurial.utils import dateutil |
|
46 | 46 | |
|
47 | 47 | stringio = util.stringio |
|
48 | 48 | |
|
49 | 49 | from . import common |
|
50 | 50 | |
|
51 | 51 | mapfile = common.mapfile |
|
52 | 52 | NoRepo = common.NoRepo |
|
53 | 53 | |
|
54 | 54 | sha1re = re.compile(br'\b[0-9a-f]{12,40}\b') |
|
55 | 55 | |
|
56 | 56 | |
|
57 | 57 | class mercurial_sink(common.converter_sink): |
|
58 | 58 | def __init__(self, ui, repotype, path): |
|
59 | 59 | common.converter_sink.__init__(self, ui, repotype, path) |
|
60 | 60 | self.branchnames = ui.configbool(b'convert', b'hg.usebranchnames') |
|
61 | 61 | self.clonebranches = ui.configbool(b'convert', b'hg.clonebranches') |
|
62 | 62 | self.tagsbranch = ui.config(b'convert', b'hg.tagsbranch') |
|
63 | 63 | self.lastbranch = None |
|
64 | 64 | if os.path.isdir(path) and len(os.listdir(path)) > 0: |
|
65 | 65 | try: |
|
66 | 66 | self.repo = hg.repository(self.ui, path) |
|
67 | 67 | if not self.repo.local(): |
|
68 | 68 | raise NoRepo( |
|
69 | 69 | _(b'%s is not a local Mercurial repository') % path |
|
70 | 70 | ) |
|
71 | 71 | except error.RepoError as err: |
|
72 | 72 | ui.traceback() |
|
73 | 73 | raise NoRepo(err.args[0]) |
|
74 | 74 | else: |
|
75 | 75 | try: |
|
76 | 76 | ui.status(_(b'initializing destination %s repository\n') % path) |
|
77 | 77 | self.repo = hg.repository(self.ui, path, create=True) |
|
78 | 78 | if not self.repo.local(): |
|
79 | 79 | raise NoRepo( |
|
80 | 80 | _(b'%s is not a local Mercurial repository') % path |
|
81 | 81 | ) |
|
82 | 82 | self.created.append(path) |
|
83 | 83 | except error.RepoError: |
|
84 | 84 | ui.traceback() |
|
85 | 85 | raise NoRepo( |
|
86 | 86 | _(b"could not create hg repository %s as sink") % path |
|
87 | 87 | ) |
|
88 | 88 | self.lock = None |
|
89 | 89 | self.wlock = None |
|
90 | 90 | self.filemapmode = False |
|
91 | 91 | self.subrevmaps = {} |
|
92 | 92 | |
|
93 | 93 | def before(self): |
|
94 | 94 | self.ui.debug(b'run hg sink pre-conversion action\n') |
|
95 | 95 | self.wlock = self.repo.wlock() |
|
96 | 96 | self.lock = self.repo.lock() |
|
97 | 97 | |
|
98 | 98 | def after(self): |
|
99 | 99 | self.ui.debug(b'run hg sink post-conversion action\n') |
|
100 | 100 | if self.lock: |
|
101 | 101 | self.lock.release() |
|
102 | 102 | if self.wlock: |
|
103 | 103 | self.wlock.release() |
|
104 | 104 | |
|
105 | 105 | def revmapfile(self): |
|
106 | 106 | return self.repo.vfs.join(b"shamap") |
|
107 | 107 | |
|
108 | 108 | def authorfile(self): |
|
109 | 109 | return self.repo.vfs.join(b"authormap") |
|
110 | 110 | |
|
111 | 111 | def setbranch(self, branch, pbranches): |
|
112 | 112 | if not self.clonebranches: |
|
113 | 113 | return |
|
114 | 114 | |
|
115 | 115 | setbranch = branch != self.lastbranch |
|
116 | 116 | self.lastbranch = branch |
|
117 | 117 | if not branch: |
|
118 | 118 | branch = b'default' |
|
119 | 119 | pbranches = [(b[0], b[1] and b[1] or b'default') for b in pbranches] |
|
120 | 120 | |
|
121 | 121 | branchpath = os.path.join(self.path, branch) |
|
122 | 122 | if setbranch: |
|
123 | 123 | self.after() |
|
124 | 124 | try: |
|
125 | 125 | self.repo = hg.repository(self.ui, branchpath) |
|
126 | 126 | except Exception: |
|
127 | 127 | self.repo = hg.repository(self.ui, branchpath, create=True) |
|
128 | 128 | self.before() |
|
129 | 129 | |
|
130 | 130 | # pbranches may bring revisions from other branches (merge parents) |
|
131 | 131 | # Make sure we have them, or pull them. |
|
132 | 132 | missings = {} |
|
133 | 133 | for b in pbranches: |
|
134 | 134 | try: |
|
135 | 135 | self.repo.lookup(b[0]) |
|
136 | 136 | except Exception: |
|
137 | 137 | missings.setdefault(b[1], []).append(b[0]) |
|
138 | 138 | |
|
139 | 139 | if missings: |
|
140 | 140 | self.after() |
|
141 | 141 | for pbranch, heads in sorted(pycompat.iteritems(missings)): |
|
142 | 142 | pbranchpath = os.path.join(self.path, pbranch) |
|
143 | 143 | prepo = hg.peer(self.ui, {}, pbranchpath) |
|
144 | 144 | self.ui.note( |
|
145 | 145 | _(b'pulling from %s into %s\n') % (pbranch, branch) |
|
146 | 146 | ) |
|
147 | 147 | exchange.pull( |
|
148 | self.repo, prepo, [prepo.lookup(h) for h in heads] | |
|
148 | self.repo, prepo, heads=[prepo.lookup(h) for h in heads] | |
|
149 | 149 | ) |
|
150 | 150 | self.before() |
|
151 | 151 | |
|
152 | 152 | def _rewritetags(self, source, revmap, data): |
|
153 | 153 | fp = stringio() |
|
154 | 154 | for line in data.splitlines(): |
|
155 | 155 | s = line.split(b' ', 1) |
|
156 | 156 | if len(s) != 2: |
|
157 | 157 | self.ui.warn(_(b'invalid tag entry: "%s"\n') % line) |
|
158 | 158 | fp.write(b'%s\n' % line) # Bogus, but keep for hash stability |
|
159 | 159 | continue |
|
160 | 160 | revid = revmap.get(source.lookuprev(s[0])) |
|
161 | 161 | if not revid: |
|
162 | 162 | if s[0] == sha1nodeconstants.nullhex: |
|
163 | 163 | revid = s[0] |
|
164 | 164 | else: |
|
165 | 165 | # missing, but keep for hash stability |
|
166 | 166 | self.ui.warn(_(b'missing tag entry: "%s"\n') % line) |
|
167 | 167 | fp.write(b'%s\n' % line) |
|
168 | 168 | continue |
|
169 | 169 | fp.write(b'%s %s\n' % (revid, s[1])) |
|
170 | 170 | return fp.getvalue() |
|
171 | 171 | |
|
172 | 172 | def _rewritesubstate(self, source, data): |
|
173 | 173 | fp = stringio() |
|
174 | 174 | for line in data.splitlines(): |
|
175 | 175 | s = line.split(b' ', 1) |
|
176 | 176 | if len(s) != 2: |
|
177 | 177 | continue |
|
178 | 178 | |
|
179 | 179 | revid = s[0] |
|
180 | 180 | subpath = s[1] |
|
181 | 181 | if revid != sha1nodeconstants.nullhex: |
|
182 | 182 | revmap = self.subrevmaps.get(subpath) |
|
183 | 183 | if revmap is None: |
|
184 | 184 | revmap = mapfile( |
|
185 | 185 | self.ui, self.repo.wjoin(subpath, b'.hg/shamap') |
|
186 | 186 | ) |
|
187 | 187 | self.subrevmaps[subpath] = revmap |
|
188 | 188 | |
|
189 | 189 | # It is reasonable that one or more of the subrepos don't |
|
190 | 190 | # need to be converted, in which case they can be cloned |
|
191 | 191 | # into place instead of converted. Therefore, only warn |
|
192 | 192 | # once. |
|
193 | 193 | msg = _(b'no ".hgsubstate" updates will be made for "%s"\n') |
|
194 | 194 | if len(revmap) == 0: |
|
195 | 195 | sub = self.repo.wvfs.reljoin(subpath, b'.hg') |
|
196 | 196 | |
|
197 | 197 | if self.repo.wvfs.exists(sub): |
|
198 | 198 | self.ui.warn(msg % subpath) |
|
199 | 199 | |
|
200 | 200 | newid = revmap.get(revid) |
|
201 | 201 | if not newid: |
|
202 | 202 | if len(revmap) > 0: |
|
203 | 203 | self.ui.warn( |
|
204 | 204 | _(b"%s is missing from %s/.hg/shamap\n") |
|
205 | 205 | % (revid, subpath) |
|
206 | 206 | ) |
|
207 | 207 | else: |
|
208 | 208 | revid = newid |
|
209 | 209 | |
|
210 | 210 | fp.write(b'%s %s\n' % (revid, subpath)) |
|
211 | 211 | |
|
212 | 212 | return fp.getvalue() |
|
213 | 213 | |
|
214 | 214 | def _calculatemergedfiles(self, source, p1ctx, p2ctx): |
|
215 | 215 | """Calculates the files from p2 that we need to pull in when merging p1 |
|
216 | 216 | and p2, given that the merge is coming from the given source. |
|
217 | 217 | |
|
218 | 218 | This prevents us from losing files that only exist in the target p2 and |
|
219 | 219 | that don't come from the source repo (like if you're merging multiple |
|
220 | 220 | repositories together). |
|
221 | 221 | """ |
|
222 | 222 | anc = [p1ctx.ancestor(p2ctx)] |
|
223 | 223 | # Calculate what files are coming from p2 |
|
224 | 224 | # TODO: mresult.commitinfo might be able to get that info |
|
225 | 225 | mresult = mergemod.calculateupdates( |
|
226 | 226 | self.repo, |
|
227 | 227 | p1ctx, |
|
228 | 228 | p2ctx, |
|
229 | 229 | anc, |
|
230 | 230 | branchmerge=True, |
|
231 | 231 | force=True, |
|
232 | 232 | acceptremote=False, |
|
233 | 233 | followcopies=False, |
|
234 | 234 | ) |
|
235 | 235 | |
|
236 | 236 | for file, (action, info, msg) in mresult.filemap(): |
|
237 | 237 | if source.targetfilebelongstosource(file): |
|
238 | 238 | # If the file belongs to the source repo, ignore the p2 |
|
239 | 239 | # since it will be covered by the existing fileset. |
|
240 | 240 | continue |
|
241 | 241 | |
|
242 | 242 | # If the file requires actual merging, abort. We don't have enough |
|
243 | 243 | # context to resolve merges correctly. |
|
244 | 244 | if action in [b'm', b'dm', b'cd', b'dc']: |
|
245 | 245 | raise error.Abort( |
|
246 | 246 | _( |
|
247 | 247 | b"unable to convert merge commit " |
|
248 | 248 | b"since target parents do not merge cleanly (file " |
|
249 | 249 | b"%s, parents %s and %s)" |
|
250 | 250 | ) |
|
251 | 251 | % (file, p1ctx, p2ctx) |
|
252 | 252 | ) |
|
253 | 253 | elif action == b'k': |
|
254 | 254 | # 'keep' means nothing changed from p1 |
|
255 | 255 | continue |
|
256 | 256 | else: |
|
257 | 257 | # Any other change means we want to take the p2 version |
|
258 | 258 | yield file |
|
259 | 259 | |
|
260 | 260 | def putcommit( |
|
261 | 261 | self, files, copies, parents, commit, source, revmap, full, cleanp2 |
|
262 | 262 | ): |
|
263 | 263 | files = dict(files) |
|
264 | 264 | |
|
265 | 265 | def getfilectx(repo, memctx, f): |
|
266 | 266 | if p2ctx and f in p2files and f not in copies: |
|
267 | 267 | self.ui.debug(b'reusing %s from p2\n' % f) |
|
268 | 268 | try: |
|
269 | 269 | return p2ctx[f] |
|
270 | 270 | except error.ManifestLookupError: |
|
271 | 271 | # If the file doesn't exist in p2, then we're syncing a |
|
272 | 272 | # delete, so just return None. |
|
273 | 273 | return None |
|
274 | 274 | try: |
|
275 | 275 | v = files[f] |
|
276 | 276 | except KeyError: |
|
277 | 277 | return None |
|
278 | 278 | data, mode = source.getfile(f, v) |
|
279 | 279 | if data is None: |
|
280 | 280 | return None |
|
281 | 281 | if f == b'.hgtags': |
|
282 | 282 | data = self._rewritetags(source, revmap, data) |
|
283 | 283 | if f == b'.hgsubstate': |
|
284 | 284 | data = self._rewritesubstate(source, data) |
|
285 | 285 | return context.memfilectx( |
|
286 | 286 | self.repo, |
|
287 | 287 | memctx, |
|
288 | 288 | f, |
|
289 | 289 | data, |
|
290 | 290 | b'l' in mode, |
|
291 | 291 | b'x' in mode, |
|
292 | 292 | copies.get(f), |
|
293 | 293 | ) |
|
294 | 294 | |
|
295 | 295 | pl = [] |
|
296 | 296 | for p in parents: |
|
297 | 297 | if p not in pl: |
|
298 | 298 | pl.append(p) |
|
299 | 299 | parents = pl |
|
300 | 300 | nparents = len(parents) |
|
301 | 301 | if self.filemapmode and nparents == 1: |
|
302 | 302 | m1node = self.repo.changelog.read(bin(parents[0]))[0] |
|
303 | 303 | parent = parents[0] |
|
304 | 304 | |
|
305 | 305 | if len(parents) < 2: |
|
306 | 306 | parents.append(self.repo.nullid) |
|
307 | 307 | if len(parents) < 2: |
|
308 | 308 | parents.append(self.repo.nullid) |
|
309 | 309 | p2 = parents.pop(0) |
|
310 | 310 | |
|
311 | 311 | text = commit.desc |
|
312 | 312 | |
|
313 | 313 | sha1s = re.findall(sha1re, text) |
|
314 | 314 | for sha1 in sha1s: |
|
315 | 315 | oldrev = source.lookuprev(sha1) |
|
316 | 316 | newrev = revmap.get(oldrev) |
|
317 | 317 | if newrev is not None: |
|
318 | 318 | text = text.replace(sha1, newrev[: len(sha1)]) |
|
319 | 319 | |
|
320 | 320 | extra = commit.extra.copy() |
|
321 | 321 | |
|
322 | 322 | sourcename = self.repo.ui.config(b'convert', b'hg.sourcename') |
|
323 | 323 | if sourcename: |
|
324 | 324 | extra[b'convert_source'] = sourcename |
|
325 | 325 | |
|
326 | 326 | for label in ( |
|
327 | 327 | b'source', |
|
328 | 328 | b'transplant_source', |
|
329 | 329 | b'rebase_source', |
|
330 | 330 | b'intermediate-source', |
|
331 | 331 | ): |
|
332 | 332 | node = extra.get(label) |
|
333 | 333 | |
|
334 | 334 | if node is None: |
|
335 | 335 | continue |
|
336 | 336 | |
|
337 | 337 | # Only transplant stores its reference in binary |
|
338 | 338 | if label == b'transplant_source': |
|
339 | 339 | node = hex(node) |
|
340 | 340 | |
|
341 | 341 | newrev = revmap.get(node) |
|
342 | 342 | if newrev is not None: |
|
343 | 343 | if label == b'transplant_source': |
|
344 | 344 | newrev = bin(newrev) |
|
345 | 345 | |
|
346 | 346 | extra[label] = newrev |
|
347 | 347 | |
|
348 | 348 | if self.branchnames and commit.branch: |
|
349 | 349 | extra[b'branch'] = commit.branch |
|
350 | 350 | if commit.rev and commit.saverev: |
|
351 | 351 | extra[b'convert_revision'] = commit.rev |
|
352 | 352 | |
|
353 | 353 | while parents: |
|
354 | 354 | p1 = p2 |
|
355 | 355 | p2 = parents.pop(0) |
|
356 | 356 | p1ctx = self.repo[p1] |
|
357 | 357 | p2ctx = None |
|
358 | 358 | if p2 != self.repo.nullid: |
|
359 | 359 | p2ctx = self.repo[p2] |
|
360 | 360 | fileset = set(files) |
|
361 | 361 | if full: |
|
362 | 362 | fileset.update(self.repo[p1]) |
|
363 | 363 | fileset.update(self.repo[p2]) |
|
364 | 364 | |
|
365 | 365 | if p2ctx: |
|
366 | 366 | p2files = set(cleanp2) |
|
367 | 367 | for file in self._calculatemergedfiles(source, p1ctx, p2ctx): |
|
368 | 368 | p2files.add(file) |
|
369 | 369 | fileset.add(file) |
|
370 | 370 | |
|
371 | 371 | ctx = context.memctx( |
|
372 | 372 | self.repo, |
|
373 | 373 | (p1, p2), |
|
374 | 374 | text, |
|
375 | 375 | fileset, |
|
376 | 376 | getfilectx, |
|
377 | 377 | commit.author, |
|
378 | 378 | commit.date, |
|
379 | 379 | extra, |
|
380 | 380 | ) |
|
381 | 381 | |
|
382 | 382 | # We won't know if the conversion changes the node until after the |
|
383 | 383 | # commit, so copy the source's phase for now. |
|
384 | 384 | self.repo.ui.setconfig( |
|
385 | 385 | b'phases', |
|
386 | 386 | b'new-commit', |
|
387 | 387 | phases.phasenames[commit.phase], |
|
388 | 388 | b'convert', |
|
389 | 389 | ) |
|
390 | 390 | |
|
391 | 391 | with self.repo.transaction(b"convert") as tr: |
|
392 | 392 | if self.repo.ui.config(b'convert', b'hg.preserve-hash'): |
|
393 | 393 | origctx = commit.ctx |
|
394 | 394 | else: |
|
395 | 395 | origctx = None |
|
396 | 396 | node = hex(self.repo.commitctx(ctx, origctx=origctx)) |
|
397 | 397 | |
|
398 | 398 | # If the node value has changed, but the phase is lower than |
|
399 | 399 | # draft, set it back to draft since it hasn't been exposed |
|
400 | 400 | # anywhere. |
|
401 | 401 | if commit.rev != node: |
|
402 | 402 | ctx = self.repo[node] |
|
403 | 403 | if ctx.phase() < phases.draft: |
|
404 | 404 | phases.registernew( |
|
405 | 405 | self.repo, tr, phases.draft, [ctx.rev()] |
|
406 | 406 | ) |
|
407 | 407 | |
|
408 | 408 | text = b"(octopus merge fixup)\n" |
|
409 | 409 | p2 = node |
|
410 | 410 | |
|
411 | 411 | if self.filemapmode and nparents == 1: |
|
412 | 412 | man = self.repo.manifestlog.getstorage(b'') |
|
413 | 413 | mnode = self.repo.changelog.read(bin(p2))[0] |
|
414 | 414 | closed = b'close' in commit.extra |
|
415 | 415 | if not closed and not man.cmp(m1node, man.revision(mnode)): |
|
416 | 416 | self.ui.status(_(b"filtering out empty revision\n")) |
|
417 | 417 | self.repo.rollback(force=True) |
|
418 | 418 | return parent |
|
419 | 419 | return p2 |
|
420 | 420 | |
|
421 | 421 | def puttags(self, tags): |
|
422 | 422 | tagparent = self.repo.branchtip(self.tagsbranch, ignoremissing=True) |
|
423 | 423 | tagparent = tagparent or self.repo.nullid |
|
424 | 424 | |
|
425 | 425 | oldlines = set() |
|
426 | 426 | for branch, heads in pycompat.iteritems(self.repo.branchmap()): |
|
427 | 427 | for h in heads: |
|
428 | 428 | if b'.hgtags' in self.repo[h]: |
|
429 | 429 | oldlines.update( |
|
430 | 430 | set(self.repo[h][b'.hgtags'].data().splitlines(True)) |
|
431 | 431 | ) |
|
432 | 432 | oldlines = sorted(list(oldlines)) |
|
433 | 433 | |
|
434 | 434 | newlines = sorted([(b"%s %s\n" % (tags[tag], tag)) for tag in tags]) |
|
435 | 435 | if newlines == oldlines: |
|
436 | 436 | return None, None |
|
437 | 437 | |
|
438 | 438 | # if the old and new tags match, then there is nothing to update |
|
439 | 439 | oldtags = set() |
|
440 | 440 | newtags = set() |
|
441 | 441 | for line in oldlines: |
|
442 | 442 | s = line.strip().split(b' ', 1) |
|
443 | 443 | if len(s) != 2: |
|
444 | 444 | continue |
|
445 | 445 | oldtags.add(s[1]) |
|
446 | 446 | for line in newlines: |
|
447 | 447 | s = line.strip().split(b' ', 1) |
|
448 | 448 | if len(s) != 2: |
|
449 | 449 | continue |
|
450 | 450 | if s[1] not in oldtags: |
|
451 | 451 | newtags.add(s[1].strip()) |
|
452 | 452 | |
|
453 | 453 | if not newtags: |
|
454 | 454 | return None, None |
|
455 | 455 | |
|
456 | 456 | data = b"".join(newlines) |
|
457 | 457 | |
|
458 | 458 | def getfilectx(repo, memctx, f): |
|
459 | 459 | return context.memfilectx(repo, memctx, f, data, False, False, None) |
|
460 | 460 | |
|
461 | 461 | self.ui.status(_(b"updating tags\n")) |
|
462 | 462 | date = b"%d 0" % int(time.mktime(time.gmtime())) |
|
463 | 463 | extra = {b'branch': self.tagsbranch} |
|
464 | 464 | ctx = context.memctx( |
|
465 | 465 | self.repo, |
|
466 | 466 | (tagparent, None), |
|
467 | 467 | b"update tags", |
|
468 | 468 | [b".hgtags"], |
|
469 | 469 | getfilectx, |
|
470 | 470 | b"convert-repo", |
|
471 | 471 | date, |
|
472 | 472 | extra, |
|
473 | 473 | ) |
|
474 | 474 | node = self.repo.commitctx(ctx) |
|
475 | 475 | return hex(node), hex(tagparent) |
|
476 | 476 | |
|
477 | 477 | def setfilemapmode(self, active): |
|
478 | 478 | self.filemapmode = active |
|
479 | 479 | |
|
480 | 480 | def putbookmarks(self, updatedbookmark): |
|
481 | 481 | if not len(updatedbookmark): |
|
482 | 482 | return |
|
483 | 483 | wlock = lock = tr = None |
|
484 | 484 | try: |
|
485 | 485 | wlock = self.repo.wlock() |
|
486 | 486 | lock = self.repo.lock() |
|
487 | 487 | tr = self.repo.transaction(b'bookmark') |
|
488 | 488 | self.ui.status(_(b"updating bookmarks\n")) |
|
489 | 489 | destmarks = self.repo._bookmarks |
|
490 | 490 | changes = [ |
|
491 | 491 | (bookmark, bin(updatedbookmark[bookmark])) |
|
492 | 492 | for bookmark in updatedbookmark |
|
493 | 493 | ] |
|
494 | 494 | destmarks.applychanges(self.repo, tr, changes) |
|
495 | 495 | tr.close() |
|
496 | 496 | finally: |
|
497 | 497 | lockmod.release(lock, wlock, tr) |
|
498 | 498 | |
|
499 | 499 | def hascommitfrommap(self, rev): |
|
500 | 500 | # the exact semantics of clonebranches is unclear so we can't say no |
|
501 | 501 | return rev in self.repo or self.clonebranches |
|
502 | 502 | |
|
503 | 503 | def hascommitforsplicemap(self, rev): |
|
504 | 504 | if rev not in self.repo and self.clonebranches: |
|
505 | 505 | raise error.Abort( |
|
506 | 506 | _( |
|
507 | 507 | b'revision %s not found in destination ' |
|
508 | 508 | b'repository (lookups with clonebranches=true ' |
|
509 | 509 | b'are not implemented)' |
|
510 | 510 | ) |
|
511 | 511 | % rev |
|
512 | 512 | ) |
|
513 | 513 | return rev in self.repo |
|
514 | 514 | |
|
515 | 515 | |
|
516 | 516 | class mercurial_source(common.converter_source): |
|
517 | 517 | def __init__(self, ui, repotype, path, revs=None): |
|
518 | 518 | common.converter_source.__init__(self, ui, repotype, path, revs) |
|
519 | 519 | self.ignoreerrors = ui.configbool(b'convert', b'hg.ignoreerrors') |
|
520 | 520 | self.ignored = set() |
|
521 | 521 | self.saverev = ui.configbool(b'convert', b'hg.saverev') |
|
522 | 522 | try: |
|
523 | 523 | self.repo = hg.repository(self.ui, path) |
|
524 | 524 | # try to provoke an exception if this isn't really a hg |
|
525 | 525 | # repo, but some other bogus compatible-looking url |
|
526 | 526 | if not self.repo.local(): |
|
527 | 527 | raise error.RepoError |
|
528 | 528 | except error.RepoError: |
|
529 | 529 | ui.traceback() |
|
530 | 530 | raise NoRepo(_(b"%s is not a local Mercurial repository") % path) |
|
531 | 531 | self.lastrev = None |
|
532 | 532 | self.lastctx = None |
|
533 | 533 | self._changescache = None, None |
|
534 | 534 | self.convertfp = None |
|
535 | 535 | # Restrict converted revisions to startrev descendants |
|
536 | 536 | startnode = ui.config(b'convert', b'hg.startrev') |
|
537 | 537 | hgrevs = ui.config(b'convert', b'hg.revs') |
|
538 | 538 | if hgrevs is None: |
|
539 | 539 | if startnode is not None: |
|
540 | 540 | try: |
|
541 | 541 | startnode = self.repo.lookup(startnode) |
|
542 | 542 | except error.RepoError: |
|
543 | 543 | raise error.Abort( |
|
544 | 544 | _(b'%s is not a valid start revision') % startnode |
|
545 | 545 | ) |
|
546 | 546 | startrev = self.repo.changelog.rev(startnode) |
|
547 | 547 | children = {startnode: 1} |
|
548 | 548 | for r in self.repo.changelog.descendants([startrev]): |
|
549 | 549 | children[self.repo.changelog.node(r)] = 1 |
|
550 | 550 | self.keep = children.__contains__ |
|
551 | 551 | else: |
|
552 | 552 | self.keep = util.always |
|
553 | 553 | if revs: |
|
554 | 554 | self._heads = [self.repo.lookup(r) for r in revs] |
|
555 | 555 | else: |
|
556 | 556 | self._heads = self.repo.heads() |
|
557 | 557 | else: |
|
558 | 558 | if revs or startnode is not None: |
|
559 | 559 | raise error.Abort( |
|
560 | 560 | _( |
|
561 | 561 | b'hg.revs cannot be combined with ' |
|
562 | 562 | b'hg.startrev or --rev' |
|
563 | 563 | ) |
|
564 | 564 | ) |
|
565 | 565 | nodes = set() |
|
566 | 566 | parents = set() |
|
567 | for r in scmutil.revrange(self.repo, [hgrevs]): |

567 | for r in logcmdutil.revrange(self.repo, [hgrevs]): | |
|
568 | 568 | ctx = self.repo[r] |
|
569 | 569 | nodes.add(ctx.node()) |
|
570 | 570 | parents.update(p.node() for p in ctx.parents()) |
|
571 | 571 | self.keep = nodes.__contains__ |
|
572 | 572 | self._heads = nodes - parents |
|
573 | 573 | |
|
574 | 574 | def _changectx(self, rev): |
|
575 | 575 | if self.lastrev != rev: |
|
576 | 576 | self.lastctx = self.repo[rev] |
|
577 | 577 | self.lastrev = rev |
|
578 | 578 | return self.lastctx |
|
579 | 579 | |
|
580 | 580 | def _parents(self, ctx): |
|
581 | 581 | return [p for p in ctx.parents() if p and self.keep(p.node())] |
|
582 | 582 | |
|
583 | 583 | def getheads(self): |
|
584 | 584 | return [hex(h) for h in self._heads if self.keep(h)] |
|
585 | 585 | |
|
586 | 586 | def getfile(self, name, rev): |
|
587 | 587 | try: |
|
588 | 588 | fctx = self._changectx(rev)[name] |
|
589 | 589 | return fctx.data(), fctx.flags() |
|
590 | 590 | except error.LookupError: |
|
591 | 591 | return None, None |
|
592 | 592 | |
|
593 | 593 | def _changedfiles(self, ctx1, ctx2): |
|
594 | 594 | ma, r = [], [] |
|
595 | 595 | maappend = ma.append |
|
596 | 596 | rappend = r.append |
|
597 | 597 | d = ctx1.manifest().diff(ctx2.manifest()) |
|
598 | 598 | for f, ((node1, flag1), (node2, flag2)) in pycompat.iteritems(d): |
|
599 | 599 | if node2 is None: |
|
600 | 600 | rappend(f) |
|
601 | 601 | else: |
|
602 | 602 | maappend(f) |
|
603 | 603 | return ma, r |
|
604 | 604 | |
|
605 | 605 | def getchanges(self, rev, full): |
|
606 | 606 | ctx = self._changectx(rev) |
|
607 | 607 | parents = self._parents(ctx) |
|
608 | 608 | if full or not parents: |
|
609 | 609 | files = copyfiles = ctx.manifest() |
|
610 | 610 | if parents: |
|
611 | 611 | if self._changescache[0] == rev: |
|
612 | 612 | ma, r = self._changescache[1] |
|
613 | 613 | else: |
|
614 | 614 | ma, r = self._changedfiles(parents[0], ctx) |
|
615 | 615 | if not full: |
|
616 | 616 | files = ma + r |
|
617 | 617 | copyfiles = ma |
|
618 | 618 | # _getcopies() is also run for roots and before filtering so missing |
|
619 | 619 | # revlogs are detected early |
|
620 | 620 | copies = self._getcopies(ctx, parents, copyfiles) |
|
621 | 621 | cleanp2 = set() |
|
622 | 622 | if len(parents) == 2: |
|
623 | 623 | d = parents[1].manifest().diff(ctx.manifest(), clean=True) |
|
624 | 624 | for f, value in pycompat.iteritems(d): |
|
625 | 625 | if value is None: |
|
626 | 626 | cleanp2.add(f) |
|
627 | 627 | changes = [(f, rev) for f in files if f not in self.ignored] |
|
628 | 628 | changes.sort() |
|
629 | 629 | return changes, copies, cleanp2 |
|
630 | 630 | |
|
631 | 631 | def _getcopies(self, ctx, parents, files): |
|
632 | 632 | copies = {} |
|
633 | 633 | for name in files: |
|
634 | 634 | if name in self.ignored: |
|
635 | 635 | continue |
|
636 | 636 | try: |
|
637 | 637 | copysource = ctx.filectx(name).copysource() |
|
638 | 638 | if copysource in self.ignored: |
|
639 | 639 | continue |
|
640 | 640 | # Ignore copy sources not in parent revisions |
|
641 | 641 | if not any(copysource in p for p in parents): |
|
642 | 642 | continue |
|
643 | 643 | copies[name] = copysource |
|
644 | 644 | except TypeError: |
|
645 | 645 | pass |
|
646 | 646 | except error.LookupError as e: |
|
647 | 647 | if not self.ignoreerrors: |
|
648 | 648 | raise |
|
649 | 649 | self.ignored.add(name) |
|
650 | 650 | self.ui.warn(_(b'ignoring: %s\n') % e) |
|
651 | 651 | return copies |
|
652 | 652 | |
|
653 | 653 | def getcommit(self, rev): |
|
654 | 654 | ctx = self._changectx(rev) |
|
655 | 655 | _parents = self._parents(ctx) |
|
656 | 656 | parents = [p.hex() for p in _parents] |
|
657 | 657 | optparents = [p.hex() for p in ctx.parents() if p and p not in _parents] |
|
658 | 658 | crev = rev |
|
659 | 659 | |
|
660 | 660 | return common.commit( |
|
661 | 661 | author=ctx.user(), |
|
662 | 662 | date=dateutil.datestr(ctx.date(), b'%Y-%m-%d %H:%M:%S %1%2'), |
|
663 | 663 | desc=ctx.description(), |
|
664 | 664 | rev=crev, |
|
665 | 665 | parents=parents, |
|
666 | 666 | optparents=optparents, |
|
667 | 667 | branch=ctx.branch(), |
|
668 | 668 | extra=ctx.extra(), |
|
669 | 669 | sortkey=ctx.rev(), |
|
670 | 670 | saverev=self.saverev, |
|
671 | 671 | phase=ctx.phase(), |
|
672 | 672 | ctx=ctx, |
|
673 | 673 | ) |
|
674 | 674 | |
|
675 | 675 | def numcommits(self): |
|
676 | 676 | return len(self.repo) |
|
677 | 677 | |
|
678 | 678 | def gettags(self): |
|
679 | 679 | # This will get written to .hgtags, filter non global tags out. |
|
680 | 680 | tags = [ |
|
681 | 681 | t |
|
682 | 682 | for t in self.repo.tagslist() |
|
683 | 683 | if self.repo.tagtype(t[0]) == b'global' |
|
684 | 684 | ] |
|
685 | 685 | return {name: hex(node) for name, node in tags if self.keep(node)} |
|
686 | 686 | |
|
687 | 687 | def getchangedfiles(self, rev, i): |
|
688 | 688 | ctx = self._changectx(rev) |
|
689 | 689 | parents = self._parents(ctx) |
|
690 | 690 | if not parents and i is None: |
|
691 | 691 | i = 0 |
|
692 | 692 | ma, r = ctx.manifest().keys(), [] |
|
693 | 693 | else: |
|
694 | 694 | i = i or 0 |
|
695 | 695 | ma, r = self._changedfiles(parents[i], ctx) |
|
696 | 696 | ma, r = [[f for f in l if f not in self.ignored] for l in (ma, r)] |
|
697 | 697 | |
|
698 | 698 | if i == 0: |
|
699 | 699 | self._changescache = (rev, (ma, r)) |
|
700 | 700 | |
|
701 | 701 | return ma + r |
|
702 | 702 | |
|
703 | 703 | def converted(self, rev, destrev): |
|
704 | 704 | if self.convertfp is None: |
|
705 | 705 | self.convertfp = open(self.repo.vfs.join(b'shamap'), b'ab') |
|
706 | 706 | self.convertfp.write(util.tonativeeol(b'%s %s\n' % (destrev, rev))) |
|
707 | 707 | self.convertfp.flush() |
|
708 | 708 | |
|
709 | 709 | def before(self): |
|
710 | 710 | self.ui.debug(b'run hg source pre-conversion action\n') |
|
711 | 711 | |
|
712 | 712 | def after(self): |
|
713 | 713 | self.ui.debug(b'run hg source post-conversion action\n') |
|
714 | 714 | |
|
715 | 715 | def hasnativeorder(self): |
|
716 | 716 | return True |
|
717 | 717 | |
|
718 | 718 | def hasnativeclose(self): |
|
719 | 719 | return True |
|
720 | 720 | |
|
721 | 721 | def lookuprev(self, rev): |
|
722 | 722 | try: |
|
723 | 723 | return hex(self.repo.lookup(rev)) |
|
724 | 724 | except (error.RepoError, error.LookupError): |
|
725 | 725 | return None |
|
726 | 726 | |
|
727 | 727 | def getbookmarks(self): |
|
728 | 728 | return bookmarks.listbookmarks(self.repo) |
|
729 | 729 | |
|
730 | 730 | def checkrevformat(self, revstr, mapname=b'splicemap'): |
|
731 | 731 | """Mercurial, revision string is a 40 byte hex""" |
|
732 | 732 | self.checkhexformat(revstr, mapname) |
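
Background for the tag handling above: each .hgtags entry is a 40-hex changeset id followed by a tag name, and conversion must rewrite the id to the node's post-conversion identity while keeping unmappable lines verbatim for hash stability. A simplified illustration with a hypothetical revmap (the real _rewritetags() additionally resolves the id through source.lookuprev() first):

    def rewrite_tags_line(line, revmap):
        # line is b"<40-hex-node> <tagname>"; revmap maps old ids to new.
        parts = line.split(b' ', 1)
        if len(parts) != 2:
            return line  # bogus entry: keep verbatim, as the sink does
        newid = revmap.get(parts[0])
        if newid is None:
            return line  # no mapping known: keep verbatim
        return b'%s %s' % (newid, parts[1])

    revmap = {b'a' * 40: b'b' * 40}
    print(rewrite_tags_line(b'%s v1.0' % (b'a' * 40), revmap))
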
@@ -1,480 +1,480 b'' | |||
|
1 | 1 | """automatically manage newlines in repository files |
|
2 | 2 | |
|
3 | 3 | This extension allows you to manage the type of line endings (CRLF or |
|
4 | 4 | LF) that are used in the repository and in the local working |
|
5 | 5 | directory. That way you can get CRLF line endings on Windows and LF on |
|
6 | 6 | Unix/Mac, thereby letting everybody use their OS native line endings. |
|
7 | 7 | |
|
8 | 8 | The extension reads its configuration from a versioned ``.hgeol`` |
|
9 | 9 | configuration file found in the root of the working directory. The |
|
10 | 10 | ``.hgeol`` file uses the same syntax as all other Mercurial |
|
11 | 11 | configuration files. It uses two sections, ``[patterns]`` and |
|
12 | 12 | ``[repository]``. |
|
13 | 13 | |
|
14 | 14 | The ``[patterns]`` section specifies how line endings should be |
|
15 | 15 | converted between the working directory and the repository. The format is |
|
16 | 16 | specified by a file pattern. The first match is used, so put more |
|
17 | 17 | specific patterns first. The available line endings are ``LF``, |
|
18 | 18 | ``CRLF``, and ``BIN``. |
|
19 | 19 | |
|
20 | 20 | Files with the declared format of ``CRLF`` or ``LF`` are always |
|
21 | 21 | checked out and stored in the repository in that format and files |
|
22 | 22 | declared to be binary (``BIN``) are left unchanged. Additionally, |
|
23 | 23 | ``native`` is an alias for checking out in the platform's default line |
|
24 | 24 | ending: ``LF`` on Unix (including Mac OS X) and ``CRLF`` on |
|
25 | 25 | Windows. Note that ``BIN`` (do nothing to line endings) is Mercurial's |
|
26 | 26 | default behavior; it is only needed if you need to override a later, |
|
27 | 27 | more general pattern. |
|
28 | 28 | |
|
29 | 29 | The optional ``[repository]`` section specifies the line endings to |
|
30 | 30 | use for files stored in the repository. It has a single setting, |
|
31 | 31 | ``native``, which determines the storage line endings for files |
|
32 | 32 | declared as ``native`` in the ``[patterns]`` section. It can be set to |
|
33 | 33 | ``LF`` or ``CRLF``. The default is ``LF``. For example, this means |
|
34 | 34 | that on Windows, files configured as ``native`` (``CRLF`` by default) |
|
35 | 35 | will be converted to ``LF`` when stored in the repository. Files |
|
36 | 36 | declared as ``LF``, ``CRLF``, or ``BIN`` in the ``[patterns]`` section |
|
37 | 37 | are always stored as-is in the repository. |
|
38 | 38 | |
|
39 | 39 | Example versioned ``.hgeol`` file:: |
|
40 | 40 | |
|
41 | 41 | [patterns] |
|
42 | 42 | **.py = native |
|
43 | 43 | **.vcproj = CRLF |
|
44 | 44 | **.txt = native |
|
45 | 45 | Makefile = LF |
|
46 | 46 | **.jpg = BIN |
|
47 | 47 | |
|
48 | 48 | [repository] |
|
49 | 49 | native = LF |
|
50 | 50 | |
|
51 | 51 | .. note:: |
|
52 | 52 | |
|
53 | 53 | The rules will first apply when files are touched in the working |
|
54 | 54 | directory, e.g. by updating to null and back to tip to touch all files. |
|
55 | 55 | |
|
56 | 56 | The extension uses an optional ``[eol]`` section read from both the |
|
57 | 57 | normal Mercurial configuration files and the ``.hgeol`` file, with the |
|
58 | 58 | latter overriding the former. You can use that section to control the |
|
59 | 59 | overall behavior. There are three settings: |
|
60 | 60 | |
|
61 | 61 | - ``eol.native`` (default ``os.linesep``) can be set to ``LF`` or |
|
62 | 62 | ``CRLF`` to override the default interpretation of ``native`` for |
|
63 | 63 | checkout. This can be used with :hg:`archive` on Unix, say, to |
|
64 | 64 | generate an archive where files have line endings for Windows. |
|
65 | 65 | |
|
66 | 66 | - ``eol.only-consistent`` (default True) can be set to False to make |
|
67 | 67 | the extension convert files with inconsistent EOLs. Inconsistent |
|
68 | 68 | means that there is both ``CRLF`` and ``LF`` present in the file. |
|
69 | 69 | Such files are normally not touched under the assumption that they |
|
70 | 70 | have mixed EOLs on purpose. |
|
71 | 71 | |
|
72 | 72 | - ``eol.fix-trailing-newline`` (default False) can be set to True to |
|
73 | 73 | ensure that converted files end with an EOL character (either ``\\n``
|
74 | 74 | or ``\\r\\n`` as per the configured patterns). |
|
75 | 75 | |
|
76 | 76 | The extension provides ``cleverencode:`` and ``cleverdecode:`` filters |
|
77 | 77 | like the deprecated win32text extension does. This means that you can |
|
78 | 78 | disable win32text and enable eol and your filters will still work. You |
|
79 | 79 | only need these filters until you have prepared a ``.hgeol`` file.
|
80 | 80 | |
|
81 | 81 | The ``win32text.forbid*`` hooks provided by the win32text extension |
|
82 | 82 | have been unified into a single hook named ``eol.checkheadshook``. The |
|
83 | 83 | hook will look up the expected line endings from the ``.hgeol`` file,
|
84 | 84 | which means you must migrate to a ``.hgeol`` file first before using |
|
85 | 85 | the hook. ``eol.checkheadshook`` only checks heads; intermediate
|
86 | 86 | invalid revisions will be pushed. To forbid them completely, use the |
|
87 | 87 | ``eol.checkallhook`` hook. These hooks are best used as |
|
88 | 88 | ``pretxnchangegroup`` hooks. |
|
89 | 89 | |
|
90 | 90 | See :hg:`help patterns` for more information about the glob patterns |
|
91 | 91 | used. |
|
92 | 92 | """ |
|
93 | 93 | |
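
The hooks described in the docstring above are wired up in an hgrc. A plausible sketch, assuming the extension ships as ``hgext.eol`` (the hook names after the dot are arbitrary)::

    [extensions]
    eol =

    [hooks]
    # reject pushes whose new heads have wrong EOLs
    pretxnchangegroup.eol = python:hgext.eol.checkheadshook
    # stricter variant: reject any incoming revision with wrong EOLs
    #pretxnchangegroup.eol = python:hgext.eol.checkallhook
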
|
94 | 94 | from __future__ import absolute_import |
|
95 | 95 | |
|
96 | 96 | import os |
|
97 | 97 | import re |
|
98 | 98 | from mercurial.i18n import _ |
|
99 | 99 | from mercurial import ( |
|
100 | 100 | config, |
|
101 | 101 | error as errormod, |
|
102 | 102 | extensions, |
|
103 | 103 | match, |
|
104 | 104 | pycompat, |
|
105 | 105 | registrar, |
|
106 | 106 | scmutil, |
|
107 | 107 | util, |
|
108 | 108 | ) |
|
109 | 109 | from mercurial.utils import stringutil |
|
110 | 110 | |
|
111 | 111 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
112 | 112 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
113 | 113 | # be specifying the version(s) of Mercurial they are tested with, or |
|
114 | 114 | # leave the attribute unspecified. |
|
115 | 115 | testedwith = b'ships-with-hg-core' |
|
116 | 116 | |
|
117 | 117 | configtable = {} |
|
118 | 118 | configitem = registrar.configitem(configtable) |
|
119 | 119 | |
|
120 | 120 | configitem( |
|
121 | 121 | b'eol', |
|
122 | 122 | b'fix-trailing-newline', |
|
123 | 123 | default=False, |
|
124 | 124 | ) |
|
125 | 125 | configitem( |
|
126 | 126 | b'eol', |
|
127 | 127 | b'native', |
|
128 | 128 | default=pycompat.oslinesep, |
|
129 | 129 | ) |
|
130 | 130 | configitem( |
|
131 | 131 | b'eol', |
|
132 | 132 | b'only-consistent', |
|
133 | 133 | default=True, |
|
134 | 134 | ) |
|
135 | 135 | |
|
136 | 136 | # Matches a lone LF, i.e., one that is not part of CRLF. |
|
137 | 137 | singlelf = re.compile(b'(^|[^\r])\n') |
|
138 | 138 | |
|
139 | 139 | |
|
140 | 140 | def inconsistenteol(data): |
|
141 | 141 | return b'\r\n' in data and singlelf.search(data) |
|
142 | 142 | |
|
143 | 143 | |
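
``inconsistenteol`` above only reports files that mix both styles: there must be a ``\r\n`` somewhere and a ``\n`` that is not part of one. A minimal standalone sketch of that behavior (plain Python, no Mercurial imports)::

    import re

    # same pattern as above: an LF not preceded by CR
    singlelf = re.compile(b'(^|[^\r])\n')

    def inconsistenteol(data):
        # truthy only when both CRLF and a lone LF are present
        return b'\r\n' in data and bool(singlelf.search(data))

    assert not inconsistenteol(b'all lf\nhere\n')        # pure LF: consistent
    assert not inconsistenteol(b'all crlf\r\nhere\r\n')  # pure CRLF: consistent
    assert inconsistenteol(b'mixed\r\nendings\n')        # both styles: inconsistent
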
|
144 | 144 | def tolf(s, params, ui, **kwargs): |
|
145 | 145 | """Filter to convert to LF EOLs.""" |
|
146 | 146 | if stringutil.binary(s): |
|
147 | 147 | return s |
|
148 | 148 | if ui.configbool(b'eol', b'only-consistent') and inconsistenteol(s): |
|
149 | 149 | return s |
|
150 | 150 | if ( |
|
151 | 151 | ui.configbool(b'eol', b'fix-trailing-newline') |
|
152 | 152 | and s |
|
153 | 153 | and not s.endswith(b'\n') |
|
154 | 154 | ): |
|
155 | 155 | s = s + b'\n' |
|
156 | 156 | return util.tolf(s) |
|
157 | 157 | |
|
158 | 158 | |
|
159 | 159 | def tocrlf(s, params, ui, **kwargs): |
|
160 | 160 | """Filter to convert to CRLF EOLs.""" |
|
161 | 161 | if stringutil.binary(s): |
|
162 | 162 | return s |
|
163 | 163 | if ui.configbool(b'eol', b'only-consistent') and inconsistenteol(s): |
|
164 | 164 | return s |
|
165 | 165 | if ( |
|
166 | 166 | ui.configbool(b'eol', b'fix-trailing-newline') |
|
167 | 167 | and s |
|
168 | 168 | and not s.endswith(b'\n') |
|
169 | 169 | ): |
|
170 | 170 | s = s + b'\n' |
|
171 | 171 | return util.tocrlf(s) |
|
172 | 172 | |
|
173 | 173 | |
|
174 | 174 | def isbinary(s, params, ui, **kwargs): |
|
175 | 175 | """Filter to do nothing with the file.""" |
|
176 | 176 | return s |
|
177 | 177 | |
|
178 | 178 | |
|
179 | 179 | filters = { |
|
180 | 180 | b'to-lf': tolf, |
|
181 | 181 | b'to-crlf': tocrlf, |
|
182 | 182 | b'is-binary': isbinary, |
|
183 | 183 | # The following provide backwards compatibility with win32text |
|
184 | 184 | b'cleverencode:': tolf, |
|
185 | 185 | b'cleverdecode:': tocrlf, |
|
186 | 186 | } |
|
187 | 187 | |
|
188 | 188 | |
|
189 | 189 | class eolfile(object): |
|
190 | 190 | def __init__(self, ui, root, data): |
|
191 | 191 | self._decode = { |
|
192 | 192 | b'LF': b'to-lf', |
|
193 | 193 | b'CRLF': b'to-crlf', |
|
194 | 194 | b'BIN': b'is-binary', |
|
195 | 195 | } |
|
196 | 196 | self._encode = { |
|
197 | 197 | b'LF': b'to-lf', |
|
198 | 198 | b'CRLF': b'to-crlf', |
|
199 | 199 | b'BIN': b'is-binary', |
|
200 | 200 | } |
|
201 | 201 | |
|
202 | 202 | self.cfg = config.config() |
|
203 | 203 | # Our files should not be touched. The pattern must be |
|
204 | 204 | # inserted first to override a '** = native' pattern.
|
205 | 205 | self.cfg.set(b'patterns', b'.hg*', b'BIN', b'eol') |
|
206 | 206 | # We can then parse the user's patterns. |
|
207 | 207 | self.cfg.parse(b'.hgeol', data) |
|
208 | 208 | |
|
209 | 209 | isrepolf = self.cfg.get(b'repository', b'native') != b'CRLF' |
|
210 | 210 | self._encode[b'NATIVE'] = isrepolf and b'to-lf' or b'to-crlf' |
|
211 | 211 | iswdlf = ui.config(b'eol', b'native') in (b'LF', b'\n') |
|
212 | 212 | self._decode[b'NATIVE'] = iswdlf and b'to-lf' or b'to-crlf' |
|
213 | 213 | |
|
214 | 214 | include = [] |
|
215 | 215 | exclude = [] |
|
216 | 216 | self.patterns = [] |
|
217 | 217 | for pattern, style in self.cfg.items(b'patterns'): |
|
218 | 218 | key = style.upper() |
|
219 | 219 | if key == b'BIN': |
|
220 | 220 | exclude.append(pattern) |
|
221 | 221 | else: |
|
222 | 222 | include.append(pattern) |
|
223 | 223 | m = match.match(root, b'', [pattern]) |
|
224 | 224 | self.patterns.append((pattern, key, m)) |
|
225 | 225 | # This will match the files for which we need to care |
|
226 | 226 | # about inconsistent newlines. |
|
227 | 227 | self.match = match.match(root, b'', [], include, exclude) |
|
228 | 228 | |
|
229 | 229 | def copytoui(self, ui): |
|
230 | 230 | newpatterns = {pattern for pattern, key, m in self.patterns} |
|
231 | 231 | for section in (b'decode', b'encode'): |
|
232 | 232 | for oldpattern, _filter in ui.configitems(section): |
|
233 | 233 | if oldpattern not in newpatterns: |
|
234 | 234 | if ui.configsource(section, oldpattern) == b'eol': |
|
235 | 235 | ui.setconfig(section, oldpattern, b'!', b'eol') |
|
236 | 236 | for pattern, key, m in self.patterns: |
|
237 | 237 | try: |
|
238 | 238 | ui.setconfig(b'decode', pattern, self._decode[key], b'eol') |
|
239 | 239 | ui.setconfig(b'encode', pattern, self._encode[key], b'eol') |
|
240 | 240 | except KeyError: |
|
241 | 241 | ui.warn( |
|
242 | 242 | _(b"ignoring unknown EOL style '%s' from %s\n") |
|
243 | 243 | % (key, self.cfg.source(b'patterns', pattern)) |
|
244 | 244 | ) |
|
245 | 245 | # eol.only-consistent can be specified in ~/.hgrc or .hgeol |
|
246 | 246 | for k, v in self.cfg.items(b'eol'): |
|
247 | 247 | ui.setconfig(b'eol', k, v, b'eol') |
|
248 | 248 | |
|
249 | 249 | def checkrev(self, repo, ctx, files): |
|
250 | 250 | failed = [] |
|
251 | 251 | for f in files or ctx.files(): |
|
252 | 252 | if f not in ctx: |
|
253 | 253 | continue |
|
254 | 254 | for pattern, key, m in self.patterns: |
|
255 | 255 | if not m(f): |
|
256 | 256 | continue |
|
257 | 257 | target = self._encode[key] |
|
258 | 258 | data = ctx[f].data() |
|
259 | 259 | if ( |
|
260 | 260 | target == b"to-lf" |
|
261 | 261 | and b"\r\n" in data |
|
262 | 262 | or target == b"to-crlf" |
|
263 | 263 | and singlelf.search(data) |
|
264 | 264 | ): |
|
265 | 265 | failed.append((f, target, bytes(ctx))) |
|
266 | 266 | break |
|
267 | 267 | return failed |
|
268 | 268 | |
|
269 | 269 | |
|
270 | 270 | def parseeol(ui, repo, nodes): |
|
271 | 271 | try: |
|
272 | 272 | for node in nodes: |
|
273 | 273 | try: |
|
274 | 274 | if node is None: |
|
275 | 275 | # Cannot use workingctx.data() since it would load |
|
276 | 276 | # and cache the filters before we configure them. |
|
277 | 277 | data = repo.wvfs(b'.hgeol').read() |
|
278 | 278 | else: |
|
279 | 279 | data = repo[node][b'.hgeol'].data() |
|
280 | 280 | return eolfile(ui, repo.root, data) |
|
281 | 281 | except (IOError, LookupError): |
|
282 | 282 | pass |
|
283 | 283 | except errormod.ConfigError as inst: |
|
284 | 284 | ui.warn( |
|
285 | 285 | _( |
|
286 | 286 | b"warning: ignoring .hgeol file due to parse error " |
|
287 | 287 | b"at %s: %s\n" |
|
288 | 288 | ) |
|
289 | 289 | % (inst.location, inst.message) |
|
290 | 290 | ) |
|
291 | 291 | return None |
|
292 | 292 | |
|
293 | 293 | |
|
294 | 294 | def ensureenabled(ui): |
|
295 | 295 | """make sure the extension is enabled when used as hook |
|
296 | 296 | |
|
297 | 297 | When eol is used through hooks, the extension is never formally loaded and |
|
298 | 298 | enabled. This has some side effects; for example, the config declaration is

299 | 299 | never loaded. This function ensures the extension is enabled when running
|
300 | 300 | hooks. |
|
301 | 301 | """ |
|
302 | 302 | if b'eol' in ui._knownconfig: |
|
303 | 303 | return |
|
304 | 304 | ui.setconfig(b'extensions', b'eol', b'', source=b'internal') |
|
305 | 305 | extensions.loadall(ui, [b'eol']) |
|
306 | 306 | |
|
307 | 307 | |
|
308 | 308 | def _checkhook(ui, repo, node, headsonly): |
|
309 | 309 | # Get revisions to check and touched files at the same time |
|
310 | 310 | ensureenabled(ui) |
|
311 | 311 | files = set() |
|
312 | 312 | revs = set() |
|
313 | 313 | for rev in pycompat.xrange(repo[node].rev(), len(repo)): |
|
314 | 314 | revs.add(rev) |
|
315 | 315 | if headsonly: |
|
316 | 316 | ctx = repo[rev] |
|
317 | 317 | files.update(ctx.files()) |
|
318 | 318 | for pctx in ctx.parents(): |
|
319 | 319 | revs.discard(pctx.rev()) |
|
320 | 320 | failed = [] |
|
321 | 321 | for rev in revs: |
|
322 | 322 | ctx = repo[rev] |
|
323 | 323 | eol = parseeol(ui, repo, [ctx.node()]) |
|
324 | 324 | if eol: |
|
325 | 325 | failed.extend(eol.checkrev(repo, ctx, files)) |
|
326 | 326 | |
|
327 | 327 | if failed: |
|
328 | 328 | eols = {b'to-lf': b'CRLF', b'to-crlf': b'LF'} |
|
329 | 329 | msgs = [] |
|
330 | 330 | for f, target, node in sorted(failed): |
|
331 | 331 | msgs.append( |
|
332 | 332 | _(b" %s in %s should not have %s line endings") |
|
333 | 333 | % (f, node, eols[target]) |
|
334 | 334 | ) |
|
335 | 335 | raise errormod.Abort( |
|
336 | 336 | _(b"end-of-line check failed:\n") + b"\n".join(msgs) |
|
337 | 337 | ) |
|
338 | 338 | |
|
339 | 339 | |
|
340 | 340 | def checkallhook(ui, repo, node, hooktype, **kwargs): |
|
341 | 341 | """verify that files have expected EOLs""" |
|
342 | 342 | _checkhook(ui, repo, node, False) |
|
343 | 343 | |
|
344 | 344 | |
|
345 | 345 | def checkheadshook(ui, repo, node, hooktype, **kwargs): |
|
346 | 346 | """verify that files have expected EOLs""" |
|
347 | 347 | _checkhook(ui, repo, node, True) |
|
348 | 348 | |
|
349 | 349 | |
|
350 | 350 | # "checkheadshook" used to be called "hook" |
|
351 | 351 | hook = checkheadshook |
|
352 | 352 | |
|
353 | 353 | |
|
354 | 354 | def preupdate(ui, repo, hooktype, parent1, parent2): |
|
355 | 355 | p1node = scmutil.resolvehexnodeidprefix(repo, parent1) |
|
356 | 356 | repo.loadeol([p1node]) |
|
357 | 357 | return False |
|
358 | 358 | |
|
359 | 359 | |
|
360 | 360 | def uisetup(ui): |
|
361 | 361 | ui.setconfig(b'hooks', b'preupdate.eol', preupdate, b'eol') |
|
362 | 362 | |
|
363 | 363 | |
|
364 | 364 | def extsetup(ui): |
|
365 | 365 | try: |
|
366 | 366 | extensions.find(b'win32text') |
|
367 | 367 | ui.warn( |
|
368 | 368 | _( |
|
369 | 369 | b"the eol extension is incompatible with the " |
|
370 | 370 | b"win32text extension\n" |
|
371 | 371 | ) |
|
372 | 372 | ) |
|
373 | 373 | except KeyError: |
|
374 | 374 | pass |
|
375 | 375 | |
|
376 | 376 | |
|
377 | 377 | def reposetup(ui, repo): |
|
378 | 378 | uisetup(repo.ui) |
|
379 | 379 | |
|
380 | 380 | if not repo.local(): |
|
381 | 381 | return |
|
382 | 382 | for name, fn in pycompat.iteritems(filters): |
|
383 | 383 | repo.adddatafilter(name, fn) |
|
384 | 384 | |
|
385 | 385 | ui.setconfig(b'patch', b'eol', b'auto', b'eol') |
|
386 | 386 | |
|
387 | 387 | class eolrepo(repo.__class__): |
|
388 | 388 | def loadeol(self, nodes): |
|
389 | 389 | eol = parseeol(self.ui, self, nodes) |
|
390 | 390 | if eol is None: |
|
391 | 391 | return None |
|
392 | 392 | eol.copytoui(self.ui) |
|
393 | 393 | return eol.match |
|
394 | 394 | |
|
395 | 395 | def _hgcleardirstate(self): |
|
396 | 396 | self._eolmatch = self.loadeol([None]) |
|
397 | 397 | if not self._eolmatch: |
|
398 | 398 | self._eolmatch = util.never |
|
399 | 399 | return |
|
400 | 400 | |
|
401 | 401 | oldeol = None |
|
402 | 402 | try: |
|
403 | 403 | cachemtime = os.path.getmtime(self.vfs.join(b"eol.cache")) |
|
404 | 404 | except OSError: |
|
405 | 405 | cachemtime = 0 |
|
406 | 406 | else: |
|
407 | 407 | olddata = self.vfs.read(b"eol.cache") |
|
408 | 408 | if olddata: |
|
409 | 409 | oldeol = eolfile(self.ui, self.root, olddata) |
|
410 | 410 | |
|
411 | 411 | try: |
|
412 | 412 | eolmtime = os.path.getmtime(self.wjoin(b".hgeol")) |
|
413 | 413 | except OSError: |
|
414 | 414 | eolmtime = 0 |
|
415 | 415 | |
|
416 | 416 | if eolmtime >= cachemtime and eolmtime > 0: |
|
417 | 417 | self.ui.debug(b"eol: detected change in .hgeol\n") |
|
418 | 418 | |
|
419 | 419 | hgeoldata = self.wvfs.read(b'.hgeol') |
|
420 | 420 | neweol = eolfile(self.ui, self.root, hgeoldata) |
|
421 | 421 | |
|
422 | 422 | wlock = None |
|
423 | 423 | try: |
|
424 | 424 | wlock = self.wlock() |
|
425 | 425 | for f in self.dirstate: |
|
426 | if self.dirstate[f] != b'n': |

426 | if not self.dirstate.get_entry(f).maybe_clean: |
|
427 | 427 | continue |
|
428 | 428 | if oldeol is not None: |
|
429 | 429 | if not oldeol.match(f) and not neweol.match(f): |
|
430 | 430 | continue |
|
431 | 431 | oldkey = None |
|
432 | 432 | for pattern, key, m in oldeol.patterns: |
|
433 | 433 | if m(f): |
|
434 | 434 | oldkey = key |
|
435 | 435 | break |
|
436 | 436 | newkey = None |
|
437 | 437 | for pattern, key, m in neweol.patterns: |
|
438 | 438 | if m(f): |
|
439 | 439 | newkey = key |
|
440 | 440 | break |
|
441 | 441 | if oldkey == newkey: |
|
442 | 442 | continue |
|
443 | 443 | # all normal files need to be looked at again since |
|
444 | 444 | # the new .hgeol file specify a different filter |
|
445 | 445 | self.dirstate.set_possibly_dirty(f) |
|
446 | 446 | # Write the cache to update mtime and cache .hgeol |
|
447 | 447 | with self.vfs(b"eol.cache", b"w") as f: |
|
448 | 448 | f.write(hgeoldata) |
|
449 | 449 | except errormod.LockUnavailable: |
|
450 | 450 | # If we cannot lock the repository and clear the |
|
451 | 451 | # dirstate, then a commit might not see all files |
|
452 | 452 | # as modified. But if we cannot lock the |
|
453 | 453 | # repository, then we can also not make a commit, |
|
454 | 454 | # so ignore the error. |
|
455 | 455 | pass |
|
456 | 456 | finally: |
|
457 | 457 | if wlock is not None: |
|
458 | 458 | wlock.release() |
|
459 | 459 | |
|
460 | 460 | def commitctx(self, ctx, error=False, origctx=None): |
|
461 | 461 | for f in sorted(ctx.added() + ctx.modified()): |
|
462 | 462 | if not self._eolmatch(f): |
|
463 | 463 | continue |
|
464 | 464 | fctx = ctx[f] |
|
465 | 465 | if fctx is None: |
|
466 | 466 | continue |
|
467 | 467 | data = fctx.data() |
|
468 | 468 | if stringutil.binary(data): |
|
469 | 469 | # We should not abort here, since the user should |
|
470 | 470 | # be able to say "** = native" to automatically |
|
471 | 471 | # have all non-binary files taken care of. |
|
472 | 472 | continue |
|
473 | 473 | if inconsistenteol(data): |
|
474 | 474 | raise errormod.Abort( |
|
475 | 475 | _(b"inconsistent newline style in %s\n") % f |
|
476 | 476 | ) |
|
477 | 477 | return super(eolrepo, self).commitctx(ctx, error, origctx) |
|
478 | 478 | |
|
479 | 479 | repo.__class__ = eolrepo |
|
480 | 480 | repo._hgcleardirstate() |
@@ -1,803 +1,804 b'' | |||
|
1 | 1 | # extdiff.py - external diff program support for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | '''command to allow external programs to compare revisions |
|
9 | 9 | |
|
10 | 10 | The extdiff Mercurial extension allows you to use external programs |
|
11 | 11 | to compare revisions, or revision with working directory. The external |
|
12 | 12 | diff programs are called with a configurable set of options and two |
|
13 | 13 | non-option arguments: paths to directories containing snapshots of |
|
14 | 14 | files to compare. |
|
15 | 15 | |
|
16 | 16 | If there is more than one file being compared and the "child" revision |
|
17 | 17 | is the working directory, any modifications made in the external diff |
|
18 | 18 | program will be copied back to the working directory from the temporary |
|
19 | 19 | directory. |
|
20 | 20 | |
|
21 | 21 | The extdiff extension also allows you to configure new diff commands, so |
|
22 | 22 | you do not always need to type :hg:`extdiff -p kdiff3`. ::
|
23 | 23 | |
|
24 | 24 | [extdiff] |
|
25 | 25 | # add new command that runs GNU diff(1) in 'context diff' mode |
|
26 | 26 | cdiff = gdiff -Nprc5 |
|
27 | 27 | ## or the old way: |
|
28 | 28 | #cmd.cdiff = gdiff |
|
29 | 29 | #opts.cdiff = -Nprc5 |
|
30 | 30 | |
|
31 | 31 | # add new command called meld, runs meld (no need to name twice). If |
|
32 | 32 | # the meld executable is not available, the meld tool in [merge-tools] |
|
33 | 33 | # will be used, if available |
|
34 | 34 | meld = |
|
35 | 35 | |
|
36 | 36 | # add new command called vimdiff, runs gvimdiff with DirDiff plugin |
|
37 | 37 | # (see http://www.vim.org/scripts/script.php?script_id=102) Non |
|
38 | 38 | # English user, be sure to put "let g:DirDiffDynamicDiffText = 1" in |
|
39 | 39 | # your .vimrc |
|
40 | 40 | vimdiff = gvim -f "+next" \\ |
|
41 | 41 | "+execute 'DirDiff' fnameescape(argv(0)) fnameescape(argv(1))" |
|
42 | 42 | |
|
43 | 43 | Tool arguments can include variables that are expanded at runtime:: |
|
44 | 44 | |
|
45 | 45 | $parent1, $plabel1 - filename, descriptive label of first parent |
|
46 | 46 | $child, $clabel - filename, descriptive label of child revision |
|
47 | 47 | $parent2, $plabel2 - filename, descriptive label of second parent |
|
48 | 48 | $root - repository root |
|
49 | 49 | $parent is an alias for $parent1. |
|
50 | 50 | |
|
51 | 51 | The extdiff extension will look in your [diff-tools] and [merge-tools] |
|
52 | 52 | sections for diff tool arguments, when none are specified in [extdiff]. |
|
53 | 53 | |
|
54 | 54 | :: |
|
55 | 55 | |
|
56 | 56 | [extdiff] |
|
57 | 57 | kdiff3 = |
|
58 | 58 | |
|
59 | 59 | [diff-tools] |
|
60 | 60 | kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child |
|
61 | 61 | |
|
62 | 62 | If a program has a graphical interface, it can be useful to tell
|
63 | 63 | Mercurial about it. It will prevent the program from being mistakenly |
|
64 | 64 | used in a terminal-only environment (such as an SSH terminal session), |
|
65 | 65 | and will make :hg:`extdiff --per-file` open multiple file diffs at once |
|
66 | 66 | instead of one by one (if you still want to open file diffs one by one, |
|
67 | 67 | you can use the --confirm option). |
|
68 | 68 | |
|
69 | 69 | Declaring that a tool has a graphical interface can be done with the |
|
70 | 70 | ``gui`` flag next to where ``diffargs`` are specified: |
|
71 | 71 | |
|
72 | 72 | :: |
|
73 | 73 | |
|
74 | 74 | [diff-tools] |
|
75 | 75 | kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child |
|
76 | 76 | kdiff3.gui = true |
|
77 | 77 | |
|
78 | 78 | You can use -I/-X and list of file or directory names like normal |
|
79 | 79 | :hg:`diff` command. The extdiff extension makes snapshots of only |
|
80 | 80 | needed files, so running the external diff program will actually be |
|
81 | 81 | pretty fast (at least faster than having to compare the entire tree). |
|
82 | 82 | ''' |
|
83 | 83 | |
|
84 | 84 | from __future__ import absolute_import |
|
85 | 85 | |
|
86 | 86 | import os |
|
87 | 87 | import re |
|
88 | 88 | import shutil |
|
89 | 89 | import stat |
|
90 | 90 | import subprocess |
|
91 | 91 | |
|
92 | 92 | from mercurial.i18n import _ |
|
93 | 93 | from mercurial.node import ( |
|
94 | 94 | nullrev, |
|
95 | 95 | short, |
|
96 | 96 | ) |
|
97 | 97 | from mercurial import ( |
|
98 | 98 | archival, |
|
99 | 99 | cmdutil, |
|
100 | 100 | encoding, |
|
101 | 101 | error, |
|
102 | 102 | filemerge, |
|
103 | 103 | formatter, |
|
104 | logcmdutil, | |
|
104 | 105 | pycompat, |
|
105 | 106 | registrar, |
|
106 | 107 | scmutil, |
|
107 | 108 | util, |
|
108 | 109 | ) |
|
109 | 110 | from mercurial.utils import ( |
|
110 | 111 | procutil, |
|
111 | 112 | stringutil, |
|
112 | 113 | ) |
|
113 | 114 | |
|
114 | 115 | cmdtable = {} |
|
115 | 116 | command = registrar.command(cmdtable) |
|
116 | 117 | |
|
117 | 118 | configtable = {} |
|
118 | 119 | configitem = registrar.configitem(configtable) |
|
119 | 120 | |
|
120 | 121 | configitem( |
|
121 | 122 | b'extdiff', |
|
122 | 123 | br'opts\..*', |
|
123 | 124 | default=b'', |
|
124 | 125 | generic=True, |
|
125 | 126 | ) |
|
126 | 127 | |
|
127 | 128 | configitem( |
|
128 | 129 | b'extdiff', |
|
129 | 130 | br'gui\..*', |
|
130 | 131 | generic=True, |
|
131 | 132 | ) |
|
132 | 133 | |
|
133 | 134 | configitem( |
|
134 | 135 | b'diff-tools', |
|
135 | 136 | br'.*\.diffargs$', |
|
136 | 137 | default=None, |
|
137 | 138 | generic=True, |
|
138 | 139 | ) |
|
139 | 140 | |
|
140 | 141 | configitem( |
|
141 | 142 | b'diff-tools', |
|
142 | 143 | br'.*\.gui$', |
|
143 | 144 | generic=True, |
|
144 | 145 | ) |
|
145 | 146 | |
|
146 | 147 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
147 | 148 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
148 | 149 | # be specifying the version(s) of Mercurial they are tested with, or |
|
149 | 150 | # leave the attribute unspecified. |
|
150 | 151 | testedwith = b'ships-with-hg-core' |
|
151 | 152 | |
|
152 | 153 | |
|
153 | 154 | def snapshot(ui, repo, files, node, tmproot, listsubrepos): |
|
154 | 155 | """snapshot files as of some revision |
|
155 | 156 | if not using snapshot, -I/-X does not work and recursive diff |
|
156 | 157 | in tools like kdiff3 and meld displays too many files.""" |
|
157 | 158 | dirname = os.path.basename(repo.root) |
|
158 | 159 | if dirname == b"": |
|
159 | 160 | dirname = b"root" |
|
160 | 161 | if node is not None: |
|
161 | 162 | dirname = b'%s.%s' % (dirname, short(node)) |
|
162 | 163 | base = os.path.join(tmproot, dirname) |
|
163 | 164 | os.mkdir(base) |
|
164 | 165 | fnsandstat = [] |
|
165 | 166 | |
|
166 | 167 | if node is not None: |
|
167 | 168 | ui.note( |
|
168 | 169 | _(b'making snapshot of %d files from rev %s\n') |
|
169 | 170 | % (len(files), short(node)) |
|
170 | 171 | ) |
|
171 | 172 | else: |
|
172 | 173 | ui.note( |
|
173 | 174 | _(b'making snapshot of %d files from working directory\n') |
|
174 | 175 | % (len(files)) |
|
175 | 176 | ) |
|
176 | 177 | |
|
177 | 178 | if files: |
|
178 | 179 | repo.ui.setconfig(b"ui", b"archivemeta", False) |
|
179 | 180 | |
|
180 | 181 | archival.archive( |
|
181 | 182 | repo, |
|
182 | 183 | base, |
|
183 | 184 | node, |
|
184 | 185 | b'files', |
|
185 | 186 | match=scmutil.matchfiles(repo, files), |
|
186 | 187 | subrepos=listsubrepos, |
|
187 | 188 | ) |
|
188 | 189 | |
|
189 | 190 | for fn in sorted(files): |
|
190 | 191 | wfn = util.pconvert(fn) |
|
191 | 192 | ui.note(b' %s\n' % wfn) |
|
192 | 193 | |
|
193 | 194 | if node is None: |
|
194 | 195 | dest = os.path.join(base, wfn) |
|
195 | 196 | |
|
196 | 197 | fnsandstat.append((dest, repo.wjoin(fn), os.lstat(dest))) |
|
197 | 198 | return dirname, fnsandstat |
|
198 | 199 | |
|
199 | 200 | |
|
200 | 201 | def formatcmdline( |
|
201 | 202 | cmdline, |
|
202 | 203 | repo_root, |
|
203 | 204 | do3way, |
|
204 | 205 | parent1, |
|
205 | 206 | plabel1, |
|
206 | 207 | parent2, |
|
207 | 208 | plabel2, |
|
208 | 209 | child, |
|
209 | 210 | clabel, |
|
210 | 211 | ): |
|
211 | 212 | # Function to quote file/dir names in the argument string. |
|
212 | 213 | # When not operating in 3-way mode, an empty string is |
|
213 | 214 | # returned for parent2 |
|
214 | 215 | replace = { |
|
215 | 216 | b'parent': parent1, |
|
216 | 217 | b'parent1': parent1, |
|
217 | 218 | b'parent2': parent2, |
|
218 | 219 | b'plabel1': plabel1, |
|
219 | 220 | b'plabel2': plabel2, |
|
220 | 221 | b'child': child, |
|
221 | 222 | b'clabel': clabel, |
|
222 | 223 | b'root': repo_root, |
|
223 | 224 | } |
|
224 | 225 | |
|
225 | 226 | def quote(match): |
|
226 | 227 | pre = match.group(2) |
|
227 | 228 | key = match.group(3) |
|
228 | 229 | if not do3way and key == b'parent2': |
|
229 | 230 | return pre |
|
230 | 231 | return pre + procutil.shellquote(replace[key]) |
|
231 | 232 | |
|
232 | 233 | # Match parent2 first, so 'parent1?' will match both parent1 and parent |
|
233 | 234 | regex = ( |
|
234 | 235 | br'''(['"]?)([^\s'"$]*)''' |
|
235 | 236 | br'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1' |
|
236 | 237 | ) |
|
237 | 238 | if not do3way and not re.search(regex, cmdline): |
|
238 | 239 | cmdline += b' $parent1 $child' |
|
239 | 240 | return re.sub(regex, quote, cmdline) |
|
240 | 241 | |
|
241 | 242 | |
|
242 | 243 | def _systembackground(cmd, environ=None, cwd=None): |
|
243 | 244 | """like 'procutil.system', but returns the Popen object directly |
|
244 | 245 | so we don't have to wait on it. |
|
245 | 246 | """ |
|
246 | 247 | env = procutil.shellenviron(environ) |
|
247 | 248 | proc = subprocess.Popen( |
|
248 | 249 | procutil.tonativestr(cmd), |
|
249 | 250 | shell=True, |
|
250 | 251 | close_fds=procutil.closefds, |
|
251 | 252 | env=procutil.tonativeenv(env), |
|
252 | 253 | cwd=pycompat.rapply(procutil.tonativestr, cwd), |
|
253 | 254 | ) |
|
254 | 255 | return proc |
|
255 | 256 | |
|
256 | 257 | |
|
257 | 258 | def _runperfilediff( |
|
258 | 259 | cmdline, |
|
259 | 260 | repo_root, |
|
260 | 261 | ui, |
|
261 | 262 | guitool, |
|
262 | 263 | do3way, |
|
263 | 264 | confirm, |
|
264 | 265 | commonfiles, |
|
265 | 266 | tmproot, |
|
266 | 267 | dir1a, |
|
267 | 268 | dir1b, |
|
268 | 269 | dir2, |
|
269 | 270 | rev1a, |
|
270 | 271 | rev1b, |
|
271 | 272 | rev2, |
|
272 | 273 | ): |
|
273 | 274 | # Note that we need to sort the list of files because it was |
|
274 | 275 | # built in an "unstable" way and it's annoying to get files in a |
|
275 | 276 | # random order, especially when "confirm" mode is enabled. |
|
276 | 277 | waitprocs = [] |
|
277 | 278 | totalfiles = len(commonfiles) |
|
278 | 279 | for idx, commonfile in enumerate(sorted(commonfiles)): |
|
279 | 280 | path1a = os.path.join(dir1a, commonfile) |
|
280 | 281 | label1a = commonfile + rev1a |
|
281 | 282 | if not os.path.isfile(path1a): |
|
282 | 283 | path1a = pycompat.osdevnull |
|
283 | 284 | |
|
284 | 285 | path1b = b'' |
|
285 | 286 | label1b = b'' |
|
286 | 287 | if do3way: |
|
287 | 288 | path1b = os.path.join(dir1b, commonfile) |
|
288 | 289 | label1b = commonfile + rev1b |
|
289 | 290 | if not os.path.isfile(path1b): |
|
290 | 291 | path1b = pycompat.osdevnull |
|
291 | 292 | |
|
292 | 293 | path2 = os.path.join(dir2, commonfile) |
|
293 | 294 | label2 = commonfile + rev2 |
|
294 | 295 | |
|
295 | 296 | if confirm: |
|
296 | 297 | # Prompt before showing this diff |
|
297 | 298 | difffiles = _(b'diff %s (%d of %d)') % ( |
|
298 | 299 | commonfile, |
|
299 | 300 | idx + 1, |
|
300 | 301 | totalfiles, |
|
301 | 302 | ) |
|
302 | 303 | responses = _( |
|
303 | 304 | b'[Yns?]' |
|
304 | 305 | b'$$ &Yes, show diff' |
|
305 | 306 | b'$$ &No, skip this diff' |
|
306 | 307 | b'$$ &Skip remaining diffs' |
|
307 | 308 | b'$$ &? (display help)' |
|
308 | 309 | ) |
|
309 | 310 | r = ui.promptchoice(b'%s %s' % (difffiles, responses)) |
|
310 | 311 | if r == 3: # ? |
|
311 | 312 | while r == 3: |
|
312 | 313 | for c, t in ui.extractchoices(responses)[1]: |
|
313 | 314 | ui.write(b'%s - %s\n' % (c, encoding.lower(t))) |
|
314 | 315 | r = ui.promptchoice(b'%s %s' % (difffiles, responses)) |
|
315 | 316 | if r == 0: # yes |
|
316 | 317 | pass |
|
317 | 318 | elif r == 1: # no |
|
318 | 319 | continue |
|
319 | 320 | elif r == 2: # skip |
|
320 | 321 | break |
|
321 | 322 | |
|
322 | 323 | curcmdline = formatcmdline( |
|
323 | 324 | cmdline, |
|
324 | 325 | repo_root, |
|
325 | 326 | do3way=do3way, |
|
326 | 327 | parent1=path1a, |
|
327 | 328 | plabel1=label1a, |
|
328 | 329 | parent2=path1b, |
|
329 | 330 | plabel2=label1b, |
|
330 | 331 | child=path2, |
|
331 | 332 | clabel=label2, |
|
332 | 333 | ) |
|
333 | 334 | |
|
334 | 335 | if confirm or not guitool: |
|
335 | 336 | # Run the comparison program and wait for it to exit |
|
336 | 337 | # before we show the next file. |
|
337 | 338 | # This is because either we need to wait for confirmation |
|
338 | 339 | # from the user between each invocation, or because, as far |
|
339 | 340 | # as we know, the tool doesn't have a GUI, in which case |
|
340 | 341 | # we can't run multiple CLI programs at the same time. |
|
341 | 342 | ui.debug( |
|
342 | 343 | b'running %r in %s\n' % (pycompat.bytestr(curcmdline), tmproot) |
|
343 | 344 | ) |
|
344 | 345 | ui.system(curcmdline, cwd=tmproot, blockedtag=b'extdiff') |
|
345 | 346 | else: |
|
346 | 347 | # Run the comparison program but don't wait, as we're |
|
347 | 348 | # going to rapid-fire each file diff and then wait on |
|
348 | 349 | # the whole group. |
|
349 | 350 | ui.debug( |
|
350 | 351 | b'running %r in %s (backgrounded)\n' |
|
351 | 352 | % (pycompat.bytestr(curcmdline), tmproot) |
|
352 | 353 | ) |
|
353 | 354 | proc = _systembackground(curcmdline, cwd=tmproot) |
|
354 | 355 | waitprocs.append(proc) |
|
355 | 356 | |
|
356 | 357 | if waitprocs: |
|
357 | 358 | with ui.timeblockedsection(b'extdiff'): |
|
358 | 359 | for proc in waitprocs: |
|
359 | 360 | proc.wait() |
|
360 | 361 | |
|
361 | 362 | |
|
362 | 363 | def diffpatch(ui, repo, node1, node2, tmproot, matcher, cmdline): |
|
363 | 364 | template = b'hg-%h.patch' |
|
364 | 365 | # write patches to temporary files |
|
365 | 366 | with formatter.nullformatter(ui, b'extdiff', {}) as fm: |
|
366 | 367 | cmdutil.export( |
|
367 | 368 | repo, |
|
368 | 369 | [repo[node1].rev(), repo[node2].rev()], |
|
369 | 370 | fm, |
|
370 | 371 | fntemplate=repo.vfs.reljoin(tmproot, template), |
|
371 | 372 | match=matcher, |
|
372 | 373 | ) |
|
373 | 374 | label1 = cmdutil.makefilename(repo[node1], template) |
|
374 | 375 | label2 = cmdutil.makefilename(repo[node2], template) |
|
375 | 376 | file1 = repo.vfs.reljoin(tmproot, label1) |
|
376 | 377 | file2 = repo.vfs.reljoin(tmproot, label2) |
|
377 | 378 | cmdline = formatcmdline( |
|
378 | 379 | cmdline, |
|
379 | 380 | repo.root, |
|
380 | 381 | # no 3way while comparing patches |
|
381 | 382 | do3way=False, |
|
382 | 383 | parent1=file1, |
|
383 | 384 | plabel1=label1, |
|
384 | 385 | # while comparing patches, there is no second parent |
|
385 | 386 | parent2=None, |
|
386 | 387 | plabel2=None, |
|
387 | 388 | child=file2, |
|
388 | 389 | clabel=label2, |
|
389 | 390 | ) |
|
390 | 391 | ui.debug(b'running %r in %s\n' % (pycompat.bytestr(cmdline), tmproot)) |
|
391 | 392 | ui.system(cmdline, cwd=tmproot, blockedtag=b'extdiff') |
|
392 | 393 | return 1 |
|
393 | 394 | |
|
394 | 395 | |
|
395 | 396 | def diffrevs( |
|
396 | 397 | ui, |
|
397 | 398 | repo, |
|
398 | 399 | ctx1a, |
|
399 | 400 | ctx1b, |
|
400 | 401 | ctx2, |
|
401 | 402 | matcher, |
|
402 | 403 | tmproot, |
|
403 | 404 | cmdline, |
|
404 | 405 | do3way, |
|
405 | 406 | guitool, |
|
406 | 407 | opts, |
|
407 | 408 | ): |
|
408 | 409 | |
|
409 | 410 | subrepos = opts.get(b'subrepos') |
|
410 | 411 | |
|
411 | 412 | # calculate list of files changed between both revs |
|
412 | 413 | st = ctx1a.status(ctx2, matcher, listsubrepos=subrepos) |
|
413 | 414 | mod_a, add_a, rem_a = set(st.modified), set(st.added), set(st.removed) |
|
414 | 415 | if do3way: |
|
415 | 416 | stb = ctx1b.status(ctx2, matcher, listsubrepos=subrepos) |
|
416 | 417 | mod_b, add_b, rem_b = ( |
|
417 | 418 | set(stb.modified), |
|
418 | 419 | set(stb.added), |
|
419 | 420 | set(stb.removed), |
|
420 | 421 | ) |
|
421 | 422 | else: |
|
422 | 423 | mod_b, add_b, rem_b = set(), set(), set() |
|
423 | 424 | modadd = mod_a | add_a | mod_b | add_b |
|
424 | 425 | common = modadd | rem_a | rem_b |
|
425 | 426 | if not common: |
|
426 | 427 | return 0 |
|
427 | 428 | |
|
428 | 429 | # Always make a copy of ctx1a (and ctx1b, if applicable) |
|
429 | 430 | # dir1a should contain files which are: |
|
430 | 431 | # * modified or removed from ctx1a to ctx2 |
|
431 | 432 | # * modified or added from ctx1b to ctx2 |
|
432 | 433 | # (except files added from ctx1a to ctx2, as they were not present in
|
433 | 434 | # ctx1a) |
|
434 | 435 | dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a) |
|
435 | 436 | dir1a = snapshot(ui, repo, dir1a_files, ctx1a.node(), tmproot, subrepos)[0] |
|
436 | 437 | rev1a = b'' if ctx1a.rev() is None else b'@%d' % ctx1a.rev() |
|
437 | 438 | if do3way: |
|
438 | 439 | # file calculation criteria same as dir1a |
|
439 | 440 | dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b) |
|
440 | 441 | dir1b = snapshot( |
|
441 | 442 | ui, repo, dir1b_files, ctx1b.node(), tmproot, subrepos |
|
442 | 443 | )[0] |
|
443 | 444 | rev1b = b'@%d' % ctx1b.rev() |
|
444 | 445 | else: |
|
445 | 446 | dir1b = None |
|
446 | 447 | rev1b = b'' |
|
447 | 448 | |
|
448 | 449 | fnsandstat = [] |
|
449 | 450 | |
|
450 | 451 | # If ctx2 is not the wc or there is >1 change, copy it |
|
451 | 452 | dir2root = b'' |
|
452 | 453 | rev2 = b'' |
|
453 | 454 | if ctx2.node() is not None: |
|
454 | 455 | dir2 = snapshot(ui, repo, modadd, ctx2.node(), tmproot, subrepos)[0] |
|
455 | 456 | rev2 = b'@%d' % ctx2.rev() |
|
456 | 457 | elif len(common) > 1: |
|
457 | 458 | # we only actually need to get the files to copy back to |
|
458 | 459 | # the working dir in this case (because the other cases |
|
459 | 460 | # are: diffing 2 revisions or single file -- in which case |
|
460 | 461 | # the file is already directly passed to the diff tool). |
|
461 | 462 | dir2, fnsandstat = snapshot(ui, repo, modadd, None, tmproot, subrepos) |
|
462 | 463 | else: |
|
463 | 464 | # This lets the diff tool open the changed file directly |
|
464 | 465 | dir2 = b'' |
|
465 | 466 | dir2root = repo.root |
|
466 | 467 | |
|
467 | 468 | label1a = rev1a |
|
468 | 469 | label1b = rev1b |
|
469 | 470 | label2 = rev2 |
|
470 | 471 | |
|
471 | 472 | if not opts.get(b'per_file'): |
|
472 | 473 | # If only one change, diff the files instead of the directories |
|
473 | 474 | # Handle bogus modifies correctly by checking if the files exist |
|
474 | 475 | if len(common) == 1: |
|
475 | 476 | common_file = util.localpath(common.pop()) |
|
476 | 477 | dir1a = os.path.join(tmproot, dir1a, common_file) |
|
477 | 478 | label1a = common_file + rev1a |
|
478 | 479 | if not os.path.isfile(dir1a): |
|
479 | 480 | dir1a = pycompat.osdevnull |
|
480 | 481 | if do3way: |
|
481 | 482 | dir1b = os.path.join(tmproot, dir1b, common_file) |
|
482 | 483 | label1b = common_file + rev1b |
|
483 | 484 | if not os.path.isfile(dir1b): |
|
484 | 485 | dir1b = pycompat.osdevnull |
|
485 | 486 | dir2 = os.path.join(dir2root, dir2, common_file) |
|
486 | 487 | label2 = common_file + rev2 |
|
487 | 488 | |
|
488 | 489 | # Run the external tool on the 2 temp directories or the patches |
|
489 | 490 | cmdline = formatcmdline( |
|
490 | 491 | cmdline, |
|
491 | 492 | repo.root, |
|
492 | 493 | do3way=do3way, |
|
493 | 494 | parent1=dir1a, |
|
494 | 495 | plabel1=label1a, |
|
495 | 496 | parent2=dir1b, |
|
496 | 497 | plabel2=label1b, |
|
497 | 498 | child=dir2, |
|
498 | 499 | clabel=label2, |
|
499 | 500 | ) |
|
500 | 501 | ui.debug(b'running %r in %s\n' % (pycompat.bytestr(cmdline), tmproot)) |
|
501 | 502 | ui.system(cmdline, cwd=tmproot, blockedtag=b'extdiff') |
|
502 | 503 | else: |
|
503 | 504 | # Run the external tool once for each pair of files |
|
504 | 505 | _runperfilediff( |
|
505 | 506 | cmdline, |
|
506 | 507 | repo.root, |
|
507 | 508 | ui, |
|
508 | 509 | guitool=guitool, |
|
509 | 510 | do3way=do3way, |
|
510 | 511 | confirm=opts.get(b'confirm'), |
|
511 | 512 | commonfiles=common, |
|
512 | 513 | tmproot=tmproot, |
|
513 | 514 | dir1a=os.path.join(tmproot, dir1a), |
|
514 | 515 | dir1b=os.path.join(tmproot, dir1b) if do3way else None, |
|
515 | 516 | dir2=os.path.join(dir2root, dir2), |
|
516 | 517 | rev1a=rev1a, |
|
517 | 518 | rev1b=rev1b, |
|
518 | 519 | rev2=rev2, |
|
519 | 520 | ) |
|
520 | 521 | |
|
521 | 522 | for copy_fn, working_fn, st in fnsandstat: |
|
522 | 523 | cpstat = os.lstat(copy_fn) |
|
523 | 524 | # Some tools copy the file and attributes, so mtime may not detect |
|
524 | 525 | # all changes. A size check will detect more cases, but not all. |
|
525 | 526 | # The only certain way to detect every case is to diff all files, |
|
526 | 527 | # which could be expensive. |
|
527 | 528 | # copyfile() carries over the permission, so the mode check could |
|
528 | 529 | # be in an 'elif' branch, but for the case where the file has |
|
529 | 530 | # changed without affecting mtime or size. |
|
530 | 531 | if ( |
|
531 | 532 | cpstat[stat.ST_MTIME] != st[stat.ST_MTIME] |
|
532 | 533 | or cpstat.st_size != st.st_size |
|
533 | 534 | or (cpstat.st_mode & 0o100) != (st.st_mode & 0o100) |
|
534 | 535 | ): |
|
535 | 536 | ui.debug( |
|
536 | 537 | b'file changed while diffing. ' |
|
537 | 538 | b'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn) |
|
538 | 539 | ) |
|
539 | 540 | util.copyfile(copy_fn, working_fn) |
|
540 | 541 | |
|
541 | 542 | return 1 |
|
542 | 543 | |
|
543 | 544 | |
|
544 | 545 | def dodiff(ui, repo, cmdline, pats, opts, guitool=False): |
|
545 | 546 | """Do the actual diff: |
|
546 | 547 | |
|
547 | 548 | - copy to a temp structure if diffing 2 internal revisions |
|
548 | 549 | - copy to a temp structure if diffing working revision with |
|
549 | 550 | another one and more than 1 file is changed |
|
550 | 551 | - just invoke the diff for a single file in the working dir |
|
551 | 552 | """ |
|
552 | 553 | |
|
553 | 554 | cmdutil.check_at_most_one_arg(opts, b'rev', b'change') |
|
554 | 555 | revs = opts.get(b'rev') |
|
555 | 556 | from_rev = opts.get(b'from') |
|
556 | 557 | to_rev = opts.get(b'to') |
|
557 | 558 | change = opts.get(b'change') |
|
558 | 559 | do3way = b'$parent2' in cmdline |
|
559 | 560 | |
|
560 | 561 | if change: |
|
561 | ctx2 = scmutil.revsingle(repo, change, None) |

562 | ctx2 = logcmdutil.revsingle(repo, change, None) |
|
562 | 563 | ctx1a, ctx1b = ctx2.p1(), ctx2.p2() |
|
563 | 564 | elif from_rev or to_rev: |
|
564 | 565 | repo = scmutil.unhidehashlikerevs( |
|
565 | 566 | repo, [from_rev] + [to_rev], b'nowarn' |
|
566 | 567 | ) |
|
567 | ctx1a = scmutil.revsingle(repo, from_rev, None) |

568 | ctx1a = logcmdutil.revsingle(repo, from_rev, None) |
|
568 | 569 | ctx1b = repo[nullrev] |
|
569 | ctx2 = scmutil.revsingle(repo, to_rev, None) |

570 | ctx2 = logcmdutil.revsingle(repo, to_rev, None) |
|
570 | 571 | else: |
|
571 | ctx1a, ctx2 = scmutil.revpair(repo, revs) |

572 | ctx1a, ctx2 = logcmdutil.revpair(repo, revs) |
|
572 | 573 | if not revs: |
|
573 | 574 | ctx1b = repo[None].p2() |
|
574 | 575 | else: |
|
575 | 576 | ctx1b = repo[nullrev] |
|
576 | 577 | |
|
577 | 578 | # Disable 3-way merge if there is only one parent |
|
578 | 579 | if do3way: |
|
579 | 580 | if ctx1b.rev() == nullrev: |
|
580 | 581 | do3way = False |
|
581 | 582 | |
|
582 | 583 | matcher = scmutil.match(ctx2, pats, opts) |
|
583 | 584 | |
|
584 | 585 | if opts.get(b'patch'): |
|
585 | 586 | if opts.get(b'subrepos'): |
|
586 | 587 | raise error.Abort(_(b'--patch cannot be used with --subrepos')) |
|
587 | 588 | if opts.get(b'per_file'): |
|
588 | 589 | raise error.Abort(_(b'--patch cannot be used with --per-file')) |
|
589 | 590 | if ctx2.node() is None: |
|
590 | 591 | raise error.Abort(_(b'--patch requires two revisions')) |
|
591 | 592 | |
|
592 | 593 | tmproot = pycompat.mkdtemp(prefix=b'extdiff.') |
|
593 | 594 | try: |
|
594 | 595 | if opts.get(b'patch'): |
|
595 | 596 | return diffpatch( |
|
596 | 597 | ui, repo, ctx1a.node(), ctx2.node(), tmproot, matcher, cmdline |
|
597 | 598 | ) |
|
598 | 599 | |
|
599 | 600 | return diffrevs( |
|
600 | 601 | ui, |
|
601 | 602 | repo, |
|
602 | 603 | ctx1a, |
|
603 | 604 | ctx1b, |
|
604 | 605 | ctx2, |
|
605 | 606 | matcher, |
|
606 | 607 | tmproot, |
|
607 | 608 | cmdline, |
|
608 | 609 | do3way, |
|
609 | 610 | guitool, |
|
610 | 611 | opts, |
|
611 | 612 | ) |
|
612 | 613 | |
|
613 | 614 | finally: |
|
614 | 615 | ui.note(_(b'cleaning up temp directory\n')) |
|
615 | 616 | shutil.rmtree(tmproot) |
|
616 | 617 | |
|
617 | 618 | |
|
618 | 619 | extdiffopts = ( |
|
619 | 620 | [ |
|
620 | 621 | ( |
|
621 | 622 | b'o', |
|
622 | 623 | b'option', |
|
623 | 624 | [], |
|
624 | 625 | _(b'pass option to comparison program'), |
|
625 | 626 | _(b'OPT'), |
|
626 | 627 | ), |
|
627 | 628 | (b'r', b'rev', [], _(b'revision (DEPRECATED)'), _(b'REV')), |
|
628 | 629 | (b'', b'from', b'', _(b'revision to diff from'), _(b'REV1')), |
|
629 | 630 | (b'', b'to', b'', _(b'revision to diff to'), _(b'REV2')), |
|
630 | 631 | (b'c', b'change', b'', _(b'change made by revision'), _(b'REV')), |
|
631 | 632 | ( |
|
632 | 633 | b'', |
|
633 | 634 | b'per-file', |
|
634 | 635 | False, |
|
635 | 636 | _(b'compare each file instead of revision snapshots'), |
|
636 | 637 | ), |
|
637 | 638 | ( |
|
638 | 639 | b'', |
|
639 | 640 | b'confirm', |
|
640 | 641 | False, |
|
641 | 642 | _(b'prompt user before each external program invocation'), |
|
642 | 643 | ), |
|
643 | 644 | (b'', b'patch', None, _(b'compare patches for two revisions')), |
|
644 | 645 | ] |
|
645 | 646 | + cmdutil.walkopts |
|
646 | 647 | + cmdutil.subrepoopts |
|
647 | 648 | ) |
|
648 | 649 | |
|
649 | 650 | |
|
650 | 651 | @command( |
|
651 | 652 | b'extdiff', |
|
652 | 653 | [ |
|
653 | 654 | (b'p', b'program', b'', _(b'comparison program to run'), _(b'CMD')), |
|
654 | 655 | ] |
|
655 | 656 | + extdiffopts, |
|
656 | 657 | _(b'hg extdiff [OPT]... [FILE]...'), |
|
657 | 658 | helpcategory=command.CATEGORY_FILE_CONTENTS, |
|
658 | 659 | inferrepo=True, |
|
659 | 660 | ) |
|
660 | 661 | def extdiff(ui, repo, *pats, **opts): |
|
661 | 662 | """use external program to diff repository (or selected files) |
|
662 | 663 | |
|
663 | 664 | Show differences between revisions for the specified files, using |
|
664 | 665 | an external program. The default program used is diff, with |
|
665 | 666 | default options "-Npru". |
|
666 | 667 | |
|
667 | 668 | To select a different program, use the -p/--program option. The |
|
668 | 669 | program will be passed the names of two directories to compare, |
|
669 | 670 | unless the --per-file option is specified (see below). To pass |
|
670 | 671 | additional options to the program, use -o/--option. These will be |
|
671 | 672 | passed before the names of the directories or files to compare. |
|
672 | 673 | |
|
673 | 674 | The --from, --to, and --change options work the same way they do for |
|
674 | 675 | :hg:`diff`. |
|
675 | 676 | |
|
676 | 677 | The --per-file option runs the external program repeatedly on each |
|
677 | 678 | file to diff, instead of once on two directories. By default, |
|
678 | 679 | this happens one by one, where the next file diff is open in the |
|
679 | 680 | external program only once the previous external program (for the |
|
680 | 681 | previous file diff) has exited. If the external program has a |
|
681 | 682 | graphical interface, it can open all the file diffs at once instead |
|
682 | 683 | of one by one. See :hg:`help -e extdiff` for information about how |
|
683 | 684 | to tell Mercurial that a given program has a graphical interface. |
|
684 | 685 | |
|
685 | 686 | The --confirm option will prompt the user before each invocation of |
|
686 | 687 | the external program. It is ignored if --per-file isn't specified. |
|
687 | 688 | """ |
|
688 | 689 | opts = pycompat.byteskwargs(opts) |
|
689 | 690 | program = opts.get(b'program') |
|
690 | 691 | option = opts.get(b'option') |
|
691 | 692 | if not program: |
|
692 | 693 | program = b'diff' |
|
693 | 694 | option = option or [b'-Npru'] |
|
694 | 695 | cmdline = b' '.join(map(procutil.shellquote, [program] + option)) |
|
695 | 696 | return dodiff(ui, repo, cmdline, pats, opts) |
|
696 | 697 | |
|
697 | 698 | |
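
Putting the options above together, a few plausible invocations of the command just defined (the tool names assume those programs are installed)::

    # vanilla GNU diff between the working directory and its parent
    hg extdiff

    # meld between two revisions, restricted to one directory
    hg extdiff -p meld --from 1.0 --to tip lib/

    # prompt before each per-file diff of one changeset
    hg extdiff -p kdiff3 --per-file --confirm -c .
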
|
698 | 699 | class savedcmd(object): |
|
699 | 700 | """use external program to diff repository (or selected files) |
|
700 | 701 | |
|
701 | 702 | Show differences between revisions for the specified files, using |
|
702 | 703 | the following program:: |
|
703 | 704 | |
|
704 | 705 | %(path)s |
|
705 | 706 | |
|
706 | 707 | When two revision arguments are given, then changes are shown |
|
707 | 708 | between those revisions. If only one revision is specified then |
|
708 | 709 | that revision is compared to the working directory, and, when no |
|
709 | 710 | revisions are specified, the working directory files are compared |
|
710 | 711 | to its parent. |
|
711 | 712 | """ |
|
712 | 713 | |
|
713 | 714 | def __init__(self, path, cmdline, isgui): |
|
714 | 715 | # We can't pass non-ASCII through docstrings (and path is |
|
715 | 716 | # in an unknown encoding anyway), but avoid double separators on |
|
716 | 717 | # Windows |
|
717 | 718 | docpath = stringutil.escapestr(path).replace(b'\\\\', b'\\') |
|
718 | 719 | self.__doc__ %= {'path': pycompat.sysstr(stringutil.uirepr(docpath))} |
|
719 | 720 | self._cmdline = cmdline |
|
720 | 721 | self._isgui = isgui |
|
721 | 722 | |
|
722 | 723 | def __call__(self, ui, repo, *pats, **opts): |
|
723 | 724 | opts = pycompat.byteskwargs(opts) |
|
724 | 725 | options = b' '.join(map(procutil.shellquote, opts[b'option'])) |
|
725 | 726 | if options: |
|
726 | 727 | options = b' ' + options |
|
727 | 728 | return dodiff( |
|
728 | 729 | ui, repo, self._cmdline + options, pats, opts, guitool=self._isgui |
|
729 | 730 | ) |
|
730 | 731 | |
|
731 | 732 | |
|
732 | 733 | def _gettooldetails(ui, cmd, path): |
|
733 | 734 | """ |
|
734 | 735 | returns the following things for a
|
735 | 736 | ``` |
|
736 | 737 | [extdiff] |
|
737 | 738 | <cmd> = <path> |
|
738 | 739 | ``` |
|
739 | 740 | entry: |
|
740 | 741 | |
|
741 | 742 | cmd: command/tool name |
|
742 | 743 | path: path to the tool |
|
743 | 744 | cmdline: the command which should be run |
|
744 | 745 | isgui: whether the tool uses GUI or not |
|
745 | 746 | |
|
746 | 747 | Reads all external-tool related configs, whether from the extdiff section,

747 | 748 | the diff-tools or merge-tools sections, and whether specified in the old

748 | 749 | format or the latest format.
|
749 | 750 | """ |
|
750 | 751 | path = util.expandpath(path) |
|
751 | 752 | if cmd.startswith(b'cmd.'): |
|
752 | 753 | cmd = cmd[4:] |
|
753 | 754 | if not path: |
|
754 | 755 | path = procutil.findexe(cmd) |
|
755 | 756 | if path is None: |
|
756 | 757 | path = filemerge.findexternaltool(ui, cmd) or cmd |
|
757 | 758 | diffopts = ui.config(b'extdiff', b'opts.' + cmd) |
|
758 | 759 | cmdline = procutil.shellquote(path) |
|
759 | 760 | if diffopts: |
|
760 | 761 | cmdline += b' ' + diffopts |
|
761 | 762 | isgui = ui.configbool(b'extdiff', b'gui.' + cmd) |
|
762 | 763 | else: |
|
763 | 764 | if path: |
|
764 | 765 | # case "cmd = path opts" |
|
765 | 766 | cmdline = path |
|
766 | 767 | diffopts = len(pycompat.shlexsplit(cmdline)) > 1 |
|
767 | 768 | else: |
|
768 | 769 | # case "cmd =" |
|
769 | 770 | path = procutil.findexe(cmd) |
|
770 | 771 | if path is None: |
|
771 | 772 | path = filemerge.findexternaltool(ui, cmd) or cmd |
|
772 | 773 | cmdline = procutil.shellquote(path) |
|
773 | 774 | diffopts = False |
|
774 | 775 | isgui = ui.configbool(b'extdiff', b'gui.' + cmd) |
|
775 | 776 | # look for diff arguments in [diff-tools] then [merge-tools] |
|
776 | 777 | if not diffopts: |
|
777 | 778 | key = cmd + b'.diffargs' |
|
778 | 779 | for section in (b'diff-tools', b'merge-tools'): |
|
779 | 780 | args = ui.config(section, key) |
|
780 | 781 | if args: |
|
781 | 782 | cmdline += b' ' + args |
|
782 | 783 | if isgui is None: |
|
783 | 784 | isgui = ui.configbool(section, cmd + b'.gui') or False |
|
784 | 785 | break |
|
785 | 786 | return cmd, path, cmdline, isgui |
|
786 | 787 | |
|
787 | 788 | |
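
For reference, the three configuration shapes this helper distinguishes, side by side (the tool entries are illustrative)::

    [extdiff]
    # case "cmd.<name> = path" with separate opts
    cmd.cdiff = gdiff
    opts.cdiff = -Nprc5

    # case "cmd = path opts" on one line
    mydiff = gdiff -Nprc5

    # case "cmd =": executable found on $PATH or via [merge-tools]
    meld =
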
|
788 | 789 | def uisetup(ui): |
|
789 | 790 | for cmd, path in ui.configitems(b'extdiff'): |
|
790 | 791 | if cmd.startswith(b'opts.') or cmd.startswith(b'gui.'): |
|
791 | 792 | continue |
|
792 | 793 | cmd, path, cmdline, isgui = _gettooldetails(ui, cmd, path) |
|
793 | 794 | command( |
|
794 | 795 | cmd, |
|
795 | 796 | extdiffopts[:], |
|
796 | 797 | _(b'hg %s [OPTION]... [FILE]...') % cmd, |
|
797 | 798 | helpcategory=command.CATEGORY_FILE_CONTENTS, |
|
798 | 799 | inferrepo=True, |
|
799 | 800 | )(savedcmd(path, cmdline, isgui)) |
|
800 | 801 | |
|
801 | 802 | |
|
802 | 803 | # tell hggettext to extract docstrings from these functions: |
|
803 | 804 | i18nfunctions = [savedcmd] |
@@ -1,357 +1,358 b'' | |||
|
1 | 1 | # Copyright 2016-present Facebook. All Rights Reserved. |
|
2 | 2 | # |
|
3 | 3 | # commands: fastannotate commands |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import os |
|
11 | 11 | |
|
12 | 12 | from mercurial.i18n import _ |
|
13 | 13 | from mercurial import ( |
|
14 | 14 | commands, |
|
15 | 15 | encoding, |
|
16 | 16 | error, |
|
17 | 17 | extensions, |
|
18 | logcmdutil, | |
|
18 | 19 | patch, |
|
19 | 20 | pycompat, |
|
20 | 21 | registrar, |
|
21 | 22 | scmutil, |
|
22 | 23 | util, |
|
23 | 24 | ) |
|
24 | 25 | |
|
25 | 26 | from . import ( |
|
26 | 27 | context as facontext, |
|
27 | 28 | error as faerror, |
|
28 | 29 | formatter as faformatter, |
|
29 | 30 | ) |
|
30 | 31 | |
|
31 | 32 | cmdtable = {} |
|
32 | 33 | command = registrar.command(cmdtable) |
|
33 | 34 | |
|
34 | 35 | |
|
35 | 36 | def _matchpaths(repo, rev, pats, opts, aopts=facontext.defaultopts): |
|
36 | 37 | """generate paths matching given patterns""" |
|
37 | 38 | perfhack = repo.ui.configbool(b'fastannotate', b'perfhack') |
|
38 | 39 | |
|
39 | 40 | # disable perfhack if: |
|
40 | 41 | # a) any walkopt is used |
|
41 | 42 | # b) if we treat pats as plain file names, some of them do not have |
|
42 | 43 | # corresponding linelog files |
|
43 | 44 | if perfhack: |
|
44 | 45 | # cwd related to reporoot |
|
45 | 46 | reporoot = os.path.dirname(repo.path) |
|
46 | 47 | reldir = os.path.relpath(encoding.getcwd(), reporoot) |
|
47 | 48 | if reldir == b'.': |
|
48 | 49 | reldir = b'' |
|
49 | 50 | if any(opts.get(o[1]) for o in commands.walkopts): # a) |
|
50 | 51 | perfhack = False |
|
51 | 52 | else: # b) |
|
52 | 53 | relpats = [ |
|
53 | 54 | os.path.relpath(p, reporoot) if os.path.isabs(p) else p |
|
54 | 55 | for p in pats |
|
55 | 56 | ] |
|
56 | 57 | # disable perfhack on '..' since it allows escaping from the repo |
|
57 | 58 | if any( |
|
58 | 59 | ( |
|
59 | 60 | b'..' in f |
|
60 | 61 | or not os.path.isfile( |
|
61 | 62 | facontext.pathhelper(repo, f, aopts).linelogpath |
|
62 | 63 | ) |
|
63 | 64 | ) |
|
64 | 65 | for f in relpats |
|
65 | 66 | ): |
|
66 | 67 | perfhack = False |
|
67 | 68 | |
|
68 | 69 | # perfhack: emit paths directly without checking with the manifest

69 | 70 | # this can be incorrect if the rev does not have the file.
|
70 | 71 | if perfhack: |
|
71 | 72 | for p in relpats: |
|
72 | 73 | yield os.path.join(reldir, p) |
|
73 | 74 | else: |
|
74 | 75 | |
|
75 | 76 | def bad(x, y): |
|
76 | 77 | raise error.Abort(b"%s: %s" % (x, y)) |
|
77 | 78 | |
|
78 | ctx = scmutil.revsingle(repo, rev) |

79 | ctx = logcmdutil.revsingle(repo, rev) |
|
79 | 80 | m = scmutil.match(ctx, pats, opts, badfn=bad) |
|
80 | 81 | for p in ctx.walk(m): |
|
81 | 82 | yield p |
|
82 | 83 | |
|
83 | 84 | |
|
84 | 85 | fastannotatecommandargs = { |
|
85 | 86 | 'options': [ |
|
86 | 87 | (b'r', b'rev', b'.', _(b'annotate the specified revision'), _(b'REV')), |
|
87 | 88 | (b'u', b'user', None, _(b'list the author (long with -v)')), |
|
88 | 89 | (b'f', b'file', None, _(b'list the filename')), |
|
89 | 90 | (b'd', b'date', None, _(b'list the date (short with -q)')), |
|
90 | 91 | (b'n', b'number', None, _(b'list the revision number (default)')), |
|
91 | 92 | (b'c', b'changeset', None, _(b'list the changeset')), |
|
92 | 93 | ( |
|
93 | 94 | b'l', |
|
94 | 95 | b'line-number', |
|
95 | 96 | None, |
|
96 | 97 | _(b'show line number at the first appearance'), |
|
97 | 98 | ), |
|
98 | 99 | ( |
|
99 | 100 | b'e', |
|
100 | 101 | b'deleted', |
|
101 | 102 | None, |
|
102 | 103 | _(b'show deleted lines (slow) (EXPERIMENTAL)'), |
|
103 | 104 | ), |
|
104 | 105 | ( |
|
105 | 106 | b'', |
|
106 | 107 | b'no-content', |
|
107 | 108 | None, |
|
108 | 109 | _(b'do not show file content (EXPERIMENTAL)'), |
|
109 | 110 | ), |
|
110 | 111 | (b'', b'no-follow', None, _(b"don't follow copies and renames")), |
|
111 | 112 | ( |
|
112 | 113 | b'', |
|
113 | 114 | b'linear', |
|
114 | 115 | None, |
|
115 | 116 | _( |
|
116 | 117 | b'enforce linear history, ignore second parent ' |
|
117 | 118 | b'of merges (EXPERIMENTAL)' |
|
118 | 119 | ), |
|
119 | 120 | ), |
|
120 | 121 | ( |
|
121 | 122 | b'', |
|
122 | 123 | b'long-hash', |
|
123 | 124 | None, |
|
124 | 125 | _(b'show long changeset hash (EXPERIMENTAL)'), |
|
125 | 126 | ), |
|
126 | 127 | ( |
|
127 | 128 | b'', |
|
128 | 129 | b'rebuild', |
|
129 | 130 | None, |
|
130 | 131 | _(b'rebuild cache even if it exists (EXPERIMENTAL)'), |
|
131 | 132 | ), |
|
132 | 133 | ] |
|
133 | 134 | + commands.diffwsopts |
|
134 | 135 | + commands.walkopts |
|
135 | 136 | + commands.formatteropts, |
|
136 | 137 | 'synopsis': _(b'[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...'), |
|
137 | 138 | 'inferrepo': True, |
|
138 | 139 | } |
|
139 | 140 | |
|
140 | 141 | |
|
141 | 142 | def fastannotate(ui, repo, *pats, **opts): |
|
142 | 143 | """show changeset information by line for each file |
|
143 | 144 | |
|
144 | 145 | List changes in files, showing the revision id responsible for each line. |
|
145 | 146 | |
|
146 | 147 | This command is useful for discovering when a change was made and by whom. |
|
147 | 148 | |
|
148 | 149 | By default this command prints revision numbers. If you include --file, |
|
149 | 150 | --user, or --date, the revision number is suppressed unless you also |
|
150 | 151 | include --number. The default format can also be customized by setting |
|
151 | 152 | fastannotate.defaultformat. |
|
152 | 153 | |
|
153 | 154 | Returns 0 on success. |
|
154 | 155 | |
|
155 | 156 | .. container:: verbose |
|
156 | 157 | |
|
157 | 158 | This command uses an implementation different from the vanilla annotate |
|
158 | 159 | command, which may produce slightly different (while still reasonable) |
|
159 | 160 | outputs for some cases. |
|
160 | 161 | |
|
161 | 162 | Unlike the vanilla annotate, fastannotate follows renames regardless of
|
162 | 163 | the existence of --file. |
|
163 | 164 | |
|
164 | 165 | For the best performance when running on a full repo, use -c, -l, |
|
165 | 166 | avoid -u, -d, -n. Use --linear and --no-content to make it even faster. |
|
166 | 167 | |
|
167 | 168 | For the best performance when running on a shallow (remotefilelog) |
|
168 | 169 | repo, avoid --linear, --no-follow, or any diff options, as the server

169 | 170 | won't be able to populate the annotate cache when non-default options

170 | 171 | affecting results are used.
|
171 | 172 | """ |
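
The advice above maps onto [fastannotate] config knobs that the surrounding
code reads (defaultformat, mainbranch, unfilteredrepo). A minimal sketch of a
configuration following that advice -- the values shown are illustrative
choices, not defaults::

    [fastannotate]
    # fastannotate is most efficient emitting changesets and line numbers
    defaultformat = changeset, line-number
    # required on both the client and the server so their caches agree
    mainbranch = default
    # skip repo filtering for speed (a performance hack, see below)
    unfilteredrepo = true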
|
172 | 173 | if not pats: |
|
173 | 174 | raise error.Abort(_(b'at least one filename or pattern is required')) |
|
174 | 175 | |
|
175 | 176 | # performance hack: filtered repo can be slow. unfilter by default. |
|
176 | 177 | if ui.configbool(b'fastannotate', b'unfilteredrepo'): |
|
177 | 178 | repo = repo.unfiltered() |
|
178 | 179 | |
|
179 | 180 | opts = pycompat.byteskwargs(opts) |
|
180 | 181 | |
|
181 | 182 | rev = opts.get(b'rev', b'.') |
|
182 | 183 | rebuild = opts.get(b'rebuild', False) |
|
183 | 184 | |
|
184 | 185 | diffopts = patch.difffeatureopts( |
|
185 | 186 | ui, opts, section=b'annotate', whitespace=True |
|
186 | 187 | ) |
|
187 | 188 | aopts = facontext.annotateopts( |
|
188 | 189 | diffopts=diffopts, |
|
189 | 190 | followmerge=not opts.get(b'linear', False), |
|
190 | 191 | followrename=not opts.get(b'no_follow', False), |
|
191 | 192 | ) |
|
192 | 193 | |
|
193 | 194 | if not any( |
|
194 | 195 | opts.get(s) |
|
195 | 196 | for s in [b'user', b'date', b'file', b'number', b'changeset'] |
|
196 | 197 | ): |
|
197 | 198 | # default 'number' for compatibility. but fastannotate is more |
|
198 | 199 | # efficient with "changeset", "line-number" and "no-content". |
|
199 | 200 | for name in ui.configlist( |
|
200 | 201 | b'fastannotate', b'defaultformat', [b'number'] |
|
201 | 202 | ): |
|
202 | 203 | opts[name] = True |
|
203 | 204 | |
|
204 | 205 | ui.pager(b'fastannotate') |
|
205 | 206 | template = opts.get(b'template') |
|
206 | 207 | if template == b'json': |
|
207 | 208 | formatter = faformatter.jsonformatter(ui, repo, opts) |
|
208 | 209 | else: |
|
209 | 210 | formatter = faformatter.defaultformatter(ui, repo, opts) |
|
210 | 211 | showdeleted = opts.get(b'deleted', False) |
|
211 | 212 | showlines = not bool(opts.get(b'no_content')) |
|
212 | 213 | showpath = opts.get(b'file', False) |
|
213 | 214 | |
|
214 | 215 | # find the head of the main (master) branch |
|
215 | 216 | master = ui.config(b'fastannotate', b'mainbranch') or rev |
|
216 | 217 | |
|
217 | 218 | # paths will be used for prefetching and the real annotating |
|
218 | 219 | paths = list(_matchpaths(repo, rev, pats, opts, aopts)) |
|
219 | 220 | |
|
220 | 221 | # for client, prefetch from the server |
|
221 | 222 | if util.safehasattr(repo, 'prefetchfastannotate'): |
|
222 | 223 | repo.prefetchfastannotate(paths) |
|
223 | 224 | |
|
224 | 225 | for path in paths: |
|
225 | 226 | result = lines = existinglines = None |
|
226 | 227 | while True: |
|
227 | 228 | try: |
|
228 | 229 | with facontext.annotatecontext(repo, path, aopts, rebuild) as a: |
|
229 | 230 | result = a.annotate( |
|
230 | 231 | rev, |
|
231 | 232 | master=master, |
|
232 | 233 | showpath=showpath, |
|
233 | 234 | showlines=(showlines and not showdeleted), |
|
234 | 235 | ) |
|
235 | 236 | if showdeleted: |
|
236 | 237 | existinglines = {(l[0], l[1]) for l in result} |
|
237 | 238 | result = a.annotatealllines( |
|
238 | 239 | rev, showpath=showpath, showlines=showlines |
|
239 | 240 | ) |
|
240 | 241 | break |
|
241 | 242 | except (faerror.CannotReuseError, faerror.CorruptedFileError): |
|
242 | 243 | # happens if master moves backwards, or the file was deleted |
|
243 | 244 | # and readded, or renamed to an existing name, or corrupted. |
|
244 | 245 | if rebuild: # give up since we have tried rebuild already |
|
245 | 246 | raise |
|
246 | 247 | else: # try a second time rebuilding the cache (slow) |
|
247 | 248 | rebuild = True |
|
248 | 249 | continue |
|
249 | 250 | |
|
250 | 251 | if showlines: |
|
251 | 252 | result, lines = result |
|
252 | 253 | |
|
253 | 254 | formatter.write(result, lines, existinglines=existinglines) |
|
254 | 255 | formatter.end() |
|
255 | 256 | |
|
256 | 257 | |
|
257 | 258 | _newopts = set() |
|
258 | 259 | _knownopts = { |
|
259 | 260 | opt[1].replace(b'-', b'_') |
|
260 | 261 | for opt in (fastannotatecommandargs['options'] + commands.globalopts) |
|
261 | 262 | } |
|
262 | 263 | |
|
263 | 264 | |
|
264 | 265 | def _annotatewrapper(orig, ui, repo, *pats, **opts): |
|
265 | 266 | """used by wrapdefault""" |
|
266 | 267 | # we need this hack until the obsstore has 0.0 seconds perf impact |
|
267 | 268 | if ui.configbool(b'fastannotate', b'unfilteredrepo'): |
|
268 | 269 | repo = repo.unfiltered() |
|
269 | 270 | |
|
270 | 271 | # treat the file as text (skip the isbinary check) |
|
271 | 272 | if ui.configbool(b'fastannotate', b'forcetext'): |
|
272 | 273 | opts['text'] = True |
|
273 | 274 | |
|
274 | 275 | # check if we need to do prefetch (client-side) |
|
275 | 276 | rev = opts.get('rev') |
|
276 | 277 | if util.safehasattr(repo, 'prefetchfastannotate') and rev is not None: |
|
277 | 278 | paths = list(_matchpaths(repo, rev, pats, pycompat.byteskwargs(opts))) |
|
278 | 279 | repo.prefetchfastannotate(paths) |
|
279 | 280 | |
|
280 | 281 | return orig(ui, repo, *pats, **opts) |
|
281 | 282 | |
|
282 | 283 | |
|
283 | 284 | def registercommand(): |
|
284 | 285 | """register the fastannotate command""" |
|
285 | 286 | name = b'fastannotate|fastblame|fa' |
|
286 | 287 | command(name, helpbasic=True, **fastannotatecommandargs)(fastannotate) |
|
287 | 288 | |
|
288 | 289 | |
|
289 | 290 | def wrapdefault(): |
|
290 | 291 | """wrap the default annotate command, to be aware of the protocol""" |
|
291 | 292 | extensions.wrapcommand(commands.table, b'annotate', _annotatewrapper) |
|
292 | 293 | |
|
293 | 294 | |
|
294 | 295 | @command( |
|
295 | 296 | b'debugbuildannotatecache', |
|
296 | 297 | [(b'r', b'rev', b'', _(b'build up to the specific revision'), _(b'REV'))] |
|
297 | 298 | + commands.walkopts, |
|
298 | 299 | _(b'[-r REV] FILE...'), |
|
299 | 300 | ) |
|
300 | 301 | def debugbuildannotatecache(ui, repo, *pats, **opts): |
|
301 | 302 | """incrementally build fastannotate cache up to REV for specified files |
|
302 | 303 | |
|
303 | 304 | If REV is not specified, use the config 'fastannotate.mainbranch'. |
|
304 | 305 | |
|
305 | 306 | If fastannotate.client is True, download the annotate cache from the |
|
306 | 307 | server. Otherwise, build the annotate cache locally. |
|
307 | 308 | |
|
308 | 309 | The annotate cache will be built using the default diff and follow |
|
309 | 310 | options and lives in '.hg/fastannotate/default'. |
|
310 | 311 | """ |
|
311 | 312 | opts = pycompat.byteskwargs(opts) |
|
312 | 313 | rev = opts.get(b'rev') or ui.config(b'fastannotate', b'mainbranch')
|
313 | 314 | if not rev: |
|
314 | 315 | raise error.Abort( |
|
315 | 316 | _(b'you need to provide a revision'), |
|
316 | 317 | hint=_(b'set fastannotate.mainbranch or use --rev'), |
|
317 | 318 | ) |
|
318 | 319 | if ui.configbool(b'fastannotate', b'unfilteredrepo'): |
|
319 | 320 | repo = repo.unfiltered() |
|
320 | ctx = scmutil.revsingle(repo, rev) |

321 | ctx = logcmdutil.revsingle(repo, rev) |
|
321 | 322 | m = scmutil.match(ctx, pats, opts) |
|
322 | 323 | paths = list(ctx.walk(m)) |
|
323 | 324 | if util.safehasattr(repo, 'prefetchfastannotate'): |
|
324 | 325 | # client |
|
325 | 326 | if opts.get(b'rev'):
|
326 | 327 | raise error.Abort(_(b'--rev cannot be used for client')) |
|
327 | 328 | repo.prefetchfastannotate(paths) |
|
328 | 329 | else: |
|
329 | 330 | # server, or full repo |
|
330 | 331 | progress = ui.makeprogress(_(b'building'), total=len(paths)) |
|
331 | 332 | for i, path in enumerate(paths): |
|
332 | 333 | progress.update(i) |
|
333 | 334 | with facontext.annotatecontext(repo, path) as actx: |
|
334 | 335 | try: |
|
335 | 336 | if actx.isuptodate(rev): |
|
336 | 337 | continue |
|
337 | 338 | actx.annotate(rev, rev) |
|
338 | 339 | except (faerror.CannotReuseError, faerror.CorruptedFileError): |
|
339 | 340 | # the cache is broken (could happen with renaming so the |
|
340 | 341 | # file history gets invalidated). rebuild and try again. |
|
341 | 342 | ui.debug( |
|
342 | 343 | b'fastannotate: %s: rebuilding broken cache\n' % path |
|
343 | 344 | ) |
|
344 | 345 | actx.rebuild() |
|
345 | 346 | try: |
|
346 | 347 | actx.annotate(rev, rev) |
|
347 | 348 | except Exception as ex: |
|
348 | 349 | # possibly a bug, but should not stop us from building |
|
349 | 350 | # cache for other files. |
|
350 | 351 | ui.warn( |
|
351 | 352 | _( |
|
352 | 353 | b'fastannotate: %s: failed to ' |
|
353 | 354 | b'build cache: %r\n' |
|
354 | 355 | ) |
|
355 | 356 | % (path, ex) |
|
356 | 357 | ) |
|
357 | 358 | progress.complete() |
@@ -1,263 +1,261 b'' | |||
|
1 | 1 | # Copyright 2016-present Facebook. All Rights Reserved. |
|
2 | 2 | # |
|
3 | 3 | # protocol: logic for a server providing fastannotate support |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | from __future__ import absolute_import |
|
8 | 8 | |
|
9 | 9 | import contextlib |
|
10 | 10 | import os |
|
11 | 11 | |
|
12 | 12 | from mercurial.i18n import _ |
|
13 | 13 | from mercurial.pycompat import open |
|
14 | 14 | from mercurial import ( |
|
15 | 15 | error, |
|
16 | 16 | extensions, |
|
17 | 17 | hg, |
|
18 | 18 | pycompat, |
|
19 | 19 | util, |
|
20 | 20 | wireprotov1peer, |
|
21 | 21 | wireprotov1server, |
|
22 | 22 | ) |
|
23 | 23 | from mercurial.utils import ( |
|
24 | 24 | urlutil, |
|
25 | 25 | ) |
|
26 | 26 | from . import context |
|
27 | 27 | |
|
28 | 28 | # common |
|
29 | 29 | |
|
30 | 30 | |
|
31 | 31 | def _getmaster(ui): |
|
32 | 32 | """get the mainbranch, and enforce it is set""" |
|
33 | 33 | master = ui.config(b'fastannotate', b'mainbranch') |
|
34 | 34 | if not master: |
|
35 | 35 | raise error.Abort( |
|
36 | 36 | _( |
|
37 | 37 | b'fastannotate.mainbranch is required ' |
|
38 | 38 | b'for both the client and the server' |
|
39 | 39 | ) |
|
40 | 40 | ) |
|
41 | 41 | return master |
|
42 | 42 | |
|
43 | 43 | |
|
44 | 44 | # server-side |
|
45 | 45 | |
|
46 | 46 | |
|
47 | 47 | def _capabilities(orig, repo, proto): |
|
48 | 48 | result = orig(repo, proto) |
|
49 | 49 | result.append(b'getannotate') |
|
50 | 50 | return result |
|
51 | 51 | |
|
52 | 52 | |
|
53 | 53 | def _getannotate(repo, proto, path, lastnode): |
|
54 | 54 | # output: |
|
55 | 55 | # FILE := vfspath + '\0' + str(size) + '\0' + content |
|
56 | 56 | # OUTPUT := '' | FILE + OUTPUT |
|
57 | 57 | result = b'' |
|
58 | 58 | buildondemand = repo.ui.configbool( |
|
59 | 59 | b'fastannotate', b'serverbuildondemand', True |
|
60 | 60 | ) |
|
61 | 61 | with context.annotatecontext(repo, path) as actx: |
|
62 | 62 | if buildondemand: |
|
63 | 63 | # update before responding to the client |
|
64 | 64 | master = _getmaster(repo.ui) |
|
65 | 65 | try: |
|
66 | 66 | if not actx.isuptodate(master): |
|
67 | 67 | actx.annotate(master, master) |
|
68 | 68 | except Exception: |
|
69 | 69 | # non-fast-forward move or corrupted. rebuild automatically.
|
70 | 70 | actx.rebuild() |
|
71 | 71 | try: |
|
72 | 72 | actx.annotate(master, master) |
|
73 | 73 | except Exception: |
|
74 | 74 | actx.rebuild() # delete files |
|
75 | 75 | finally: |
|
76 | 76 | # although the "with" context will also do a close/flush, we |
|
77 | 77 | # need to do it early so we can send the correct response to

78 | 78 | # the client.
|
79 | 79 | actx.close() |
|
80 | 80 | # send back the full content of revmap and linelog, in the future we |
|
81 | 81 | # may want to do some rsync-like fancy updating. |
|
82 | 82 | # the lastnode check is not necessary if the client and the server |
|
83 | 83 | # agree where the main branch is. |
|
84 | 84 | if actx.lastnode != lastnode: |
|
85 | 85 | for p in [actx.revmappath, actx.linelogpath]: |
|
86 | 86 | if not os.path.exists(p): |
|
87 | 87 | continue |
|
88 | 88 | with open(p, b'rb') as f: |
|
89 | 89 | content = f.read() |
|
90 | 90 | vfsbaselen = len(repo.vfs.base + b'/') |
|
91 | 91 | relpath = p[vfsbaselen:] |
|
92 | 92 | result += b'%s\0%d\0%s' % (relpath, len(content), content) |
|
93 | 93 | return result |
|
94 | 94 | |
|
95 | 95 | |
|
96 | 96 | def _registerwireprotocommand(): |
|
97 | 97 | if b'getannotate' in wireprotov1server.commands: |
|
98 | 98 | return |
|
99 | 99 | wireprotov1server.wireprotocommand(b'getannotate', b'path lastnode')( |
|
100 | 100 | _getannotate |
|
101 | 101 | ) |
|
102 | 102 | |
|
103 | 103 | |
|
104 | 104 | def serveruisetup(ui): |
|
105 | 105 | _registerwireprotocommand() |
|
106 | 106 | extensions.wrapfunction(wireprotov1server, b'_capabilities', _capabilities) |
|
107 | 107 | |
|
108 | 108 | |
|
109 | 109 | # client-side |
|
110 | 110 | |
|
111 | 111 | |
|
112 | 112 | def _parseresponse(payload): |
|
113 | 113 | result = {} |
|
114 | 114 | i = 0 |
|
115 | 115 | l = len(payload) - 1 |
|
116 | 116 | state = 0 # 0: vfspath, 1: size |
|
117 | 117 | vfspath = size = b'' |
|
118 | 118 | while i < l: |
|
119 | 119 | ch = payload[i : i + 1] |
|
120 | 120 | if ch == b'\0': |
|
121 | 121 | if state == 1: |
|
122 | 122 | result[vfspath] = payload[i + 1 : i + 1 + int(size)] |
|
123 | 123 | i += int(size) |
|
124 | 124 | state = 0 |
|
125 | 125 | vfspath = size = b'' |
|
126 | 126 | elif state == 0: |
|
127 | 127 | state = 1 |
|
128 | 128 | else: |
|
129 | 129 | if state == 1: |
|
130 | 130 | size += ch |
|
131 | 131 | elif state == 0: |
|
132 | 132 | vfspath += ch |
|
133 | 133 | i += 1 |
|
134 | 134 | return result |
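
The framing parsed above is the inverse of what _getannotate emits on the
server: path, NUL, decimal size, NUL, content, repeated. A round-trip sketch
in plain Python -- the cache file names here are made up::

    payload = b''
    for relpath, content in [
        (b'fastannotate/default/a.py.l', b'linelog bytes'),
        (b'fastannotate/default/a.py.m', b'revmap bytes'),
    ]:
        # same construction as the server side's result string
        payload += b'%s\0%d\0%s' % (relpath, len(content), content)

    assert _parseresponse(payload) == {
        b'fastannotate/default/a.py.l': b'linelog bytes',
        b'fastannotate/default/a.py.m': b'revmap bytes',
    }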
|
135 | 135 | |
|
136 | 136 | |
|
137 | 137 | def peersetup(ui, peer): |
|
138 | 138 | class fastannotatepeer(peer.__class__): |
|
139 | 139 | @wireprotov1peer.batchable |
|
140 | 140 | def getannotate(self, path, lastnode=None): |
|
141 | 141 | if not self.capable(b'getannotate'): |
|
142 | 142 | ui.warn(_(b'remote peer cannot provide annotate cache\n')) |
|
143 | yield None, None |

143 | return None, None |
|
144 | 144 | else: |
|
145 | 145 | args = {b'path': path, b'lastnode': lastnode or b''} |
|
146 | f = wireprotov1peer.future() | |
|
147 | yield args, f | |
|
148 | yield _parseresponse(f.value) | |
|
146 | return args, _parseresponse | |
|
149 | 147 | |
|
150 | 148 | peer.__class__ = fastannotatepeer |
|
151 | 149 | |
|
152 | 150 | |
|
153 | 151 | @contextlib.contextmanager |
|
154 | 152 | def annotatepeer(repo): |
|
155 | 153 | ui = repo.ui |
|
156 | 154 | |
|
157 | 155 | remotedest = ui.config(b'fastannotate', b'remotepath', b'default') |
|
158 | 156 | r = urlutil.get_unique_pull_path(b'fastannotate', repo, ui, remotedest) |
|
159 | 157 | remotepath = r[0] |
|
160 | 158 | peer = hg.peer(ui, {}, remotepath) |
|
161 | 159 | |
|
162 | 160 | try: |
|
163 | 161 | yield peer |
|
164 | 162 | finally: |
|
165 | 163 | peer.close() |
|
166 | 164 | |
|
167 | 165 | |
|
168 | 166 | def clientfetch(repo, paths, lastnodemap=None, peer=None): |
|
169 | 167 | """download annotate cache from the server for paths""" |
|
170 | 168 | if not paths: |
|
171 | 169 | return |
|
172 | 170 | |
|
173 | 171 | if peer is None: |
|
174 | 172 | with annotatepeer(repo) as peer: |
|
175 | 173 | return clientfetch(repo, paths, lastnodemap, peer) |
|
176 | 174 | |
|
177 | 175 | if lastnodemap is None: |
|
178 | 176 | lastnodemap = {} |
|
179 | 177 | |
|
180 | 178 | ui = repo.ui |
|
181 | 179 | results = [] |
|
182 | 180 | with peer.commandexecutor() as batcher: |
|
183 | 181 | ui.debug(b'fastannotate: requesting %d files\n' % len(paths)) |
|
184 | 182 | for p in paths: |
|
185 | 183 | results.append( |
|
186 | 184 | batcher.callcommand( |
|
187 | 185 | b'getannotate', |
|
188 | 186 | {b'path': p, b'lastnode': lastnodemap.get(p)}, |
|
189 | 187 | ) |
|
190 | 188 | ) |
|
191 | 189 | |
|
192 | 190 | for result in results: |
|
193 | 191 | r = result.result() |
|
194 | 192 | # TODO: pconvert these paths on the server? |
|
195 | 193 | r = {util.pconvert(p): v for p, v in pycompat.iteritems(r)} |
|
196 | 194 | for path in sorted(r): |
|
197 | 195 | # ignore malicious paths |
|
198 | 196 | if not path.startswith(b'fastannotate/') or b'/../' in ( |
|
199 | 197 | path + b'/' |
|
200 | 198 | ): |
|
201 | 199 | ui.debug( |
|
202 | 200 | b'fastannotate: ignored malicious path %s\n' % path |
|
203 | 201 | ) |
|
204 | 202 | continue |
|
205 | 203 | content = r[path] |
|
206 | 204 | if ui.debugflag: |
|
207 | 205 | ui.debug( |
|
208 | 206 | b'fastannotate: writing %d bytes to %s\n' |
|
209 | 207 | % (len(content), path) |
|
210 | 208 | ) |
|
211 | 209 | repo.vfs.makedirs(os.path.dirname(path)) |
|
212 | 210 | with repo.vfs(path, b'wb') as f: |
|
213 | 211 | f.write(content) |
|
214 | 212 | |
|
215 | 213 | |
|
216 | 214 | def _filterfetchpaths(repo, paths): |
|
217 | 215 | """return a subset of paths whose history is long and need to fetch linelog |
|
218 | 216 | from the server. works with remotefilelog and non-remotefilelog repos. |
|
219 | 217 | """ |
|
220 | 218 | threshold = repo.ui.configint(b'fastannotate', b'clientfetchthreshold', 10) |
|
221 | 219 | if threshold <= 0: |
|
222 | 220 | return paths |
|
223 | 221 | |
|
224 | 222 | result = [] |
|
225 | 223 | for path in paths: |
|
226 | 224 | try: |
|
227 | 225 | if len(repo.file(path)) >= threshold: |
|
228 | 226 | result.append(path) |
|
229 | 227 | except Exception: # file not found etc. |
|
230 | 228 | result.append(path) |
|
231 | 229 | |
|
232 | 230 | return result |
|
233 | 231 | |
|
234 | 232 | |
|
235 | 233 | def localreposetup(ui, repo): |
|
236 | 234 | class fastannotaterepo(repo.__class__): |
|
237 | 235 | def prefetchfastannotate(self, paths, peer=None): |
|
238 | 236 | master = _getmaster(self.ui) |
|
239 | 237 | needupdatepaths = [] |
|
240 | 238 | lastnodemap = {} |
|
241 | 239 | try: |
|
242 | 240 | for path in _filterfetchpaths(self, paths): |
|
243 | 241 | with context.annotatecontext(self, path) as actx: |
|
244 | 242 | if not actx.isuptodate(master, strict=False): |
|
245 | 243 | needupdatepaths.append(path) |
|
246 | 244 | lastnodemap[path] = actx.lastnode |
|
247 | 245 | if needupdatepaths: |
|
248 | 246 | clientfetch(self, needupdatepaths, lastnodemap, peer) |
|
249 | 247 | except Exception as ex: |
|
250 | 248 | # could be directory not writable or so, not fatal |
|
251 | 249 | self.ui.debug(b'fastannotate: prefetch failed: %r\n' % ex) |
|
252 | 250 | |
|
253 | 251 | repo.__class__ = fastannotaterepo |
|
254 | 252 | |
|
255 | 253 | |
|
256 | 254 | def clientreposetup(ui, repo): |
|
257 | 255 | _registerwireprotocommand() |
|
258 | 256 | if repo.local(): |
|
259 | 257 | localreposetup(ui, repo) |
|
260 | 258 | # TODO: this mutates global state, but only if at least one repo |
|
261 | 259 | # has the extension enabled. This is probably bad for hgweb. |
|
262 | 260 | if peersetup not in hg.wirepeersetupfuncs: |
|
263 | 261 | hg.wirepeersetupfuncs.append(peersetup) |
@@ -1,219 +1,220 b'' | |||
|
1 | 1 | # Copyright 2020 Joerg Sonnenberger <joerg@bec.de> |
|
2 | 2 | # |
|
3 | 3 | # This software may be used and distributed according to the terms of the |
|
4 | 4 | # GNU General Public License version 2 or any later version. |
|
5 | 5 | """export repositories as git fast-import stream""" |
|
6 | 6 | |
|
7 | 7 | # The format specification for fast-import streams can be found at |
|
8 | 8 | # https://git-scm.com/docs/git-fast-import#_input_format |
|
9 | 9 | |
|
10 | 10 | from __future__ import absolute_import |
|
11 | 11 | import re |
|
12 | 12 | |
|
13 | 13 | from mercurial.i18n import _ |
|
14 | 14 | from mercurial.node import hex, nullrev |
|
15 | 15 | from mercurial.utils import stringutil |
|
16 | 16 | from mercurial import ( |
|
17 | 17 | error, |
|
18 | logcmdutil, | |
|
18 | 19 | pycompat, |
|
19 | 20 | registrar, |
|
20 | 21 | scmutil, |
|
21 | 22 | ) |
|
22 | 23 | from .convert import convcmd |
|
23 | 24 | |
|
24 | 25 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
25 | 26 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
26 | 27 | # be specifying the version(s) of Mercurial they are tested with, or |
|
27 | 28 | # leave the attribute unspecified. |
|
28 | 29 | testedwith = b"ships-with-hg-core" |
|
29 | 30 | |
|
30 | 31 | cmdtable = {} |
|
31 | 32 | command = registrar.command(cmdtable) |
|
32 | 33 | |
|
33 | 34 | GIT_PERSON_PROHIBITED = re.compile(b'[<>\n"]') |
|
34 | 35 | GIT_EMAIL_PROHIBITED = re.compile(b"[<> \n]") |
|
35 | 36 | |
|
36 | 37 | |
|
37 | 38 | def convert_to_git_user(authormap, user, rev): |
|
38 | 39 | mapped_user = authormap.get(user, user) |
|
39 | 40 | user_person = stringutil.person(mapped_user) |
|
40 | 41 | user_email = stringutil.email(mapped_user) |
|
41 | 42 | if GIT_EMAIL_PROHIBITED.match(user_email) or GIT_PERSON_PROHIBITED.match( |
|
42 | 43 | user_person |
|
43 | 44 | ): |
|
44 | 45 | raise error.Abort( |
|
45 | 46 | _(b"Unable to parse user into person and email for revision %s") |
|
46 | 47 | % rev |
|
47 | 48 | ) |
|
48 | 49 | if user_person: |
|
49 | 50 | return b'"' + user_person + b'" <' + user_email + b'>' |
|
50 | 51 | else: |
|
51 | 52 | return b"<" + user_email + b">" |
|
52 | 53 | |
|
53 | 54 | |
|
54 | 55 | def convert_to_git_date(date): |
|
55 | 56 | timestamp, utcoff = date |
|
56 | 57 | tzsign = b"+" if utcoff <= 0 else b"-" |
|
57 | 58 | if utcoff % 60 != 0: |
|
58 | 59 | raise error.Abort( |
|
59 | 60 | _(b"UTC offset in %b is not an integer number of seconds") % (date,) |
|
60 | 61 | ) |
|
61 | 62 | utcoff = abs(utcoff) // 60 |
|
62 | 63 | tzh = utcoff // 60 |
|
63 | 64 | tzmin = utcoff % 60 |
|
64 | 65 | return b"%d " % int(timestamp) + tzsign + b"%02d%02d" % (tzh, tzmin) |
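
Mercurial dates are (unixtime, offset) pairs with the offset in seconds west
of UTC, which is why the sign flips above. Two worked examples (the
timestamps are arbitrary)::

    # 3600 seconds east of UTC (offset -3600) renders as git's "+0100"
    assert convert_to_git_date((1609459200.0, -3600)) == b"1609459200 +0100"
    # 18000 seconds west of UTC renders as "-0500"
    assert convert_to_git_date((1609459200.0, 18000)) == b"1609459200 -0500"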
|
65 | 66 | |
|
66 | 67 | |
|
67 | 68 | def convert_to_git_ref(branch): |
|
68 | 69 | # XXX filter/map depending on git restrictions |
|
69 | 70 | return b"refs/heads/" + branch |
|
70 | 71 | |
|
71 | 72 | |
|
72 | 73 | def write_data(buf, data, skip_newline): |
|
73 | 74 | buf.append(b"data %d\n" % len(data)) |
|
74 | 75 | buf.append(data) |
|
75 | 76 | if not skip_newline or data[-1:] != b"\n": |
|
76 | 77 | buf.append(b"\n") |
|
77 | 78 | |
|
78 | 79 | |
|
79 | 80 | def export_commit(ui, repo, rev, marks, authormap): |
|
80 | 81 | ctx = repo[rev] |
|
81 | 82 | revid = ctx.hex() |
|
82 | 83 | if revid in marks: |
|
83 | 84 | ui.debug(b"warning: revision %s already exported, skipped\n" % revid) |
|
84 | 85 | return |
|
85 | 86 | parents = [p for p in ctx.parents() if p.rev() != nullrev] |
|
86 | 87 | for p in parents: |
|
87 | 88 | if p.hex() not in marks: |
|
88 | 89 | ui.warn( |
|
89 | 90 | _(b"warning: parent %s of %s has not been exported, skipped\n") |
|
90 | 91 | % (p, revid) |
|
91 | 92 | ) |
|
92 | 93 | return |
|
93 | 94 | |
|
94 | 95 | # For all files modified by the commit, check if they have already |
|
95 | 96 | # been exported and otherwise dump the blob with the new mark. |
|
96 | 97 | for fname in ctx.files(): |
|
97 | 98 | if fname not in ctx: |
|
98 | 99 | continue |
|
99 | 100 | filectx = ctx.filectx(fname) |
|
100 | 101 | filerev = hex(filectx.filenode()) |
|
101 | 102 | if filerev not in marks: |
|
102 | 103 | mark = len(marks) + 1 |
|
103 | 104 | marks[filerev] = mark |
|
104 | 105 | data = filectx.data() |
|
105 | 106 | buf = [b"blob\n", b"mark :%d\n" % mark] |
|
106 | 107 | write_data(buf, data, False) |
|
107 | 108 | ui.write(*buf, keepprogressbar=True) |
|
108 | 109 | del buf |
|
109 | 110 | |
|
110 | 111 | # Assign a mark for the current revision for references by |
|
111 | 112 | # later merge commits.
|
112 | 113 | mark = len(marks) + 1 |
|
113 | 114 | marks[revid] = mark |
|
114 | 115 | |
|
115 | 116 | ref = convert_to_git_ref(ctx.branch()) |
|
116 | 117 | buf = [ |
|
117 | 118 | b"commit %s\n" % ref, |
|
118 | 119 | b"mark :%d\n" % mark, |
|
119 | 120 | b"committer %s %s\n" |
|
120 | 121 | % ( |
|
121 | 122 | convert_to_git_user(authormap, ctx.user(), revid), |
|
122 | 123 | convert_to_git_date(ctx.date()), |
|
123 | 124 | ), |
|
124 | 125 | ] |
|
125 | 126 | write_data(buf, ctx.description(), True) |
|
126 | 127 | if parents: |
|
127 | 128 | buf.append(b"from :%d\n" % marks[parents[0].hex()]) |
|
128 | 129 | if len(parents) == 2: |
|
129 | 130 | buf.append(b"merge :%d\n" % marks[parents[1].hex()]) |
|
130 | 131 | p0ctx = repo[parents[0]] |
|
131 | 132 | files = ctx.manifest().diff(p0ctx.manifest()) |
|
132 | 133 | else: |
|
133 | 134 | files = ctx.files() |
|
134 | 135 | filebuf = [] |
|
135 | 136 | for fname in files: |
|
136 | 137 | if fname not in ctx: |
|
137 | 138 | filebuf.append((fname, b"D %s\n" % fname)) |
|
138 | 139 | else: |
|
139 | 140 | filectx = ctx.filectx(fname) |
|
140 | 141 | filerev = filectx.filenode() |
|
141 | 142 | fileperm = b"755" if filectx.isexec() else b"644" |
|
142 | 143 | changed = b"M %s :%d %s\n" % (fileperm, marks[hex(filerev)], fname) |
|
143 | 144 | filebuf.append((fname, changed)) |
|
144 | 145 | filebuf.sort() |
|
145 | 146 | buf.extend(changed for (fname, changed) in filebuf) |
|
146 | 147 | del filebuf |
|
147 | 148 | buf.append(b"\n") |
|
148 | 149 | ui.write(*buf, keepprogressbar=True) |
|
149 | 150 | del buf |
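
Assembled, the blob and commit records built by this function produce a
stream of roughly the following shape (a hand-written sketch with invented
marks, file name and author -- not actual command output)::

    blob
    mark :1
    data 12
    hello world

    commit refs/heads/default
    mark :2
    committer "Alice" <alice@example.com> 1609459200 +0100
    data 15
    example commit
    M 644 :1 greeting.txt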
|
150 | 151 | |
|
151 | 152 | |
|
152 | 153 | isrev = re.compile(b"^[0-9a-f]{40}$") |
|
153 | 154 | |
|
154 | 155 | |
|
155 | 156 | @command( |
|
156 | 157 | b"fastexport", |
|
157 | 158 | [ |
|
158 | 159 | (b"r", b"rev", [], _(b"revisions to export"), _(b"REV")), |
|
159 | 160 | (b"i", b"import-marks", b"", _(b"old marks file to read"), _(b"FILE")), |
|
160 | 161 | (b"e", b"export-marks", b"", _(b"new marks file to write"), _(b"FILE")), |
|
161 | 162 | ( |
|
162 | 163 | b"A", |
|
163 | 164 | b"authormap", |
|
164 | 165 | b"", |
|
165 | 166 | _(b"remap usernames using this file"), |
|
166 | 167 | _(b"FILE"), |
|
167 | 168 | ), |
|
168 | 169 | ], |
|
169 | 170 | _(b"[OPTION]... [REV]..."), |
|
170 | 171 | helpcategory=command.CATEGORY_IMPORT_EXPORT, |
|
171 | 172 | ) |
|
172 | 173 | def fastexport(ui, repo, *revs, **opts): |
|
173 | 174 | """export repository as git fast-import stream |
|
174 | 175 | |
|
175 | 176 | This command lets you dump a repository as a human-readable text stream. |
|
176 | 177 | It can be piped into corresponding import routines like "git fast-import". |
|
177 | 178 | Incremental dumps can be created by using marks files. |
|
178 | 179 | """ |
|
179 | 180 | opts = pycompat.byteskwargs(opts) |
|
180 | 181 | |
|
181 | 182 | revs += tuple(opts.get(b"rev", [])) |
|
182 | 183 | if not revs: |
|
183 | 184 | revs = scmutil.revrange(repo, [b":"]) |
|
184 | 185 | else: |
|
185 | revs = scmutil.revrange(repo, revs) |

186 | revs = logcmdutil.revrange(repo, revs) |
|
186 | 187 | if not revs: |
|
187 | 188 | raise error.Abort(_(b"no revisions matched")) |
|
188 | 189 | authorfile = opts.get(b"authormap") |
|
189 | 190 | if authorfile: |
|
190 | 191 | authormap = convcmd.readauthormap(ui, authorfile) |
|
191 | 192 | else: |
|
192 | 193 | authormap = {} |
|
193 | 194 | |
|
194 | 195 | import_marks = opts.get(b"import_marks") |
|
195 | 196 | marks = {} |
|
196 | 197 | if import_marks: |
|
197 | 198 | with open(import_marks, "rb") as import_marks_file: |
|
198 | 199 | for line in import_marks_file: |
|
199 | 200 | line = line.strip() |
|
200 | 201 | if not isrev.match(line) or line in marks: |
|
201 | 202 | raise error.Abort(_(b"Corrupted marks file")) |
|
202 | 203 | marks[line] = len(marks) + 1 |
|
203 | 204 | |
|
204 | 205 | revs.sort() |
|
205 | 206 | with ui.makeprogress( |
|
206 | 207 | _(b"exporting"), unit=_(b"revisions"), total=len(revs) |
|
207 | 208 | ) as progress: |
|
208 | 209 | for rev in revs: |
|
209 | 210 | export_commit(ui, repo, rev, marks, authormap) |
|
210 | 211 | progress.increment() |
|
211 | 212 | |
|
212 | 213 | export_marks = opts.get(b"export_marks") |
|
213 | 214 | if export_marks: |
|
214 | 215 | with open(export_marks, "wb") as export_marks_file: |
|
215 | 216 | output_marks = [None] * len(marks) |
|
216 | 217 | for k, v in marks.items(): |
|
217 | 218 | output_marks[v - 1] = k |
|
218 | 219 | for k in output_marks: |
|
219 | 220 | export_marks_file.write(k + b"\n") |
@@ -1,939 +1,971 b'' | |||
|
1 | 1 | # fix - rewrite file content in changesets and working copy |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2018 Google LLC. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | """rewrite file content in changesets or working copy (EXPERIMENTAL) |
|
8 | 8 | |
|
9 | 9 | Provides a command that runs configured tools on the contents of modified files, |
|
10 | 10 | writing back any fixes to the working copy or replacing changesets. |
|
11 | 11 | |
|
12 | 12 | Here is an example configuration that causes :hg:`fix` to apply automatic |
|
13 | 13 | formatting fixes to modified lines in C++ code:: |
|
14 | 14 | |
|
15 | 15 | [fix] |
|
16 | 16 | clang-format:command=clang-format --assume-filename={rootpath} |
|
17 | 17 | clang-format:linerange=--lines={first}:{last} |
|
18 | 18 | clang-format:pattern=set:**.cpp or **.hpp |
|
19 | 19 | |
|
20 | 20 | The :command suboption forms the first part of the shell command that will be |
|
21 | 21 | used to fix a file. The content of the file is passed on standard input, and the |
|
22 | 22 | fixed file content is expected on standard output. Any output on standard error |
|
23 | 23 | will be displayed as a warning. If the exit status is not zero, the file will |
|
24 | 24 | not be affected. A placeholder warning is displayed if there is a non-zero exit |
|
25 | 25 | status but no standard error output. Some values may be substituted into the |
|
26 | 26 | command:: |
|
27 | 27 | |
|
28 | 28 | {rootpath} The path of the file being fixed, relative to the repo root |
|
29 | 29 | {basename} The name of the file being fixed, without the directory path |
|
30 | 30 | |
|
31 | 31 | If the :linerange suboption is set, the tool will only be run if there are |
|
32 | 32 | changed lines in a file. The value of this suboption is appended to the shell |
|
33 | 33 | command once for every range of changed lines in the file. Some values may be |
|
34 | 34 | substituted into the command:: |
|
35 | 35 | |
|
36 | 36 | {first} The 1-based line number of the first line in the modified range |
|
37 | 37 | {last} The 1-based line number of the last line in the modified range |
|
38 | 38 | |
|
39 | 39 | Deleted sections of a file will be ignored by :linerange, because there is no |
|
40 | 40 | corresponding line range in the version being fixed. |
|
41 | 41 | |
|
42 | 42 | By default, tools that set :linerange will only be executed if there is at least |
|
43 | 43 | one changed line range. This is meant to prevent accidents like running a code |
|
44 | 44 | formatter in such a way that it unexpectedly reformats the whole file. If such a |
|
45 | 45 | tool needs to operate on unchanged files, it should set the :skipclean suboption |
|
46 | 46 | to false. |
|
47 | 47 | |
|
48 | 48 | The :pattern suboption determines which files will be passed through each |
|
49 | 49 | configured tool. See :hg:`help patterns` for possible values. However, all |
|
50 | 50 | patterns are relative to the repo root, even if that text says they are relative |
|
51 | 51 | to the current working directory. If there are file arguments to :hg:`fix`, the |
|
52 | 52 | intersection of these patterns is used. |
|
53 | 53 | |
|
54 | 54 | There is also a configurable limit for the maximum size of file that will be |
|
55 | 55 | processed by :hg:`fix`:: |
|
56 | 56 | |
|
57 | 57 | [fix] |
|
58 | 58 | maxfilesize = 2MB |
|
59 | 59 | |
|
60 | 60 | Normally, execution of configured tools will continue after a failure (indicated |
|
61 | 61 | by a non-zero exit status). It can also be configured to abort after the first |
|
62 | 62 | such failure, so that no files will be affected if any tool fails. This abort |
|
63 | 63 | will also cause :hg:`fix` to exit with a non-zero status:: |
|
64 | 64 | |
|
65 | 65 | [fix] |
|
66 | 66 | failure = abort |
|
67 | 67 | |
|
68 | 68 | When multiple tools are configured to affect a file, they execute in an order |
|
69 | 69 | defined by the :priority suboption. The priority suboption has a default value |
|
70 | 70 | of zero for each tool. Tools are executed in order of descending priority. The |
|
71 | 71 | execution order of tools with equal priority is unspecified. For example, you |
|
72 | 72 | could use the 'sort' and 'head' utilities to keep only the 10 smallest numbers |
|
73 | 73 | in a text file by ensuring that 'sort' runs before 'head':: |
|
74 | 74 | |
|
75 | 75 | [fix] |
|
76 | 76 | sort:command = sort -n |
|
77 | 77 | head:command = head -n 10 |
|
78 | 78 | sort:pattern = numbers.txt |
|
79 | 79 | head:pattern = numbers.txt |
|
80 | 80 | sort:priority = 2 |
|
81 | 81 | head:priority = 1 |
|
82 | 82 | |
|
83 | 83 | To account for changes made by each tool, the line numbers used for incremental |
|
84 | 84 | formatting are recomputed before executing the next tool. So, each tool may see |
|
85 | 85 | different values for the arguments added by the :linerange suboption. |
|
86 | 86 | |
|
87 | 87 | Each fixer tool is allowed to return some metadata in addition to the fixed file |
|
88 | 88 | content. The metadata must be placed before the file content on stdout, |
|
89 | 89 | separated from the file content by a zero byte. The metadata is parsed as a JSON |
|
90 | 90 | value (so, it should be UTF-8 encoded and contain no zero bytes). A fixer tool |
|
91 | 91 | is expected to produce this metadata encoding if and only if the :metadata |
|
92 | 92 | suboption is true:: |
|
93 | 93 | |
|
94 | 94 | [fix] |
|
95 | 95 | tool:command = tool --prepend-json-metadata |
|
96 | 96 | tool:metadata = true |
|
97 | 97 | |
|
98 | 98 | The metadata values are passed to hooks, which can be used to print summaries or |
|
99 | 99 | perform other post-fixing work. The supported hooks are:: |
|
100 | 100 | |
|
101 | 101 | "postfixfile" |
|
102 | 102 | Run once for each file in each revision where any fixer tools made changes |
|
103 | 103 | to the file content. Provides "$HG_REV" and "$HG_PATH" to identify the file, |
|
104 | 104 | and "$HG_METADATA" with a map of fixer names to metadata values from fixer |
|
105 | 105 | tools that affected the file. Fixer tools that didn't affect the file have a |
|
106 | 106 | value of None. Only fixer tools that executed are present in the metadata. |
|
107 | 107 | |
|
108 | 108 | "postfix" |
|
109 | 109 | Run once after all files and revisions have been handled. Provides |
|
110 | 110 | "$HG_REPLACEMENTS" with information about what revisions were created and |
|
111 | 111 | made obsolete. Provides a boolean "$HG_WDIRWRITTEN" to indicate whether any |
|
112 | 112 | files in the working copy were updated. Provides a list "$HG_METADATA" |
|
113 | 113 | mapping fixer tool names to lists of metadata values returned from |
|
114 | 114 | executions that modified a file. This aggregates the same metadata |
|
115 | 115 | previously passed to the "postfixfile" hook. |
|
116 | 116 | |
|
117 | 117 | Fixer tools are run in the repository's root directory. This allows them to read |
|
118 | 118 | configuration files from the working copy, or even write to the working copy. |
|
119 | 119 | The working copy is not updated to match the revision being fixed. In fact, |
|
120 | 120 | several revisions may be fixed in parallel. Writes to the working copy are not |
|
121 | 121 | amended into the revision being fixed; fixer tools should always write fixed |
|
122 | 122 | file content back to stdout as documented above. |
|
123 | 123 | """ |
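
To make the :metadata contract above concrete, here is a sketch of a
hypothetical fixer tool that strips trailing whitespace and reports a line
count; the tool itself and its JSON key are invented, only the
JSON-then-zero-byte-then-content framing comes from the text above::

    #!/usr/bin/env python3
    # toy fixer: file content arrives on stdin, fixed content leaves on stdout
    import json
    import sys

    data = sys.stdin.buffer.read()
    lines = data.split(b"\n")
    fixed = [l.rstrip() for l in lines]
    changed = sum(1 for a, b in zip(lines, fixed) if a != b)

    out = sys.stdout.buffer
    # metadata first (a JSON value), then a zero byte, then the content
    out.write(json.dumps({"changed_lines": changed}).encode("utf-8"))
    out.write(b"\0")
    out.write(b"\n".join(fixed))

Such a tool would be wired up with a [fix] section setting tool:command to
run the script and tool:metadata to true, as in the examples above.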
|
124 | 124 | |
|
125 | 125 | from __future__ import absolute_import |
|
126 | 126 | |
|
127 | 127 | import collections |
|
128 | 128 | import itertools |
|
129 | 129 | import os |
|
130 | 130 | import re |
|
131 | 131 | import subprocess |
|
132 | 132 | |
|
133 | 133 | from mercurial.i18n import _ |
|
134 | 134 | from mercurial.node import ( |
|
135 | 135 | nullid, |
|
136 | 136 | nullrev, |
|
137 | 137 | wdirrev, |
|
138 | 138 | ) |
|
139 | 139 | |
|
140 | 140 | from mercurial.utils import procutil |
|
141 | 141 | |
|
142 | 142 | from mercurial import ( |
|
143 | 143 | cmdutil, |
|
144 | 144 | context, |
|
145 | 145 | copies, |
|
146 | 146 | error, |
|
147 | logcmdutil, | |
|
147 | 148 | match as matchmod, |
|
148 | 149 | mdiff, |
|
149 | 150 | merge, |
|
150 | 151 | mergestate as mergestatemod, |
|
151 | 152 | obsolete, |
|
152 | 153 | pycompat, |
|
153 | 154 | registrar, |
|
154 | 155 | rewriteutil, |
|
155 | 156 | scmutil, |
|
156 | 157 | util, |
|
157 | 158 | worker, |
|
158 | 159 | ) |
|
159 | 160 | |
|
160 | 161 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
161 | 162 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
162 | 163 | # be specifying the version(s) of Mercurial they are tested with, or |
|
163 | 164 | # leave the attribute unspecified. |
|
164 | 165 | testedwith = b'ships-with-hg-core' |
|
165 | 166 | |
|
166 | 167 | cmdtable = {} |
|
167 | 168 | command = registrar.command(cmdtable) |
|
168 | 169 | |
|
169 | 170 | configtable = {} |
|
170 | 171 | configitem = registrar.configitem(configtable) |
|
171 | 172 | |
|
172 | 173 | # Register the suboptions allowed for each configured fixer, and default values. |
|
173 | 174 | FIXER_ATTRS = { |
|
174 | 175 | b'command': None, |
|
175 | 176 | b'linerange': None, |
|
176 | 177 | b'pattern': None, |
|
177 | 178 | b'priority': 0, |
|
178 | 179 | b'metadata': False, |
|
179 | 180 | b'skipclean': True, |
|
180 | 181 | b'enabled': True, |
|
181 | 182 | } |
|
182 | 183 | |
|
183 | 184 | for key, default in FIXER_ATTRS.items(): |
|
184 | 185 | configitem(b'fix', b'.*:%s$' % key, default=default, generic=True) |
|
185 | 186 | |
|
186 | 187 | # A good default size allows most source code files to be fixed, but avoids |
|
187 | 188 | # letting fixer tools choke on huge inputs, which could be surprising to the |
|
188 | 189 | # user. |
|
189 | 190 | configitem(b'fix', b'maxfilesize', default=b'2MB') |
|
190 | 191 | |
|
191 | 192 | # Allow fix commands to exit non-zero if an executed fixer tool exits non-zero. |
|
192 | 193 | # This helps users do shell scripts that stop when a fixer tool signals a |
|
193 | 194 | # problem. |
|
194 | 195 | configitem(b'fix', b'failure', default=b'continue') |
|
195 | 196 | |
|
196 | 197 | |
|
197 | 198 | def checktoolfailureaction(ui, message, hint=None): |
|
198 | 199 | """Abort with 'message' if fix.failure=abort""" |
|
199 | 200 | action = ui.config(b'fix', b'failure') |
|
200 | 201 | if action not in (b'continue', b'abort'): |
|
201 | 202 | raise error.Abort( |
|
202 | 203 | _(b'unknown fix.failure action: %s') % (action,), |
|
203 | 204 | hint=_(b'use "continue" or "abort"'), |
|
204 | 205 | ) |
|
205 | 206 | if action == b'abort': |
|
206 | 207 | raise error.Abort(message, hint=hint) |
|
207 | 208 | |
|
208 | 209 | |
|
209 | 210 | allopt = (b'', b'all', False, _(b'fix all non-public non-obsolete revisions')) |
|
210 | 211 | baseopt = ( |
|
211 | 212 | b'', |
|
212 | 213 | b'base', |
|
213 | 214 | [], |
|
214 | 215 | _( |
|
215 | 216 | b'revisions to diff against (overrides automatic ' |
|
216 | 217 | b'selection, and applies to every revision being ' |
|
217 | 218 | b'fixed)' |
|
218 | 219 | ), |
|
219 | 220 | _(b'REV'), |
|
220 | 221 | ) |
|
221 | 222 | revopt = (b'r', b'rev', [], _(b'revisions to fix (ADVANCED)'), _(b'REV')) |
|
222 | 223 | sourceopt = ( |
|
223 | 224 | b's', |
|
224 | 225 | b'source', |
|
225 | 226 | [], |
|
226 | 227 | _(b'fix the specified revisions and their descendants'), |
|
227 | 228 | _(b'REV'), |
|
228 | 229 | ) |
|
229 | 230 | wdiropt = (b'w', b'working-dir', False, _(b'fix the working directory')) |
|
230 | 231 | wholeopt = (b'', b'whole', False, _(b'always fix every line of a file')) |
|
231 | 232 | usage = _(b'[OPTION]... [FILE]...') |
|
232 | 233 | |
|
233 | 234 | |
|
234 | 235 | @command( |
|
235 | 236 | b'fix', |
|
236 | 237 | [allopt, baseopt, revopt, sourceopt, wdiropt, wholeopt], |
|
237 | 238 | usage, |
|
238 | 239 | helpcategory=command.CATEGORY_FILE_CONTENTS, |
|
239 | 240 | ) |
|
240 | 241 | def fix(ui, repo, *pats, **opts): |
|
241 | 242 | """rewrite file content in changesets or working directory |
|
242 | 243 | |
|
243 | 244 | Runs any configured tools to fix the content of files. Only affects files |
|
244 | 245 | with changes, unless file arguments are provided. Only affects changed lines |
|
245 | 246 | of files, unless the --whole flag is used. Some tools may always affect the |
|
246 | 247 | whole file regardless of --whole. |
|
247 | 248 | |
|
248 | 249 | If --working-dir is used, files with uncommitted changes in the working copy |
|
249 | 250 | will be fixed. Note that no backups are made.
|
250 | 251 | |
|
251 | 252 | If revisions are specified with --source, those revisions and their |
|
252 | 253 | descendants will be checked, and they may be replaced with new revisions |
|
253 | 254 | that have fixed file content. By automatically including the descendants, |
|
254 | 255 | no merging, rebasing, or evolution will be required. If an ancestor of the |
|
255 | 256 | working copy is included, then the working copy itself will also be fixed, |
|
256 | 257 | and the working copy will be updated to the fixed parent. |
|
257 | 258 | |
|
258 | 259 | When determining what lines of each file to fix at each revision, the whole |
|
259 | 260 | set of revisions being fixed is considered, so that fixes to earlier |
|
260 | 261 | revisions are not forgotten in later ones. The --base flag can be used to |
|
261 | 262 | override this default behavior, though it is not usually desirable to do so. |
|
262 | 263 | """ |
|
263 | 264 | opts = pycompat.byteskwargs(opts) |
|
264 | 265 | cmdutil.check_at_most_one_arg(opts, b'all', b'source', b'rev') |
|
265 | 266 | cmdutil.check_incompatible_arguments( |
|
266 | 267 | opts, b'working_dir', [b'all', b'source'] |
|
267 | 268 | ) |
|
268 | 269 | |
|
269 | 270 | with repo.wlock(), repo.lock(), repo.transaction(b'fix'): |
|
270 | 271 | revstofix = getrevstofix(ui, repo, opts) |
|
271 | 272 | basectxs = getbasectxs(repo, opts, revstofix) |
|
272 | 273 | workqueue, numitems = getworkqueue( |
|
273 | 274 | ui, repo, pats, opts, revstofix, basectxs |
|
274 | 275 | ) |
|
275 | 276 | basepaths = getbasepaths(repo, opts, workqueue, basectxs) |
|
276 | 277 | fixers = getfixers(ui) |
|
277 | 278 | |
|
278 | 279 | # Rather than letting each worker independently fetch the files |
|
279 | 280 | # (which also would add complications for shared/keepalive |
|
280 | 281 | # connections), prefetch them all first. |
|
281 | 282 | _prefetchfiles(repo, workqueue, basepaths) |
|
282 | 283 | |
|
283 | 284 | # There are no data dependencies between the workers fixing each file |
|
284 | 285 | # revision, so we can use all available parallelism. |
|
285 | 286 | def getfixes(items): |
|
286 | for rev, path in items: | |
|
287 | ctx = repo[rev] | |
|
287 | for srcrev, path, dstrevs in items: | |
|
288 | ctx = repo[srcrev] | |
|
288 | 289 | olddata = ctx[path].data() |
|
289 | 290 | metadata, newdata = fixfile( |
|
290 | ui, repo, opts, fixers, ctx, path, basepaths, basectxs[rev] | |
|
291 | ui, | |
|
292 | repo, | |
|
293 | opts, | |
|
294 | fixers, | |
|
295 | ctx, | |
|
296 | path, | |
|
297 | basepaths, | |
|
298 | basectxs[srcrev], | |
|
291 | 299 | ) |
|
292 | # Don't waste memory/time passing unchanged content back, but | |
|
293 | # produce one result per item either way. | |
|
294 | yield ( | |
|
295 | rev, | |
|
296 | path, | |
|
297 | metadata, | |
|
298 | newdata if newdata != olddata else None, | |
|
299 | ) | |
|
300 | # We ungroup the work items now, because the code that consumes | |
|
301 | # these results has to handle each dstrev separately, and in | |
|
302 | # topological order. Because these are handled in topological | |
|
303 | # order, it's important that we pass around references to | |
|
304 | # "newdata" instead of copying it. Otherwise, we would be | |
|
305 | # keeping more copies of file content in memory at a time than | |
|
306 | # if we hadn't bothered to group/deduplicate the work items. | |
|
307 | data = newdata if newdata != olddata else None | |
|
308 | for dstrev in dstrevs: | |
|
309 | yield (dstrev, path, metadata, data) | |
|
300 | 310 | |
|
301 | 311 | results = worker.worker( |
|
302 | 312 | ui, 1.0, getfixes, tuple(), workqueue, threadsafe=False |
|
303 | 313 | ) |
|
304 | 314 | |
|
305 | 315 | # We have to hold on to the data for each successor revision in memory |
|
306 | 316 | # until all its parents are committed. We ensure this by committing and |
|
307 | 317 | # freeing memory for the revisions in some topological order. This |
|
308 | 318 | # leaves a little bit of memory efficiency on the table, but also makes |
|
309 | 319 | # the tests deterministic. It might also be considered a feature since |
|
310 | 320 | # it makes the results more easily reproducible. |
|
311 | 321 | filedata = collections.defaultdict(dict) |
|
312 | 322 | aggregatemetadata = collections.defaultdict(list) |
|
313 | 323 | replacements = {} |
|
314 | 324 | wdirwritten = False |
|
315 | 325 | commitorder = sorted(revstofix, reverse=True) |
|
316 | 326 | with ui.makeprogress( |
|
317 | 327 | topic=_(b'fixing'), unit=_(b'files'), total=sum(numitems.values()) |
|
318 | 328 | ) as progress: |
|
319 | 329 | for rev, path, filerevmetadata, newdata in results: |
|
320 | 330 | progress.increment(item=path) |
|
321 | 331 | for fixername, fixermetadata in filerevmetadata.items(): |
|
322 | 332 | aggregatemetadata[fixername].append(fixermetadata) |
|
323 | 333 | if newdata is not None: |
|
324 | 334 | filedata[rev][path] = newdata |
|
325 | 335 | hookargs = { |
|
326 | 336 | b'rev': rev, |
|
327 | 337 | b'path': path, |
|
328 | 338 | b'metadata': filerevmetadata, |
|
329 | 339 | } |
|
330 | 340 | repo.hook( |
|
331 | 341 | b'postfixfile', |
|
332 | 342 | throw=False, |
|
333 | 343 | **pycompat.strkwargs(hookargs) |
|
334 | 344 | ) |
|
335 | 345 | numitems[rev] -= 1 |
|
336 | 346 | # Apply the fixes for this and any other revisions that are |
|
337 | 347 | # ready and sitting at the front of the queue. Using a loop here |
|
338 | 348 | # prevents the queue from being blocked by the first revision to |
|
339 | 349 | # be ready out of order. |
|
340 | 350 | while commitorder and not numitems[commitorder[-1]]: |
|
341 | 351 | rev = commitorder.pop() |
|
342 | 352 | ctx = repo[rev] |
|
343 | 353 | if rev == wdirrev: |
|
344 | 354 | writeworkingdir(repo, ctx, filedata[rev], replacements) |
|
345 | 355 | wdirwritten = bool(filedata[rev]) |
|
346 | 356 | else: |
|
347 | 357 | replacerev(ui, repo, ctx, filedata[rev], replacements) |
|
348 | 358 | del filedata[rev] |
|
349 | 359 | |
|
350 | 360 | cleanup(repo, replacements, wdirwritten) |
|
351 | 361 | hookargs = { |
|
352 | 362 | b'replacements': replacements, |
|
353 | 363 | b'wdirwritten': wdirwritten, |
|
354 | 364 | b'metadata': aggregatemetadata, |
|
355 | 365 | } |
|
356 | 366 | repo.hook(b'postfix', throw=True, **pycompat.strkwargs(hookargs)) |
|
357 | 367 | |
|
358 | 368 | |
|
359 | 369 | def cleanup(repo, replacements, wdirwritten): |
|
360 | 370 | """Calls scmutil.cleanupnodes() with the given replacements. |
|
361 | 371 | |
|
362 | 372 | "replacements" is a dict from nodeid to nodeid, with one key and one value |
|
363 | 373 | for every revision that was affected by fixing. This is slightly different |
|
364 | 374 | from cleanupnodes(). |
|
365 | 375 | |
|
366 | 376 | "wdirwritten" is a bool which tells whether the working copy was affected by |
|
367 | 377 | fixing, since it has no entry in "replacements". |
|
368 | 378 | |
|
369 | 379 | Useful as a hook point for extending "hg fix" with output summarizing the |
|
370 | 380 | effects of the command, though we choose not to output anything here. |
|
371 | 381 | """ |
|
372 | 382 | replacements = { |
|
373 | 383 | prec: [succ] for prec, succ in pycompat.iteritems(replacements) |
|
374 | 384 | } |
|
375 | 385 | scmutil.cleanupnodes(repo, replacements, b'fix', fixphase=True) |
|
376 | 386 | |
|
377 | 387 | |
|
378 | 388 | def getworkqueue(ui, repo, pats, opts, revstofix, basectxs): |
|
379 | """Constructs the list of files to be fixed at specific revisions |

389 | """Constructs a list of files to fix and which revisions each fix applies to |
|
380 | 390 | |
|
381 | It is up to the caller how to consume the work items, and the only | |
|
382 | dependence between them is that replacement revisions must be committed in | |
|
383 | topological order. Each work item represents a file in the working copy or | |
|
384 | in some revision that should be fixed and written back to the working copy | |
|
385 | or into a replacement revision. | |
|
391 | To avoid duplicating work, there is usually only one work item for each file | |
|
392 | revision that might need to be fixed. There can be multiple work items per | |
|
393 | file revision if the same file needs to be fixed in multiple changesets with | |
|
394 | different baserevs. Each work item also contains a list of changesets where | |
|
395 | the file's data should be replaced with the fixed data. The work items for | |
|
396 | earlier changesets come earlier in the work queue, to improve pipelining by | |
|
397 | allowing the first changeset to be replaced while fixes are still being | |
|
398 | computed for later changesets. | |
|
386 | 399 | |
|
387 | Work items for the same revision are grouped together, so that a worker | |
|
388 | pool starting with the first N items in parallel is likely to finish the | |
|
389 | first revision's work before other revisions. This can allow us to write | |
|
390 | the result to disk and reduce memory footprint. At time of writing, the | |
|
391 | partition strategy in worker.py seems favorable to this. We also sort the | |
|
392 | items by ascending revision number to match the order in which we commit | |
|
393 | the fixes later. | |
|
400 | Also returned is a map from changesets to the count of work items that might | |
|
401 | affect each changeset. This is used later to count when all of a changeset's | |
|
402 | work items have been finished, without having to inspect the remaining work | |
|
403 | queue in each worker subprocess. | |
|
404 | ||
|
405 | The example work item (1, "foo/bar.txt", (1, 2, 3)) means that the data of | |
|
406 | bar.txt should be read from revision 1, then fixed, and written back to | |
|
407 | revisions 1, 2 and 3. Revision 1 is called the "srcrev" and the list of | |
|
408 | revisions is called the "dstrevs". In practice the srcrev is always one of | |
|
409 | the dstrevs, and we make that choice when constructing the work item so that | |
|
410 | the choice can't be made inconsistently later on. The dstrevs should all | |
|
411 | have the same file revision for the given path, so the choice of srcrev is | |
|
412 | arbitrary. The wdirrev can be a dstrev and a srcrev. | |
|
394 | 413 | """ |
|
395 | workqueue = [] | |
|
414 | dstrevmap = collections.defaultdict(list) | |
|
396 | 415 | numitems = collections.defaultdict(int) |
|
397 | 416 | maxfilesize = ui.configbytes(b'fix', b'maxfilesize') |
|
398 | 417 | for rev in sorted(revstofix): |
|
399 | 418 | fixctx = repo[rev] |
|
400 | 419 | match = scmutil.match(fixctx, pats, opts) |
|
401 | 420 | for path in sorted( |
|
402 | 421 | pathstofix(ui, repo, pats, opts, match, basectxs[rev], fixctx) |
|
403 | 422 | ): |
|
404 | 423 | fctx = fixctx[path] |
|
405 | 424 | if fctx.islink(): |
|
406 | 425 | continue |
|
407 | 426 | if fctx.size() > maxfilesize: |
|
408 | 427 | ui.warn( |
|
409 | 428 | _(b'ignoring file larger than %s: %s\n') |
|
410 | 429 | % (util.bytecount(maxfilesize), path) |
|
411 | 430 | ) |
|
412 | 431 | continue |
|
413 | workqueue.append((rev, path)) | |
|
432 | baserevs = tuple(ctx.rev() for ctx in basectxs[rev]) | |
|
433 | dstrevmap[(fctx.filerev(), baserevs, path)].append(rev) | |
|
414 | 434 | numitems[rev] += 1 |
|
435 | workqueue = [ | |
|
436 | (min(dstrevs), path, dstrevs) | |
|
437 | for (_filerev, _baserevs, path), dstrevs in dstrevmap.items() | |
|
438 | ] | |
|
439 | # Move work items for earlier changesets to the front of the queue, so we | |
|
440 | # might be able to replace those changesets (in topological order) while | |
|
441 | # we're still processing later work items. Note the min() in the previous | |
|
442 | # expression, which means we don't need a custom comparator here. The path | |
|
443 | # is also important in the sort order to make the output order stable. There | |
|
444 | # are some situations where this doesn't help much, but some situations | |
|
445 | # where it lets us buffer O(1) files instead of O(n) files. | |
|
446 | workqueue.sort() | |
|
415 | 447 | return workqueue, numitems |
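
The effect of the new grouping is easiest to see on toy data; a sketch of
the same dedup logic with invented revision numbers::

    import collections

    # key: (filerev, baserevs, path) -> changesets that need the fixed data
    dstrevmap = collections.defaultdict(list)
    # revs 1, 2 and 3 all carry filerev 0 of f.py relative to base rev 0
    for rev in (1, 2, 3):
        dstrevmap[(0, (0,), b'f.py')].append(rev)

    workqueue = [
        (min(dstrevs), path, dstrevs)
        for (_filerev, _baserevs, path), dstrevs in dstrevmap.items()
    ]
    # one work item: read and fix f.py once at rev 1, then write the result
    # into revs 1, 2 and 3
    assert workqueue == [(1, b'f.py', [1, 2, 3])]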
|
416 | 448 | |
|
417 | 449 | |
|
418 | 450 | def getrevstofix(ui, repo, opts): |
|
419 | 451 | """Returns the set of revision numbers that should be fixed""" |
|
420 | 452 | if opts[b'all']: |
|
421 | 453 | revs = repo.revs(b'(not public() and not obsolete()) or wdir()') |
|
422 | 454 | elif opts[b'source']: |
|
423 | source_revs = scmutil.revrange(repo, opts[b'source']) |

455 | source_revs = logcmdutil.revrange(repo, opts[b'source']) |
|
424 | 456 | revs = set(repo.revs(b'(%ld::) - obsolete()', source_revs)) |
|
425 | 457 | if wdirrev in source_revs: |
|
426 | 458 | # `wdir()::` is currently empty, so manually add wdir |
|
427 | 459 | revs.add(wdirrev) |
|
428 | 460 | if repo[b'.'].rev() in revs: |
|
429 | 461 | revs.add(wdirrev) |
|
430 | 462 | else: |
|
431 | revs = set(scmutil.revrange(repo, opts[b'rev'])) |

463 | revs = set(logcmdutil.revrange(repo, opts[b'rev'])) |
|
432 | 464 | if opts.get(b'working_dir'): |
|
433 | 465 | revs.add(wdirrev) |
|
434 | 466 | for rev in revs: |
|
435 | 467 | checkfixablectx(ui, repo, repo[rev]) |
|
436 | 468 | # Allow fixing only wdir() even if there's an unfinished operation |
|
437 | 469 | if not (len(revs) == 1 and wdirrev in revs): |
|
438 | 470 | cmdutil.checkunfinished(repo) |
|
439 | 471 | rewriteutil.precheck(repo, revs, b'fix') |
|
440 | 472 | if ( |
|
441 | 473 | wdirrev in revs |
|
442 | 474 | and mergestatemod.mergestate.read(repo).unresolvedcount() |
|
443 | 475 | ): |
|
444 | 476 | raise error.Abort(b'unresolved conflicts', hint=b"use 'hg resolve'") |
|
445 | 477 | if not revs: |
|
446 | 478 | raise error.Abort( |
|
447 | 479 | b'no changesets specified', hint=b'use --source or --working-dir' |
|
448 | 480 | ) |
|
449 | 481 | return revs |
|
450 | 482 | |
|
451 | 483 | |
|
452 | 484 | def checkfixablectx(ui, repo, ctx): |
|
453 | 485 | """Aborts if the revision shouldn't be replaced with a fixed one.""" |
|
454 | 486 | if ctx.obsolete(): |
|
455 | 487 | # It would be better to actually check if the revision has a successor. |
|
456 | 488 | if not obsolete.isenabled(repo, obsolete.allowdivergenceopt): |
|
457 | 489 | raise error.Abort( |
|
458 | 490 | b'fixing obsolete revision could cause divergence' |
|
459 | 491 | ) |
|
460 | 492 | |
|
461 | 493 | |
|
462 | 494 | def pathstofix(ui, repo, pats, opts, match, basectxs, fixctx): |
|
463 | 495 | """Returns the set of files that should be fixed in a context |
|
464 | 496 | |
|
465 | 497 | The result depends on the base contexts; we include any file that has |
|
466 | 498 | changed relative to any of the base contexts. Base contexts should be |
|
467 | 499 | ancestors of the context being fixed. |
|
468 | 500 | """ |
|
469 | 501 | files = set() |
|
470 | 502 | for basectx in basectxs: |
|
471 | 503 | stat = basectx.status( |
|
472 | 504 | fixctx, match=match, listclean=bool(pats), listunknown=bool(pats) |
|
473 | 505 | ) |
|
474 | 506 | files.update( |
|
475 | 507 | set( |
|
476 | 508 | itertools.chain( |
|
477 | 509 | stat.added, stat.modified, stat.clean, stat.unknown |
|
478 | 510 | ) |
|
479 | 511 | ) |
|
480 | 512 | ) |
|
481 | 513 | return files |
|
482 | 514 | |
|
483 | 515 | |
|
484 | 516 | def lineranges(opts, path, basepaths, basectxs, fixctx, content2): |
|
485 | 517 | """Returns the set of line ranges that should be fixed in a file |
|
486 | 518 | |
|
487 | 519 | Of the form [(10, 20), (30, 40)]. |
|
488 | 520 | |
|
489 | 521 | This depends on the given base contexts; we must consider lines that have |
|
490 | 522 | changed versus any of the base contexts, and whether the file has been |
|
491 | 523 | renamed versus any of them. |
|
492 | 524 | |
|
493 | 525 | Another way to understand this is that we exclude line ranges that are |
|
494 | 526 | common to the file in all base contexts. |
|
495 | 527 | """ |
|
496 | 528 | if opts.get(b'whole'): |
|
497 | 529 | # Return a range containing all lines. Rely on the diff implementation's |
|
498 | 530 | # idea of how many lines are in the file, instead of reimplementing it. |
|
499 | 531 | return difflineranges(b'', content2) |
|
500 | 532 | |
|
501 | 533 | rangeslist = [] |
|
502 | 534 | for basectx in basectxs: |
|
503 | 535 | basepath = basepaths.get((basectx.rev(), fixctx.rev(), path), path) |
|
504 | 536 | |
|
505 | 537 | if basepath in basectx: |
|
506 | 538 | content1 = basectx[basepath].data() |
|
507 | 539 | else: |
|
508 | 540 | content1 = b'' |
|
509 | 541 | rangeslist.extend(difflineranges(content1, content2)) |
|
510 | 542 | return unionranges(rangeslist) |
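
A minimal illustration of the interval arithmetic involved (range values
invented): two base contexts may contribute overlapping dirty ranges, and
unionranges() coalesces them before a fixer sees them:

    # Hypothetical per-base results from difflineranges():
    ranges_vs_base1 = [(3, 5), (10, 12)]
    ranges_vs_base2 = [(5, 8)]
    # unionranges(ranges_vs_base1 + ranges_vs_base2) == [(3, 8), (10, 12)]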
|
511 | 543 | |
|
512 | 544 | |
|
513 | 545 | def getbasepaths(repo, opts, workqueue, basectxs): |
|
514 | 546 | if opts.get(b'whole'): |
|
515 | 547 | # Base paths will never be fetched for line range determination. |
|
516 | 548 | return {} |
|
517 | 549 | |
|
518 | 550 | basepaths = {} |
|
519 | for rev, path in workqueue: | |
|
520 | fixctx = repo[rev] | |
|
521 | for basectx in basectxs[rev]: | |
|
551 | for srcrev, path, _dstrevs in workqueue: | |
|
552 | fixctx = repo[srcrev] | |
|
553 | for basectx in basectxs[srcrev]: | |
|
522 | 554 | basepath = copies.pathcopies(basectx, fixctx).get(path, path) |
|
523 | 555 | if basepath in basectx: |
|
524 | 556 | basepaths[(basectx.rev(), fixctx.rev(), path)] = basepath |
|
525 | 557 | return basepaths |
|
526 | 558 | |
|
527 | 559 | |
|
528 | 560 | def unionranges(rangeslist): |
|
529 | 561 | """Return the union of some closed intervals |
|
530 | 562 | |
|
531 | 563 | >>> unionranges([]) |
|
532 | 564 | [] |
|
533 | 565 | >>> unionranges([(1, 100)]) |
|
534 | 566 | [(1, 100)] |
|
535 | 567 | >>> unionranges([(1, 100), (1, 100)]) |
|
536 | 568 | [(1, 100)] |
|
537 | 569 | >>> unionranges([(1, 100), (2, 100)]) |
|
538 | 570 | [(1, 100)] |
|
539 | 571 | >>> unionranges([(1, 99), (1, 100)]) |
|
540 | 572 | [(1, 100)] |
|
541 | 573 | >>> unionranges([(1, 100), (40, 60)]) |
|
542 | 574 | [(1, 100)] |
|
543 | 575 | >>> unionranges([(1, 49), (50, 100)]) |
|
544 | 576 | [(1, 100)] |
|
545 | 577 | >>> unionranges([(1, 48), (50, 100)]) |
|
546 | 578 | [(1, 48), (50, 100)] |
|
547 | 579 | >>> unionranges([(1, 2), (3, 4), (5, 6)]) |
|
548 | 580 | [(1, 6)] |
|
549 | 581 | """ |
|
550 | 582 | rangeslist = sorted(set(rangeslist)) |
|
551 | 583 | unioned = [] |
|
552 | 584 | if rangeslist: |
|
553 | 585 | unioned, rangeslist = [rangeslist[0]], rangeslist[1:] |
|
554 | 586 | for a, b in rangeslist: |
|
555 | 587 | c, d = unioned[-1] |
|
556 | 588 | if a > d + 1: |
|
557 | 589 | unioned.append((a, b)) |
|
558 | 590 | else: |
|
559 | 591 | unioned[-1] = (c, max(b, d)) |
|
560 | 592 | return unioned |
|
561 | 593 | |
|
562 | 594 | |
|
563 | 595 | def difflineranges(content1, content2): |
|
564 | 596 | """Return list of line number ranges in content2 that differ from content1. |
|
565 | 597 | |
|
566 | 598 | Line numbers are 1-based. The numbers are the first and last line contained |
|
567 | 599 | in the range. Single-line ranges have the same line number for the first and |
|
568 | 600 | last line. Excludes any empty ranges that result from lines that are only |
|
569 | 601 | present in content1. Relies on mdiff's idea of where the line endings are in |
|
570 | 602 | the string. |
|
571 | 603 | |
|
572 | 604 | >>> from mercurial import pycompat |
|
573 | 605 | >>> lines = lambda s: b'\\n'.join([c for c in pycompat.iterbytestr(s)]) |
|
574 | 606 | >>> difflineranges2 = lambda a, b: difflineranges(lines(a), lines(b)) |
|
575 | 607 | >>> difflineranges2(b'', b'') |
|
576 | 608 | [] |
|
577 | 609 | >>> difflineranges2(b'a', b'') |
|
578 | 610 | [] |
|
579 | 611 | >>> difflineranges2(b'', b'A') |
|
580 | 612 | [(1, 1)] |
|
581 | 613 | >>> difflineranges2(b'a', b'a') |
|
582 | 614 | [] |
|
583 | 615 | >>> difflineranges2(b'a', b'A') |
|
584 | 616 | [(1, 1)] |
|
585 | 617 | >>> difflineranges2(b'ab', b'') |
|
586 | 618 | [] |
|
587 | 619 | >>> difflineranges2(b'', b'AB') |
|
588 | 620 | [(1, 2)] |
|
589 | 621 | >>> difflineranges2(b'abc', b'ac') |
|
590 | 622 | [] |
|
591 | 623 | >>> difflineranges2(b'ab', b'aCb') |
|
592 | 624 | [(2, 2)] |
|
593 | 625 | >>> difflineranges2(b'abc', b'aBc') |
|
594 | 626 | [(2, 2)] |
|
595 | 627 | >>> difflineranges2(b'ab', b'AB') |
|
596 | 628 | [(1, 2)] |
|
597 | 629 | >>> difflineranges2(b'abcde', b'aBcDe') |
|
598 | 630 | [(2, 2), (4, 4)] |
|
599 | 631 | >>> difflineranges2(b'abcde', b'aBCDe') |
|
600 | 632 | [(2, 4)] |
|
601 | 633 | """ |
|
602 | 634 | ranges = [] |
|
603 | 635 | for lines, kind in mdiff.allblocks(content1, content2): |
|
604 | 636 | firstline, lastline = lines[2:4] |
|
605 | 637 | if kind == b'!' and firstline != lastline: |
|
606 | 638 | ranges.append((firstline + 1, lastline)) |
|
607 | 639 | return ranges |
|
608 | 640 | |
|
609 | 641 | |
|
610 | 642 | def getbasectxs(repo, opts, revstofix): |
|
611 | 643 | """Returns a map of the base contexts for each revision |
|
612 | 644 | |
|
613 | 645 | The base contexts determine which lines are considered modified when we |
|
614 | 646 | attempt to fix just the modified lines in a file. It also determines which |
|
615 | 647 | files we attempt to fix, so it is important to compute this even when |
|
616 | 648 | --whole is used. |
|
617 | 649 | """ |
|
618 | 650 | # The --base flag overrides the usual logic, and we give every revision |
|
619 | 651 | # exactly the set of baserevs that the user specified. |
|
620 | 652 | if opts.get(b'base'): |
|
621 | baserevs = set(scmutil.revrange(repo, opts.get(b'base'))) | 

653 | baserevs = set(logcmdutil.revrange(repo, opts.get(b'base'))) | 
|
622 | 654 | if not baserevs: |
|
623 | 655 | baserevs = {nullrev} |
|
624 | 656 | basectxs = {repo[rev] for rev in baserevs} |
|
625 | 657 | return {rev: basectxs for rev in revstofix} |
|
626 | 658 | |
|
627 | 659 | # Proceed in topological order so that we can easily determine each |
|
628 | 660 | # revision's baserevs by looking at its parents and their baserevs. |
|
629 | 661 | basectxs = collections.defaultdict(set) |
|
630 | 662 | for rev in sorted(revstofix): |
|
631 | 663 | ctx = repo[rev] |
|
632 | 664 | for pctx in ctx.parents(): |
|
633 | 665 | if pctx.rev() in basectxs: |
|
634 | 666 | basectxs[rev].update(basectxs[pctx.rev()]) |
|
635 | 667 | else: |
|
636 | 668 | basectxs[rev].add(pctx) |
|
637 | 669 | return basectxs |
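
A small worked example of the propagation above (revision numbers
invented): in a linear history 1 <- 2 <- 3 with revs {2, 3} being fixed,
rev 2 gets base {ctx(1)} because its parent is outside the set, and rev 3
inherits that same base because its parent 2 is itself being fixed; every
revision is thus diffed against its nearest unfixed ancestor:

    # basectxs == {2: {ctx(1)}, 3: {ctx(1)}}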
|
638 | 670 | |
|
639 | 671 | |
|
640 | 672 | def _prefetchfiles(repo, workqueue, basepaths): |
|
641 | 673 | toprefetch = set() |
|
642 | 674 | |
|
643 | 675 | # Prefetch the files that will be fixed. |
|
644 | for rev, path in workqueue: | |
|
645 | if rev == wdirrev: | |
|
676 | for srcrev, path, _dstrevs in workqueue: | |
|
677 | if srcrev == wdirrev: | |
|
646 | 678 | continue |
|
647 | toprefetch.add((rev, path)) | |
|
679 | toprefetch.add((srcrev, path)) | |
|
648 | 680 | |
|
649 | 681 | # Prefetch the base contents for lineranges(). |
|
650 | 682 | for (baserev, fixrev, path), basepath in basepaths.items(): |
|
651 | 683 | toprefetch.add((baserev, basepath)) |
|
652 | 684 | |
|
653 | 685 | if toprefetch: |
|
654 | 686 | scmutil.prefetchfiles( |
|
655 | 687 | repo, |
|
656 | 688 | [ |
|
657 | 689 | (rev, scmutil.matchfiles(repo, [path])) |
|
658 | 690 | for rev, path in toprefetch |
|
659 | 691 | ], |
|
660 | 692 | ) |
|
661 | 693 | |
|
662 | 694 | |
|
663 | 695 | def fixfile(ui, repo, opts, fixers, fixctx, path, basepaths, basectxs): |
|
664 | 696 | """Run any configured fixers that should affect the file in this context |
|
665 | 697 | |
|
666 | 698 | Returns the file content that results from applying the fixers in some order |
|
667 | 699 | starting with the file's content in the fixctx. Fixers that support line |
|
668 | 700 | ranges will affect lines that have changed relative to any of the basectxs |
|
669 | 701 | (i.e. they will only avoid lines that are common to all basectxs). |
|
670 | 702 | |
|
671 | 703 | A fixer tool's stdout will become the file's new content if and only if it |
|
672 | 704 | exits with code zero. The fixer tool's working directory is the repository's |
|
673 | 705 | root. |
|
674 | 706 | """ |
|
675 | 707 | metadata = {} |
|
676 | 708 | newdata = fixctx[path].data() |
|
677 | 709 | for fixername, fixer in pycompat.iteritems(fixers): |
|
678 | 710 | if fixer.affects(opts, fixctx, path): |
|
679 | 711 | ranges = lineranges( |
|
680 | 712 | opts, path, basepaths, basectxs, fixctx, newdata |
|
681 | 713 | ) |
|
682 | 714 | command = fixer.command(ui, path, ranges) |
|
683 | 715 | if command is None: |
|
684 | 716 | continue |
|
685 | 717 | ui.debug(b'subprocess: %s\n' % (command,)) |
|
686 | 718 | proc = subprocess.Popen( |
|
687 | 719 | procutil.tonativestr(command), |
|
688 | 720 | shell=True, |
|
689 | 721 | cwd=procutil.tonativestr(repo.root), |
|
690 | 722 | stdin=subprocess.PIPE, |
|
691 | 723 | stdout=subprocess.PIPE, |
|
692 | 724 | stderr=subprocess.PIPE, |
|
693 | 725 | ) |
|
694 | 726 | stdout, stderr = proc.communicate(newdata) |
|
695 | 727 | if stderr: |
|
696 | 728 | showstderr(ui, fixctx.rev(), fixername, stderr) |
|
697 | 729 | newerdata = stdout |
|
698 | 730 | if fixer.shouldoutputmetadata(): |
|
699 | 731 | try: |
|
700 | 732 | metadatajson, newerdata = stdout.split(b'\0', 1) |
|
701 | 733 | metadata[fixername] = pycompat.json_loads(metadatajson) |
|
702 | 734 | except ValueError: |
|
703 | 735 | ui.warn( |
|
704 | 736 | _(b'ignored invalid output from fixer tool: %s\n') |
|
705 | 737 | % (fixername,) |
|
706 | 738 | ) |
|
707 | 739 | continue |
|
708 | 740 | else: |
|
709 | 741 | metadata[fixername] = None |
|
710 | 742 | if proc.returncode == 0: |
|
711 | 743 | newdata = newerdata |
|
712 | 744 | else: |
|
713 | 745 | if not stderr: |
|
714 | 746 | message = _(b'exited with status %d\n') % (proc.returncode,) |
|
715 | 747 | showstderr(ui, fixctx.rev(), fixername, message) |
|
716 | 748 | checktoolfailureaction( |
|
717 | 749 | ui, |
|
718 | 750 | _(b'no fixes will be applied'), |
|
719 | 751 | hint=_( |
|
720 | 752 | b'use --config fix.failure=continue to apply any ' |
|
721 | 753 | b'successful fixes anyway' |
|
722 | 754 | ), |
|
723 | 755 | ) |
|
724 | 756 | return metadata, newdata |
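
For orientation, a minimal sketch of a tool satisfying this contract (a
hypothetical standalone script, not part of the extension): it reads the
file content on stdin, writes the fixed content to stdout, and must exit 0
for its output to be used; when the :metadata suboption is set, stdout
starts with a JSON blob followed by a single NUL byte:

    #!/usr/bin/env python3
    # upperfix.py -- hypothetical fixer that upper-cases its input.
    import json
    import sys

    data = sys.stdin.buffer.read()
    fixed = data.upper()
    # Metadata mode: JSON, then b'\0', then the fixed content.
    sys.stdout.buffer.write(json.dumps({'changed': fixed != data}).encode())
    sys.stdout.buffer.write(b'\0')
    sys.stdout.buffer.write(fixed)
    sys.exit(0)  # a non-zero status would make fix discard this output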
|
725 | 757 | |
|
726 | 758 | |
|
727 | 759 | def showstderr(ui, rev, fixername, stderr): |
|
728 | 760 | """Writes the lines of the stderr string as warnings on the ui |
|
729 | 761 | |
|
730 | 762 | Uses the revision number and fixername to give more context to each line of |
|
731 | 763 | the error message. Doesn't include file names, since those take up a lot of |
|
732 | 764 | space and would tend to be included in the error message if they were |
|
733 | 765 | relevant. |
|
734 | 766 | """ |
|
735 | 767 | for line in re.split(b'[\r\n]+', stderr): |
|
736 | 768 | if line: |
|
737 | 769 | ui.warn(b'[') |
|
738 | 770 | if rev is None: |
|
739 | 771 | ui.warn(_(b'wdir'), label=b'evolve.rev') |
|
740 | 772 | else: |
|
741 | 773 | ui.warn(b'%d' % rev, label=b'evolve.rev') |
|
742 | 774 | ui.warn(b'] %s: %s\n' % (fixername, line)) |
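
The resulting warning format, shown for a hypothetical fixer named
clang-format emitting one stderr line against rev 5 and against the
working copy:

    [5] clang-format: error text from the tool
    [wdir] clang-format: error text from the tool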
|
743 | 775 | |
|
744 | 776 | |
|
745 | 777 | def writeworkingdir(repo, ctx, filedata, replacements): |
|
746 | 778 | """Write new content to the working copy and check out the new p1 if any |
|
747 | 779 | |
|
748 | 780 | We check out a new revision if and only if we fixed something in both the |
|
749 | 781 | working directory and its parent revision. This avoids the need for a full |
|
750 | 782 | update/merge, and means that the working directory simply isn't affected |
|
751 | 783 | unless the --working-dir flag is given. |
|
752 | 784 | |
|
753 | 785 | Directly updates the dirstate for the affected files. |
|
754 | 786 | """ |
|
755 | 787 | for path, data in pycompat.iteritems(filedata): |
|
756 | 788 | fctx = ctx[path] |
|
757 | 789 | fctx.write(data, fctx.flags()) |
|
758 | 790 | |
|
759 | 791 | oldp1 = repo.dirstate.p1() |
|
760 | 792 | newp1 = replacements.get(oldp1, oldp1) |
|
761 | 793 | if newp1 != oldp1: |
|
762 | 794 | assert repo.dirstate.p2() == nullid |
|
763 | 795 | with repo.dirstate.parentchange(): |
|
764 | 796 | scmutil.movedirstate(repo, repo[newp1]) |
|
765 | 797 | |
|
766 | 798 | |
|
767 | 799 | def replacerev(ui, repo, ctx, filedata, replacements): |
|
768 | 800 | """Commit a new revision like the given one, but with file content changes |
|
769 | 801 | |
|
770 | 802 | "ctx" is the original revision to be replaced by a modified one. |
|
771 | 803 | |
|
772 | 804 | "filedata" is a dict that maps paths to their new file content. All other |
|
773 | 805 | paths will be recreated from the original revision without changes. |
|
774 | 806 | "filedata" may contain paths that didn't exist in the original revision; |
|
775 | 807 | they will be added. |
|
776 | 808 | |
|
777 | 809 | "replacements" is a dict that maps a single node to a single node, and it is |
|
778 | 810 | updated to indicate the original revision is replaced by the newly created |
|
779 | 811 | one. No entry is added if the replacement's node already exists. |
|
780 | 812 | |
|
781 | 813 | The new revision has the same parents as the old one, unless those parents |
|
782 | 814 | have already been replaced, in which case those replacements are the parents |
|
783 | 815 | of this new revision. Thus, if revisions are replaced in topological order, |
|
784 | 816 | there is no need to rebase them into the original topology later. |
|
785 | 817 | """ |
|
786 | 818 | |
|
787 | 819 | p1rev, p2rev = repo.changelog.parentrevs(ctx.rev()) |
|
788 | 820 | p1ctx, p2ctx = repo[p1rev], repo[p2rev] |
|
789 | 821 | newp1node = replacements.get(p1ctx.node(), p1ctx.node()) |
|
790 | 822 | newp2node = replacements.get(p2ctx.node(), p2ctx.node()) |
|
791 | 823 | |
|
792 | 824 | # We don't want to create a revision that has no changes from the original, |
|
793 | 825 | # but we should if the original revision's parent has been replaced. |
|
794 | 826 | # Otherwise, we would produce an orphan that needs no actual human |
|
795 | 827 | # intervention to evolve. We can't rely on commit() to avoid creating the |
|
796 | 828 | # un-needed revision because the extra field added below produces a new hash |
|
797 | 829 | # regardless of file content changes. |
|
798 | 830 | if ( |
|
799 | 831 | not filedata |
|
800 | 832 | and p1ctx.node() not in replacements |
|
801 | 833 | and p2ctx.node() not in replacements |
|
802 | 834 | ): |
|
803 | 835 | return |
|
804 | 836 | |
|
805 | 837 | extra = ctx.extra().copy() |
|
806 | 838 | extra[b'fix_source'] = ctx.hex() |
|
807 | 839 | |
|
808 | 840 | wctx = context.overlayworkingctx(repo) |
|
809 | 841 | wctx.setbase(repo[newp1node]) |
|
810 | 842 | merge.revert_to(ctx, wc=wctx) |
|
811 | 843 | copies.graftcopies(wctx, ctx, ctx.p1()) |
|
812 | 844 | |
|
813 | 845 | for path in filedata.keys(): |
|
814 | 846 | fctx = ctx[path] |
|
815 | 847 | copysource = fctx.copysource() |
|
816 | 848 | wctx.write(path, filedata[path], flags=fctx.flags()) |
|
817 | 849 | if copysource: |
|
818 | 850 | wctx.markcopied(path, copysource) |
|
819 | 851 | |
|
820 | 852 | desc = rewriteutil.update_hash_refs( |
|
821 | 853 | repo, |
|
822 | 854 | ctx.description(), |
|
823 | 855 | {oldnode: [newnode] for oldnode, newnode in replacements.items()}, |
|
824 | 856 | ) |
|
825 | 857 | |
|
826 | 858 | memctx = wctx.tomemctx( |
|
827 | 859 | text=desc, |
|
828 | 860 | branch=ctx.branch(), |
|
829 | 861 | extra=extra, |
|
830 | 862 | date=ctx.date(), |
|
831 | 863 | parents=(newp1node, newp2node), |
|
832 | 864 | user=ctx.user(), |
|
833 | 865 | ) |
|
834 | 866 | |
|
835 | 867 | sucnode = memctx.commit() |
|
836 | 868 | prenode = ctx.node() |
|
837 | 869 | if prenode == sucnode: |
|
838 | 870 | ui.debug(b'node %s already existed\n' % (ctx.hex())) |
|
839 | 871 | else: |
|
840 | 872 | replacements[ctx.node()] = sucnode |
|
841 | 873 | |
|
842 | 874 | |
|
843 | 875 | def getfixers(ui): |
|
844 | 876 | """Returns a map of configured fixer tools indexed by their names |
|
845 | 877 | |
|
846 | 878 | Each value is a Fixer object with methods that implement the behavior of the |
|
847 | 879 | fixer's config suboptions. Does not validate the config values. |
|
848 | 880 | """ |
|
849 | 881 | fixers = {} |
|
850 | 882 | for name in fixernames(ui): |
|
851 | 883 | enabled = ui.configbool(b'fix', name + b':enabled') |
|
852 | 884 | command = ui.config(b'fix', name + b':command') |
|
853 | 885 | pattern = ui.config(b'fix', name + b':pattern') |
|
854 | 886 | linerange = ui.config(b'fix', name + b':linerange') |
|
855 | 887 | priority = ui.configint(b'fix', name + b':priority') |
|
856 | 888 | metadata = ui.configbool(b'fix', name + b':metadata') |
|
857 | 889 | skipclean = ui.configbool(b'fix', name + b':skipclean') |
|
858 | 890 | # Don't use a fixer if it has no pattern configured. It would be |
|
859 | 891 | # dangerous to let it affect all files. It would be pointless to let it |
|
860 | 892 | # affect no files. There is no reasonable subset of files to use as the |
|
861 | 893 | # default. |
|
862 | 894 | if command is None: |
|
863 | 895 | ui.warn( |
|
864 | 896 | _(b'fixer tool has no command configuration: %s\n') % (name,) |
|
865 | 897 | ) |
|
866 | 898 | elif pattern is None: |
|
867 | 899 | ui.warn( |
|
868 | 900 | _(b'fixer tool has no pattern configuration: %s\n') % (name,) |
|
869 | 901 | ) |
|
870 | 902 | elif not enabled: |
|
871 | 903 | ui.debug(b'ignoring disabled fixer tool: %s\n' % (name,)) |
|
872 | 904 | else: |
|
873 | 905 | fixers[name] = Fixer( |
|
874 | 906 | command, pattern, linerange, priority, metadata, skipclean |
|
875 | 907 | ) |
|
876 | 908 | return collections.OrderedDict( |
|
877 | 909 | sorted(fixers.items(), key=lambda item: item[1]._priority, reverse=True) |
|
878 | 910 | ) |
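
For reference, a hypothetical [fix] configuration exercising the
suboptions parsed above (tool name and values invented; the template
variables are the ones expanded by Fixer.command further down):

    [fix]
    mytool:command = mytool --stdin {rootpath}
    mytool:pattern = set:**.txt
    mytool:linerange = --lines={first}-{last}
    mytool:priority = 10
    mytool:enabled = true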
|
879 | 911 | |
|
880 | 912 | |
|
881 | 913 | def fixernames(ui): |
|
882 | 914 | """Returns the names of [fix] config options that have suboptions""" |
|
883 | 915 | names = set() |
|
884 | 916 | for k, v in ui.configitems(b'fix'): |
|
885 | 917 | if b':' in k: |
|
886 | 918 | names.add(k.split(b':', 1)[0]) |
|
887 | 919 | return names |
|
888 | 920 | |
|
889 | 921 | |
|
890 | 922 | class Fixer(object): |
|
891 | 923 | """Wraps the raw config values for a fixer with methods""" |
|
892 | 924 | |
|
893 | 925 | def __init__( |
|
894 | 926 | self, command, pattern, linerange, priority, metadata, skipclean |
|
895 | 927 | ): |
|
896 | 928 | self._command = command |
|
897 | 929 | self._pattern = pattern |
|
898 | 930 | self._linerange = linerange |
|
899 | 931 | self._priority = priority |
|
900 | 932 | self._metadata = metadata |
|
901 | 933 | self._skipclean = skipclean |
|
902 | 934 | |
|
903 | 935 | def affects(self, opts, fixctx, path): |
|
904 | 936 | """Should this fixer run on the file at the given path and context?""" |
|
905 | 937 | repo = fixctx.repo() |
|
906 | 938 | matcher = matchmod.match( |
|
907 | 939 | repo.root, repo.root, [self._pattern], ctx=fixctx |
|
908 | 940 | ) |
|
909 | 941 | return matcher(path) |
|
910 | 942 | |
|
911 | 943 | def shouldoutputmetadata(self): |
|
912 | 944 | """Should the stdout of this fixer start with JSON and a null byte?""" |
|
913 | 945 | return self._metadata |
|
914 | 946 | |
|
915 | 947 | def command(self, ui, path, ranges): |
|
916 | 948 | """A shell command to use to invoke this fixer on the given file/lines |
|
917 | 949 | |
|
918 | 950 | May return None if there is no appropriate command to run for the given |
|
919 | 951 | parameters. |
|
920 | 952 | """ |
|
921 | 953 | expand = cmdutil.rendercommandtemplate |
|
922 | 954 | parts = [ |
|
923 | 955 | expand( |
|
924 | 956 | ui, |
|
925 | 957 | self._command, |
|
926 | 958 | {b'rootpath': path, b'basename': os.path.basename(path)}, |
|
927 | 959 | ) |
|
928 | 960 | ] |
|
929 | 961 | if self._linerange: |
|
930 | 962 | if self._skipclean and not ranges: |
|
931 | 963 | # No line ranges to fix, so don't run the fixer. |
|
932 | 964 | return None |
|
933 | 965 | for first, last in ranges: |
|
934 | 966 | parts.append( |
|
935 | 967 | expand( |
|
936 | 968 | ui, self._linerange, {b'first': first, b'last': last} |
|
937 | 969 | ) |
|
938 | 970 | ) |
|
939 | 971 | return b' '.join(parts) |
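
Under the hypothetical configuration sketched after getfixers(), command()
would render, for a file dirty on lines 3-8 and 10-12 (assumed ranges):

    mytool --stdin foo/bar.txt --lines=3-8 --lines=10-12
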
@@ -1,1001 +1,1005 b'' | |||
|
1 | 1 | # __init__.py - fsmonitor initialization and overrides |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2013-2016 Facebook, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | '''Faster status operations with the Watchman file monitor (EXPERIMENTAL) |
|
9 | 9 | |
|
10 | 10 | Integrates the file-watching program Watchman with Mercurial to produce faster |
|
11 | 11 | status results. |
|
12 | 12 | |
|
13 | 13 | On a particular Linux system, for a real-world repository with over 400,000 |
|
14 | 14 | files hosted on ext4, vanilla `hg status` takes 1.3 seconds. On the same |
|
15 | 15 | system, with fsmonitor it takes about 0.3 seconds. |
|
16 | 16 | |
|
17 | 17 | fsmonitor requires no configuration -- it will tell Watchman about your |
|
18 | 18 | repository as necessary. You'll need to install Watchman from |
|
19 | 19 | https://facebook.github.io/watchman/ and make sure it is in your PATH. |
|
20 | 20 | |
|
21 | 21 | fsmonitor is incompatible with the largefiles and eol extensions, and |
|
22 | 22 | will disable itself if any of those are active. |
|
23 | 23 | |
|
24 | 24 | The following configuration options exist: |
|
25 | 25 | |
|
26 | 26 | :: |
|
27 | 27 | |
|
28 | 28 | [fsmonitor] |
|
29 | 29 | mode = {off, on, paranoid} |
|
30 | 30 | |
|
31 | 31 | When `mode = off`, fsmonitor will disable itself (similar to not loading the |
|
32 | 32 | extension at all). When `mode = on`, fsmonitor will be enabled (the default). |
|
33 | 33 | When `mode = paranoid`, fsmonitor will query both Watchman and the filesystem, |
|
34 | 34 | and ensure that the results are consistent. |
|
35 | 35 | |
|
36 | 36 | :: |
|
37 | 37 | |
|
38 | 38 | [fsmonitor] |
|
39 | 39 | timeout = (float) |
|
40 | 40 | |
|
41 | 41 | A value, in seconds, that determines how long fsmonitor will wait for Watchman |
|
42 | 42 | to return results. Defaults to `2.0`. |
|
43 | 43 | |
|
44 | 44 | :: |
|
45 | 45 | |
|
46 | 46 | [fsmonitor] |
|
47 | 47 | blacklistusers = (list of userids) |
|
48 | 48 | |
|
49 | 49 | A list of usernames for which fsmonitor will disable itself altogether. |
|
50 | 50 | |
|
51 | 51 | :: |
|
52 | 52 | |
|
53 | 53 | [fsmonitor] |
|
54 | 54 | walk_on_invalidate = (boolean) |
|
55 | 55 | |
|
56 | 56 | Whether or not to walk the whole repo ourselves when our cached state has been |
|
57 | 57 | invalidated, for example when Watchman has been restarted or .hgignore rules |
|
58 | 58 | have been changed. Walking the repo in that case can result in competing for |
|
59 | 59 | I/O with Watchman. For large repos it is recommended to set this value to |
|
60 | 60 | false. You may wish to set this to true if you have a very fast filesystem |
|
61 | 61 | that can outpace the IPC overhead of getting the result data for the full repo |
|
62 | 62 | from Watchman. Defaults to false. |
|
63 | 63 | |
|
64 | 64 | :: |
|
65 | 65 | |
|
66 | 66 | [fsmonitor] |
|
67 | 67 | warn_when_unused = (boolean) |
|
68 | 68 | |
|
69 | 69 | Whether to print a warning during certain operations when fsmonitor would be |
|
70 | 70 | beneficial to performance but isn't enabled. |
|
71 | 71 | |
|
72 | 72 | :: |
|
73 | 73 | |
|
74 | 74 | [fsmonitor] |
|
75 | 75 | warn_update_file_count = (integer) |
|
76 | 76 | # or when mercurial is built with rust support |
|
77 | 77 | warn_update_file_count_rust = (integer) |
|
78 | 78 | |
|
79 | 79 | If ``warn_when_unused`` is set and fsmonitor isn't enabled, a warning will |
|
80 | 80 | be printed during working directory updates if this many files will be |
|
81 | 81 | created. |
|
82 | 82 | ''' |
|
83 | 83 | |
|
84 | 84 | # Platforms Supported |
|
85 | 85 | # =================== |
|
86 | 86 | # |
|
87 | 87 | # **Linux:** *Stable*. Watchman and fsmonitor are both known to work reliably, |
|
88 | 88 | # even under severe loads. |
|
89 | 89 | # |
|
90 | 90 | # **Mac OS X:** *Stable*. The Mercurial test suite passes with fsmonitor |
|
91 | 91 | # turned on, on case-insensitive HFS+. There has been a reasonable amount of |
|
92 | 92 | # user testing under normal loads. |
|
93 | 93 | # |
|
94 | 94 | # **Solaris, BSD:** *Alpha*. watchman and fsmonitor are believed to work, but |
|
95 | 95 | # very little testing has been done. |
|
96 | 96 | # |
|
97 | 97 | # **Windows:** *Alpha*. Not in a release version of watchman or fsmonitor yet. |
|
98 | 98 | # |
|
99 | 99 | # Known Issues |
|
100 | 100 | # ============ |
|
101 | 101 | # |
|
102 | 102 | # * fsmonitor will disable itself if any of the following extensions are |
|
103 | 103 | # enabled: largefiles, inotify, eol; or if the repository has subrepos. |
|
104 | 104 | # * fsmonitor will produce incorrect results if nested repos that are not |
|
105 | 105 | # subrepos exist. *Workaround*: add nested repo paths to your `.hgignore`. |
|
106 | 106 | # |
|
107 | 107 | # The issues related to nested repos and subrepos are probably not fundamental |
|
108 | 108 | # ones. Patches to fix them are welcome. |
|
109 | 109 | |
|
110 | 110 | from __future__ import absolute_import |
|
111 | 111 | |
|
112 | 112 | import codecs |
|
113 | 113 | import os |
|
114 | 114 | import stat |
|
115 | 115 | import sys |
|
116 | 116 | import tempfile |
|
117 | 117 | import weakref |
|
118 | 118 | |
|
119 | 119 | from mercurial.i18n import _ |
|
120 | 120 | from mercurial.node import hex |
|
121 | 121 | from mercurial.pycompat import open |
|
122 | 122 | from mercurial import ( |
|
123 | 123 | context, |
|
124 | 124 | encoding, |
|
125 | 125 | error, |
|
126 | 126 | extensions, |
|
127 | 127 | localrepo, |
|
128 | 128 | merge, |
|
129 | 129 | pathutil, |
|
130 | 130 | pycompat, |
|
131 | 131 | registrar, |
|
132 | 132 | scmutil, |
|
133 | 133 | util, |
|
134 | 134 | ) |
|
135 | 135 | from mercurial import match as matchmod |
|
136 | 136 | from mercurial.utils import ( |
|
137 | 137 | hashutil, |
|
138 | 138 | stringutil, |
|
139 | 139 | ) |
|
140 | 140 | |
|
141 | 141 | from . import ( |
|
142 | 142 | pywatchman, |
|
143 | 143 | state, |
|
144 | 144 | watchmanclient, |
|
145 | 145 | ) |
|
146 | 146 | |
|
147 | 147 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
148 | 148 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
149 | 149 | # be specifying the version(s) of Mercurial they are tested with, or |
|
150 | 150 | # leave the attribute unspecified. |
|
151 | 151 | testedwith = b'ships-with-hg-core' |
|
152 | 152 | |
|
153 | 153 | configtable = {} |
|
154 | 154 | configitem = registrar.configitem(configtable) |
|
155 | 155 | |
|
156 | 156 | configitem( |
|
157 | 157 | b'fsmonitor', |
|
158 | 158 | b'mode', |
|
159 | 159 | default=b'on', |
|
160 | 160 | ) |
|
161 | 161 | configitem( |
|
162 | 162 | b'fsmonitor', |
|
163 | 163 | b'walk_on_invalidate', |
|
164 | 164 | default=False, |
|
165 | 165 | ) |
|
166 | 166 | configitem( |
|
167 | 167 | b'fsmonitor', |
|
168 | 168 | b'timeout', |
|
169 | 169 | default=b'2', |
|
170 | 170 | ) |
|
171 | 171 | configitem( |
|
172 | 172 | b'fsmonitor', |
|
173 | 173 | b'blacklistusers', |
|
174 | 174 | default=list, |
|
175 | 175 | ) |
|
176 | 176 | configitem( |
|
177 | 177 | b'fsmonitor', |
|
178 | 178 | b'watchman_exe', |
|
179 | 179 | default=b'watchman', |
|
180 | 180 | ) |
|
181 | 181 | configitem( |
|
182 | 182 | b'fsmonitor', |
|
183 | 183 | b'verbose', |
|
184 | 184 | default=True, |
|
185 | 185 | experimental=True, |
|
186 | 186 | ) |
|
187 | 187 | configitem( |
|
188 | 188 | b'experimental', |
|
189 | 189 | b'fsmonitor.transaction_notify', |
|
190 | 190 | default=False, |
|
191 | 191 | ) |
|
192 | 192 | |
|
193 | 193 | # This extension is incompatible with the following blacklisted extensions |
|
194 | 194 | # and will disable itself when encountering one of these: |
|
195 | 195 | _blacklist = [b'largefiles', b'eol'] |
|
196 | 196 | |
|
197 | 197 | |
|
198 | 198 | def debuginstall(ui, fm): |
|
199 | 199 | fm.write( |
|
200 | 200 | b"fsmonitor-watchman", |
|
201 | 201 | _(b"fsmonitor checking for watchman binary... (%s)\n"), |
|
202 | 202 | ui.configpath(b"fsmonitor", b"watchman_exe"), |
|
203 | 203 | ) |
|
204 | 204 | root = tempfile.mkdtemp() |
|
205 | 205 | c = watchmanclient.client(ui, root) |
|
206 | 206 | err = None |
|
207 | 207 | try: |
|
208 | 208 | v = c.command(b"version") |
|
209 | 209 | fm.write( |
|
210 | 210 | b"fsmonitor-watchman-version", |
|
211 | 211 | _(b" watchman binary version %s\n"), |
|
212 | 212 | pycompat.bytestr(v["version"]), |
|
213 | 213 | ) |
|
214 | 214 | except watchmanclient.Unavailable as e: |
|
215 | 215 | err = stringutil.forcebytestr(e) |
|
216 | 216 | fm.condwrite( |
|
217 | 217 | err, |
|
218 | 218 | b"fsmonitor-watchman-error", |
|
219 | 219 | _(b" watchman binary missing or broken: %s\n"), |
|
220 | 220 | err, |
|
221 | 221 | ) |
|
222 | 222 | return 1 if err else 0 |
|
223 | 223 | |
|
224 | 224 | |
|
225 | 225 | def _handleunavailable(ui, state, ex): |
|
226 | 226 | """Exception handler for Watchman interaction exceptions""" |
|
227 | 227 | if isinstance(ex, watchmanclient.Unavailable): |
|
228 | 228 | # experimental config: fsmonitor.verbose |
|
229 | 229 | if ex.warn and ui.configbool(b'fsmonitor', b'verbose'): |
|
230 | 230 | if b'illegal_fstypes' not in stringutil.forcebytestr(ex): |
|
231 | 231 | ui.warn(stringutil.forcebytestr(ex) + b'\n') |
|
232 | 232 | if ex.invalidate: |
|
233 | 233 | state.invalidate() |
|
234 | 234 | # experimental config: fsmonitor.verbose |
|
235 | 235 | if ui.configbool(b'fsmonitor', b'verbose'): |
|
236 | 236 | ui.log( |
|
237 | 237 | b'fsmonitor', |
|
238 | 238 | b'Watchman unavailable: %s\n', |
|
239 | 239 | stringutil.forcebytestr(ex.msg), |
|
240 | 240 | ) |
|
241 | 241 | else: |
|
242 | 242 | ui.log( |
|
243 | 243 | b'fsmonitor', |
|
244 | 244 | b'Watchman exception: %s\n', |
|
245 | 245 | stringutil.forcebytestr(ex), |
|
246 | 246 | ) |
|
247 | 247 | |
|
248 | 248 | |
|
249 | 249 | def _hashignore(ignore): |
|
250 | 250 | """Calculate hash for ignore patterns and filenames |
|
251 | 251 | |
|
252 | 252 | If this information changes between Mercurial invocations, we can't |
|
253 | 253 | rely on Watchman information anymore and have to re-scan the working |
|
254 | 254 | copy. |
|
255 | 255 | |
|
256 | 256 | """ |
|
257 | 257 | sha1 = hashutil.sha1() |
|
258 | 258 | sha1.update(pycompat.byterepr(ignore)) |
|
259 | 259 | return pycompat.sysbytes(sha1.hexdigest()) |
|
260 | 260 | |
|
261 | 261 | |
|
262 | 262 | _watchmanencoding = pywatchman.encoding.get_local_encoding() |
|
263 | 263 | _fsencoding = sys.getfilesystemencoding() or sys.getdefaultencoding() |
|
264 | 264 | _fixencoding = codecs.lookup(_watchmanencoding) != codecs.lookup(_fsencoding) |
|
265 | 265 | |
|
266 | 266 | |
|
267 | 267 | def _watchmantofsencoding(path): |
|
268 | 268 | """Fix path to match watchman and local filesystem encoding |
|
269 | 269 | |
|
270 | 270 | watchman's paths encoding can differ from filesystem encoding. For example, |
|
271 | 271 | on Windows, it's always utf-8. |
|
272 | 272 | """ |
|
273 | 273 | try: |
|
274 | 274 | decoded = path.decode(_watchmanencoding) |
|
275 | 275 | except UnicodeDecodeError as e: |
|
276 | 276 | raise error.Abort( |
|
277 | 277 | stringutil.forcebytestr(e), hint=b'watchman encoding error' |
|
278 | 278 | ) |
|
279 | 279 | |
|
280 | 280 | try: |
|
281 | 281 | encoded = decoded.encode(_fsencoding, 'strict') |
|
282 | 282 | except UnicodeEncodeError as e: |
|
283 | 283 | raise error.Abort(stringutil.forcebytestr(e)) |
|
284 | 284 | |
|
285 | 285 | return encoded |
|
286 | 286 | |
|
287 | 287 | |
|
288 | 288 | def overridewalk(orig, self, match, subrepos, unknown, ignored, full=True): |
|
289 | 289 | """Replacement for dirstate.walk, hooking into Watchman. |
|
290 | 290 | |
|
291 | 291 | Whenever full is False, ignored is False, and the Watchman client is |
|
292 | 292 | available, use Watchman combined with saved state to possibly return only a |
|
293 | 293 | subset of files.""" |
|
294 | 294 | |
|
295 | 295 | def bail(reason): |
|
296 | 296 | self._ui.debug(b'fsmonitor: fallback to core status, %s\n' % reason) |
|
297 | 297 | return orig(match, subrepos, unknown, ignored, full=True) |
|
298 | 298 | |
|
299 | 299 | if full: |
|
300 | 300 | return bail(b'full rewalk requested') |
|
301 | 301 | if ignored: |
|
302 | 302 | return bail(b'listing ignored files') |
|
303 | 303 | if not self._watchmanclient.available(): |
|
304 | 304 | return bail(b'client unavailable') |
|
305 | 305 | state = self._fsmonitorstate |
|
306 | 306 | clock, ignorehash, notefiles = state.get() |
|
307 | 307 | if not clock: |
|
308 | 308 | if state.walk_on_invalidate: |
|
309 | 309 | return bail(b'no clock') |
|
310 | 310 | # Initial NULL clock value, see |
|
311 | 311 | # https://facebook.github.io/watchman/docs/clockspec.html |
|
312 | 312 | clock = b'c:0:0' |
|
313 | 313 | notefiles = [] |
|
314 | 314 | |
|
315 | 315 | ignore = self._ignore |
|
316 | 316 | dirignore = self._dirignore |
|
317 | 317 | if unknown: |
|
318 | 318 | if _hashignore(ignore) != ignorehash and clock != b'c:0:0': |
|
319 | 319 | # ignore list changed -- can't rely on Watchman state any more |
|
320 | 320 | if state.walk_on_invalidate: |
|
321 | 321 | return bail(b'ignore rules changed') |
|
322 | 322 | notefiles = [] |
|
323 | 323 | clock = b'c:0:0' |
|
324 | 324 | else: |
|
325 | 325 | # always ignore |
|
326 | 326 | ignore = util.always |
|
327 | 327 | dirignore = util.always |
|
328 | 328 | |
|
329 | 329 | matchfn = match.matchfn |
|
330 | 330 | matchalways = match.always() |
|
331 | 331 | dmap = self._map |
|
332 | 332 | if util.safehasattr(dmap, b'_map'): |
|
333 | 333 | # for better performance, directly access the inner dirstate map if the |
|
334 | 334 | # standard dirstate implementation is in use. |
|
335 | 335 | dmap = dmap._map |
|
336 | nonnormalset = self._map.nonnormalset | 

336 | nonnormalset = { | 
|
337 | f | |
|
338 | for f, e in self._map.items() | |
|
339 | if e.v1_state() != "n" or e.v1_mtime() == -1 | |
|
340 | } | |
|
337 | 341 | |
|
338 | 342 | copymap = self._map.copymap |
|
339 | 343 | getkind = stat.S_IFMT |
|
340 | 344 | dirkind = stat.S_IFDIR |
|
341 | 345 | regkind = stat.S_IFREG |
|
342 | 346 | lnkkind = stat.S_IFLNK |
|
343 | 347 | join = self._join |
|
344 | 348 | normcase = util.normcase |
|
345 | 349 | fresh_instance = False |
|
346 | 350 | |
|
347 | 351 | exact = skipstep3 = False |
|
348 | 352 | if match.isexact(): # match.exact |
|
349 | 353 | exact = True |
|
350 | 354 | dirignore = util.always # skip step 2 |
|
351 | 355 | elif match.prefix(): # match.match, no patterns |
|
352 | 356 | skipstep3 = True |
|
353 | 357 | |
|
354 | 358 | if not exact and self._checkcase: |
|
355 | 359 | # note that even though we could receive directory entries, we're only |
|
356 | 360 | # interested in checking if a file with the same name exists. So only |
|
357 | 361 | # normalize files if possible. |
|
358 | 362 | normalize = self._normalizefile |
|
359 | 363 | skipstep3 = False |
|
360 | 364 | else: |
|
361 | 365 | normalize = None |
|
362 | 366 | |
|
363 | 367 | # step 1: find all explicit files |
|
364 | 368 | results, work, dirsnotfound = self._walkexplicit(match, subrepos) |
|
365 | 369 | |
|
366 | 370 | skipstep3 = skipstep3 and not (work or dirsnotfound) |
|
367 | 371 | work = [d for d in work if not dirignore(d[0])] |
|
368 | 372 | |
|
369 | 373 | if not work and (exact or skipstep3): |
|
370 | 374 | for s in subrepos: |
|
371 | 375 | del results[s] |
|
372 | 376 | del results[b'.hg'] |
|
373 | 377 | return results |
|
374 | 378 | |
|
375 | 379 | # step 2: query Watchman |
|
376 | 380 | try: |
|
377 | 381 | # Use the user-configured timeout for the query. |
|
378 | 382 | # Add a little slack over the top of the user query to allow for |
|
379 | 383 | # overheads while transferring the data |
|
380 | 384 | self._watchmanclient.settimeout(state.timeout + 0.1) |
|
381 | 385 | result = self._watchmanclient.command( |
|
382 | 386 | b'query', |
|
383 | 387 | { |
|
384 | 388 | b'fields': [b'mode', b'mtime', b'size', b'exists', b'name'], |
|
385 | 389 | b'since': clock, |
|
386 | 390 | b'expression': [ |
|
387 | 391 | b'not', |
|
388 | 392 | [ |
|
389 | 393 | b'anyof', |
|
390 | 394 | [b'dirname', b'.hg'], |
|
391 | 395 | [b'name', b'.hg', b'wholename'], |
|
392 | 396 | ], |
|
393 | 397 | ], |
|
394 | 398 | b'sync_timeout': int(state.timeout * 1000), |
|
395 | 399 | b'empty_on_fresh_instance': state.walk_on_invalidate, |
|
396 | 400 | }, |
|
397 | 401 | ) |
|
398 | 402 | except Exception as ex: |
|
399 | 403 | _handleunavailable(self._ui, state, ex) |
|
400 | 404 | self._watchmanclient.clearconnection() |
|
401 | 405 | return bail(b'exception during run') |
|
402 | 406 | else: |
|
403 | 407 | # We need to propagate the last observed clock up so that we |
|
404 | 408 | # can use it for our next query |
|
405 | 409 | state.setlastclock(pycompat.sysbytes(result[b'clock'])) |
|
406 | 410 | if result[b'is_fresh_instance']: |
|
407 | 411 | if state.walk_on_invalidate: |
|
408 | 412 | state.invalidate() |
|
409 | 413 | return bail(b'fresh instance') |
|
410 | 414 | fresh_instance = True |
|
411 | 415 | # Ignore any prior notable files from the state info 
|
412 | 416 | notefiles = [] |
|
413 | 417 | |
|
414 | 418 | # for file paths which require normalization and we encounter a case |
|
415 | 419 | # collision, we store our own foldmap |
|
416 | 420 | if normalize: |
|
417 | 421 | foldmap = {normcase(k): k for k in results} |
|
418 | 422 | |
|
419 | 423 | switch_slashes = pycompat.ossep == b'\\' |
|
420 | 424 | # The order of the results is, strictly speaking, undefined. |
|
421 | 425 | # For case changes on a case insensitive filesystem we may receive |
|
422 | 426 | # two entries, one with exists=True and another with exists=False. |
|
423 | 427 | # The exists=True entries in the same response should be interpreted |
|
424 | 428 | # as being happens-after the exists=False entries due to the way that |
|
425 | 429 | # Watchman tracks files. We use this property to reconcile deletes |
|
426 | 430 | # for name case changes. |
|
427 | 431 | for entry in result[b'files']: |
|
428 | 432 | fname = entry[b'name'] |
|
429 | 433 | |
|
430 | 434 | # Watchman always give us a str. Normalize to bytes on Python 3 |
|
431 | 435 | # using Watchman's encoding, if needed. |
|
432 | 436 | if not isinstance(fname, bytes): |
|
433 | 437 | fname = fname.encode(_watchmanencoding) |
|
434 | 438 | |
|
435 | 439 | if _fixencoding: |
|
436 | 440 | fname = _watchmantofsencoding(fname) |
|
437 | 441 | |
|
438 | 442 | if switch_slashes: |
|
439 | 443 | fname = fname.replace(b'\\', b'/') |
|
440 | 444 | if normalize: |
|
441 | 445 | normed = normcase(fname) |
|
442 | 446 | fname = normalize(fname, True, True) |
|
443 | 447 | foldmap[normed] = fname |
|
444 | 448 | fmode = entry[b'mode'] |
|
445 | 449 | fexists = entry[b'exists'] |
|
446 | 450 | kind = getkind(fmode) |
|
447 | 451 | |
|
448 | 452 | if b'/.hg/' in fname or fname.endswith(b'/.hg'): |
|
449 | 453 | return bail(b'nested-repo-detected') |
|
450 | 454 | |
|
451 | 455 | if not fexists: |
|
452 | 456 | # if marked as deleted and we don't already have a change |
|
453 | 457 | # record, mark it as deleted. If we already have an entry |
|
454 | 458 | # for fname then it was either part of walkexplicit or was |
|
455 | 459 | # an earlier result that was a case change |
|
456 | 460 | if ( |
|
457 | 461 | fname not in results |
|
458 | 462 | and fname in dmap |
|
459 | 463 | and (matchalways or matchfn(fname)) |
|
460 | 464 | ): |
|
461 | 465 | results[fname] = None |
|
462 | 466 | elif kind == dirkind: |
|
463 | 467 | if fname in dmap and (matchalways or matchfn(fname)): |
|
464 | 468 | results[fname] = None |
|
465 | 469 | elif kind == regkind or kind == lnkkind: |
|
466 | 470 | if fname in dmap: |
|
467 | 471 | if matchalways or matchfn(fname): |
|
468 | 472 | results[fname] = entry |
|
469 | 473 | elif (matchalways or matchfn(fname)) and not ignore(fname): |
|
470 | 474 | results[fname] = entry |
|
471 | 475 | elif fname in dmap and (matchalways or matchfn(fname)): |
|
472 | 476 | results[fname] = None |
|
473 | 477 | |
|
474 | 478 | # step 3: query notable files we don't already know about |
|
475 | 479 | # XXX try not to iterate over the entire dmap |
|
476 | 480 | if normalize: |
|
477 | 481 | # any notable files that have changed case will already be handled |
|
478 | 482 | # above, so just check membership in the foldmap |
|
479 | 483 | notefiles = { |
|
480 | 484 | normalize(f, True, True) |
|
481 | 485 | for f in notefiles |
|
482 | 486 | if normcase(f) not in foldmap |
|
483 | 487 | } |
|
484 | 488 | visit = { |
|
485 | 489 | f |
|
486 | 490 | for f in notefiles |
|
487 | 491 | if (f not in results and matchfn(f) and (f in dmap or not ignore(f))) |
|
488 | 492 | } |
|
489 | 493 | |
|
490 | 494 | if not fresh_instance: |
|
491 | 495 | if matchalways: |
|
492 | 496 | visit.update(f for f in nonnormalset if f not in results) |
|
493 | 497 | visit.update(f for f in copymap if f not in results) |
|
494 | 498 | else: |
|
495 | 499 | visit.update( |
|
496 | 500 | f for f in nonnormalset if f not in results and matchfn(f) |
|
497 | 501 | ) |
|
498 | 502 | visit.update(f for f in copymap if f not in results and matchfn(f)) |
|
499 | 503 | else: |
|
500 | 504 | if matchalways: |
|
501 | 505 | visit.update( |
|
502 | 506 | f for f, st in pycompat.iteritems(dmap) if f not in results |
|
503 | 507 | ) |
|
504 | 508 | visit.update(f for f in copymap if f not in results) |
|
505 | 509 | else: |
|
506 | 510 | visit.update( |
|
507 | 511 | f |
|
508 | 512 | for f, st in pycompat.iteritems(dmap) |
|
509 | 513 | if f not in results and matchfn(f) |
|
510 | 514 | ) |
|
511 | 515 | visit.update(f for f in copymap if f not in results and matchfn(f)) |
|
512 | 516 | |
|
513 | 517 | audit = pathutil.pathauditor(self._root, cached=True).check |
|
514 | 518 | auditpass = [f for f in visit if audit(f)] |
|
515 | 519 | auditpass.sort() |
|
516 | 520 | auditfail = visit.difference(auditpass) |
|
517 | 521 | for f in auditfail: |
|
518 | 522 | results[f] = None |
|
519 | 523 | |
|
520 | 524 | nf = iter(auditpass) |
|
521 | 525 | for st in util.statfiles([join(f) for f in auditpass]): |
|
522 | 526 | f = next(nf) |
|
523 | 527 | if st or f in dmap: |
|
524 | 528 | results[f] = st |
|
525 | 529 | |
|
526 | 530 | for s in subrepos: |
|
527 | 531 | del results[s] |
|
528 | 532 | del results[b'.hg'] |
|
529 | 533 | return results |
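
Outside Mercurial, essentially the same "since" query from step 2 can be
issued directly through the bundled pywatchman client; a rough sketch
(repository path, timeout, and clock value are assumed, and the directory
is assumed to be watched already, e.g. via 'watch-project'):

    import pywatchman

    client = pywatchman.client(timeout=2.0)
    try:
        result = client.query('query', '/path/to/repo', {
            'fields': ['mode', 'mtime', 'size', 'exists', 'name'],
            'since': 'c:0:0',  # NULL clock: report everything
            'expression': ['not', ['anyof',
                                   ['dirname', '.hg'],
                                   ['name', '.hg', 'wholename']]],
        })
        print(result['clock'], len(result['files']))
    finally:
        client.close()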
|
530 | 534 | |
|
531 | 535 | |
|
532 | 536 | def overridestatus( |
|
533 | 537 | orig, |
|
534 | 538 | self, |
|
535 | 539 | node1=b'.', |
|
536 | 540 | node2=None, |
|
537 | 541 | match=None, |
|
538 | 542 | ignored=False, |
|
539 | 543 | clean=False, |
|
540 | 544 | unknown=False, |
|
541 | 545 | listsubrepos=False, |
|
542 | 546 | ): |
|
543 | 547 | listignored = ignored |
|
544 | 548 | listclean = clean |
|
545 | 549 | listunknown = unknown |
|
546 | 550 | |
|
547 | 551 | def _cmpsets(l1, l2): |
|
548 | 552 | try: |
|
549 | 553 | if b'FSMONITOR_LOG_FILE' in encoding.environ: |
|
550 | 554 | fn = encoding.environ[b'FSMONITOR_LOG_FILE'] |
|
551 | 555 | f = open(fn, b'wb') |
|
552 | 556 | else: |
|
553 | 557 | fn = b'fsmonitorfail.log' |
|
554 | 558 | f = self.vfs.open(fn, b'wb') |
|
555 | 559 | except (IOError, OSError): |
|
556 | 560 | self.ui.warn(_(b'warning: unable to write to %s\n') % fn) |
|
557 | 561 | return |
|
558 | 562 | |
|
559 | 563 | try: |
|
560 | 564 | for i, (s1, s2) in enumerate(zip(l1, l2)): |
|
561 | 565 | if set(s1) != set(s2): |
|
562 | 566 | f.write(b'sets at position %d are unequal\n' % i) |
|
563 | f.write(b'watchman returned: %s\n' % s1) | 

564 | f.write(b'stat returned: %s\n' % s2) | 
|
567 | f.write(b'watchman returned: %r\n' % s1) | |
|
568 | f.write(b'stat returned: %r\n' % s2) | |
|
565 | 569 | finally: |
|
566 | 570 | f.close() |
|
567 | 571 | |
|
568 | 572 | if isinstance(node1, context.changectx): |
|
569 | 573 | ctx1 = node1 |
|
570 | 574 | else: |
|
571 | 575 | ctx1 = self[node1] |
|
572 | 576 | if isinstance(node2, context.changectx): |
|
573 | 577 | ctx2 = node2 |
|
574 | 578 | else: |
|
575 | 579 | ctx2 = self[node2] |
|
576 | 580 | |
|
577 | 581 | working = ctx2.rev() is None |
|
578 | 582 | parentworking = working and ctx1 == self[b'.'] |
|
579 | 583 | match = match or matchmod.always() |
|
580 | 584 | |
|
581 | 585 | # Maybe we can use this opportunity to update Watchman's state. |
|
582 | 586 | # Mercurial uses workingcommitctx and/or memctx to represent the part of |
|
583 | 587 | # the workingctx that is to be committed. So don't update the state in |
|
584 | 588 | # that case. |
|
585 | 589 | # HG_PENDING is set in the environment when the dirstate is being updated |
|
586 | 590 | # in the middle of a transaction; we must not update our state in that |
|
587 | 591 | # case, or we risk forgetting about changes in the working copy. |
|
588 | 592 | updatestate = ( |
|
589 | 593 | parentworking |
|
590 | 594 | and match.always() |
|
591 | 595 | and not isinstance(ctx2, (context.workingcommitctx, context.memctx)) |
|
592 | 596 | and b'HG_PENDING' not in encoding.environ |
|
593 | 597 | ) |
|
594 | 598 | |
|
595 | 599 | try: |
|
596 | 600 | if self._fsmonitorstate.walk_on_invalidate: |
|
597 | 601 | # Use a short timeout to query the current clock. If that |
|
598 | 602 | # takes too long then we assume that the service will be slow |
|
599 | 603 | # to answer our query. |
|
600 | 604 | # walk_on_invalidate indicates that we prefer to walk the |
|
601 | 605 | # tree ourselves because we can ignore portions that Watchman |
|
602 | 606 | # cannot and we tend to be faster in the warmer buffer cache |
|
603 | 607 | # cases. |
|
604 | 608 | self._watchmanclient.settimeout(0.1) |
|
605 | 609 | else: |
|
606 | 610 | # Give Watchman more time to potentially complete its walk |
|
607 | 611 | # and return the initial clock. In this mode we assume that |
|
608 | 612 | # the filesystem will be slower than parsing a potentially |
|
609 | 613 | # very large Watchman result set. |
|
610 | 614 | self._watchmanclient.settimeout(self._fsmonitorstate.timeout + 0.1) |
|
611 | 615 | startclock = self._watchmanclient.getcurrentclock() |
|
612 | 616 | except Exception as ex: |
|
613 | 617 | self._watchmanclient.clearconnection() |
|
614 | 618 | _handleunavailable(self.ui, self._fsmonitorstate, ex) |
|
615 | 619 | # boo, Watchman failed. bail |
|
616 | 620 | return orig( |
|
617 | 621 | node1, |
|
618 | 622 | node2, |
|
619 | 623 | match, |
|
620 | 624 | listignored, |
|
621 | 625 | listclean, |
|
622 | 626 | listunknown, |
|
623 | 627 | listsubrepos, |
|
624 | 628 | ) |
|
625 | 629 | |
|
626 | 630 | if updatestate: |
|
627 | 631 | # We need info about unknown files. This may make things slower the |
|
628 | 632 | # first time, but whatever. |
|
629 | 633 | stateunknown = True |
|
630 | 634 | else: |
|
631 | 635 | stateunknown = listunknown |
|
632 | 636 | |
|
633 | 637 | if updatestate: |
|
634 | 638 | ps = poststatus(startclock) |
|
635 | 639 | self.addpostdsstatus(ps) |
|
636 | 640 | |
|
637 | 641 | r = orig( |
|
638 | 642 | node1, node2, match, listignored, listclean, stateunknown, listsubrepos |
|
639 | 643 | ) |
|
640 | 644 | modified, added, removed, deleted, unknown, ignored, clean = r |
|
641 | 645 | |
|
642 | 646 | if not listunknown: |
|
643 | 647 | unknown = [] |
|
644 | 648 | |
|
645 | 649 | # don't do paranoid checks if we're not going to query Watchman anyway |
|
646 | 650 | full = listclean or match.traversedir is not None |
|
647 | 651 | if self._fsmonitorstate.mode == b'paranoid' and not full: |
|
648 | 652 | # run status again and fall back to the old walk this time |
|
649 | 653 | self.dirstate._fsmonitordisable = True |
|
650 | 654 | |
|
651 | 655 | # shut the UI up |
|
652 | 656 | quiet = self.ui.quiet |
|
653 | 657 | self.ui.quiet = True |
|
654 | 658 | fout, ferr = self.ui.fout, self.ui.ferr |
|
655 | 659 | self.ui.fout = self.ui.ferr = open(os.devnull, b'wb') |
|
656 | 660 | |
|
657 | 661 | try: |
|
658 | 662 | rv2 = orig( |
|
659 | 663 | node1, |
|
660 | 664 | node2, |
|
661 | 665 | match, |
|
662 | 666 | listignored, |
|
663 | 667 | listclean, |
|
664 | 668 | listunknown, |
|
665 | 669 | listsubrepos, |
|
666 | 670 | ) |
|
667 | 671 | finally: |
|
668 | 672 | self.dirstate._fsmonitordisable = False |
|
669 | 673 | self.ui.quiet = quiet |
|
670 | 674 | self.ui.fout, self.ui.ferr = fout, ferr |
|
671 | 675 | |
|
672 | 676 | # clean isn't tested since it's set to True above |
|
673 | 677 | with self.wlock(): |
|
674 | 678 | _cmpsets( |
|
675 | 679 | [modified, added, removed, deleted, unknown, ignored, clean], |
|
676 | 680 | rv2, |
|
677 | 681 | ) |
|
678 | 682 | modified, added, removed, deleted, unknown, ignored, clean = rv2 |
|
679 | 683 | |
|
680 | 684 | return scmutil.status( |
|
681 | 685 | modified, added, removed, deleted, unknown, ignored, clean |
|
682 | 686 | ) |
|
683 | 687 | |
|
684 | 688 | |
|
685 | 689 | class poststatus(object): |
|
686 | 690 | def __init__(self, startclock): |
|
687 | 691 | self._startclock = pycompat.sysbytes(startclock) |
|
688 | 692 | |
|
689 | 693 | def __call__(self, wctx, status): |
|
690 | 694 | clock = wctx.repo()._fsmonitorstate.getlastclock() or self._startclock |
|
691 | 695 | hashignore = _hashignore(wctx.repo().dirstate._ignore) |
|
692 | 696 | notefiles = ( |
|
693 | 697 | status.modified |
|
694 | 698 | + status.added |
|
695 | 699 | + status.removed |
|
696 | 700 | + status.deleted |
|
697 | 701 | + status.unknown |
|
698 | 702 | ) |
|
699 | 703 | wctx.repo()._fsmonitorstate.set(clock, hashignore, notefiles) |
|
700 | 704 | |
|
701 | 705 | |
|
702 | 706 | def makedirstate(repo, dirstate): |
|
703 | 707 | class fsmonitordirstate(dirstate.__class__): |
|
704 | 708 | def _fsmonitorinit(self, repo): |
|
705 | 709 | # _fsmonitordisable is used in paranoid mode |
|
706 | 710 | self._fsmonitordisable = False |
|
707 | 711 | self._fsmonitorstate = repo._fsmonitorstate |
|
708 | 712 | self._watchmanclient = repo._watchmanclient |
|
709 | 713 | self._repo = weakref.proxy(repo) |
|
710 | 714 | |
|
711 | 715 | def walk(self, *args, **kwargs): |
|
712 | 716 | orig = super(fsmonitordirstate, self).walk |
|
713 | 717 | if self._fsmonitordisable: |
|
714 | 718 | return orig(*args, **kwargs) |
|
715 | 719 | return overridewalk(orig, self, *args, **kwargs) |
|
716 | 720 | |
|
717 | 721 | def rebuild(self, *args, **kwargs): |
|
718 | 722 | self._fsmonitorstate.invalidate() |
|
719 | 723 | return super(fsmonitordirstate, self).rebuild(*args, **kwargs) |
|
720 | 724 | |
|
721 | 725 | def invalidate(self, *args, **kwargs): |
|
722 | 726 | self._fsmonitorstate.invalidate() |
|
723 | 727 | return super(fsmonitordirstate, self).invalidate(*args, **kwargs) |
|
724 | 728 | |
|
725 | 729 | dirstate.__class__ = fsmonitordirstate |
|
726 | 730 | dirstate._fsmonitorinit(repo) |
|
727 | 731 | |
|
728 | 732 | |
|
729 | 733 | def wrapdirstate(orig, self): |
|
730 | 734 | ds = orig(self) |
|
731 | 735 | # only override the dirstate when Watchman is available for the repo |
|
732 | 736 | if util.safehasattr(self, b'_fsmonitorstate'): |
|
733 | 737 | makedirstate(self, ds) |
|
734 | 738 | return ds |
|
735 | 739 | |
|
736 | 740 | |
|
737 | 741 | def extsetup(ui): |
|
738 | 742 | extensions.wrapfilecache( |
|
739 | 743 | localrepo.localrepository, b'dirstate', wrapdirstate |
|
740 | 744 | ) |
|
741 | 745 | if pycompat.isdarwin: |
|
742 | 746 | # An assist for avoiding the dangling-symlink fsevents bug |
|
743 | 747 | extensions.wrapfunction(os, b'symlink', wrapsymlink) |
|
744 | 748 | |
|
745 | 749 | extensions.wrapfunction(merge, b'_update', wrapupdate) |
|
746 | 750 | |
|
747 | 751 | |
|
748 | 752 | def wrapsymlink(orig, source, link_name): |
|
749 | 753 | """if we create a dangling symlink, also touch the parent dir |
|
750 | 754 | to encourage fsevents notifications to work more correctly""" |
|
751 | 755 | try: |
|
752 | 756 | return orig(source, link_name) |
|
753 | 757 | finally: |
|
754 | 758 | try: |
|
755 | 759 | os.utime(os.path.dirname(link_name), None) |
|
756 | 760 | except OSError: |
|
757 | 761 | pass |
|
758 | 762 | |
|
759 | 763 | |
|
760 | 764 | class state_update(object): |
|
761 | 765 | """This context manager is responsible for dispatching the state-enter |
|
762 | 766 | and state-leave signals to the watchman service. The enter and leave |
|
763 | 767 | methods can be invoked manually (for scenarios where context manager |
|
764 | 768 | semantics are not possible). If parameters oldnode and newnode are None, |
|
765 | 769 | they will be populated based on current working copy in enter and |
|
766 | 770 | leave, respectively. Similarly, if the distance is none, it will be |
|
767 | 771 | calculated based on the oldnode and newnode in the leave method.""" |
|
768 | 772 | |
|
769 | 773 | def __init__( |
|
770 | 774 | self, |
|
771 | 775 | repo, |
|
772 | 776 | name, |
|
773 | 777 | oldnode=None, |
|
774 | 778 | newnode=None, |
|
775 | 779 | distance=None, |
|
776 | 780 | partial=False, |
|
777 | 781 | ): |
|
778 | 782 | self.repo = repo.unfiltered() |
|
779 | 783 | self.name = name |
|
780 | 784 | self.oldnode = oldnode |
|
781 | 785 | self.newnode = newnode |
|
782 | 786 | self.distance = distance |
|
783 | 787 | self.partial = partial |
|
784 | 788 | self._lock = None |
|
785 | 789 | self.need_leave = False |
|
786 | 790 | |
|
787 | 791 | def __enter__(self): |
|
788 | 792 | self.enter() |
|
789 | 793 | |
|
790 | 794 | def enter(self): |
|
791 | 795 | # Make sure we have a wlock prior to sending notifications to watchman. |
|
792 | 796 | # We don't want to race with other actors. In the update case, |
|
793 | 797 | # merge.update is going to take the wlock almost immediately. We are |
|
794 | 798 | # effectively extending the lock around several short sanity checks. |
|
795 | 799 | if self.oldnode is None: |
|
796 | 800 | self.oldnode = self.repo[b'.'].node() |
|
797 | 801 | |
|
798 | 802 | if self.repo.currentwlock() is None: |
|
799 | 803 | if util.safehasattr(self.repo, b'wlocknostateupdate'): |
|
800 | 804 | self._lock = self.repo.wlocknostateupdate() |
|
801 | 805 | else: |
|
802 | 806 | self._lock = self.repo.wlock() |
|
803 | 807 | self.need_leave = self._state(b'state-enter', hex(self.oldnode)) |
|
804 | 808 | return self |
|
805 | 809 | |
|
806 | 810 | def __exit__(self, type_, value, tb): |
|
807 | 811 | abort = True if type_ else False |
|
808 | 812 | self.exit(abort=abort) |
|
809 | 813 | |
|
810 | 814 | def exit(self, abort=False): |
|
811 | 815 | try: |
|
812 | 816 | if self.need_leave: |
|
813 | 817 | status = b'failed' if abort else b'ok' |
|
814 | 818 | if self.newnode is None: |
|
815 | 819 | self.newnode = self.repo[b'.'].node() |
|
816 | 820 | if self.distance is None: |
|
817 | 821 | self.distance = calcdistance( |
|
818 | 822 | self.repo, self.oldnode, self.newnode |
|
819 | 823 | ) |
|
820 | 824 | self._state(b'state-leave', hex(self.newnode), status=status) |
|
821 | 825 | finally: |
|
822 | 826 | self.need_leave = False |
|
823 | 827 | if self._lock: |
|
824 | 828 | self._lock.release() |
|
825 | 829 | |
|
826 | 830 | def _state(self, cmd, commithash, status=b'ok'): |
|
827 | 831 | if not util.safehasattr(self.repo, b'_watchmanclient'): |
|
828 | 832 | return False |
|
829 | 833 | try: |
|
830 | 834 | self.repo._watchmanclient.command( |
|
831 | 835 | cmd, |
|
832 | 836 | { |
|
833 | 837 | b'name': self.name, |
|
834 | 838 | b'metadata': { |
|
835 | 839 | # the target revision |
|
836 | 840 | b'rev': commithash, |
|
837 | 841 | # approximate number of commits between current and target |
|
838 | 842 | b'distance': self.distance if self.distance else 0, |
|
839 | 843 | # success/failure (only really meaningful for state-leave) |
|
840 | 844 | b'status': status, |
|
841 | 845 | # whether the working copy parent is changing |
|
842 | 846 | b'partial': self.partial, |
|
843 | 847 | }, |
|
844 | 848 | }, |
|
845 | 849 | ) |
|
846 | 850 | return True |
|
847 | 851 | except Exception as e: |
|
848 | 852 | # Swallow any errors; fire and forget |
|
849 | 853 | self.repo.ui.log( |
|
850 | 854 | b'watchman', b'Exception %s while running %s\n', e, cmd |
|
851 | 855 | ) |
|
852 | 856 | return False |
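
The manual (non-context-manager) protocol mentioned in the class docstring,
sketched for a hypothetical caller (do_bulk_changes is a placeholder for
the caller's own working-copy mutations):

    su = state_update(repo, name=b'myext.bulk-op')
    su.enter()               # sends state-enter, taking the wlock if needed
    try:
        do_bulk_changes()
        su.exit()            # state-leave with status 'ok'
    except Exception:
        su.exit(abort=True)  # state-leave with status 'failed'
        raise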
|
853 | 857 | |
|
854 | 858 | |
|
855 | 859 | # Estimate the distance between two nodes |
|
856 | 860 | def calcdistance(repo, oldnode, newnode): |
|
857 | 861 | anc = repo.changelog.ancestor(oldnode, newnode) |
|
858 | 862 | ancrev = repo[anc].rev() |
|
859 | 863 | distance = abs(repo[oldnode].rev() - ancrev) + abs( |
|
860 | 864 | repo[newnode].rev() - ancrev |
|
861 | 865 | ) |
|
862 | 866 | return distance |
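
The distance is plain hop-counting through the common ancestor: hops from the old head down to the ancestor, plus hops from the ancestor back up to the new head. A minimal standalone sketch of the same arithmetic, using made-up revision numbers instead of a real repository::

    def calcdistance_sketch(oldrev, newrev, ancrev):
        # hops down to the common ancestor, plus hops back up
        return abs(oldrev - ancrev) + abs(newrev - ancrev)

    assert calcdistance_sketch(14, 12, 10) == 6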
|
863 | 867 | |
|
864 | 868 | |
|
865 | 869 | # Bracket working copy updates with calls to the watchman state-enter |
|
866 | 870 | # and state-leave commands. This allows clients to perform more intelligent |
|
867 | 871 | # settling during bulk file change scenarios |
|
868 | 872 | # https://facebook.github.io/watchman/docs/cmd/subscribe.html#advanced-settling |
|
869 | 873 | def wrapupdate( |
|
870 | 874 | orig, |
|
871 | 875 | repo, |
|
872 | 876 | node, |
|
873 | 877 | branchmerge, |
|
874 | 878 | force, |
|
875 | 879 | ancestor=None, |
|
876 | 880 | mergeancestor=False, |
|
877 | 881 | labels=None, |
|
878 | 882 | matcher=None, |
|
879 | 883 | **kwargs |
|
880 | 884 | ): |
|
881 | 885 | |
|
882 | 886 | distance = 0 |
|
883 | 887 | partial = True |
|
884 | 888 | oldnode = repo[b'.'].node() |
|
885 | 889 | newnode = repo[node].node() |
|
886 | 890 | if matcher is None or matcher.always(): |
|
887 | 891 | partial = False |
|
888 | 892 | distance = calcdistance(repo.unfiltered(), oldnode, newnode) |
|
889 | 893 | |
|
890 | 894 | with state_update( |
|
891 | 895 | repo, |
|
892 | 896 | name=b"hg.update", |
|
893 | 897 | oldnode=oldnode, |
|
894 | 898 | newnode=newnode, |
|
895 | 899 | distance=distance, |
|
896 | 900 | partial=partial, |
|
897 | 901 | ): |
|
898 | 902 | return orig( |
|
899 | 903 | repo, |
|
900 | 904 | node, |
|
901 | 905 | branchmerge, |
|
902 | 906 | force, |
|
903 | 907 | ancestor, |
|
904 | 908 | mergeancestor, |
|
905 | 909 | labels, |
|
906 | 910 | matcher, |
|
907 | 911 | **kwargs |
|
908 | 912 | ) |
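
Anything that rewrites the working copy can bracket itself the same way wrapupdate does, so that watchman subscribers settle once instead of reacting to every individual file write. A hypothetical sketch; the state name b"hg.example-bulk-op" is invented and not part of the source::

    def do_bulk_mutation(repo, target_node):
        # state-enter is sent on entry; state-leave (ok or failed) on exit
        with state_update(repo, name=b"hg.example-bulk-op", newnode=target_node):
            pass  # ... mutate the working copy here ...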
|
909 | 913 | |
|
910 | 914 | |
|
911 | 915 | def repo_has_depth_one_nested_repo(repo): |
|
912 | 916 | for f in repo.wvfs.listdir(): |
|
913 | 917 | if os.path.isdir(os.path.join(repo.root, f, b'.hg')): |
|
914 | 918 | msg = b'fsmonitor: sub-repository %r detected, fsmonitor disabled\n' |
|
915 | 919 | repo.ui.debug(msg % f) |
|
916 | 920 | return True |
|
917 | 921 | return False |
|
918 | 922 | |
|
919 | 923 | |
|
920 | 924 | def reposetup(ui, repo): |
|
921 | 925 | # We don't work with largefiles or inotify |
|
922 | 926 | exts = extensions.enabled() |
|
923 | 927 | for ext in _blacklist: |
|
924 | 928 | if ext in exts: |
|
925 | 929 | ui.warn( |
|
926 | 930 | _( |
|
927 | 931 | b'The fsmonitor extension is incompatible with the %s ' |
|
928 | 932 | b'extension and has been disabled.\n' |
|
929 | 933 | ) |
|
930 | 934 | % ext |
|
931 | 935 | ) |
|
932 | 936 | return |
|
933 | 937 | |
|
934 | 938 | if repo.local(): |
|
935 | 939 | # We don't work with subrepos either. |
|
936 | 940 | # |
|
937 | 941 | # Checking repo[None].substate can cause a dirstate parse, which is too

938 | 942 | # slow. Instead, look for the .hgsubstate and .hgsub files directly.
|
939 | 943 | if repo.wvfs.exists(b'.hgsubstate') or repo.wvfs.exists(b'.hgsub'): |
|
940 | 944 | return |
|
941 | 945 | |
|
942 | 946 | if repo_has_depth_one_nested_repo(repo): |
|
943 | 947 | return |
|
944 | 948 | |
|
945 | 949 | fsmonitorstate = state.state(repo) |
|
946 | 950 | if fsmonitorstate.mode == b'off': |
|
947 | 951 | return |
|
948 | 952 | |
|
949 | 953 | try: |
|
950 | 954 | client = watchmanclient.client(repo.ui, repo.root) |
|
951 | 955 | except Exception as ex: |
|
952 | 956 | _handleunavailable(ui, fsmonitorstate, ex) |
|
953 | 957 | return |
|
954 | 958 | |
|
955 | 959 | repo._fsmonitorstate = fsmonitorstate |
|
956 | 960 | repo._watchmanclient = client |
|
957 | 961 | |
|
958 | 962 | dirstate, cached = localrepo.isfilecached(repo, b'dirstate') |
|
959 | 963 | if cached: |
|
960 | 964 | # at this point since fsmonitorstate wasn't present, |
|
961 | 965 | # repo.dirstate is not a fsmonitordirstate |
|
962 | 966 | makedirstate(repo, dirstate) |
|
963 | 967 | |
|
964 | 968 | class fsmonitorrepo(repo.__class__): |
|
965 | 969 | def status(self, *args, **kwargs): |
|
966 | 970 | orig = super(fsmonitorrepo, self).status |
|
967 | 971 | return overridestatus(orig, self, *args, **kwargs) |
|
968 | 972 | |
|
969 | 973 | def wlocknostateupdate(self, *args, **kwargs): |
|
970 | 974 | return super(fsmonitorrepo, self).wlock(*args, **kwargs) |
|
971 | 975 | |
|
972 | 976 | def wlock(self, *args, **kwargs): |
|
973 | 977 | l = super(fsmonitorrepo, self).wlock(*args, **kwargs) |
|
974 | 978 | if not ui.configbool( |
|
975 | 979 | b"experimental", b"fsmonitor.transaction_notify" |
|
976 | 980 | ): |
|
977 | 981 | return l |
|
978 | 982 | if l.held != 1: |
|
979 | 983 | return l |
|
980 | 984 | origrelease = l.releasefn |
|
981 | 985 | |
|
982 | 986 | def staterelease(): |
|
983 | 987 | if origrelease: |
|
984 | 988 | origrelease() |
|
985 | 989 | if l.stateupdate: |
|
986 | 990 | l.stateupdate.exit() |
|
987 | 991 | l.stateupdate = None |
|
988 | 992 | |
|
989 | 993 | try: |
|
990 | 994 | l.stateupdate = None |
|
991 | 995 | l.stateupdate = state_update(self, name=b"hg.transaction") |
|
992 | 996 | l.stateupdate.enter() |
|
993 | 997 | l.releasefn = staterelease |
|
994 | 998 | except Exception as e: |
|
995 | 999 | # Swallow any errors; fire and forget |
|
996 | 1000 | self.ui.log( |
|
997 | 1001 | b'watchman', b'Exception in state update %s\n', e |
|
998 | 1002 | ) |
|
999 | 1003 | return l |
|
1000 | 1004 | |
|
1001 | 1005 | repo.__class__ = fsmonitorrepo |
@@ -1,2665 +1,2678 b'' | |||
|
1 | 1 | # histedit.py - interactive history editing for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2009 Augie Fackler <raf@durin42.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | """interactive history editing |
|
8 | 8 | |
|
9 | 9 | With this extension installed, Mercurial gains one new command: histedit. Usage |
|
10 | 10 | is as follows, assuming the following history:: |
|
11 | 11 | |
|
12 | 12 | @ 3[tip] 7c2fd3b9020c 2009-04-27 18:04 -0500 durin42 |
|
13 | 13 | | Add delta |
|
14 | 14 | | |
|
15 | 15 | o 2 030b686bedc4 2009-04-27 18:04 -0500 durin42 |
|
16 | 16 | | Add gamma |
|
17 | 17 | | |
|
18 | 18 | o 1 c561b4e977df 2009-04-27 18:04 -0500 durin42 |
|
19 | 19 | | Add beta |
|
20 | 20 | | |
|
21 | 21 | o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42 |
|
22 | 22 | Add alpha |
|
23 | 23 | |
|
24 | 24 | If you were to run ``hg histedit c561b4e977df``, you would see the following |
|
25 | 25 | file open in your editor:: |
|
26 | 26 | |
|
27 | 27 | pick c561b4e977df Add beta |
|
28 | 28 | pick 030b686bedc4 Add gamma |
|
29 | 29 | pick 7c2fd3b9020c Add delta |
|
30 | 30 | |
|
31 | 31 | # Edit history between c561b4e977df and 7c2fd3b9020c |
|
32 | 32 | # |
|
33 | 33 | # Commits are listed from least to most recent |
|
34 | 34 | # |
|
35 | 35 | # Commands: |
|
36 | 36 | # p, pick = use commit |
|
37 | 37 | # e, edit = use commit, but allow edits before making new commit |
|
38 | 38 | # f, fold = use commit, but combine it with the one above |
|
39 | 39 | # r, roll = like fold, but discard this commit's description and date |
|
40 | 40 | # d, drop = remove commit from history |
|
41 | 41 | # m, mess = edit commit message without changing commit content |
|
42 | 42 | # b, base = checkout changeset and apply further changesets from there |
|
43 | 43 | # |
|
44 | 44 | |
|
45 | 45 | In this file, lines beginning with ``#`` are ignored. You must specify a rule |
|
46 | 46 | for each revision in your history. For example, if you had meant to add gamma |
|
47 | 47 | before beta, and then wanted to add delta in the same revision as beta, you |
|
48 | 48 | would reorganize the file to look like this:: |
|
49 | 49 | |
|
50 | 50 | pick 030b686bedc4 Add gamma |
|
51 | 51 | pick c561b4e977df Add beta |
|
52 | 52 | fold 7c2fd3b9020c Add delta |
|
53 | 53 | |
|
54 | 54 | # Edit history between c561b4e977df and 7c2fd3b9020c |
|
55 | 55 | # |
|
56 | 56 | # Commits are listed from least to most recent |
|
57 | 57 | # |
|
58 | 58 | # Commands: |
|
59 | 59 | # p, pick = use commit |
|
60 | 60 | # e, edit = use commit, but allow edits before making new commit |
|
61 | 61 | # f, fold = use commit, but combine it with the one above |
|
62 | 62 | # r, roll = like fold, but discard this commit's description and date |
|
63 | 63 | # d, drop = remove commit from history |
|
64 | 64 | # m, mess = edit commit message without changing commit content |
|
65 | 65 | # b, base = checkout changeset and apply further changesets from there |
|
66 | 66 | # |
|
67 | 67 | |
|
68 | 68 | At which point you close the editor and ``histedit`` starts working. When you |
|
69 | 69 | specify a ``fold`` operation, ``histedit`` will open an editor when it folds |
|
70 | 70 | those revisions together, offering you a chance to clean up the commit message:: |
|
71 | 71 | |
|
72 | 72 | Add beta |
|
73 | 73 | *** |
|
74 | 74 | Add delta |
|
75 | 75 | |
|
76 | 76 | Edit the commit message to your liking, then close the editor. The date used |
|
77 | 77 | for the commit will be the later of the two commits' dates. For this example, |
|
78 | 78 | let's assume that the commit message was changed to ``Add beta and delta.`` |
|
79 | 79 | After histedit has run and had a chance to remove any old or temporary |
|
80 | 80 | revisions it needed, the history looks like this:: |
|
81 | 81 | |
|
82 | 82 | @ 2[tip] 989b4d060121 2009-04-27 18:04 -0500 durin42 |
|
83 | 83 | | Add beta and delta. |
|
84 | 84 | | |
|
85 | 85 | o 1 081603921c3f 2009-04-27 18:04 -0500 durin42 |
|
86 | 86 | | Add gamma |
|
87 | 87 | | |
|
88 | 88 | o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42 |
|
89 | 89 | Add alpha |
|
90 | 90 | |
|
91 | 91 | Note that ``histedit`` does *not* remove any revisions (even its own temporary |
|
92 | 92 | ones) until after it has completed all the editing operations, so it will |
|
93 | 93 | probably perform several strip operations when it's done. For the above example, |
|
94 | 94 | it had to run strip twice. Strip can be slow depending on a variety of factors, |
|
95 | 95 | so you might need to be a little patient. You can choose to keep the original |
|
96 | 96 | revisions by passing the ``--keep`` flag. |
|
97 | 97 | |
|
98 | 98 | The ``edit`` operation will drop you back to a command prompt, |
|
99 | 99 | allowing you to edit files freely, or even use ``hg record`` to commit |
|
100 | 100 | some changes as a separate commit. When you're done, any remaining |
|
101 | 101 | uncommitted changes will be committed as well. When done, run ``hg |
|
102 | 102 | histedit --continue`` to finish this step. If there are uncommitted |
|
103 | 103 | changes, you'll be prompted for a new commit message, but the default |
|
104 | 104 | commit message will be the original message for the ``edit`` ed |
|
105 | 105 | revision, and the date of the original commit will be preserved. |
|
106 | 106 | |
|
107 | 107 | The ``message`` operation will give you a chance to revise a commit |
|
108 | 108 | message without changing the contents. It's a shortcut for doing |
|
109 | 109 | ``edit`` immediately followed by `hg histedit --continue``. |
|
110 | 110 | |
|
111 | 111 | If ``histedit`` encounters a conflict when moving a revision (while |
|
112 | 112 | handling ``pick`` or ``fold``), it'll stop in a similar manner to |
|
113 | 113 | ``edit`` with the difference that it won't prompt you for a commit |
|
114 | 114 | message when done. If you decide at this point that you don't like how |
|
115 | 115 | much work it will be to rearrange history, or that you made a mistake, |
|
116 | 116 | you can use ``hg histedit --abort`` to abandon the new changes you |
|
117 | 117 | have made and return to the state before you attempted to edit your |
|
118 | 118 | history. |
|
119 | 119 | |
|
120 | 120 | If we clone the histedit-ed example repository above and add four more |
|
121 | 121 | changes, such that we have the following history:: |
|
122 | 122 | |
|
123 | 123 | @ 6[tip] 038383181893 2009-04-27 18:04 -0500 stefan |
|
124 | 124 | | Add theta |
|
125 | 125 | | |
|
126 | 126 | o 5 140988835471 2009-04-27 18:04 -0500 stefan |
|
127 | 127 | | Add eta |
|
128 | 128 | | |
|
129 | 129 | o 4 122930637314 2009-04-27 18:04 -0500 stefan |
|
130 | 130 | | Add zeta |
|
131 | 131 | | |
|
132 | 132 | o 3 836302820282 2009-04-27 18:04 -0500 stefan |
|
133 | 133 | | Add epsilon |
|
134 | 134 | | |
|
135 | 135 | o 2 989b4d060121 2009-04-27 18:04 -0500 durin42 |
|
136 | 136 | | Add beta and delta. |
|
137 | 137 | | |
|
138 | 138 | o 1 081603921c3f 2009-04-27 18:04 -0500 durin42 |
|
139 | 139 | | Add gamma |
|
140 | 140 | | |
|
141 | 141 | o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42 |
|
142 | 142 | Add alpha |
|
143 | 143 | |
|
144 | 144 | If you run ``hg histedit --outgoing`` on the clone then it is the same |
|
145 | 145 | as running ``hg histedit 836302820282``. If you plan to push to a
|
146 | 146 | repository that Mercurial does not detect to be related to the source |
|
147 | 147 | repo, you can add a ``--force`` option. |
|
148 | 148 | |
|
149 | 149 | Config |
|
150 | 150 | ------ |
|
151 | 151 | |
|
152 | 152 | Histedit rule lines are truncated to 80 characters by default. You |
|
153 | 153 | can customize this behavior by setting a different length in your |
|
154 | 154 | configuration file:: |
|
155 | 155 | |
|
156 | 156 | [histedit] |
|
157 | 157 | linelen = 120 # truncate rule lines at 120 characters |
|
158 | 158 | |
|
159 | 159 | The summary of a change can be customized as well:: |
|
160 | 160 | |
|
161 | 161 | [histedit] |
|
162 | 162 | summary-template = '{rev} {bookmarks} {desc|firstline}' |
|
163 | 163 | |
|
164 | 164 | The customized summary should be kept short enough that rule lines |
|
165 | 165 | will fit in the configured line length. See above if that requires |
|
166 | 166 | customization. |
|
167 | 167 | |
|
168 | 168 | ``hg histedit`` attempts to automatically choose an appropriate base |
|
169 | 169 | revision to use. To change which base revision is used, define a |
|
170 | 170 | revset in your configuration file:: |
|
171 | 171 | |
|
172 | 172 | [histedit] |
|
173 | 173 | defaultrev = only(.) & draft() |
|
174 | 174 | |
|
175 | 175 | By default each edited revision needs to be present in histedit commands. |
|
176 | 176 | To remove a revision you need to use the ``drop`` operation. You can configure
|
177 | 177 | the drop to be implicit for missing commits by adding:: |
|
178 | 178 | |
|
179 | 179 | [histedit] |
|
180 | 180 | dropmissing = True |
|
181 | 181 | |
|
182 | 182 | By default, histedit will close the transaction after each action. For |
|
183 | 183 | performance purposes, you can configure histedit to use a single transaction |
|
184 | 184 | across the entire histedit. WARNING: This setting introduces a significant risk |
|
185 | 185 | of losing the work you've done in a histedit if the histedit aborts |
|
186 | 186 | unexpectedly:: |
|
187 | 187 | |
|
188 | 188 | [histedit] |
|
189 | 189 | singletransaction = True |
|
190 | 190 | |
|
191 | 191 | """ |
|
192 | 192 | |
|
193 | 193 | from __future__ import absolute_import |
|
194 | 194 | |
|
195 | 195 | # chistedit dependencies that are not available everywhere |
|
196 | 196 | try: |
|
197 | 197 | import fcntl |
|
198 | 198 | import termios |
|
199 | 199 | except ImportError: |
|
200 | 200 | fcntl = None |
|
201 | 201 | termios = None |
|
202 | 202 | |
|
203 | 203 | import functools |
|
204 | 204 | import os |
|
205 | 205 | import struct |
|
206 | 206 | |
|
207 | 207 | from mercurial.i18n import _ |
|
208 | 208 | from mercurial.pycompat import ( |
|
209 | 209 | getattr, |
|
210 | 210 | open, |
|
211 | 211 | ) |
|
212 | 212 | from mercurial.node import ( |
|
213 | 213 | bin, |
|
214 | 214 | hex, |
|
215 | 215 | short, |
|
216 | 216 | ) |
|
217 | 217 | from mercurial import ( |
|
218 | 218 | bundle2, |
|
219 | 219 | cmdutil, |
|
220 | 220 | context, |
|
221 | 221 | copies, |
|
222 | 222 | destutil, |
|
223 | 223 | discovery, |
|
224 | 224 | encoding, |
|
225 | 225 | error, |
|
226 | 226 | exchange, |
|
227 | 227 | extensions, |
|
228 | 228 | hg, |
|
229 | 229 | logcmdutil, |
|
230 | 230 | merge as mergemod, |
|
231 | 231 | mergestate as mergestatemod, |
|
232 | 232 | mergeutil, |
|
233 | 233 | obsolete, |
|
234 | 234 | pycompat, |
|
235 | 235 | registrar, |
|
236 | 236 | repair, |
|
237 | 237 | rewriteutil, |
|
238 | 238 | scmutil, |
|
239 | 239 | state as statemod, |
|
240 | 240 | util, |
|
241 | 241 | ) |
|
242 | 242 | from mercurial.utils import ( |
|
243 | 243 | dateutil, |
|
244 | 244 | stringutil, |
|
245 | 245 | urlutil, |
|
246 | 246 | ) |
|
247 | 247 | |
|
248 | 248 | pickle = util.pickle |
|
249 | 249 | cmdtable = {} |
|
250 | 250 | command = registrar.command(cmdtable) |
|
251 | 251 | |
|
252 | 252 | configtable = {} |
|
253 | 253 | configitem = registrar.configitem(configtable) |
|
254 | 254 | configitem( |
|
255 | 255 | b'experimental', |
|
256 | 256 | b'histedit.autoverb', |
|
257 | 257 | default=False, |
|
258 | 258 | ) |
|
259 | 259 | configitem( |
|
260 | 260 | b'histedit', |
|
261 | 261 | b'defaultrev', |
|
262 | 262 | default=None, |
|
263 | 263 | ) |
|
264 | 264 | configitem( |
|
265 | 265 | b'histedit', |
|
266 | 266 | b'dropmissing', |
|
267 | 267 | default=False, |
|
268 | 268 | ) |
|
269 | 269 | configitem( |
|
270 | 270 | b'histedit', |
|
271 | 271 | b'linelen', |
|
272 | 272 | default=80, |
|
273 | 273 | ) |
|
274 | 274 | configitem( |
|
275 | 275 | b'histedit', |
|
276 | 276 | b'singletransaction', |
|
277 | 277 | default=False, |
|
278 | 278 | ) |
|
279 | 279 | configitem( |
|
280 | 280 | b'ui', |
|
281 | 281 | b'interface.histedit', |
|
282 | 282 | default=None, |
|
283 | 283 | ) |
|
284 | 284 | configitem(b'histedit', b'summary-template', default=b'{rev} {desc|firstline}') |
|
285 | # TODO: Teach the text-based histedit interface to respect this config option | |
|
286 | # before we make it non-experimental. | |
|
287 | configitem( | |
|
288 | b'histedit', b'later-commits-first', default=False, experimental=True | |
|
289 | ) | |
|
285 | 290 | |
|
286 | 291 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
287 | 292 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
288 | 293 | # be specifying the version(s) of Mercurial they are tested with, or |
|
289 | 294 | # leave the attribute unspecified. |
|
290 | 295 | testedwith = b'ships-with-hg-core' |
|
291 | 296 | |
|
292 | 297 | actiontable = {} |
|
293 | 298 | primaryactions = set() |
|
294 | 299 | secondaryactions = set() |
|
295 | 300 | tertiaryactions = set() |
|
296 | 301 | internalactions = set() |
|
297 | 302 | |
|
298 | 303 | |
|
299 | 304 | def geteditcomment(ui, first, last): |
|
300 | 305 | """construct the editor comment |
|
301 | 306 | The comment includes:: |
|
302 | 307 | - an intro |
|
303 | 308 | - sorted primary commands |
|
304 | 309 | - sorted short commands |
|
305 | 310 | - sorted long commands |
|
306 | 311 | - additional hints |
|
307 | 312 | |
|
308 | 313 | Commands are only included once. |
|
309 | 314 | """ |
|
310 | 315 | intro = _( |
|
311 | 316 | b"""Edit history between %s and %s |
|
312 | 317 | |
|
313 | 318 | Commits are listed from least to most recent |
|
314 | 319 | |
|
315 | 320 | You can reorder changesets by reordering the lines |
|
316 | 321 | |
|
317 | 322 | Commands: |
|
318 | 323 | """ |
|
319 | 324 | ) |
|
320 | 325 | actions = [] |
|
321 | 326 | |
|
322 | 327 | def addverb(v): |
|
323 | 328 | a = actiontable[v] |
|
324 | 329 | lines = a.message.split(b"\n") |
|
325 | 330 | if len(a.verbs): |
|
326 | 331 | v = b', '.join(sorted(a.verbs, key=lambda v: len(v))) |
|
327 | 332 | actions.append(b" %s = %s" % (v, lines[0])) |
|
328 | 333 | actions.extend([b' %s'] * (len(lines) - 1)) |
|
329 | 334 | |
|
330 | 335 | for v in ( |
|
331 | 336 | sorted(primaryactions) |
|
332 | 337 | + sorted(secondaryactions) |
|
333 | 338 | + sorted(tertiaryactions) |
|
334 | 339 | ): |
|
335 | 340 | addverb(v) |
|
336 | 341 | actions.append(b'') |
|
337 | 342 | |
|
338 | 343 | hints = [] |
|
339 | 344 | if ui.configbool(b'histedit', b'dropmissing'): |
|
340 | 345 | hints.append( |
|
341 | 346 | b"Deleting a changeset from the list " |
|
342 | 347 | b"will DISCARD it from the edited history!" |
|
343 | 348 | ) |
|
344 | 349 | |
|
345 | 350 | lines = (intro % (first, last)).split(b'\n') + actions + hints |
|
346 | 351 | |
|
347 | 352 | return b''.join([b'# %s\n' % l if l else b'#\n' for l in lines]) |
|
348 | 353 | |
|
349 | 354 | |
|
350 | 355 | class histeditstate(object): |
|
351 | 356 | def __init__(self, repo): |
|
352 | 357 | self.repo = repo |
|
353 | 358 | self.actions = None |
|
354 | 359 | self.keep = None |
|
355 | 360 | self.topmost = None |
|
356 | 361 | self.parentctxnode = None |
|
357 | 362 | self.lock = None |
|
358 | 363 | self.wlock = None |
|
359 | 364 | self.backupfile = None |
|
360 | 365 | self.stateobj = statemod.cmdstate(repo, b'histedit-state') |
|
361 | 366 | self.replacements = [] |
|
362 | 367 | |
|
363 | 368 | def read(self): |
|
364 | 369 | """Load histedit state from disk and set fields appropriately.""" |
|
365 | 370 | if not self.stateobj.exists(): |
|
366 | 371 | cmdutil.wrongtooltocontinue(self.repo, _(b'histedit')) |
|
367 | 372 | |
|
368 | 373 | data = self._read() |
|
369 | 374 | |
|
370 | 375 | self.parentctxnode = data[b'parentctxnode'] |
|
371 | 376 | actions = parserules(data[b'rules'], self) |
|
372 | 377 | self.actions = actions |
|
373 | 378 | self.keep = data[b'keep'] |
|
374 | 379 | self.topmost = data[b'topmost'] |
|
375 | 380 | self.replacements = data[b'replacements'] |
|
376 | 381 | self.backupfile = data[b'backupfile'] |
|
377 | 382 | |
|
378 | 383 | def _read(self): |
|
379 | 384 | fp = self.repo.vfs.read(b'histedit-state') |
|
380 | 385 | if fp.startswith(b'v1\n'): |
|
381 | 386 | data = self._load() |
|
382 | 387 | parentctxnode, rules, keep, topmost, replacements, backupfile = data |
|
383 | 388 | else: |
|
384 | 389 | data = pickle.loads(fp) |
|
385 | 390 | parentctxnode, rules, keep, topmost, replacements = data |
|
386 | 391 | backupfile = None |
|
387 | 392 | rules = b"\n".join([b"%s %s" % (verb, rest) for [verb, rest] in rules]) |
|
388 | 393 | |
|
389 | 394 | return { |
|
390 | 395 | b'parentctxnode': parentctxnode, |
|
391 | 396 | b"rules": rules, |
|
392 | 397 | b"keep": keep, |
|
393 | 398 | b"topmost": topmost, |
|
394 | 399 | b"replacements": replacements, |
|
395 | 400 | b"backupfile": backupfile, |
|
396 | 401 | } |
|
397 | 402 | |
|
398 | 403 | def write(self, tr=None): |
|
399 | 404 | if tr: |
|
400 | 405 | tr.addfilegenerator( |
|
401 | 406 | b'histedit-state', |
|
402 | 407 | (b'histedit-state',), |
|
403 | 408 | self._write, |
|
404 | 409 | location=b'plain', |
|
405 | 410 | ) |
|
406 | 411 | else: |
|
407 | 412 | with self.repo.vfs(b"histedit-state", b"w") as f: |
|
408 | 413 | self._write(f) |
|
409 | 414 | |
|
410 | 415 | def _write(self, fp): |
|
411 | 416 | fp.write(b'v1\n') |
|
412 | 417 | fp.write(b'%s\n' % hex(self.parentctxnode)) |
|
413 | 418 | fp.write(b'%s\n' % hex(self.topmost)) |
|
414 | 419 | fp.write(b'%s\n' % (b'True' if self.keep else b'False')) |
|
415 | 420 | fp.write(b'%d\n' % len(self.actions)) |
|
416 | 421 | for action in self.actions: |
|
417 | 422 | fp.write(b'%s\n' % action.tostate()) |
|
418 | 423 | fp.write(b'%d\n' % len(self.replacements)) |
|
419 | 424 | for replacement in self.replacements: |
|
420 | 425 | fp.write( |
|
421 | 426 | b'%s%s\n' |
|
422 | 427 | % ( |
|
423 | 428 | hex(replacement[0]), |
|
424 | 429 | b''.join(hex(r) for r in replacement[1]), |
|
425 | 430 | ) |
|
426 | 431 | ) |
|
427 | 432 | backupfile = self.backupfile |
|
428 | 433 | if not backupfile: |
|
429 | 434 | backupfile = b'' |
|
430 | 435 | fp.write(b'%s\n' % backupfile) |
|
431 | 436 | |
|
432 | 437 | def _load(self): |
|
433 | 438 | fp = self.repo.vfs(b'histedit-state', b'r') |
|
434 | 439 | lines = [l[:-1] for l in fp.readlines()] |
|
435 | 440 | |
|
436 | 441 | index = 0 |
|
437 | 442 | lines[index] # version number |
|
438 | 443 | index += 1 |
|
439 | 444 | |
|
440 | 445 | parentctxnode = bin(lines[index]) |
|
441 | 446 | index += 1 |
|
442 | 447 | |
|
443 | 448 | topmost = bin(lines[index]) |
|
444 | 449 | index += 1 |
|
445 | 450 | |
|
446 | 451 | keep = lines[index] == b'True' |
|
447 | 452 | index += 1 |
|
448 | 453 | |
|
449 | 454 | # Rules |
|
450 | 455 | rules = [] |
|
451 | 456 | rulelen = int(lines[index]) |
|
452 | 457 | index += 1 |
|
453 | 458 | for i in pycompat.xrange(rulelen): |
|
454 | 459 | ruleaction = lines[index] |
|
455 | 460 | index += 1 |
|
456 | 461 | rule = lines[index] |
|
457 | 462 | index += 1 |
|
458 | 463 | rules.append((ruleaction, rule)) |
|
459 | 464 | |
|
460 | 465 | # Replacements |
|
461 | 466 | replacements = [] |
|
462 | 467 | replacementlen = int(lines[index]) |
|
463 | 468 | index += 1 |
|
464 | 469 | for i in pycompat.xrange(replacementlen): |
|
465 | 470 | replacement = lines[index] |
|
466 | 471 | original = bin(replacement[:40]) |
|
467 | 472 | succ = [ |
|
468 | 473 | bin(replacement[i : i + 40]) |
|
469 | 474 | for i in range(40, len(replacement), 40) |
|
470 | 475 | ] |
|
471 | 476 | replacements.append((original, succ)) |
|
472 | 477 | index += 1 |
|
473 | 478 | |
|
474 | 479 | backupfile = lines[index] |
|
475 | 480 | index += 1 |
|
476 | 481 | |
|
477 | 482 | fp.close() |
|
478 | 483 | |
|
479 | 484 | return parentctxnode, rules, keep, topmost, replacements, backupfile |
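
Reading _write and _load together, the on-disk v1 layout of .hg/histedit-state works out as follows; this is a reconstruction from the code above, with placeholder hashes::

    # v1                        version marker
    # <parent hex>              parentctxnode, 40 hex chars
    # <topmost hex>             topmost, 40 hex chars
    # True                      keep flag, 'True' or 'False'
    # 1                         number of actions
    # pick                      each action takes two lines:
    # <node hex>                  the verb, then its argument
    # 1                         number of replacements
    # <old hex><succ hex>...    40-char hashes concatenated on one line
    # <backup file path>        empty line when there is no backup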
|
480 | 485 | |
|
481 | 486 | def clear(self): |
|
482 | 487 | if self.inprogress(): |
|
483 | 488 | self.repo.vfs.unlink(b'histedit-state') |
|
484 | 489 | |
|
485 | 490 | def inprogress(self): |
|
486 | 491 | return self.repo.vfs.exists(b'histedit-state') |
|
487 | 492 | |
|
488 | 493 | |
|
489 | 494 | class histeditaction(object): |
|
490 | 495 | def __init__(self, state, node): |
|
491 | 496 | self.state = state |
|
492 | 497 | self.repo = state.repo |
|
493 | 498 | self.node = node |
|
494 | 499 | |
|
495 | 500 | @classmethod |
|
496 | 501 | def fromrule(cls, state, rule): |
|
497 | 502 | """Parses the given rule, returning an instance of the histeditaction.""" |
|
498 | 503 | ruleid = rule.strip().split(b' ', 1)[0] |
|
499 | 504 | # ruleid can be anything from rev numbers, hashes, "bookmarks" etc |
|
500 | 505 | # Validate the rule id and resolve it to a binary node hash
|
501 | 506 | try: |
|
502 | 507 | rev = bin(ruleid) |
|
503 | 508 | except TypeError: |
|
504 | 509 | try: |
|
505 | 510 | _ctx = scmutil.revsingle(state.repo, ruleid) |
|
506 | 511 | rulehash = _ctx.hex() |
|
507 | 512 | rev = bin(rulehash) |
|
508 | 513 | except error.RepoLookupError: |
|
509 | 514 | raise error.ParseError(_(b"invalid changeset %s") % ruleid) |
|
510 | 515 | return cls(state, rev) |
|
511 | 516 | |
|
512 | 517 | def verify(self, prev, expected, seen): |
|
513 | 518 | """Verifies semantic correctness of the rule""" |
|
514 | 519 | repo = self.repo |
|
515 | 520 | ha = hex(self.node) |
|
516 | 521 | self.node = scmutil.resolvehexnodeidprefix(repo, ha) |
|
517 | 522 | if self.node is None: |
|
518 | 523 | raise error.ParseError(_(b'unknown changeset %s listed') % ha[:12]) |
|
519 | 524 | self._verifynodeconstraints(prev, expected, seen) |
|
520 | 525 | |
|
521 | 526 | def _verifynodeconstraints(self, prev, expected, seen): |
|
522 | 527 | # by default, commands need a node in the edited list
|
523 | 528 | if self.node not in expected: |
|
524 | 529 | raise error.ParseError( |
|
525 | 530 | _(b'%s "%s" changeset was not a candidate') |
|
526 | 531 | % (self.verb, short(self.node)), |
|
527 | 532 | hint=_(b'only use listed changesets'), |
|
528 | 533 | ) |
|
529 | 534 | # and only one command per node |
|
530 | 535 | if self.node in seen: |
|
531 | 536 | raise error.ParseError( |
|
532 | 537 | _(b'duplicated command for changeset %s') % short(self.node) |
|
533 | 538 | ) |
|
534 | 539 | |
|
535 | 540 | def torule(self): |
|
536 | 541 | """build a histedit rule line for an action |
|
537 | 542 | |
|
538 | 543 | by default lines are in the form: |
|
539 | 544 | <hash> <rev> <summary> |
|
540 | 545 | """ |
|
541 | 546 | ctx = self.repo[self.node] |
|
542 | 547 | ui = self.repo.ui |
|
543 | 548 | # We don't want color codes in the commit message template, so |
|
544 | 549 | # disable the label() template function while we render it. |
|
545 | 550 | with ui.configoverride( |
|
546 | 551 | {(b'templatealias', b'label(l,x)'): b"x"}, b'histedit' |
|
547 | 552 | ): |
|
548 | 553 | summary = cmdutil.rendertemplate( |
|
549 | 554 | ctx, ui.config(b'histedit', b'summary-template') |
|
550 | 555 | ) |
|
551 | 556 | # Handle the fact that `''.splitlines() => []` |
|
552 | 557 | summary = summary.splitlines()[0] if summary else b'' |
|
553 | 558 | line = b'%s %s %s' % (self.verb, ctx, summary) |
|
554 | 559 | # trim to 75 columns by default so it's not stupidly wide in my editor |
|
555 | 560 | # (the 5 more are left for verb) |
|
556 | 561 | maxlen = self.repo.ui.configint(b'histedit', b'linelen') |
|
557 | 562 | maxlen = max(maxlen, 22) # avoid truncating hash |
|
558 | 563 | return stringutil.ellipsis(line, maxlen) |
|
559 | 564 | |
|
560 | 565 | def tostate(self): |
|
561 | 566 | """Print an action in format used by histedit state files |
|
562 | 567 | (the first line is a verb, the remainder is the second) |
|
563 | 568 | """ |
|
564 | 569 | return b"%s\n%s" % (self.verb, hex(self.node)) |
|
565 | 570 | |
|
566 | 571 | def run(self): |
|
567 | 572 | """Runs the action. The default behavior is simply apply the action's |
|
568 | 573 | rulectx onto the current parentctx.""" |
|
569 | 574 | self.applychange() |
|
570 | 575 | self.continuedirty() |
|
571 | 576 | return self.continueclean() |
|
572 | 577 | |
|
573 | 578 | def applychange(self): |
|
574 | 579 | """Applies the changes from this action's rulectx onto the current |
|
575 | 580 | parentctx, but does not commit them.""" |
|
576 | 581 | repo = self.repo |
|
577 | 582 | rulectx = repo[self.node] |
|
578 | 583 | with repo.ui.silent(): |
|
579 | 584 | hg.update(repo, self.state.parentctxnode, quietempty=True) |
|
580 | 585 | stats = applychanges(repo.ui, repo, rulectx, {}) |
|
581 | 586 | repo.dirstate.setbranch(rulectx.branch()) |
|
582 | 587 | if stats.unresolvedcount: |
|
583 | 588 | raise error.InterventionRequired( |
|
584 | 589 | _(b'Fix up the change (%s %s)') % (self.verb, short(self.node)), |
|
585 | 590 | hint=_(b'hg histedit --continue to resume'), |
|
586 | 591 | ) |
|
587 | 592 | |
|
588 | 593 | def continuedirty(self): |
|
589 | 594 | """Continues the action when changes have been applied to the working |
|
590 | 595 | copy. The default behavior is to commit the dirty changes.""" |
|
591 | 596 | repo = self.repo |
|
592 | 597 | rulectx = repo[self.node] |
|
593 | 598 | |
|
594 | 599 | editor = self.commiteditor() |
|
595 | 600 | commit = commitfuncfor(repo, rulectx) |
|
596 | 601 | if repo.ui.configbool(b'rewrite', b'update-timestamp'): |
|
597 | 602 | date = dateutil.makedate() |
|
598 | 603 | else: |
|
599 | 604 | date = rulectx.date() |
|
600 | 605 | commit( |
|
601 | 606 | text=rulectx.description(), |
|
602 | 607 | user=rulectx.user(), |
|
603 | 608 | date=date, |
|
604 | 609 | extra=rulectx.extra(), |
|
605 | 610 | editor=editor, |
|
606 | 611 | ) |
|
607 | 612 | |
|
608 | 613 | def commiteditor(self): |
|
609 | 614 | """The editor to be used to edit the commit message.""" |
|
610 | 615 | return False |
|
611 | 616 | |
|
612 | 617 | def continueclean(self): |
|
613 | 618 | """Continues the action when the working copy is clean. The default |
|
614 | 619 | behavior is to accept the current commit as the new version of the |
|
615 | 620 | rulectx.""" |
|
616 | 621 | ctx = self.repo[b'.'] |
|
617 | 622 | if ctx.node() == self.state.parentctxnode: |
|
618 | 623 | self.repo.ui.warn( |
|
619 | 624 | _(b'%s: skipping changeset (no changes)\n') % short(self.node) |
|
620 | 625 | ) |
|
621 | 626 | return ctx, [(self.node, tuple())] |
|
622 | 627 | if ctx.node() == self.node: |
|
623 | 628 | # Nothing changed |
|
624 | 629 | return ctx, [] |
|
625 | 630 | return ctx, [(self.node, (ctx.node(),))] |
|
626 | 631 | |
|
627 | 632 | |
|
628 | 633 | def commitfuncfor(repo, src): |
|
629 | 634 | """Build a commit function for the replacement of <src> |
|
630 | 635 | |
|
631 | 636 | This function ensures we apply the same treatment to all changesets.
|
632 | 637 | |
|
633 | 638 | - Add a 'histedit_source' entry in extra. |
|
634 | 639 | |
|
635 | 640 | Note that fold has its own separate logic because its handling is a bit
|
636 | 641 | different and not easily factored out of the fold method. |
|
637 | 642 | """ |
|
638 | 643 | phasemin = src.phase() |
|
639 | 644 | |
|
640 | 645 | def commitfunc(**kwargs): |
|
641 | 646 | overrides = {(b'phases', b'new-commit'): phasemin} |
|
642 | 647 | with repo.ui.configoverride(overrides, b'histedit'): |
|
643 | 648 | extra = kwargs.get('extra', {}).copy() |
|
644 | 649 | extra[b'histedit_source'] = src.hex() |
|
645 | 650 | kwargs['extra'] = extra |
|
646 | 651 | return repo.commit(**kwargs) |
|
647 | 652 | |
|
648 | 653 | return commitfunc |
|
649 | 654 | |
|
650 | 655 | |
|
651 | 656 | def applychanges(ui, repo, ctx, opts): |
|
652 | 657 | """Merge changeset from ctx (only) in the current working directory""" |
|
653 | 658 | if ctx.p1().node() == repo.dirstate.p1(): |
|
654 | 659 | # edits are "in place" we do not need to make any merge, |
|
655 | 660 | # just applies changes on parent for editing |
|
656 | 661 | with ui.silent(): |
|
657 | 662 | cmdutil.revert(ui, repo, ctx, all=True) |
|
658 | 663 | stats = mergemod.updateresult(0, 0, 0, 0) |
|
659 | 664 | else: |
|
660 | 665 | try: |
|
661 | 666 | # ui.forcemerge is an internal variable, do not document |
|
662 | 667 | repo.ui.setconfig( |
|
663 | 668 | b'ui', b'forcemerge', opts.get(b'tool', b''), b'histedit' |
|
664 | 669 | ) |
|
665 | 670 | stats = mergemod.graft(repo, ctx, labels=[b'local', b'histedit']) |
|
666 | 671 | finally: |
|
667 | 672 | repo.ui.setconfig(b'ui', b'forcemerge', b'', b'histedit') |
|
668 | 673 | return stats |
|
669 | 674 | |
|
670 | 675 | |
|
671 | 676 | def collapse(repo, firstctx, lastctx, commitopts, skipprompt=False): |
|
672 | 677 | """collapse the set of revisions from first to last as new one. |
|
673 | 678 | |
|
674 | 679 | Expected commit options are: |
|
675 | 680 | - message |
|
676 | 681 | - date |
|
677 | 682 | - username |
|
678 | 683 | Commit message is edited in all cases. |
|
679 | 684 | |
|
680 | 685 | This function works in memory.""" |
|
681 | 686 | ctxs = list(repo.set(b'%d::%d', firstctx.rev(), lastctx.rev())) |
|
682 | 687 | if not ctxs: |
|
683 | 688 | return None |
|
684 | 689 | for c in ctxs: |
|
685 | 690 | if not c.mutable(): |
|
686 | 691 | raise error.ParseError( |
|
687 | 692 | _(b"cannot fold into public change %s") % short(c.node()) |
|
688 | 693 | ) |
|
689 | 694 | base = firstctx.p1() |
|
690 | 695 | |
|
691 | 696 | # commit a new version of the old changeset, including the update |
|
692 | 697 | # collect all files which might be affected |
|
693 | 698 | files = set() |
|
694 | 699 | for ctx in ctxs: |
|
695 | 700 | files.update(ctx.files()) |
|
696 | 701 | |
|
697 | 702 | # Recompute copies (avoid recording a -> b -> a) |
|
698 | 703 | copied = copies.pathcopies(base, lastctx) |
|
699 | 704 | |
|
700 | 705 | # prune files which were reverted by the updates |
|
701 | 706 | files = [f for f in files if not cmdutil.samefile(f, lastctx, base)] |
|
702 | 707 | # commit version of these files as defined by head |
|
703 | 708 | headmf = lastctx.manifest() |
|
704 | 709 | |
|
705 | 710 | def filectxfn(repo, ctx, path): |
|
706 | 711 | if path in headmf: |
|
707 | 712 | fctx = lastctx[path] |
|
708 | 713 | flags = fctx.flags() |
|
709 | 714 | mctx = context.memfilectx( |
|
710 | 715 | repo, |
|
711 | 716 | ctx, |
|
712 | 717 | fctx.path(), |
|
713 | 718 | fctx.data(), |
|
714 | 719 | islink=b'l' in flags, |
|
715 | 720 | isexec=b'x' in flags, |
|
716 | 721 | copysource=copied.get(path), |
|
717 | 722 | ) |
|
718 | 723 | return mctx |
|
719 | 724 | return None |
|
720 | 725 | |
|
721 | 726 | if commitopts.get(b'message'): |
|
722 | 727 | message = commitopts[b'message'] |
|
723 | 728 | else: |
|
724 | 729 | message = firstctx.description() |
|
725 | 730 | user = commitopts.get(b'user') |
|
726 | 731 | date = commitopts.get(b'date') |
|
727 | 732 | extra = commitopts.get(b'extra') |
|
728 | 733 | |
|
729 | 734 | parents = (firstctx.p1().node(), firstctx.p2().node()) |
|
730 | 735 | editor = None |
|
731 | 736 | if not skipprompt: |
|
732 | 737 | editor = cmdutil.getcommiteditor(edit=True, editform=b'histedit.fold') |
|
733 | 738 | new = context.memctx( |
|
734 | 739 | repo, |
|
735 | 740 | parents=parents, |
|
736 | 741 | text=message, |
|
737 | 742 | files=files, |
|
738 | 743 | filectxfn=filectxfn, |
|
739 | 744 | user=user, |
|
740 | 745 | date=date, |
|
741 | 746 | extra=extra, |
|
742 | 747 | editor=editor, |
|
743 | 748 | ) |
|
744 | 749 | return repo.commitctx(new) |
|
745 | 750 | |
|
746 | 751 | |
|
747 | 752 | def _isdirtywc(repo): |
|
748 | 753 | return repo[None].dirty(missing=True) |
|
749 | 754 | |
|
750 | 755 | |
|
751 | 756 | def abortdirty(): |
|
752 | raise error.Abort( |

757 | raise error.StateError( | 
|
753 | 758 | _(b'working copy has pending changes'), |
|
754 | 759 | hint=_( |
|
755 | 760 | b'amend, commit, or revert them and run histedit ' |
|
756 | 761 | b'--continue, or abort with histedit --abort' |
|
757 | 762 | ), |
|
758 | 763 | ) |
|
759 | 764 | |
|
760 | 765 | |
|
761 | 766 | def action(verbs, message, priority=False, internal=False): |
|
762 | 767 | def wrap(cls): |
|
763 | 768 | assert not priority or not internal |
|
764 | 769 | verb = verbs[0] |
|
765 | 770 | if priority: |
|
766 | 771 | primaryactions.add(verb) |
|
767 | 772 | elif internal: |
|
768 | 773 | internalactions.add(verb) |
|
769 | 774 | elif len(verbs) > 1: |
|
770 | 775 | secondaryactions.add(verb) |
|
771 | 776 | else: |
|
772 | 777 | tertiaryactions.add(verb) |
|
773 | 778 | |
|
774 | 779 | cls.verb = verb |
|
775 | 780 | cls.verbs = verbs |
|
776 | 781 | cls.message = message |
|
777 | 782 | for verb in verbs: |
|
778 | 783 | actiontable[verb] = cls |
|
779 | 784 | return cls |
|
780 | 785 | |
|
781 | 786 | return wrap |
|
782 | 787 | |
|
783 | 788 | |
|
784 | 789 | @action([b'pick', b'p'], _(b'use commit'), priority=True) |
|
785 | 790 | class pick(histeditaction): |
|
786 | 791 | def run(self): |
|
787 | 792 | rulectx = self.repo[self.node] |
|
788 | 793 | if rulectx.p1().node() == self.state.parentctxnode: |
|
789 | 794 | self.repo.ui.debug(b'node %s unchanged\n' % short(self.node)) |
|
790 | 795 | return rulectx, [] |
|
791 | 796 | |
|
792 | 797 | return super(pick, self).run() |
|
793 | 798 | |
|
794 | 799 | |
|
795 | 800 | @action( |
|
796 | 801 | [b'edit', b'e'], |
|
797 | 802 | _(b'use commit, but allow edits before making new commit'), |
|
798 | 803 | priority=True, |
|
799 | 804 | ) |
|
800 | 805 | class edit(histeditaction): |
|
801 | 806 | def run(self): |
|
802 | 807 | repo = self.repo |
|
803 | 808 | rulectx = repo[self.node] |
|
804 | 809 | hg.update(repo, self.state.parentctxnode, quietempty=True) |
|
805 | 810 | applychanges(repo.ui, repo, rulectx, {}) |
|
806 | 811 | hint = _(b'to edit %s, `hg histedit --continue` after making changes') |
|
807 | 812 | raise error.InterventionRequired( |
|
808 | 813 | _(b'Editing (%s), commit as needed now to split the change') |
|
809 | 814 | % short(self.node), |
|
810 | 815 | hint=hint % short(self.node), |
|
811 | 816 | ) |
|
812 | 817 | |
|
813 | 818 | def commiteditor(self): |
|
814 | 819 | return cmdutil.getcommiteditor(edit=True, editform=b'histedit.edit') |
|
815 | 820 | |
|
816 | 821 | |
|
817 | 822 | @action([b'fold', b'f'], _(b'use commit, but combine it with the one above')) |
|
818 | 823 | class fold(histeditaction): |
|
819 | 824 | def verify(self, prev, expected, seen): |
|
820 | 825 | """Verifies semantic correctness of the fold rule""" |
|
821 | 826 | super(fold, self).verify(prev, expected, seen) |
|
822 | 827 | repo = self.repo |
|
823 | 828 | if not prev: |
|
824 | 829 | c = repo[self.node].p1() |
|
825 | 830 | elif prev.verb not in (b'pick', b'base'):
|
826 | 831 | return |
|
827 | 832 | else: |
|
828 | 833 | c = repo[prev.node] |
|
829 | 834 | if not c.mutable(): |
|
830 | 835 | raise error.ParseError( |
|
831 | 836 | _(b"cannot fold into public change %s") % short(c.node()) |
|
832 | 837 | ) |
|
833 | 838 | |
|
834 | 839 | def continuedirty(self): |
|
835 | 840 | repo = self.repo |
|
836 | 841 | rulectx = repo[self.node] |
|
837 | 842 | |
|
838 | 843 | commit = commitfuncfor(repo, rulectx) |
|
839 | 844 | commit( |
|
840 | 845 | text=b'fold-temp-revision %s' % short(self.node), |
|
841 | 846 | user=rulectx.user(), |
|
842 | 847 | date=rulectx.date(), |
|
843 | 848 | extra=rulectx.extra(), |
|
844 | 849 | ) |
|
845 | 850 | |
|
846 | 851 | def continueclean(self): |
|
847 | 852 | repo = self.repo |
|
848 | 853 | ctx = repo[b'.'] |
|
849 | 854 | rulectx = repo[self.node] |
|
850 | 855 | parentctxnode = self.state.parentctxnode |
|
851 | 856 | if ctx.node() == parentctxnode: |
|
852 | 857 | repo.ui.warn(_(b'%s: empty changeset\n') % short(self.node)) |
|
853 | 858 | return ctx, [(self.node, (parentctxnode,))] |
|
854 | 859 | |
|
855 | 860 | parentctx = repo[parentctxnode] |
|
856 | 861 | newcommits = { |
|
857 | 862 | c.node() |
|
858 | 863 | for c in repo.set(b'(%d::. - %d)', parentctx.rev(), parentctx.rev()) |
|
859 | 864 | } |
|
860 | 865 | if not newcommits: |
|
861 | 866 | repo.ui.warn( |
|
862 | 867 | _( |
|
863 | 868 | b'%s: cannot fold - working copy is not a ' |
|
864 | 869 | b'descendant of previous commit %s\n' |
|
865 | 870 | ) |
|
866 | 871 | % (short(self.node), short(parentctxnode)) |
|
867 | 872 | ) |
|
868 | 873 | return ctx, [(self.node, (ctx.node(),))] |
|
869 | 874 | |
|
870 | 875 | middlecommits = newcommits.copy() |
|
871 | 876 | middlecommits.discard(ctx.node()) |
|
872 | 877 | |
|
873 | 878 | return self.finishfold( |
|
874 | 879 | repo.ui, repo, parentctx, rulectx, ctx.node(), middlecommits |
|
875 | 880 | ) |
|
876 | 881 | |
|
877 | 882 | def skipprompt(self): |
|
878 | 883 | """Returns true if the rule should skip the message editor. |
|
879 | 884 | |
|
880 | 885 | For example, 'fold' wants to show an editor, but 'rollup' |
|
881 | 886 | doesn't want to. |
|
882 | 887 | """ |
|
883 | 888 | return False |
|
884 | 889 | |
|
885 | 890 | def mergedescs(self): |
|
886 | 891 | """Returns true if the rule should merge messages of multiple changes. |
|
887 | 892 | |
|
888 | 893 | This exists mainly so that 'rollup' rules can be a subclass of |
|
889 | 894 | 'fold'. |
|
890 | 895 | """ |
|
891 | 896 | return True |
|
892 | 897 | |
|
893 | 898 | def firstdate(self): |
|
894 | 899 | """Returns true if the rule should preserve the date of the first |
|
895 | 900 | change. |
|
896 | 901 | |
|
897 | 902 | This exists mainly so that 'rollup' rules can be a subclass of |
|
898 | 903 | 'fold'. |
|
899 | 904 | """ |
|
900 | 905 | return False |
|
901 | 906 | |
|
902 | 907 | def finishfold(self, ui, repo, ctx, oldctx, newnode, internalchanges): |
|
903 | 908 | mergemod.update(ctx.p1()) |
|
904 | 909 | ### prepare new commit data |
|
905 | 910 | commitopts = {} |
|
906 | 911 | commitopts[b'user'] = ctx.user() |
|
907 | 912 | # commit message |
|
908 | 913 | if not self.mergedescs(): |
|
909 | 914 | newmessage = ctx.description() |
|
910 | 915 | else: |
|
911 | 916 | newmessage = ( |
|
912 | 917 | b'\n***\n'.join( |
|
913 | 918 | [ctx.description()] |
|
914 | 919 | + [repo[r].description() for r in internalchanges] |
|
915 | 920 | + [oldctx.description()] |
|
916 | 921 | ) |
|
917 | 922 | + b'\n' |
|
918 | 923 | ) |
|
919 | 924 | commitopts[b'message'] = newmessage |
|
920 | 925 | # date |
|
921 | 926 | if self.firstdate(): |
|
922 | 927 | commitopts[b'date'] = ctx.date() |
|
923 | 928 | else: |
|
924 | 929 | commitopts[b'date'] = max(ctx.date(), oldctx.date()) |
|
925 | 930 | # if date is to be updated to current |
|
926 | 931 | if ui.configbool(b'rewrite', b'update-timestamp'): |
|
927 | 932 | commitopts[b'date'] = dateutil.makedate() |
|
928 | 933 | |
|
929 | 934 | extra = ctx.extra().copy() |
|
930 | 935 | # histedit_source |
|
931 | 936 | # note: ctx is likely a temporary commit but that's the best we can do
|
932 | 937 | # here. This is sufficient to solve issue3681 anyway. |
|
933 | 938 | extra[b'histedit_source'] = b'%s,%s' % (ctx.hex(), oldctx.hex()) |
|
934 | 939 | commitopts[b'extra'] = extra |
|
935 | 940 | phasemin = max(ctx.phase(), oldctx.phase()) |
|
936 | 941 | overrides = {(b'phases', b'new-commit'): phasemin} |
|
937 | 942 | with repo.ui.configoverride(overrides, b'histedit'): |
|
938 | 943 | n = collapse( |
|
939 | 944 | repo, |
|
940 | 945 | ctx, |
|
941 | 946 | repo[newnode], |
|
942 | 947 | commitopts, |
|
943 | 948 | skipprompt=self.skipprompt(), |
|
944 | 949 | ) |
|
945 | 950 | if n is None: |
|
946 | 951 | return ctx, [] |
|
947 | 952 | mergemod.update(repo[n]) |
|
948 | 953 | replacements = [ |
|
949 | 954 | (oldctx.node(), (newnode,)), |
|
950 | 955 | (ctx.node(), (n,)), |
|
951 | 956 | (newnode, (n,)), |
|
952 | 957 | ] |
|
953 | 958 | for ich in internalchanges: |
|
954 | 959 | replacements.append((ich, (n,))) |
|
955 | 960 | return repo[n], replacements |
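
The replacement tuples built above chain through the intermediate nodes instead of pointing everything straight at the final commit. A toy check of that chain, with invented one-byte ids standing in for real nodes::

    oldctx_node, ctx_node, newnode, n = b'B', b'A', b'T', b'N'
    replacements = [
        (oldctx_node, (newnode,)),  # folded changeset -> temporary commit
        (ctx_node, (n,)),           # fold target -> collapsed commit
        (newnode, (n,)),            # temporary commit -> collapsed commit
    ]
    succ = dict(replacements)
    # following successors from the folded changeset ends at the collapsed one
    assert succ[succ[oldctx_node][0]][0] == n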
|
956 | 961 | |
|
957 | 962 | |
|
958 | 963 | @action( |
|
959 | 964 | [b'base', b'b'], |
|
960 | 965 | _(b'checkout changeset and apply further changesets from there'), |
|
961 | 966 | ) |
|
962 | 967 | class base(histeditaction): |
|
963 | 968 | def run(self): |
|
964 | 969 | if self.repo[b'.'].node() != self.node: |
|
965 | 970 | mergemod.clean_update(self.repo[self.node]) |
|
966 | 971 | return self.continueclean() |
|
967 | 972 | |
|
968 | 973 | def continuedirty(self): |
|
969 | 974 | abortdirty() |
|
970 | 975 | |
|
971 | 976 | def continueclean(self): |
|
972 | 977 | basectx = self.repo[b'.'] |
|
973 | 978 | return basectx, [] |
|
974 | 979 | |
|
975 | 980 | def _verifynodeconstraints(self, prev, expected, seen): |
|
976 | 981 | # base can only be used with a node not in the edited set
|
977 | 982 | if self.node in expected: |
|
978 | 983 | msg = _(b'%s "%s" changeset was an edited list candidate') |
|
979 | 984 | raise error.ParseError( |
|
980 | 985 | msg % (self.verb, short(self.node)), |
|
981 | 986 | hint=_(b'base must only use unlisted changesets'), |
|
982 | 987 | ) |
|
983 | 988 | |
|
984 | 989 | |
|
985 | 990 | @action( |
|
986 | 991 | [b'_multifold'], |
|
987 | 992 | _( |
|
988 | 993 | """fold subclass used for when multiple folds happen in a row |
|
989 | 994 | |
|
990 | 995 | We only want to fire the editor for the folded message once when |
|
991 | 996 | (say) four changes are folded down into a single change. This is |
|
992 | 997 | similar to rollup, but we should preserve both messages so that |
|
993 | 998 | when the last fold operation runs we can show the user all the |
|
994 | 999 | commit messages in their editor. |
|
995 | 1000 | """ |
|
996 | 1001 | ), |
|
997 | 1002 | internal=True, |
|
998 | 1003 | ) |
|
999 | 1004 | class _multifold(fold): |
|
1000 | 1005 | def skipprompt(self): |
|
1001 | 1006 | return True |
|
1002 | 1007 | |
|
1003 | 1008 | |
|
1004 | 1009 | @action( |
|
1005 | 1010 | [b"roll", b"r"], |
|
1006 | 1011 | _(b"like fold, but discard this commit's description and date"), |
|
1007 | 1012 | ) |
|
1008 | 1013 | class rollup(fold): |
|
1009 | 1014 | def mergedescs(self): |
|
1010 | 1015 | return False |
|
1011 | 1016 | |
|
1012 | 1017 | def skipprompt(self): |
|
1013 | 1018 | return True |
|
1014 | 1019 | |
|
1015 | 1020 | def firstdate(self): |
|
1016 | 1021 | return True |
|
1017 | 1022 | |
|
1018 | 1023 | |
|
1019 | 1024 | @action([b"drop", b"d"], _(b'remove commit from history')) |
|
1020 | 1025 | class drop(histeditaction): |
|
1021 | 1026 | def run(self): |
|
1022 | 1027 | parentctx = self.repo[self.state.parentctxnode] |
|
1023 | 1028 | return parentctx, [(self.node, tuple())] |
|
1024 | 1029 | |
|
1025 | 1030 | |
|
1026 | 1031 | @action( |
|
1027 | 1032 | [b"mess", b"m"], |
|
1028 | 1033 | _(b'edit commit message without changing commit content'), |
|
1029 | 1034 | priority=True, |
|
1030 | 1035 | ) |
|
1031 | 1036 | class message(histeditaction): |
|
1032 | 1037 | def commiteditor(self): |
|
1033 | 1038 | return cmdutil.getcommiteditor(edit=True, editform=b'histedit.mess') |
|
1034 | 1039 | |
|
1035 | 1040 | |
|
1036 | 1041 | def findoutgoing(ui, repo, remote=None, force=False, opts=None): |
|
1037 | 1042 | """utility function to find the first outgoing changeset |
|
1038 | 1043 | |
|
1039 | 1044 | Used by initialization code""" |
|
1040 | 1045 | if opts is None: |
|
1041 | 1046 | opts = {} |
|
1042 | 1047 | path = urlutil.get_unique_push_path(b'histedit', repo, ui, remote) |
|
1043 | 1048 | dest = path.pushloc or path.loc |
|
1044 | 1049 | |
|
1045 | 1050 | ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest)) |
|
1046 | 1051 | |
|
1047 | 1052 | revs, checkout = hg.addbranchrevs(repo, repo, (path.branch, []), None) |
|
1048 | 1053 | other = hg.peer(repo, opts, dest) |
|
1049 | 1054 | |
|
1050 | 1055 | if revs: |
|
1051 | 1056 | revs = [repo.lookup(rev) for rev in revs] |
|
1052 | 1057 | |
|
1053 | 1058 | outgoing = discovery.findcommonoutgoing(repo, other, revs, force=force) |
|
1054 | 1059 | if not outgoing.missing: |
|
1055 | raise error.Abort(_(b'no outgoing ancestors')) |

1060 | raise error.StateError(_(b'no outgoing ancestors')) | 
|
1056 | 1061 | roots = list(repo.revs(b"roots(%ln)", outgoing.missing)) |
|
1057 | 1062 | if len(roots) > 1: |
|
1058 | 1063 | msg = _(b'there are ambiguous outgoing revisions') |
|
1059 | 1064 | hint = _(b"see 'hg help histedit' for more detail") |
|
1060 | raise error.Abort(msg, hint=hint) |

1065 | raise error.StateError(msg, hint=hint) | 
|
1061 | 1066 | return repo[roots[0]].node() |
|
1062 | 1067 | |
|
1063 | 1068 | |
|
1064 | 1069 | # Curses Support |
|
1065 | 1070 | try: |
|
1066 | 1071 | import curses |
|
1067 | 1072 | except ImportError: |
|
1068 | 1073 | curses = None |
|
1069 | 1074 | |
|
1070 | 1075 | KEY_LIST = [b'pick', b'edit', b'fold', b'drop', b'mess', b'roll'] |
|
1071 | 1076 | ACTION_LABELS = { |
|
1072 | 1077 | b'fold': b'^fold', |
|
1073 | 1078 | b'roll': b'^roll', |
|
1074 | 1079 | } |
|
1075 | 1080 | |
|
1076 | 1081 | COLOR_HELP, COLOR_SELECTED, COLOR_OK, COLOR_WARN, COLOR_CURRENT = 1, 2, 3, 4, 5 |
|
1077 | 1082 | COLOR_DIFF_ADD_LINE, COLOR_DIFF_DEL_LINE, COLOR_DIFF_OFFSET = 6, 7, 8 |
|
1078 | 1083 | COLOR_ROLL, COLOR_ROLL_CURRENT, COLOR_ROLL_SELECTED = 9, 10, 11 |
|
1079 | 1084 | |
|
1080 | 1085 | E_QUIT, E_HISTEDIT = 1, 2 |
|
1081 | 1086 | E_PAGEDOWN, E_PAGEUP, E_LINEUP, E_LINEDOWN, E_RESIZE = 3, 4, 5, 6, 7 |
|
1082 | 1087 | MODE_INIT, MODE_PATCH, MODE_RULES, MODE_HELP = 0, 1, 2, 3 |
|
1083 | 1088 | |
|
1084 | 1089 | KEYTABLE = { |
|
1085 | 1090 | b'global': { |
|
1086 | 1091 | b'h': b'next-action', |
|
1087 | 1092 | b'KEY_RIGHT': b'next-action', |
|
1088 | 1093 | b'l': b'prev-action', |
|
1089 | 1094 | b'KEY_LEFT': b'prev-action', |
|
1090 | 1095 | b'q': b'quit', |
|
1091 | 1096 | b'c': b'histedit', |
|
1092 | 1097 | b'C': b'histedit', |
|
1093 | 1098 | b'v': b'showpatch', |
|
1094 | 1099 | b'?': b'help', |
|
1095 | 1100 | }, |
|
1096 | 1101 | MODE_RULES: { |
|
1097 | 1102 | b'd': b'action-drop', |
|
1098 | 1103 | b'e': b'action-edit', |
|
1099 | 1104 | b'f': b'action-fold', |
|
1100 | 1105 | b'm': b'action-mess', |
|
1101 | 1106 | b'p': b'action-pick', |
|
1102 | 1107 | b'r': b'action-roll', |
|
1103 | 1108 | b' ': b'select', |
|
1104 | 1109 | b'j': b'down', |
|
1105 | 1110 | b'k': b'up', |
|
1106 | 1111 | b'KEY_DOWN': b'down', |
|
1107 | 1112 | b'KEY_UP': b'up', |
|
1108 | 1113 | b'J': b'move-down', |
|
1109 | 1114 | b'K': b'move-up', |
|
1110 | 1115 | b'KEY_NPAGE': b'move-down', |
|
1111 | 1116 | b'KEY_PPAGE': b'move-up', |
|
1112 | 1117 | b'0': b'goto', # Used for 0..9 |
|
1113 | 1118 | }, |
|
1114 | 1119 | MODE_PATCH: { |
|
1115 | 1120 | b' ': b'page-down', |
|
1116 | 1121 | b'KEY_NPAGE': b'page-down', |
|
1117 | 1122 | b'KEY_PPAGE': b'page-up', |
|
1118 | 1123 | b'j': b'line-down', |
|
1119 | 1124 | b'k': b'line-up', |
|
1120 | 1125 | b'KEY_DOWN': b'line-down', |
|
1121 | 1126 | b'KEY_UP': b'line-up', |
|
1122 | 1127 | b'J': b'down', |
|
1123 | 1128 | b'K': b'up', |
|
1124 | 1129 | }, |
|
1125 | 1130 | MODE_HELP: {}, |
|
1126 | 1131 | } |
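
Key dispatch is two-level: the table for the current mode is consulted first and the global table is the fallback, so b'd' means action-drop only in MODE_RULES while b'q' quits from any mode. A small sketch of that lookup, mirroring the get() chain used in event() further below::

    def lookup_action(keytable, mode, ch):
        # mode-specific bindings shadow the global ones
        return keytable[mode].get(ch, keytable[b'global'].get(ch))

    assert lookup_action(KEYTABLE, MODE_RULES, b'd') == b'action-drop'
    assert lookup_action(KEYTABLE, MODE_PATCH, b'q') == b'quit'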
|
1127 | 1132 | |
|
1128 | 1133 | |
|
1129 | 1134 | def screen_size(): |
|
1130 | 1135 | return struct.unpack(b'hh', fcntl.ioctl(1, termios.TIOCGWINSZ, b' ')) |
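
screen_size() asks the tty driver directly and returns (rows, columns). For reference, the standard library offers a portable alternative; this sketch is not what the code above uses, and shutil consults the COLUMNS/LINES environment variables before falling back to the terminal::

    import shutil

    def screen_size_portable():
        # shutil reports (columns, lines); flip the pair to match screen_size()
        size = shutil.get_terminal_size()
        return size.lines, size.columns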
|
1131 | 1136 | |
|
1132 | 1137 | |
|
1133 | 1138 | class histeditrule(object): |
|
1134 | 1139 | def __init__(self, ui, ctx, pos, action=b'pick'): |
|
1135 | 1140 | self.ui = ui |
|
1136 | 1141 | self.ctx = ctx |
|
1137 | 1142 | self.action = action |
|
1138 | 1143 | self.origpos = pos |
|
1139 | 1144 | self.pos = pos |
|
1140 | 1145 | self.conflicts = [] |
|
1141 | 1146 | |
|
1142 | 1147 | def __bytes__(self): |
|
1143 | 1148 | # Example display of several histeditrules: |
|
1144 | 1149 | # |
|
1145 | 1150 | # #10 pick 316392:06a16c25c053 add option to skip tests |
|
1146 | 1151 | # #11 ^roll 316393:71313c964cc5 <RED>oops a fixup commit</RED> |
|
1147 | 1152 | # #12 pick 316394:ab31f3973b0d include mfbt for mozilla-config.h |
|
1148 | 1153 | # #13 ^fold 316395:14ce5803f4c3 fix warnings |
|
1149 | 1154 | # |
|
1150 | 1155 | # The carets point to the changeset being folded into ("roll this |
|
1151 | 1156 | # changeset into the changeset above"). |
|
1152 | 1157 | return b'%s%s' % (self.prefix, self.desc) |
|
1153 | 1158 | |
|
1154 | 1159 | __str__ = encoding.strmethod(__bytes__) |
|
1155 | 1160 | |
|
1156 | 1161 | @property |
|
1157 | 1162 | def prefix(self): |
|
1158 | 1163 | # Some actions ('fold' and 'roll') combine a patch with a |
|
1159 | 1164 | # previous one. Add a marker showing which patch they apply |
|
1160 | 1165 | # to. |
|
1161 | 1166 | action = ACTION_LABELS.get(self.action, self.action) |
|
1162 | 1167 | |
|
1163 | 1168 | h = self.ctx.hex()[0:12] |
|
1164 | 1169 | r = self.ctx.rev() |
|
1165 | 1170 | |
|
1166 | 1171 | return b"#%s %s %d:%s " % ( |
|
1167 | 1172 | (b'%d' % self.origpos).ljust(2), |
|
1168 | 1173 | action.ljust(6), |
|
1169 | 1174 | r, |
|
1170 | 1175 | h, |
|
1171 | 1176 | ) |
|
1172 | 1177 | |
|
1173 | 1178 | @util.propertycache |
|
1174 | 1179 | def desc(self): |
|
1175 | 1180 | summary = cmdutil.rendertemplate( |
|
1176 | 1181 | self.ctx, self.ui.config(b'histedit', b'summary-template') |
|
1177 | 1182 | ) |
|
1178 | 1183 | if summary: |
|
1179 | 1184 | return summary |
|
1180 | 1185 | # This is split off from the prefix property so that we can |
|
1181 | 1186 | # separately make the description for 'roll' red (since it |
|
1182 | 1187 | # will get discarded). |
|
1183 | 1188 | return self.ctx.description().splitlines()[0].strip() |
|
1184 | 1189 | |
|
1185 | 1190 | def checkconflicts(self, other): |
|
1186 | 1191 | if other.pos > self.pos and other.origpos <= self.origpos: |
|
1187 | 1192 | if set(other.ctx.files()) & set(self.ctx.files()) != set(): |
|
1188 | 1193 | self.conflicts.append(other) |
|
1189 | 1194 | return self.conflicts |
|
1190 | 1195 | |
|
1191 | 1196 | if other in self.conflicts: |
|
1192 | 1197 | self.conflicts.remove(other) |
|
1193 | 1198 | return self.conflicts |
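
checkconflicts records a conflict only when the two rules' order has been inverted relative to the original list (the other rule now sits later by pos but originally came at or before this one by origpos) and their changesets touch a common file. A hypothetical illustration using a stub in place of a real changectx; histeditrule's ui argument is unused by this method, so None suffices::

    class FakeCtx(object):
        def __init__(self, files):
            self._files = files

        def files(self):
            return self._files

    a = histeditrule(None, FakeCtx([b'foo.c']), pos=3)
    b = histeditrule(None, FakeCtx([b'foo.c']), pos=5)
    a.pos, b.pos = 5, 3  # simulate dragging b above a
    assert b.checkconflicts(a) == [a]  # inverted order plus a shared file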
|
1194 | 1199 | |
|
1195 | 1200 | |
|
1196 | # ============ EVENTS =============== | |
|
1197 | def movecursor(state, oldpos, newpos): | |
|
1198 | """Change the rule/changeset that the cursor is pointing to, regardless of | |
|
1199 | current mode (you can switch between patches from the view patch window).""" | |
|
1200 | state[b'pos'] = newpos | |
|
1201 | ||
|
1202 | mode, _ = state[b'mode'] | |
|
1203 | if mode == MODE_RULES: | |
|
1204 | # Scroll through the list by updating the view for MODE_RULES, so that | |
|
1205 | # even if we are not currently viewing the rules, switching back will | |
|
1206 | # result in the cursor's rule being visible. | |
|
1207 | modestate = state[b'modes'][MODE_RULES] | |
|
1208 | if newpos < modestate[b'line_offset']: | |
|
1209 | modestate[b'line_offset'] = newpos | |
|
1210 | elif newpos > modestate[b'line_offset'] + state[b'page_height'] - 1: | |
|
1211 | modestate[b'line_offset'] = newpos - state[b'page_height'] + 1 | |
|
1212 | ||
|
1213 | # Reset the patch view region to the top of the new patch. | |
|
1214 | state[b'modes'][MODE_PATCH][b'line_offset'] = 0 | |
|
1215 | ||
|
1216 | ||
|
1217 | def changemode(state, mode): | |
|
1218 | curmode, _ = state[b'mode'] | |
|
1219 | state[b'mode'] = (mode, curmode) | |
|
1220 | if mode == MODE_PATCH: | |
|
1221 | state[b'modes'][MODE_PATCH][b'patchcontents'] = patchcontents(state) | |
|
1222 | ||
|
1223 | ||
|
1224 | def makeselection(state, pos): | |
|
1225 | state[b'selected'] = pos | |
|
1226 | ||
|
1227 | ||
|
1228 | def swap(state, oldpos, newpos): | |
|
1229 | """Swap two positions and calculate necessary conflicts in | |
|
1230 | O(|newpos-oldpos|) time""" | |
|
1231 | ||
|
1232 | rules = state[b'rules'] | |
|
1233 | assert 0 <= oldpos < len(rules) and 0 <= newpos < len(rules) | |
|
1234 | ||
|
1235 | rules[oldpos], rules[newpos] = rules[newpos], rules[oldpos] | |
|
1236 | ||
|
1237 | # TODO: swap should not know about histeditrule's internals | |
|
1238 | rules[newpos].pos = newpos | |
|
1239 | rules[oldpos].pos = oldpos | |
|
1240 | ||
|
1241 | start = min(oldpos, newpos) | |
|
1242 | end = max(oldpos, newpos) | |
|
1243 | for r in pycompat.xrange(start, end + 1): | |
|
1244 | rules[newpos].checkconflicts(rules[r]) | |
|
1245 | rules[oldpos].checkconflicts(rules[r]) | |
|
1246 | ||
|
1247 | if state[b'selected']: | |
|
1248 | makeselection(state, newpos) | |
|
1249 | ||
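
The conflict bookkeeping that swap() maintains can be shown in isolation: two rules conflict once their relative order is inverted and their files overlap. A standalone sketch with a hypothetical Rule class (not histedit's own):

    class Rule:
        def __init__(self, origpos, files):
            self.pos = self.origpos = origpos
            self.files = set(files)
            self.conflicts = []

        def checkconflicts(self, other):
            # Mirrors histeditrule.checkconflicts() above.
            if other.pos > self.pos and other.origpos <= self.origpos:
                if other.files & self.files:
                    self.conflicts.append(other)
                return self.conflicts
            if other in self.conflicts:
                self.conflicts.remove(other)
            return self.conflicts

    a = Rule(0, [b'foo.c'])
    b = Rule(1, [b'foo.c', b'bar.c'])
    a.pos, b.pos = 1, 0        # swap display order: b now applies before a
    b.checkconflicts(a)
    assert a in b.conflicts    # the shared foo.c makes this swap conflict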
|
1250 | ||
|
1251 | def changeaction(state, pos, action): | |
|
1252 | """Change the action state on the given position to the new action""" | |
|
1253 | rules = state[b'rules'] | |
|
1254 | assert 0 <= pos < len(rules) | |
|
1255 | rules[pos].action = action | |
|
1256 | ||
|
1257 | ||
|
1258 | def cycleaction(state, pos, next=False): | |
|
1259 | """Changes the action state the next or the previous action from | |
|
1260 | the action list""" | |
|
1261 | rules = state[b'rules'] | |
|
1262 | assert 0 <= pos < len(rules) | |
|
1263 | current = rules[pos].action | |
|
1264 | ||
|
1265 | assert current in KEY_LIST | |
|
1266 | ||
|
1267 | index = KEY_LIST.index(current) | |
|
1268 | if next: | |
|
1269 | index += 1 | |
|
1270 | else: | |
|
1271 | index -= 1 | |
|
1272 | changeaction(state, pos, KEY_LIST[index % len(KEY_LIST)]) | |
|
1273 | ||
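
Cycling relies on Python's modulo of a negative index to wrap around the action list; a sketch (this KEY_LIST is a stand-in and may not match histedit's exact order):

    KEY_LIST = [b'pick', b'edit', b'fold', b'roll', b'drop', b'mess']

    def cycled(current, forward):
        index = KEY_LIST.index(current) + (1 if forward else -1)
        return KEY_LIST[index % len(KEY_LIST)]   # -1 % 6 == 5, so it wraps

    assert cycled(b'pick', forward=False) == b'mess'  # wraps to the end
    assert cycled(b'mess', forward=True) == b'pick'   # wraps to the start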
|
1274 | ||
|
1275 | def changeview(state, delta, unit): | |
|
1276 | """Change the region of whatever is being viewed (a patch or the list of | |
|
1277 | changesets). 'delta' is an amount (+/- 1) and 'unit' is 'page' or 'line'.""" | |
|
1278 | mode, _ = state[b'mode'] | |
|
1279 | if mode != MODE_PATCH: | |
|
1280 | return | |
|
1281 | mode_state = state[b'modes'][mode] | |
|
1282 | num_lines = len(mode_state[b'patchcontents']) | |
|
1283 | page_height = state[b'page_height'] | |
|
1284 | unit = page_height if unit == b'page' else 1 | |
|
1285 | num_pages = 1 + (num_lines - 1) // page_height | |
|
1286 | max_offset = (num_pages - 1) * page_height | |
|
1287 | newline = mode_state[b'line_offset'] + delta * unit | |
|
1288 | mode_state[b'line_offset'] = max(0, min(max_offset, newline)) | |
|
1289 | ||
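
The clamping arithmetic above keeps the offset between 0 and the start of the last page; a worked example with 25 patch lines shown in pages of 10:

    num_lines, page_height = 25, 10
    num_pages = 1 + (num_lines - 1) // page_height   # 3 pages
    max_offset = (num_pages - 1) * page_height       # last page starts at 20

    line_offset = 10
    line_offset = max(0, min(max_offset, line_offset + 1 * page_height))
    assert line_offset == 20    # one page down
    line_offset = max(0, min(max_offset, line_offset + 1 * page_height))
    assert line_offset == 20    # clamped: already on the last page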
|
1290 | ||
|
1291 | def event(state, ch): | |
|
1292 | """Change state based on the current character input | |
|
1293 | ||
|
1294 | This takes the current state and, based on the current character input from | |

1295 | the user, changes the state accordingly. | |
|
1296 | """ | |
|
1297 | selected = state[b'selected'] | |
|
1298 | oldpos = state[b'pos'] | |
|
1299 | rules = state[b'rules'] | |
|
1300 | ||
|
1301 | if ch in (curses.KEY_RESIZE, b"KEY_RESIZE"): | |
|
1302 | return E_RESIZE | |
|
1303 | ||
|
1304 | lookup_ch = ch | |
|
1305 | if ch is not None and b'0' <= ch <= b'9': | |
|
1306 | lookup_ch = b'0' | |
|
1307 | ||
|
1308 | curmode, prevmode = state[b'mode'] | |
|
1309 | action = KEYTABLE[curmode].get( | |
|
1310 | lookup_ch, KEYTABLE[b'global'].get(lookup_ch) | |
|
1311 | ) | |
|
1312 | if action is None: | |
|
1313 | return | |
|
1314 | if action in (b'down', b'move-down'): | |
|
1315 | newpos = min(oldpos + 1, len(rules) - 1) | |
|
1316 | movecursor(state, oldpos, newpos) | |
|
1317 | if selected is not None or action == b'move-down': | |
|
1318 | swap(state, oldpos, newpos) | |
|
1319 | elif action in (b'up', b'move-up'): | |
|
1320 | newpos = max(0, oldpos - 1) | |
|
1321 | movecursor(state, oldpos, newpos) | |
|
1322 | if selected is not None or action == b'move-up': | |
|
1323 | swap(state, oldpos, newpos) | |
|
1324 | elif action == b'next-action': | |
|
1325 | cycleaction(state, oldpos, next=True) | |
|
1326 | elif action == b'prev-action': | |
|
1327 | cycleaction(state, oldpos, next=False) | |
|
1328 | elif action == b'select': | |
|
1329 | selected = oldpos if selected is None else None | |
|
1330 | makeselection(state, selected) | |
|
1331 | elif action == b'goto' and int(ch) < len(rules) and len(rules) <= 10: | |
|
1332 | newrule = next((r for r in rules if r.origpos == int(ch))) | |
|
1333 | movecursor(state, oldpos, newrule.pos) | |
|
1334 | if selected is not None: | |
|
1335 | swap(state, oldpos, newrule.pos) | |
|
1336 | elif action.startswith(b'action-'): | |
|
1337 | changeaction(state, oldpos, action[7:]) | |
|
1338 | elif action == b'showpatch': | |
|
1339 | changemode(state, MODE_PATCH if curmode != MODE_PATCH else prevmode) | |
|
1340 | elif action == b'help': | |
|
1341 | changemode(state, MODE_HELP if curmode != MODE_HELP else prevmode) | |
|
1342 | elif action == b'quit': | |
|
1343 | return E_QUIT | |
|
1344 | elif action == b'histedit': | |
|
1345 | return E_HISTEDIT | |
|
1346 | elif action == b'page-down': | |
|
1347 | return E_PAGEDOWN | |
|
1348 | elif action == b'page-up': | |
|
1349 | return E_PAGEUP | |
|
1350 | elif action == b'line-down': | |
|
1351 | return E_LINEDOWN | |
|
1352 | elif action == b'line-up': | |
|
1353 | return E_LINEUP | |
|
1354 | ||
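
The digit handling above collapses every numeric key to the single b'0' entry, so KEYTABLE needs only one binding for "goto rule N"; a sketch of that normalization:

    def lookup(ch):
        if ch is not None and b'0' <= ch <= b'9':
            return b'0'          # any digit shares the b'0' key binding
        return ch

    assert lookup(b'7') == b'0'  # digits collapse to the 'goto' entry
    assert lookup(b'j') == b'j'  # other keys are looked up directly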
|
1355 | ||
|
1356 | 1201 | def makecommands(rules): |
|
1357 | 1202 | """Returns a list of commands consumable by histedit --commands based on |
|
1358 | 1203 | our list of rules""" |
|
1359 | 1204 | commands = [] |
|
1360 | 1205 | for rule in rules:

1361 | 1206 | commands.append(b'%s %s\n' % (rule.action, rule.ctx))
|
1362 | 1207 | return commands |
|
1363 | 1208 | |
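
The rules file consumed by `histedit --commands` is simply one action and changeset per line; a sketch with hypothetical stand-in rule objects (real rules carry a changectx whose bytes form is the short hash):

    class FakeRule:
        def __init__(self, action, node):
            self.action, self.node = action, node

    rules = [FakeRule(b'pick', b'5339bf82f0ca'),
             FakeRule(b'drop', b'8ef592ce7cc4')]
    commands = [b'%s %s\n' % (r.action, r.node) for r in rules]
    assert commands == [b'pick 5339bf82f0ca\n', b'drop 8ef592ce7cc4\n']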
|
1364 | 1209 | |
|
1365 | 1210 | def addln(win, y, x, line, color=None): |
|
1366 | 1211 | """Add a line to the given window left padding but 100% filled with |
|
1367 | 1212 | whitespace characters, so that the color appears on the whole line""" |
|
1368 | 1213 | maxy, maxx = win.getmaxyx() |
|
1369 | 1214 | length = maxx - 1 - x |
|
1370 | 1215 | line = bytes(line).ljust(length)[:length] |
|
1371 | 1216 | if y < 0: |
|
1372 | 1217 | y = maxy + y |
|
1373 | 1218 | if x < 0: |
|
1374 | 1219 | x = maxx + x |
|
1375 | 1220 | if color: |
|
1376 | 1221 | win.addstr(y, x, line, color) |
|
1377 | 1222 | else: |
|
1378 | 1223 | win.addstr(y, x, line) |
|
1379 | 1224 | |
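
addln's two tricks, negative coordinates counted back from the window edge and padding to the full remaining width, can be exercised without curses:

    def resolve(coord, limit):
        return limit + coord if coord < 0 else coord  # -1 means the last row

    def padded(line, x, maxx):
        length = maxx - 1 - x
        return bytes(line).ljust(length)[:length]     # pad, then truncate

    assert resolve(-1, 24) == 23                      # bottom row of a 24-line window
    assert padded(b'ok', 0, 10) == b'ok' + b' ' * 7   # filled to 9 columns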
|
1380 | 1225 | |
|
1381 | 1226 | def _trunc_head(line, n): |
|
1382 | 1227 | if len(line) <= n: |
|
1383 | 1228 | return line |
|
1384 | 1229 | return b'> ' + line[-(n - 2) :] |
|
1385 | 1230 | |
|
1386 | 1231 | |
|
1387 | 1232 | def _trunc_tail(line, n): |
|
1388 | 1233 | if len(line) <= n: |
|
1389 | 1234 | return line |
|
1390 | 1235 | return line[: n - 2] + b' >' |
|
1391 | 1236 | |
|
1392 | 1237 | |
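
A quick usage sketch of the truncation helpers above (the one-line definitions are repeated so the example runs standalone):

    def _trunc_head(line, n):
        return line if len(line) <= n else b'> ' + line[-(n - 2):]

    def _trunc_tail(line, n):
        return line if len(line) <= n else line[: n - 2] + b' >'

    assert _trunc_head(b'abcdefgh', 6) == b'> efgh'   # beginning was cut
    assert _trunc_tail(b'abcdefgh', 6) == b'abcd >'   # end was cut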
|
1393 | def patchcontents(state): | |
|
1394 | repo = state[b'repo'] | |
|
1395 | rule = state[b'rules'][state[b'pos']] | |
|
1396 | displayer = logcmdutil.changesetdisplayer( | |
|
1397 | repo.ui, repo, {b"patch": True, b"template": b"status"}, buffered=True | |
|
1398 | ) | |
|
1399 | overrides = {(b'ui', b'verbose'): True} | |
|
1400 | with repo.ui.configoverride(overrides, source=b'histedit'): | |
|
1401 | displayer.show(rule.ctx) | |
|
1402 | displayer.close() | |
|
1403 | return displayer.hunk[rule.ctx.rev()].splitlines() | |
|
1404 | ||
|
1405 | ||
|
1406 | def _chisteditmain(repo, rules, stdscr): | |
|
1407 | try: | |
|
1408 | curses.use_default_colors() | |
|
1409 | except curses.error: | |
|
1410 | pass | |
|
1411 | ||
|
1412 | # initialize color pattern | |
|
1413 | curses.init_pair(COLOR_HELP, curses.COLOR_WHITE, curses.COLOR_BLUE) | |
|
1414 | curses.init_pair(COLOR_SELECTED, curses.COLOR_BLACK, curses.COLOR_WHITE) | |
|
1415 | curses.init_pair(COLOR_WARN, curses.COLOR_BLACK, curses.COLOR_YELLOW) | |
|
1416 | curses.init_pair(COLOR_OK, curses.COLOR_BLACK, curses.COLOR_GREEN) | |
|
1417 | curses.init_pair(COLOR_CURRENT, curses.COLOR_WHITE, curses.COLOR_MAGENTA) | |
|
1418 | curses.init_pair(COLOR_DIFF_ADD_LINE, curses.COLOR_GREEN, -1) | |
|
1419 | curses.init_pair(COLOR_DIFF_DEL_LINE, curses.COLOR_RED, -1) | |
|
1420 | curses.init_pair(COLOR_DIFF_OFFSET, curses.COLOR_MAGENTA, -1) | |
|
1421 | curses.init_pair(COLOR_ROLL, curses.COLOR_RED, -1) | |
|
1422 | curses.init_pair( | |
|
1423 | COLOR_ROLL_CURRENT, curses.COLOR_BLACK, curses.COLOR_MAGENTA | |
|
1424 | ) | |
|
1425 | curses.init_pair(COLOR_ROLL_SELECTED, curses.COLOR_RED, curses.COLOR_WHITE) | |
|
1426 | ||
|
1427 | # don't display the cursor | |
|
1428 | try: | |
|
1429 | curses.curs_set(0) | |
|
1430 | except curses.error: | |
|
1431 | pass | |
|
1432 | ||
|
1433 | def rendercommit(win, state): | |
|
1238 | class _chistedit_state(object): | |
|
1239 | def __init__( | |
|
1240 | self, | |
|
1241 | repo, | |
|
1242 | rules, | |
|
1243 | stdscr, | |
|
1244 | ): | |
|
1245 | self.repo = repo | |
|
1246 | self.rules = rules | |
|
1247 | self.stdscr = stdscr | |
|
1248 | self.later_on_top = repo.ui.configbool( | |
|
1249 | b'histedit', b'later-commits-first' | |
|
1250 | ) | |
|
1251 | # The current item in display order, initialized to point to the top | |
|
1252 | # of the screen. | |
|
1253 | self.pos = 0 | |
|
1254 | self.selected = None | |
|
1255 | self.mode = (MODE_INIT, MODE_INIT) | |
|
1256 | self.page_height = None | |
|
1257 | self.modes = { | |
|
1258 | MODE_RULES: { | |
|
1259 | b'line_offset': 0, | |
|
1260 | }, | |
|
1261 | MODE_PATCH: { | |
|
1262 | b'line_offset': 0, | |
|
1263 | }, | |
|
1264 | } | |
|
1265 | ||
|
1266 | def render_commit(self, win): | |
|
1434 | 1267 | """Renders the commit window that shows the log of the current selected |
|
1435 | 1268 | commit""" |
|
1436 | pos = state[b'pos'] | |
|
1437 | rules = state[b'rules'] | |
|
1438 | rule = rules[pos] | |
|
1269 | rule = self.rules[self.display_pos_to_rule_pos(self.pos)] | |
|
1439 | 1270 | |
|
1440 | 1271 | ctx = rule.ctx |
|
1441 | 1272 | win.box() |
|
1442 | 1273 | |
|
1443 | 1274 | maxy, maxx = win.getmaxyx() |
|
1444 | 1275 | length = maxx - 3 |
|
1445 | 1276 | |
|
1446 | 1277 | line = b"changeset: %d:%s" % (ctx.rev(), ctx.hex()[:12]) |
|
1447 | 1278 | win.addstr(1, 1, line[:length]) |
|
1448 | 1279 | |
|
1449 | 1280 | line = b"user: %s" % ctx.user() |
|
1450 | 1281 | win.addstr(2, 1, line[:length]) |
|
1451 | 1282 | |
|
1452 | bms = repo.nodebookmarks(ctx.node()) | |
|
1283 | bms = self.repo.nodebookmarks(ctx.node()) | |
|
1453 | 1284 | line = b"bookmark: %s" % b' '.join(bms) |
|
1454 | 1285 | win.addstr(3, 1, line[:length]) |
|
1455 | 1286 | |
|
1456 | 1287 | line = b"summary: %s" % (ctx.description().splitlines()[0]) |
|
1457 | 1288 | win.addstr(4, 1, line[:length]) |
|
1458 | 1289 | |
|
1459 | 1290 | line = b"files: " |
|
1460 | 1291 | win.addstr(5, 1, line) |
|
1461 | 1292 | fnx = 1 + len(line) |
|
1462 | 1293 | fnmaxx = length - fnx + 1 |
|
1463 | 1294 | y = 5 |
|
1464 | 1295 | fnmaxn = maxy - (1 + y) - 1 |
|
1465 | 1296 | files = ctx.files() |
|
1466 | 1297 | for i, line1 in enumerate(files): |
|
1467 | 1298 | if len(files) > fnmaxn and i == fnmaxn - 1: |
|
1468 | 1299 | win.addstr(y, fnx, _trunc_tail(b','.join(files[i:]), fnmaxx)) |
|
1469 | 1300 | y = y + 1 |
|
1470 | 1301 | break |
|
1471 | 1302 | win.addstr(y, fnx, _trunc_head(line1, fnmaxx)) |
|
1472 | 1303 | y = y + 1 |
|
1473 | 1304 | |
|
1474 | 1305 | conflicts = rule.conflicts |
|
1475 | 1306 | if len(conflicts) > 0: |
|
1476 | 1307 | conflictstr = b','.join(map(lambda r: r.ctx.hex()[:12], conflicts)) |
|
1477 | 1308 | conflictstr = b"changed files overlap with %s" % conflictstr |
|
1478 | 1309 | else: |
|
1479 | 1310 | conflictstr = b'no overlap' |
|
1480 | 1311 | |
|
1481 | 1312 | win.addstr(y, 1, conflictstr[:length]) |
|
1482 | 1313 | win.noutrefresh() |
|
1483 | 1314 | |
|
1484 | def helplines(mode): | |
|
1485 | if mode == MODE_PATCH: | |
|
1315 | def helplines(self): | |
|
1316 | if self.mode[0] == MODE_PATCH: | |
|
1486 | 1317 | help = b"""\ |
|
1487 | 1318 | ?: help, k/up: line up, j/down: line down, v: stop viewing patch |
|
1488 | 1319 | pgup: prev page, space/pgdn: next page, c: commit, q: abort |
|
1489 | 1320 | """ |
|
1490 | 1321 | else: |
|
1491 | 1322 | help = b"""\ |
|
1492 | 1323 | ?: help, k/up: move up, j/down: move down, space: select, v: view patch |
|
1493 | 1324 | d: drop, e: edit, f: fold, m: mess, p: pick, r: roll |
|
1494 | 1325 | pgup/K: move patch up, pgdn/J: move patch down, c: commit, q: abort |
|
1495 | 1326 | """ |
|
1496 | 1327 | return help.splitlines() |
|
1497 | 1328 | |
|
1498 | def renderhelp(win, state): | |
|
1329 | def render_help(self, win): | |
|
1499 | 1330 | maxy, maxx = win.getmaxyx() |
|
1500 | mode, _ = state[b'mode'] | |
|
1501 | for y, line in enumerate(helplines(mode)): | |
|
1331 | for y, line in enumerate(self.helplines()): | |
|
1502 | 1332 | if y >= maxy: |
|
1503 | 1333 | break |
|
1504 | 1334 | addln(win, y, 0, line, curses.color_pair(COLOR_HELP)) |
|
1505 | 1335 | win.noutrefresh() |
|
1506 | 1336 | |
|
1507 | def renderrules(rulesscr, state): | |
|
1508 | rules = state[b'rules'] | |
|
1509 | pos = state[b'pos'] | |
|
1510 | selected = state[b'selected'] | |
|
1511 | start = state[b'modes'][MODE_RULES][b'line_offset'] | |
|
1512 | ||
|
1513 | conflicts = [r.ctx for r in rules if r.conflicts] | |
|
1337 | def layout(self): | |
|
1338 | maxy, maxx = self.stdscr.getmaxyx() | |
|
1339 | helplen = len(self.helplines()) | |
|
1340 | mainlen = maxy - helplen - 12 | |
|
1341 | if mainlen < 1: | |
|
1342 | raise error.Abort( | |
|
1343 | _(b"terminal dimensions %d by %d too small for curses histedit") | |
|
1344 | % (maxy, maxx), | |
|
1345 | hint=_( | |
|
1346 | b"enlarge your terminal or use --config ui.interface=text" | |
|
1347 | ), | |
|
1348 | ) | |
|
1349 | return { | |
|
1350 | b'commit': (12, maxx), | |
|
1351 | b'help': (helplen, maxx), | |
|
1352 | b'main': (mainlen, maxx), | |
|
1353 | } | |
|
1354 | ||
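
layout() hands a fixed 12 rows to the commit pane and the help pane its own height, leaving the remainder for the rule list; a worked example of that vertical budget:

    maxy, maxx = 30, 80      # a 30x80 terminal
    helplen = 3              # the rules-mode help text is three lines
    mainlen = maxy - helplen - 12
    assert mainlen == 15     # rows left for the rule list
    assert mainlen >= 1      # anything smaller aborts the curses UI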
|
1355 | def display_pos_to_rule_pos(self, display_pos): | |
|
1356 | """Converts a position in display order to rule order. | |
|
1357 | ||
|
1358 | The `display_pos` is the order from the top in display order, not | |
|
1359 | considering which items are currently visible on the screen. Thus, | |
|
1360 | `display_pos=0` is the item at the top (possibly after scrolling to | |
|
1361 | the top) | |
|
1362 | """ | |
|
1363 | if self.later_on_top: | |
|
1364 | return len(self.rules) - 1 - display_pos | |
|
1365 | else: | |
|
1366 | return display_pos | |
|
1367 | ||
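
With later-commits-first enabled the display order is simply the mirrored rule order; a worked example of the conversion, written as a free function:

    def display_pos_to_rule_pos(display_pos, n_rules, later_on_top):
        return n_rules - 1 - display_pos if later_on_top else display_pos

    assert display_pos_to_rule_pos(0, 5, later_on_top=True) == 4   # top row is the newest rule
    assert display_pos_to_rule_pos(4, 5, later_on_top=True) == 0   # bottom row is the oldest
    assert display_pos_to_rule_pos(2, 5, later_on_top=False) == 2  # identity otherwise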
|
1368 | def render_rules(self, rulesscr): | |
|
1369 | start = self.modes[MODE_RULES][b'line_offset'] | |
|
1370 | ||
|
1371 | conflicts = [r.ctx for r in self.rules if r.conflicts] | |
|
1514 | 1372 | if len(conflicts) > 0: |
|
1515 | 1373 | line = b"potential conflict in %s" % b','.join( |
|
1516 | 1374 | map(pycompat.bytestr, conflicts) |
|
1517 | 1375 | ) |
|
1518 | 1376 | addln(rulesscr, -1, 0, line, curses.color_pair(COLOR_WARN)) |
|
1519 | 1377 | |
|
1520 | for y, rule in enumerate(rules[start:]): | |
|
1521 | if y >= state[b'page_height']: | |
|
1522 | break | |
|
1378 | for display_pos in range(start, len(self.rules)): | |
|
1379 | y = display_pos - start | |
|
1380 | if y < 0 or y >= self.page_height: | |
|
1381 | continue | |
|
1382 | rule_pos = self.display_pos_to_rule_pos(display_pos) | |
|
1383 | rule = self.rules[rule_pos] | |
|
1523 | 1384 | if len(rule.conflicts) > 0: |
|
1524 | 1385 | rulesscr.addstr(y, 0, b" ", curses.color_pair(COLOR_WARN)) |
|
1525 | 1386 | else: |
|
1526 | 1387 | rulesscr.addstr(y, 0, b" ", curses.COLOR_BLACK) |
|
1527 | 1388 | |
|
1528 | if y + start == selected: | |
|
1389 | if display_pos == self.selected: | |
|
1529 | 1390 | rollcolor = COLOR_ROLL_SELECTED |
|
1530 | 1391 | addln(rulesscr, y, 2, rule, curses.color_pair(COLOR_SELECTED)) |
|
1531 | elif y + start == pos: | |
|
1392 | elif display_pos == self.pos: | |
|
1532 | 1393 | rollcolor = COLOR_ROLL_CURRENT |
|
1533 | 1394 | addln( |
|
1534 | 1395 | rulesscr, |
|
1535 | 1396 | y, |
|
1536 | 1397 | 2, |
|
1537 | 1398 | rule, |
|
1538 | 1399 | curses.color_pair(COLOR_CURRENT) | curses.A_BOLD, |
|
1539 | 1400 | ) |
|
1540 | 1401 | else: |
|
1541 | 1402 | rollcolor = COLOR_ROLL |
|
1542 | 1403 | addln(rulesscr, y, 2, rule) |
|
1543 | 1404 | |
|
1544 | 1405 | if rule.action == b'roll': |
|
1545 | 1406 | rulesscr.addstr( |
|
1546 | 1407 | y, |
|
1547 | 1408 | 2 + len(rule.prefix), |
|
1548 | 1409 | rule.desc, |
|
1549 | 1410 | curses.color_pair(rollcolor), |
|
1550 | 1411 | ) |
|
1551 | 1412 | |
|
1552 | 1413 | rulesscr.noutrefresh() |
|
1553 | 1414 | |
|
1554 | def renderstring(win, state, output, diffcolors=False): | |
|
1415 | def render_string(self, win, output, diffcolors=False): | |
|
1555 | 1416 | maxy, maxx = win.getmaxyx() |
|
1556 | 1417 | length = min(maxy - 1, len(output)) |
|
1557 | 1418 | for y in range(0, length): |
|
1558 | 1419 | line = output[y] |
|
1559 | 1420 | if diffcolors: |
|
1560 | 1421 | if line and line[0] == b'+': |
|
1561 | 1422 | win.addstr( |
|
1562 | 1423 | y, 0, line, curses.color_pair(COLOR_DIFF_ADD_LINE) |
|
1563 | 1424 | ) |
|
1564 | 1425 | elif line and line[0] == b'-': |
|
1565 | 1426 | win.addstr( |
|
1566 | 1427 | y, 0, line, curses.color_pair(COLOR_DIFF_DEL_LINE) |
|
1567 | 1428 | ) |
|
1568 | 1429 | elif line.startswith(b'@@ '): |
|
1569 | 1430 | win.addstr(y, 0, line, curses.color_pair(COLOR_DIFF_OFFSET)) |
|
1570 | 1431 | else: |
|
1571 | 1432 | win.addstr(y, 0, line) |
|
1572 | 1433 | else: |
|
1573 | 1434 | win.addstr(y, 0, line) |
|
1574 | 1435 | win.noutrefresh() |
|
1575 | 1436 | |
|
1576 | def renderpatch(win, state): | |

1577 | start = state[b'modes'][MODE_PATCH][b'line_offset'] | |

1578 | content = state[b'modes'][MODE_PATCH][b'patchcontents'] | |

1579 | renderstring(win, state, content[start:], diffcolors=True) | |
|
1580 | ||
|
1581 | def layout(mode): | |
|
1582 | maxy, maxx = stdscr.getmaxyx() | |
|
1583 | helplen = len(helplines(mode)) | |
|
1584 | mainlen = maxy - helplen - 12 | |
|
1585 | if mainlen < 1: | |
|
1586 | raise error.Abort( | |
|
1587 | _(b"terminal dimensions %d by %d too small for curses histedit") | |
|
1588 | % (maxy, maxx), | |
|
1589 | hint=_( | |
|
1590 | b"enlarge your terminal or use --config ui.interface=text" | |
|
1591 | ), | |
|
1592 | ) | |
|
1593 | return { | |
|
1594 | b'commit': (12, maxx), | |
|
1595 | b'help': (helplen, maxx), | |
|
1596 | b'main': (mainlen, maxx), | |
|
1597 | } | |
|
1437 | def render_patch(self, win): | |
|
1438 | start = self.modes[MODE_PATCH][b'line_offset'] | |
|
1439 | content = self.modes[MODE_PATCH][b'patchcontents'] | |
|
1440 | self.render_string(win, content[start:], diffcolors=True) | |
|
1441 | ||
|
1442 | def event(self, ch): | |
|
1443 | """Change state based on the current character input | |
|
1444 | ||
|
1445 | This takes the current state and, based on the current character input from | |

1446 | the user, changes the state accordingly. | |
|
1447 | """ | |
|
1448 | oldpos = self.pos | |
|
1449 | ||
|
1450 | if ch in (curses.KEY_RESIZE, b"KEY_RESIZE"): | |
|
1451 | return E_RESIZE | |
|
1452 | ||
|
1453 | lookup_ch = ch | |
|
1454 | if ch is not None and b'0' <= ch <= b'9': | |
|
1455 | lookup_ch = b'0' | |
|
1456 | ||
|
1457 | curmode, prevmode = self.mode | |
|
1458 | action = KEYTABLE[curmode].get( | |
|
1459 | lookup_ch, KEYTABLE[b'global'].get(lookup_ch) | |
|
1460 | ) | |
|
1461 | if action is None: | |
|
1462 | return | |
|
1463 | if action in (b'down', b'move-down'): | |
|
1464 | newpos = min(oldpos + 1, len(self.rules) - 1) | |
|
1465 | self.move_cursor(oldpos, newpos) | |
|
1466 | if self.selected is not None or action == b'move-down': | |
|
1467 | self.swap(oldpos, newpos) | |
|
1468 | elif action in (b'up', b'move-up'): | |
|
1469 | newpos = max(0, oldpos - 1) | |
|
1470 | self.move_cursor(oldpos, newpos) | |
|
1471 | if self.selected is not None or action == b'move-up': | |
|
1472 | self.swap(oldpos, newpos) | |
|
1473 | elif action == b'next-action': | |
|
1474 | self.cycle_action(oldpos, next=True) | |
|
1475 | elif action == b'prev-action': | |
|
1476 | self.cycle_action(oldpos, next=False) | |
|
1477 | elif action == b'select': | |
|
1478 | self.selected = oldpos if self.selected is None else None | |
|
1479 | self.make_selection(self.selected) | |
|
1480 | elif action == b'goto' and int(ch) < len(self.rules) <= 10: | |
|
1481 | newrule = next((r for r in self.rules if r.origpos == int(ch))) | |
|
1482 | self.move_cursor(oldpos, newrule.pos) | |
|
1483 | if self.selected is not None: | |
|
1484 | self.swap(oldpos, newrule.pos) | |
|
1485 | elif action.startswith(b'action-'): | |
|
1486 | self.change_action(oldpos, action[7:]) | |
|
1487 | elif action == b'showpatch': | |
|
1488 | self.change_mode(MODE_PATCH if curmode != MODE_PATCH else prevmode) | |
|
1489 | elif action == b'help': | |
|
1490 | self.change_mode(MODE_HELP if curmode != MODE_HELP else prevmode) | |
|
1491 | elif action == b'quit': | |
|
1492 | return E_QUIT | |
|
1493 | elif action == b'histedit': | |
|
1494 | return E_HISTEDIT | |
|
1495 | elif action == b'page-down': | |
|
1496 | return E_PAGEDOWN | |
|
1497 | elif action == b'page-up': | |
|
1498 | return E_PAGEUP | |
|
1499 | elif action == b'line-down': | |
|
1500 | return E_LINEDOWN | |
|
1501 | elif action == b'line-up': | |
|
1502 | return E_LINEUP | |
|
1503 | ||
|
1504 | def patch_contents(self): | |
|
1505 | repo = self.repo | |
|
1506 | rule = self.rules[self.display_pos_to_rule_pos(self.pos)] | |
|
1507 | displayer = logcmdutil.changesetdisplayer( | |
|
1508 | repo.ui, | |
|
1509 | repo, | |
|
1510 | {b"patch": True, b"template": b"status"}, | |
|
1511 | buffered=True, | |
|
1512 | ) | |
|
1513 | overrides = {(b'ui', b'verbose'): True} | |
|
1514 | with repo.ui.configoverride(overrides, source=b'histedit'): | |
|
1515 | displayer.show(rule.ctx) | |
|
1516 | displayer.close() | |
|
1517 | return displayer.hunk[rule.ctx.rev()].splitlines() | |
|
1518 | ||
|
1519 | def move_cursor(self, oldpos, newpos): | |
|
1520 | """Change the rule/changeset that the cursor is pointing to, regardless of | |
|
1521 | current mode (you can switch between patches from the view patch window).""" | |
|
1522 | self.pos = newpos | |
|
1523 | ||
|
1524 | mode, _ = self.mode | |
|
1525 | if mode == MODE_RULES: | |
|
1526 | # Scroll through the list by updating the view for MODE_RULES, so that | |
|
1527 | # even if we are not currently viewing the rules, switching back will | |
|
1528 | # result in the cursor's rule being visible. | |
|
1529 | modestate = self.modes[MODE_RULES] | |
|
1530 | if newpos < modestate[b'line_offset']: | |
|
1531 | modestate[b'line_offset'] = newpos | |
|
1532 | elif newpos > modestate[b'line_offset'] + self.page_height - 1: | |
|
1533 | modestate[b'line_offset'] = newpos - self.page_height + 1 | |
|
1534 | ||
|
1535 | # Reset the patch view region to the top of the new patch. | |
|
1536 | self.modes[MODE_PATCH][b'line_offset'] = 0 | |
|
1537 | ||
|
1538 | def change_mode(self, mode): | |
|
1539 | curmode, _ = self.mode | |
|
1540 | self.mode = (mode, curmode) | |
|
1541 | if mode == MODE_PATCH: | |
|
1542 | self.modes[MODE_PATCH][b'patchcontents'] = self.patch_contents() | |
|
1543 | ||
|
1544 | def make_selection(self, pos): | |
|
1545 | self.selected = pos | |
|
1546 | ||
|
1547 | def swap(self, oldpos, newpos): | |
|
1548 | """Swap two positions and calculate necessary conflicts in | |
|
1549 | O(|newpos-oldpos|) time""" | |
|
1550 | old_rule_pos = self.display_pos_to_rule_pos(oldpos) | |
|
1551 | new_rule_pos = self.display_pos_to_rule_pos(newpos) | |
|
1552 | ||
|
1553 | rules = self.rules | |
|
1554 | assert 0 <= old_rule_pos < len(rules) and 0 <= new_rule_pos < len(rules) | |
|
1555 | ||
|
1556 | rules[old_rule_pos], rules[new_rule_pos] = ( | |
|
1557 | rules[new_rule_pos], | |
|
1558 | rules[old_rule_pos], | |
|
1559 | ) | |
|
1560 | ||
|
1561 | # TODO: swap should not know about histeditrule's internals | |
|
1562 | rules[new_rule_pos].pos = new_rule_pos | |
|
1563 | rules[old_rule_pos].pos = old_rule_pos | |
|
1564 | ||
|
1565 | start = min(old_rule_pos, new_rule_pos) | |
|
1566 | end = max(old_rule_pos, new_rule_pos) | |
|
1567 | for r in pycompat.xrange(start, end + 1): | |
|
1568 | rules[new_rule_pos].checkconflicts(rules[r]) | |
|
1569 | rules[old_rule_pos].checkconflicts(rules[r]) | |
|
1570 | ||
|
1571 | if self.selected: | |
|
1572 | self.make_selection(newpos) | |
|
1573 | ||
|
1574 | def change_action(self, pos, action): | |
|
1575 | """Change the action state on the given position to the new action""" | |
|
1576 | assert 0 <= pos < len(self.rules) | |
|
1577 | self.rules[pos].action = action | |
|
1578 | ||
|
1579 | def cycle_action(self, pos, next=False): | |
|
1580 | """Changes the action state the next or the previous action from | |
|
1581 | the action list""" | |
|
1582 | assert 0 <= pos < len(self.rules) | |
|
1583 | current = self.rules[pos].action | |
|
1584 | ||
|
1585 | assert current in KEY_LIST | |
|
1586 | ||
|
1587 | index = KEY_LIST.index(current) | |
|
1588 | if next: | |
|
1589 | index += 1 | |
|
1590 | else: | |
|
1591 | index -= 1 | |
|
1592 | self.change_action(pos, KEY_LIST[index % len(KEY_LIST)]) | |
|
1593 | ||
|
1594 | def change_view(self, delta, unit): | |
|
1595 | """Change the region of whatever is being viewed (a patch or the list of | |
|
1596 | changesets). 'delta' is an amount (+/- 1) and 'unit' is 'page' or 'line'.""" | |
|
1597 | mode, _ = self.mode | |
|
1598 | if mode != MODE_PATCH: | |
|
1599 | return | |
|
1600 | mode_state = self.modes[mode] | |
|
1601 | num_lines = len(mode_state[b'patchcontents']) | |
|
1602 | page_height = self.page_height | |
|
1603 | unit = page_height if unit == b'page' else 1 | |
|
1604 | num_pages = 1 + (num_lines - 1) // page_height | |
|
1605 | max_offset = (num_pages - 1) * page_height | |
|
1606 | newline = mode_state[b'line_offset'] + delta * unit | |
|
1607 | mode_state[b'line_offset'] = max(0, min(max_offset, newline)) | |
|
1608 | ||
|
1609 | ||
|
1610 | def _chisteditmain(repo, rules, stdscr): | |
|
1611 | try: | |
|
1612 | curses.use_default_colors() | |
|
1613 | except curses.error: | |
|
1614 | pass | |
|
1615 | ||
|
1616 | # initialize color pattern | |
|
1617 | curses.init_pair(COLOR_HELP, curses.COLOR_WHITE, curses.COLOR_BLUE) | |
|
1618 | curses.init_pair(COLOR_SELECTED, curses.COLOR_BLACK, curses.COLOR_WHITE) | |
|
1619 | curses.init_pair(COLOR_WARN, curses.COLOR_BLACK, curses.COLOR_YELLOW) | |
|
1620 | curses.init_pair(COLOR_OK, curses.COLOR_BLACK, curses.COLOR_GREEN) | |
|
1621 | curses.init_pair(COLOR_CURRENT, curses.COLOR_WHITE, curses.COLOR_MAGENTA) | |
|
1622 | curses.init_pair(COLOR_DIFF_ADD_LINE, curses.COLOR_GREEN, -1) | |
|
1623 | curses.init_pair(COLOR_DIFF_DEL_LINE, curses.COLOR_RED, -1) | |
|
1624 | curses.init_pair(COLOR_DIFF_OFFSET, curses.COLOR_MAGENTA, -1) | |
|
1625 | curses.init_pair(COLOR_ROLL, curses.COLOR_RED, -1) | |
|
1626 | curses.init_pair( | |
|
1627 | COLOR_ROLL_CURRENT, curses.COLOR_BLACK, curses.COLOR_MAGENTA | |
|
1628 | ) | |
|
1629 | curses.init_pair(COLOR_ROLL_SELECTED, curses.COLOR_RED, curses.COLOR_WHITE) | |
|
1630 | ||
|
1631 | # don't display the cursor | |
|
1632 | try: | |
|
1633 | curses.curs_set(0) | |
|
1634 | except curses.error: | |
|
1635 | pass | |
|
1598 | 1636 | |
|
1599 | 1637 | def drawvertwin(size, y, x): |
|
1600 | 1638 | win = curses.newwin(size[0], size[1], y, x) |
|
1601 | 1639 | y += size[0] |
|
1602 | 1640 | return win, y, x |
|
1603 | 1641 | |
|
1604 | state = { | |
|
1605 | b'pos': 0, | |
|
1606 | b'rules': rules, | |
|
1607 | b'selected': None, | |
|
1608 | b'mode': (MODE_INIT, MODE_INIT), | |
|
1609 | b'page_height': None, | |
|
1610 | b'modes': { | |
|
1611 | MODE_RULES: { | |
|
1612 | b'line_offset': 0, | |
|
1613 | }, | |
|
1614 | MODE_PATCH: { | |
|
1615 | b'line_offset': 0, | |
|
1616 | }, | |
|
1617 | }, | |
|
1618 | b'repo': repo, | |
|
1619 | } | |
|
1642 | state = _chistedit_state(repo, rules, stdscr) | |
|
1620 | 1643 | |
|
1621 | 1644 | # eventloop |
|
1622 | 1645 | ch = None |
|
1623 | 1646 | stdscr.clear() |
|
1624 | 1647 | stdscr.refresh() |
|
1625 | 1648 | while True: |
|
1626 | oldmode, unused = state[b'mode'] | |
|
1649 | oldmode, unused = state.mode | |
|
1627 | 1650 | if oldmode == MODE_INIT: |
|
1628 | changemode(state, MODE_RULES) | |

1629 | e = event(state, ch) | |
|
1651 | state.change_mode(MODE_RULES) | |
|
1652 | e = state.event(ch) | |
|
1630 | 1653 | |
|
1631 | 1654 | if e == E_QUIT: |
|
1632 | 1655 | return False |
|
1633 | 1656 | if e == E_HISTEDIT: |
|
1634 | return state[b'rules'] | |
|
1657 | return state.rules | |
|
1635 | 1658 | else: |
|
1636 | 1659 | if e == E_RESIZE: |
|
1637 | 1660 | size = screen_size() |
|
1638 | 1661 | if size != stdscr.getmaxyx(): |
|
1639 | 1662 | curses.resizeterm(*size) |
|
1640 | 1663 | |
|
1641 | curmode, unused = state[b'mode'] | |
|
1642 | sizes = layout(curmode) | |
|
1664 | sizes = state.layout() | |
|
1665 | curmode, unused = state.mode | |
|
1643 | 1666 | if curmode != oldmode: |
|
1644 | state[b'page_height'] = sizes[b'main'][0] | |
|
1667 | state.page_height = sizes[b'main'][0] | |
|
1645 | 1668 | # Adjust the view to fit the current screen size. |
|
1646 | movecursor(state, state[b'pos'], state[b'pos']) | |
|
1669 | state.move_cursor(state.pos, state.pos) | |
|
1647 | 1670 | |
|
1648 | 1671 | # Pack the windows against the top, each pane spread across the |
|
1649 | 1672 | # full width of the screen. |
|
1650 | 1673 | y, x = (0, 0) |
|
1651 | 1674 | helpwin, y, x = drawvertwin(sizes[b'help'], y, x) |
|
1652 | 1675 | mainwin, y, x = drawvertwin(sizes[b'main'], y, x) |
|
1653 | 1676 | commitwin, y, x = drawvertwin(sizes[b'commit'], y, x) |
|
1654 | 1677 | |
|
1655 | 1678 | if e in (E_PAGEDOWN, E_PAGEUP, E_LINEDOWN, E_LINEUP): |
|
1656 | 1679 | if e == E_PAGEDOWN: |
|
1657 | changeview(state, +1, b'page') | |
|
1680 | state.change_view(+1, b'page') | |
|
1658 | 1681 | elif e == E_PAGEUP: |
|
1659 | changeview(state, -1, b'page') | |
|
1682 | state.change_view(-1, b'page') | |
|
1660 | 1683 | elif e == E_LINEDOWN: |
|
1661 | changeview(state, +1, b'line') | |
|
1684 | state.change_view(+1, b'line') | |
|
1662 | 1685 | elif e == E_LINEUP: |
|
1663 | changeview(state, -1, b'line') | |
|
1686 | state.change_view(-1, b'line') | |
|
1664 | 1687 | |
|
1665 | 1688 | # start rendering |
|
1666 | 1689 | commitwin.erase() |
|
1667 | 1690 | helpwin.erase() |
|
1668 | 1691 | mainwin.erase() |
|
1669 | 1692 | if curmode == MODE_PATCH: |
|
1670 | renderpatch(mainwin, state) | |
|
1693 | state.render_patch(mainwin) | |
|
1671 | 1694 | elif curmode == MODE_HELP: |
|
1672 | renderstring(mainwin, state, __doc__.strip().splitlines()) | |
|
1695 | state.render_string(mainwin, __doc__.strip().splitlines()) | |
|
1673 | 1696 | else: |
|
1674 | renderrules(mainwin, state) | |

1675 | rendercommit(commitwin, state) | |

1676 | renderhelp(helpwin, state) | |
|
1697 | state.render_rules(mainwin) | |
|
1698 | state.render_commit(commitwin) | |
|
1699 | state.render_help(helpwin) | |
|
1677 | 1700 | curses.doupdate() |
|
1678 | 1701 | # done rendering |
|
1679 | 1702 | ch = encoding.strtolocal(stdscr.getkey()) |
|
1680 | 1703 | |
|
1681 | 1704 | |
|
1682 | 1705 | def _chistedit(ui, repo, freeargs, opts): |
|
1683 | 1706 | """interactively edit changeset history via a curses interface |
|
1684 | 1707 | |
|
1685 | 1708 | Provides a ncurses interface to histedit. Press ? in chistedit mode |
|
1686 | 1709 | to see an extensive help. Requires python-curses to be installed.""" |
|
1687 | 1710 | |
|
1688 | 1711 | if curses is None: |
|
1689 | 1712 | raise error.Abort(_(b"Python curses library required")) |
|
1690 | 1713 | |
|
1691 | 1714 | # disable color |
|
1692 | 1715 | ui._colormode = None |
|
1693 | 1716 | |
|
1694 | 1717 | try: |
|
1695 | 1718 | keep = opts.get(b'keep') |
|
1696 | 1719 | revs = opts.get(b'rev', [])[:] |
|
1697 | 1720 | cmdutil.checkunfinished(repo) |
|
1698 | 1721 | cmdutil.bailifchanged(repo) |
|
1699 | 1722 | |
|
1700 | if os.path.exists(os.path.join(repo.path, b'histedit-state')): | |
|
1701 | raise error.Abort( | |
|
1702 | _( | |
|
1703 | b'history edit already in progress, try ' | |
|
1704 | b'--continue or --abort' | |
|
1705 | ) | |
|
1706 | ) | |
|
1707 | 1723 | revs.extend(freeargs) |
|
1708 | 1724 | if not revs: |
|
1709 | 1725 | defaultrev = destutil.desthistedit(ui, repo) |
|
1710 | 1726 | if defaultrev is not None: |
|
1711 | 1727 | revs.append(defaultrev) |
|
1712 | 1728 | if len(revs) != 1: |
|
1713 | raise error.Abort( | |
|
1729 | raise error.InputError( | |
|
1714 | 1730 | _(b'histedit requires exactly one ancestor revision') |
|
1715 | 1731 | ) |
|
1716 | 1732 | |
|
1717 | rr = list(repo.set(b'roots(%ld)', scmutil.revrange(repo, revs))) | |
|
1733 | rr = list(repo.set(b'roots(%ld)', logcmdutil.revrange(repo, revs))) | |
|
1718 | 1734 | if len(rr) != 1: |
|
1719 | raise error.Abort( | |
|
1735 | raise error.InputError( | |
|
1720 | 1736 | _( |
|
1721 | 1737 | b'The specified revisions must have ' |
|
1722 | 1738 | b'exactly one common root' |
|
1723 | 1739 | ) |
|
1724 | 1740 | ) |
|
1725 | 1741 | root = rr[0].node() |
|
1726 | 1742 | |
|
1727 | 1743 | topmost = repo.dirstate.p1() |
|
1728 | 1744 | revs = between(repo, root, topmost, keep) |
|
1729 | 1745 | if not revs: |
|
1730 | raise error.Abort( | |
|
1746 | raise error.InputError( | |
|
1731 | 1747 | _(b'%s is not an ancestor of working directory') % short(root) |
|
1732 | 1748 | ) |
|
1733 | 1749 | |
|
1734 | ctxs = [] | |
|
1750 | rules = [] | |
|
1735 | 1751 | for i, r in enumerate(revs): |
|
1736 | ctxs.append(histeditrule(ui, repo[r], i)) | |
|
1752 | rules.append(histeditrule(ui, repo[r], i)) | |
|
1737 | 1753 | with util.with_lc_ctype(): |
|
1738 | rc = curses.wrapper(functools.partial(_chisteditmain, repo, ctxs)) | |
|
1754 | rc = curses.wrapper(functools.partial(_chisteditmain, repo, rules)) | |
|
1739 | 1755 | curses.echo() |
|
1740 | 1756 | curses.endwin() |
|
1741 | 1757 | if rc is False: |
|
1742 | 1758 | ui.write(_(b"histedit aborted\n")) |
|
1743 | 1759 | return 0 |
|
1744 | 1760 | if type(rc) is list: |
|
1745 | 1761 | ui.status(_(b"performing changes\n")) |
|
1746 | 1762 | rules = makecommands(rc) |
|
1747 | 1763 | with repo.vfs(b'chistedit', b'w+') as fp: |
|
1748 | 1764 | for r in rules: |
|
1749 | 1765 | fp.write(r) |
|
1750 | 1766 | opts[b'commands'] = fp.name |
|
1751 | 1767 | return _texthistedit(ui, repo, freeargs, opts) |
|
1752 | 1768 | except KeyboardInterrupt: |
|
1753 | 1769 | pass |
|
1754 | 1770 | return -1 |
|
1755 | 1771 | |
|
1756 | 1772 | |
|
1757 | 1773 | @command( |
|
1758 | 1774 | b'histedit', |
|
1759 | 1775 | [ |
|
1760 | 1776 | ( |
|
1761 | 1777 | b'', |
|
1762 | 1778 | b'commands', |
|
1763 | 1779 | b'', |
|
1764 | 1780 | _(b'read history edits from the specified file'), |
|
1765 | 1781 | _(b'FILE'), |
|
1766 | 1782 | ), |
|
1767 | 1783 | (b'c', b'continue', False, _(b'continue an edit already in progress')), |
|
1768 | 1784 | (b'', b'edit-plan', False, _(b'edit remaining actions list')), |
|
1769 | 1785 | ( |
|
1770 | 1786 | b'k', |
|
1771 | 1787 | b'keep', |
|
1772 | 1788 | False, |
|
1773 | 1789 | _(b"don't strip old nodes after edit is complete"), |
|
1774 | 1790 | ), |
|
1775 | 1791 | (b'', b'abort', False, _(b'abort an edit in progress')), |
|
1776 | 1792 | (b'o', b'outgoing', False, _(b'changesets not found in destination')), |
|
1777 | 1793 | ( |
|
1778 | 1794 | b'f', |
|
1779 | 1795 | b'force', |
|
1780 | 1796 | False, |
|
1781 | 1797 | _(b'force outgoing even for unrelated repositories'), |
|
1782 | 1798 | ), |
|
1783 | 1799 | (b'r', b'rev', [], _(b'first revision to be edited'), _(b'REV')), |
|
1784 | 1800 | ] |
|
1785 | 1801 | + cmdutil.formatteropts, |
|
1786 | 1802 | _(b"[OPTIONS] ([ANCESTOR] | --outgoing [URL])"), |
|
1787 | 1803 | helpcategory=command.CATEGORY_CHANGE_MANAGEMENT, |
|
1788 | 1804 | ) |
|
1789 | 1805 | def histedit(ui, repo, *freeargs, **opts): |
|
1790 | 1806 | """interactively edit changeset history |
|
1791 | 1807 | |
|
1792 | 1808 | This command lets you edit a linear series of changesets (up to |
|
1793 | 1809 | and including the working directory, which should be clean). |
|
1794 | 1810 | You can: |
|
1795 | 1811 | |
|
1796 | 1812 | - `pick` to [re]order a changeset |
|
1797 | 1813 | |
|
1798 | 1814 | - `drop` to omit changeset |
|
1799 | 1815 | |
|
1800 | 1816 | - `mess` to reword the changeset commit message |
|
1801 | 1817 | |
|
1802 | 1818 | - `fold` to combine it with the preceding changeset (using the later date) |
|
1803 | 1819 | |
|
1804 | 1820 | - `roll` like fold, but discarding this commit's description and date |
|
1805 | 1821 | |
|
1806 | 1822 | - `edit` to edit this changeset (preserving date) |
|
1807 | 1823 | |
|
1808 | 1824 | - `base` to checkout changeset and apply further changesets from there |
|
1809 | 1825 | |
|
1810 | 1826 | There are a number of ways to select the root changeset: |
|
1811 | 1827 | |
|
1812 | 1828 | - Specify ANCESTOR directly |
|
1813 | 1829 | |
|
1814 | 1830 | - Use --outgoing -- it will be the first linear changeset not |
|
1815 | 1831 | included in destination. (See :hg:`help config.paths.default-push`) |
|
1816 | 1832 | |
|
1817 | 1833 | - Otherwise, the value from the "histedit.defaultrev" config option |
|
1818 | 1834 | is used as a revset to select the base revision when ANCESTOR is not |
|
1819 | 1835 | specified. The first revision returned by the revset is used. By |
|
1820 | 1836 | default, this selects the editable history that is unique to the |
|
1821 | 1837 | ancestry of the working directory. |
|
1822 | 1838 | |
|
1823 | 1839 | .. container:: verbose |
|
1824 | 1840 | |
|
1825 | 1841 | If you use --outgoing, this command will abort if there are ambiguous |
|
1826 | 1842 | outgoing revisions. For example, if there are multiple branches |
|
1827 | 1843 | containing outgoing revisions. |
|
1828 | 1844 | |
|
1829 | 1845 | Use "min(outgoing() and ::.)" or similar revset specification |
|
1830 | 1846 | instead of --outgoing to specify edit target revision exactly in |
|
1831 | 1847 | such ambiguous situation. See :hg:`help revsets` for detail about |
|
1832 | 1848 | selecting revisions. |
|
1833 | 1849 | |
|
1834 | 1850 | .. container:: verbose |
|
1835 | 1851 | |
|
1836 | 1852 | Examples: |
|
1837 | 1853 | |
|
1838 | 1854 | - A number of changes have been made. |
|
1839 | 1855 | Revision 3 is no longer needed. |
|
1840 | 1856 | |
|
1841 | 1857 | Start history editing from revision 3:: |
|
1842 | 1858 | |
|
1843 | 1859 | hg histedit -r 3 |
|
1844 | 1860 | |
|
1845 | 1861 | An editor opens, containing the list of revisions, |
|
1846 | 1862 | with specific actions specified:: |
|
1847 | 1863 | |
|
1848 | 1864 | pick 5339bf82f0ca 3 Zworgle the foobar |
|
1849 | 1865 | pick 8ef592ce7cc4 4 Bedazzle the zerlog |
|
1850 | 1866 | pick 0a9639fcda9d 5 Morgify the cromulancy |
|
1851 | 1867 | |
|
1852 | 1868 | Additional information about the possible actions |
|
1853 | 1869 | to take appears below the list of revisions. |
|
1854 | 1870 | |
|
1855 | 1871 | To remove revision 3 from the history, |
|
1856 | 1872 | its action (at the beginning of the relevant line) |
|
1857 | 1873 | is changed to 'drop':: |
|
1858 | 1874 | |
|
1859 | 1875 | drop 5339bf82f0ca 3 Zworgle the foobar |
|
1860 | 1876 | pick 8ef592ce7cc4 4 Bedazzle the zerlog |
|
1861 | 1877 | pick 0a9639fcda9d 5 Morgify the cromulancy |
|
1862 | 1878 | |
|
1863 | 1879 | - A number of changes have been made. |
|
1864 | 1880 | Revision 2 and 4 need to be swapped. |
|
1865 | 1881 | |
|
1866 | 1882 | Start history editing from revision 2:: |
|
1867 | 1883 | |
|
1868 | 1884 | hg histedit -r 2 |
|
1869 | 1885 | |
|
1870 | 1886 | An editor opens, containing the list of revisions, |
|
1871 | 1887 | with specific actions specified:: |
|
1872 | 1888 | |
|
1873 | 1889 | pick 252a1af424ad 2 Blorb a morgwazzle |
|
1874 | 1890 | pick 5339bf82f0ca 3 Zworgle the foobar |
|
1875 | 1891 | pick 8ef592ce7cc4 4 Bedazzle the zerlog |
|
1876 | 1892 | |
|
1877 | 1893 | To swap revision 2 and 4, its lines are swapped |
|
1878 | 1894 | in the editor:: |
|
1879 | 1895 | |
|
1880 | 1896 | pick 8ef592ce7cc4 4 Bedazzle the zerlog |
|
1881 | 1897 | pick 5339bf82f0ca 3 Zworgle the foobar |
|
1882 | 1898 | pick 252a1af424ad 2 Blorb a morgwazzle |
|
1883 | 1899 | |
|
1884 | 1900 | Returns 0 on success, 1 if user intervention is required (not only |
|
1885 | 1901 | for intentional "edit" command, but also for resolving unexpected |
|
1886 | 1902 | conflicts). |
|
1887 | 1903 | """ |
|
1888 | 1904 | opts = pycompat.byteskwargs(opts) |
|
1889 | 1905 | |
|
1890 | 1906 | # kludge: _chistedit only works for starting an edit, not aborting |
|
1891 | 1907 | # or continuing, so fall back to regular _texthistedit for those |
|
1892 | 1908 | # operations. |
|
1893 | 1909 | if ui.interface(b'histedit') == b'curses' and _getgoal(opts) == goalnew: |
|
1894 | 1910 | return _chistedit(ui, repo, freeargs, opts) |
|
1895 | 1911 | return _texthistedit(ui, repo, freeargs, opts) |
|
1896 | 1912 | |
|
1897 | 1913 | |
|
1898 | 1914 | def _texthistedit(ui, repo, freeargs, opts): |
|
1899 | 1915 | state = histeditstate(repo) |
|
1900 | 1916 | with repo.wlock() as wlock, repo.lock() as lock: |
|
1901 | 1917 | state.wlock = wlock |
|
1902 | 1918 | state.lock = lock |
|
1903 | 1919 | _histedit(ui, repo, state, freeargs, opts) |
|
1904 | 1920 | |
|
1905 | 1921 | |
|
1906 | 1922 | goalcontinue = b'continue' |
|
1907 | 1923 | goalabort = b'abort' |
|
1908 | 1924 | goaleditplan = b'edit-plan' |
|
1909 | 1925 | goalnew = b'new' |
|
1910 | 1926 | |
|
1911 | 1927 | |
|
1912 | 1928 | def _getgoal(opts): |
|
1913 | 1929 | if opts.get(b'continue'): |
|
1914 | 1930 | return goalcontinue |
|
1915 | 1931 | if opts.get(b'abort'): |
|
1916 | 1932 | return goalabort |
|
1917 | 1933 | if opts.get(b'edit_plan'): |
|
1918 | 1934 | return goaleditplan |
|
1919 | 1935 | return goalnew |
|
1920 | 1936 | |
|
1921 | 1937 | |
|
1922 | 1938 | def _readfile(ui, path): |
|
1923 | 1939 | if path == b'-': |
|
1924 | 1940 | with ui.timeblockedsection(b'histedit'): |
|
1925 | 1941 | return ui.fin.read() |
|
1926 | 1942 | else: |
|
1927 | 1943 | with open(path, b'rb') as f: |
|
1928 | 1944 | return f.read() |
|
1929 | 1945 | |
|
1930 | 1946 | |
|
1931 | def _validateargs(ui, repo, state, freeargs, opts, goal, rules, revs): | |
|
1947 | def _validateargs(ui, repo, freeargs, opts, goal, rules, revs): | |
|
1932 | 1948 | # TODO only abort if we try to histedit mq patches, not just |
|
1933 | 1949 | # blanket if mq patches are applied somewhere |
|
1934 | 1950 | mq = getattr(repo, 'mq', None) |
|
1935 | 1951 | if mq and mq.applied: |
|
1936 | raise error.Abort(_(b'source has mq patches applied')) | |
|
1952 | raise error.StateError(_(b'source has mq patches applied')) | |
|
1937 | 1953 | |
|
1938 | 1954 | # basic argument incompatibility processing |
|
1939 | 1955 | outg = opts.get(b'outgoing') |
|
1940 | 1956 | editplan = opts.get(b'edit_plan') |
|
1941 | 1957 | abort = opts.get(b'abort') |
|
1942 | 1958 | force = opts.get(b'force') |
|
1943 | 1959 | if force and not outg: |
|
1944 | raise error.Abort(_(b'--force only allowed with --outgoing')) | |
|
1960 | raise error.InputError(_(b'--force only allowed with --outgoing')) | |
|
1945 | 1961 | if goal == b'continue': |
|
1946 | 1962 | if any((outg, abort, revs, freeargs, rules, editplan)): |
|
1947 | raise error.Abort(_(b'no arguments allowed with --continue')) | |
|
1963 | raise error.InputError(_(b'no arguments allowed with --continue')) | |
|
1948 | 1964 | elif goal == b'abort': |
|
1949 | 1965 | if any((outg, revs, freeargs, rules, editplan)): |
|
1950 | raise error.Abort(_(b'no arguments allowed with --abort')) | |
|
1966 | raise error.InputError(_(b'no arguments allowed with --abort')) | |
|
1951 | 1967 | elif goal == b'edit-plan': |
|
1952 | 1968 | if any((outg, revs, freeargs)): |
|
1953 | raise error.Abort( | |
|
1969 | raise error.InputError( | |
|
1954 | 1970 | _(b'only --commands argument allowed with --edit-plan') |
|
1955 | 1971 | ) |
|
1956 | 1972 | else: |
|
1957 | if state.inprogress(): | |
|
1958 | raise error.Abort( | |
|
1959 | _( | |
|
1960 | b'history edit already in progress, try ' | |
|
1961 | b'--continue or --abort' | |
|
1962 | ) | |
|
1963 | ) | |
|
1964 | 1973 | if outg: |
|
1965 | 1974 | if revs: |
|
1966 | raise error.Abort(_(b'no revisions allowed with --outgoing')) | |
|
1975 | raise error.InputError( | |
|
1976 | _(b'no revisions allowed with --outgoing') | |
|
1977 | ) | |
|
1967 | 1978 | if len(freeargs) > 1: |
|
1968 | raise error.Abort( | |
|
1979 | raise error.InputError( | |
|
1969 | 1980 | _(b'only one repo argument allowed with --outgoing') |
|
1970 | 1981 | ) |
|
1971 | 1982 | else: |
|
1972 | 1983 | revs.extend(freeargs) |
|
1973 | 1984 | if len(revs) == 0: |
|
1974 | 1985 | defaultrev = destutil.desthistedit(ui, repo) |
|
1975 | 1986 | if defaultrev is not None: |
|
1976 | 1987 | revs.append(defaultrev) |
|
1977 | 1988 | |
|
1978 | 1989 | if len(revs) != 1: |
|
1979 | raise error.Abort( | |
|
1990 | raise error.InputError( | |
|
1980 | 1991 | _(b'histedit requires exactly one ancestor revision') |
|
1981 | 1992 | ) |
|
1982 | 1993 | |
|
1983 | 1994 | |
|
1984 | 1995 | def _histedit(ui, repo, state, freeargs, opts): |
|
1985 | 1996 | fm = ui.formatter(b'histedit', opts) |
|
1986 | 1997 | fm.startitem() |
|
1987 | 1998 | goal = _getgoal(opts) |
|
1988 | 1999 | revs = opts.get(b'rev', []) |
|
1989 | 2000 | nobackup = not ui.configbool(b'rewrite', b'backup-bundle') |
|
1990 | 2001 | rules = opts.get(b'commands', b'') |
|
1991 | 2002 | state.keep = opts.get(b'keep', False) |
|
1992 | 2003 | |
|
1993 | _validateargs(ui, repo, state, freeargs, opts, goal, rules, revs) | |
|
2004 | _validateargs(ui, repo, freeargs, opts, goal, rules, revs) | |
|
1994 | 2005 | |
|
1995 | 2006 | hastags = False |
|
1996 | 2007 | if revs: |
|
1997 | revs = scmutil.revrange(repo, revs) | |
|
2008 | revs = logcmdutil.revrange(repo, revs) | |
|
1998 | 2009 | ctxs = [repo[rev] for rev in revs] |
|
1999 | 2010 | for ctx in ctxs: |
|
2000 | 2011 | tags = [tag for tag in ctx.tags() if tag != b'tip'] |
|
2001 | 2012 | if not hastags: |
|
2002 | 2013 | hastags = len(tags) |
|
2003 | 2014 | if hastags: |
|
2004 | 2015 | if ui.promptchoice( |
|
2005 | 2016 | _( |
|
2006 | 2017 | b'warning: tags associated with the given' |
|
2007 | 2018 | b' changeset will be lost after histedit.\n' |
|
2008 | 2019 | b'do you want to continue (yN)? $$ &Yes $$ &No' |
|
2009 | 2020 | ), |
|
2010 | 2021 | default=1, |
|
2011 | 2022 | ): |
|
2012 | raise error.Abort(_(b'histedit cancelled\n')) | |
|
2023 | raise error.CanceledError(_(b'histedit cancelled\n')) | |
|
2013 | 2024 | # rebuild state |
|
2014 | 2025 | if goal == goalcontinue: |
|
2015 | 2026 | state.read() |
|
2016 | 2027 | state = bootstrapcontinue(ui, state, opts) |
|
2017 | 2028 | elif goal == goaleditplan: |
|
2018 | 2029 | _edithisteditplan(ui, repo, state, rules) |
|
2019 | 2030 | return |
|
2020 | 2031 | elif goal == goalabort: |
|
2021 | 2032 | _aborthistedit(ui, repo, state, nobackup=nobackup) |
|
2022 | 2033 | return |
|
2023 | 2034 | else: |
|
2024 | 2035 | # goal == goalnew |
|
2025 | 2036 | _newhistedit(ui, repo, state, revs, freeargs, opts) |
|
2026 | 2037 | |
|
2027 | 2038 | _continuehistedit(ui, repo, state) |
|
2028 | 2039 | _finishhistedit(ui, repo, state, fm) |
|
2029 | 2040 | fm.end() |
|
2030 | 2041 | |
|
2031 | 2042 | |
|
2032 | 2043 | def _continuehistedit(ui, repo, state): |
|
2033 | 2044 | """This function runs after either: |
|
2034 | 2045 | - bootstrapcontinue (if the goal is 'continue') |
|
2035 | 2046 | - _newhistedit (if the goal is 'new') |
|
2036 | 2047 | """ |
|
2037 | 2048 | # preprocess rules so that we can hide inner folds from the user |
|
2038 | 2049 | # and only show one editor |
|
2039 | 2050 | actions = state.actions[:] |
|
2040 | 2051 | for idx, (action, nextact) in enumerate(zip(actions, actions[1:] + [None])): |
|
2041 | 2052 | if action.verb == b'fold' and nextact and nextact.verb == b'fold': |
|
2042 | 2053 | state.actions[idx].__class__ = _multifold |
|
2043 | 2054 | |
|
2044 | 2055 | # Force an initial state file write, so the user can run --abort/continue |
|
2045 | 2056 | # even if there's an exception before the first transaction serialize. |
|
2046 | 2057 | state.write() |
|
2047 | 2058 | |
|
2048 | 2059 | tr = None |
|
2049 | 2060 | # Don't use singletransaction by default since it rolls the entire |
|
2050 | 2061 | # transaction back if an unexpected exception happens (like a |
|
2051 | 2062 | # pretxncommit hook throws, or the user aborts the commit msg editor). |
|
2052 | 2063 | if ui.configbool(b"histedit", b"singletransaction"): |
|
2053 | 2064 | # Don't use a 'with' for the transaction, since actions may close |
|
2054 | 2065 | # and reopen a transaction. For example, if the action executes an |
|
2055 | 2066 | # external process it may choose to commit the transaction first. |
|
2056 | 2067 | tr = repo.transaction(b'histedit') |
|
2057 | 2068 | progress = ui.makeprogress( |
|
2058 | 2069 | _(b"editing"), unit=_(b'changes'), total=len(state.actions) |
|
2059 | 2070 | ) |
|
2060 | 2071 | with progress, util.acceptintervention(tr): |
|
2061 | 2072 | while state.actions: |
|
2062 | 2073 | state.write(tr=tr) |
|
2063 | 2074 | actobj = state.actions[0] |
|
2064 | 2075 | progress.increment(item=actobj.torule()) |
|
2065 | 2076 | ui.debug( |
|
2066 | 2077 | b'histedit: processing %s %s\n' % (actobj.verb, actobj.torule()) |
|
2067 | 2078 | ) |
|
2068 | 2079 | parentctx, replacement_ = actobj.run() |
|
2069 | 2080 | state.parentctxnode = parentctx.node() |
|
2070 | 2081 | state.replacements.extend(replacement_) |
|
2071 | 2082 | state.actions.pop(0) |
|
2072 | 2083 | |
|
2073 | 2084 | state.write() |
|
2074 | 2085 | |
|
2075 | 2086 | |
|
2076 | 2087 | def _finishhistedit(ui, repo, state, fm): |
|
2077 | 2088 | """This action runs when histedit is finishing its session""" |
|
2078 | 2089 | mergemod.update(repo[state.parentctxnode]) |
|
2079 | 2090 | |
|
2080 | 2091 | mapping, tmpnodes, created, ntm = processreplacement(state) |
|
2081 | 2092 | if mapping: |
|
2082 | 2093 | for prec, succs in pycompat.iteritems(mapping): |
|
2083 | 2094 | if not succs: |
|
2084 | 2095 | ui.debug(b'histedit: %s is dropped\n' % short(prec)) |
|
2085 | 2096 | else: |
|
2086 | 2097 | ui.debug( |
|
2087 | 2098 | b'histedit: %s is replaced by %s\n' |
|
2088 | 2099 | % (short(prec), short(succs[0])) |
|
2089 | 2100 | ) |
|
2090 | 2101 | if len(succs) > 1: |
|
2091 | 2102 | m = b'histedit: %s' |
|
2092 | 2103 | for n in succs[1:]: |
|
2093 | 2104 | ui.debug(m % short(n)) |
|
2094 | 2105 | |
|
2095 | 2106 | if not state.keep: |
|
2096 | 2107 | if mapping: |
|
2097 | 2108 | movetopmostbookmarks(repo, state.topmost, ntm) |
|
2098 | 2109 | # TODO update mq state |
|
2099 | 2110 | else: |
|
2100 | 2111 | mapping = {} |
|
2101 | 2112 | |
|
2102 | 2113 | for n in tmpnodes: |
|
2103 | 2114 | if n in repo: |
|
2104 | 2115 | mapping[n] = () |
|
2105 | 2116 | |
|
2106 | 2117 | # remove entries about unknown nodes |
|
2107 | 2118 | has_node = repo.unfiltered().changelog.index.has_node |
|
2108 | 2119 | mapping = { |
|
2109 | 2120 | k: v |
|
2110 | 2121 | for k, v in mapping.items() |
|
2111 | 2122 | if has_node(k) and all(has_node(n) for n in v) |
|
2112 | 2123 | } |
|
2113 | 2124 | scmutil.cleanupnodes(repo, mapping, b'histedit') |
|
2114 | 2125 | hf = fm.hexfunc |
|
2115 | 2126 | fl = fm.formatlist |
|
2116 | 2127 | fd = fm.formatdict |
|
2117 | 2128 | nodechanges = fd( |
|
2118 | 2129 | { |
|
2119 | 2130 | hf(oldn): fl([hf(n) for n in newn], name=b'node') |
|
2120 | 2131 | for oldn, newn in pycompat.iteritems(mapping) |
|
2121 | 2132 | }, |
|
2122 | 2133 | key=b"oldnode", |
|
2123 | 2134 | value=b"newnodes", |
|
2124 | 2135 | ) |
|
2125 | 2136 | fm.data(nodechanges=nodechanges) |
|
2126 | 2137 | |
|
2127 | 2138 | state.clear() |
|
2128 | 2139 | if os.path.exists(repo.sjoin(b'undo')): |
|
2129 | 2140 | os.unlink(repo.sjoin(b'undo')) |
|
2130 | 2141 | if repo.vfs.exists(b'histedit-last-edit.txt'): |
|
2131 | 2142 | repo.vfs.unlink(b'histedit-last-edit.txt') |
|
2132 | 2143 | |
|
2133 | 2144 | |
|
2134 | 2145 | def _aborthistedit(ui, repo, state, nobackup=False): |
|
2135 | 2146 | try: |
|
2136 | 2147 | state.read() |
|
2137 | 2148 | __, leafs, tmpnodes, __ = processreplacement(state) |
|
2138 | 2149 | ui.debug(b'restore wc to old parent %s\n' % short(state.topmost)) |
|
2139 | 2150 | |
|
2140 | 2151 | # Recover our old commits if necessary |
|
2141 | 2152 | if not state.topmost in repo and state.backupfile: |
|
2142 | 2153 | backupfile = repo.vfs.join(state.backupfile) |
|
2143 | 2154 | f = hg.openpath(ui, backupfile) |
|
2144 | 2155 | gen = exchange.readbundle(ui, f, backupfile) |
|
2145 | 2156 | with repo.transaction(b'histedit.abort') as tr: |
|
2146 | 2157 | bundle2.applybundle( |
|
2147 | 2158 | repo, |
|
2148 | 2159 | gen, |
|
2149 | 2160 | tr, |
|
2150 | 2161 | source=b'histedit', |
|
2151 | 2162 | url=b'bundle:' + backupfile, |
|
2152 | 2163 | ) |
|
2153 | 2164 | |
|
2154 | 2165 | os.remove(backupfile) |
|
2155 | 2166 | |
|
2156 | 2167 | # check whether we should update away |
|
2157 | 2168 | if repo.unfiltered().revs( |
|
2158 | 2169 | b'parents() and (%n or %ln::)', |
|
2159 | 2170 | state.parentctxnode, |
|
2160 | 2171 | leafs | tmpnodes, |
|
2161 | 2172 | ): |
|
2162 | 2173 | hg.clean(repo, state.topmost, show_stats=True, quietempty=True) |
|
2163 | 2174 | cleanupnode(ui, repo, tmpnodes, nobackup=nobackup) |
|
2164 | 2175 | cleanupnode(ui, repo, leafs, nobackup=nobackup) |
|
2165 | 2176 | except Exception: |
|
2166 | 2177 | if state.inprogress(): |
|
2167 | 2178 | ui.warn( |
|
2168 | 2179 | _( |
|
2169 | 2180 | b'warning: encountered an exception during histedit ' |
|
2170 | 2181 | b'--abort; the repository may not have been completely ' |
|
2171 | 2182 | b'cleaned up\n' |
|
2172 | 2183 | ) |
|
2173 | 2184 | ) |
|
2174 | 2185 | raise |
|
2175 | 2186 | finally: |
|
2176 | 2187 | state.clear() |
|
2177 | 2188 | |
|
2178 | 2189 | |
|
2179 | 2190 | def hgaborthistedit(ui, repo): |
|
2180 | 2191 | state = histeditstate(repo) |
|
2181 | 2192 | nobackup = not ui.configbool(b'rewrite', b'backup-bundle') |
|
2182 | 2193 | with repo.wlock() as wlock, repo.lock() as lock: |
|
2183 | 2194 | state.wlock = wlock |
|
2184 | 2195 | state.lock = lock |
|
2185 | 2196 | _aborthistedit(ui, repo, state, nobackup=nobackup) |
|
2186 | 2197 | |
|
2187 | 2198 | |
|
2188 | 2199 | def _edithisteditplan(ui, repo, state, rules): |
|
2189 | 2200 | state.read() |
|
2190 | 2201 | if not rules: |
|
2191 | 2202 | comment = geteditcomment( |
|
2192 | 2203 | ui, short(state.parentctxnode), short(state.topmost) |
|
2193 | 2204 | ) |
|
2194 | 2205 | rules = ruleeditor(repo, ui, state.actions, comment) |
|
2195 | 2206 | else: |
|
2196 | 2207 | rules = _readfile(ui, rules) |
|
2197 | 2208 | actions = parserules(rules, state) |
|
2198 | 2209 | ctxs = [repo[act.node] for act in state.actions if act.node] |
|
2199 | 2210 | warnverifyactions(ui, repo, actions, state, ctxs) |
|
2200 | 2211 | state.actions = actions |
|
2201 | 2212 | state.write() |
|
2202 | 2213 | |
|
2203 | 2214 | |
|
2204 | 2215 | def _newhistedit(ui, repo, state, revs, freeargs, opts): |
|
2205 | 2216 | outg = opts.get(b'outgoing') |
|
2206 | 2217 | rules = opts.get(b'commands', b'') |
|
2207 | 2218 | force = opts.get(b'force') |
|
2208 | 2219 | |
|
2209 | 2220 | cmdutil.checkunfinished(repo) |
|
2210 | 2221 | cmdutil.bailifchanged(repo) |
|
2211 | 2222 | |
|
2212 | 2223 | topmost = repo.dirstate.p1() |
|
2213 | 2224 | if outg: |
|
2214 | 2225 | if freeargs: |
|
2215 | 2226 | remote = freeargs[0] |
|
2216 | 2227 | else: |
|
2217 | 2228 | remote = None |
|
2218 | 2229 | root = findoutgoing(ui, repo, remote, force, opts) |
|
2219 | 2230 | else: |
|
2220 | rr = list(repo.set(b'roots(%ld)', scmutil.revrange(repo, revs))) | |
|
2231 | rr = list(repo.set(b'roots(%ld)', logcmdutil.revrange(repo, revs))) | |
|
2221 | 2232 | if len(rr) != 1: |
|
2222 | raise error.Abort( | |
|
2233 | raise error.InputError( | |
|
2223 | 2234 | _( |
|
2224 | 2235 | b'The specified revisions must have ' |
|
2225 | 2236 | b'exactly one common root' |
|
2226 | 2237 | ) |
|
2227 | 2238 | ) |
|
2228 | 2239 | root = rr[0].node() |
|
2229 | 2240 | |
|
2230 | 2241 | revs = between(repo, root, topmost, state.keep) |
|
2231 | 2242 | if not revs: |
|
2232 | raise error.Abort( | |
|
2243 | raise error.InputError( | |
|
2233 | 2244 | _(b'%s is not an ancestor of working directory') % short(root) |
|
2234 | 2245 | ) |
|
2235 | 2246 | |
|
2236 | 2247 | ctxs = [repo[r] for r in revs] |
|
2237 | 2248 | |
|
2238 | 2249 | wctx = repo[None] |
|
2239 | 2250 | # Please don't ask me why `ancestors` is this value. I figured it |
|
2240 | 2251 | # out with print-debugging, not by actually understanding what the |
|
2241 | 2252 | # merge code is doing. :( |
|
2242 | 2253 | ancs = [repo[b'.']] |
|
2243 | 2254 | # Sniff-test to make sure we won't collide with untracked files in |
|
2244 | 2255 | # the working directory. If we don't do this, we can get a |
|
2245 | 2256 | # collision after we've started histedit and backing out gets ugly |
|
2246 | 2257 | # for everyone, especially the user. |
|
2247 | 2258 | for c in [ctxs[0].p1()] + ctxs: |
|
2248 | 2259 | try: |
|
2249 | 2260 | mergemod.calculateupdates( |
|
2250 | 2261 | repo, |
|
2251 | 2262 | wctx, |
|
2252 | 2263 | c, |
|
2253 | 2264 | ancs, |
|
2254 | 2265 | # These parameters were determined by print-debugging |
|
2255 | 2266 | # what happens later on inside histedit. |
|
2256 | 2267 | branchmerge=False, |
|
2257 | 2268 | force=False, |
|
2258 | 2269 | acceptremote=False, |
|
2259 | 2270 | followcopies=False, |
|
2260 | 2271 | ) |
|
2261 | 2272 | except error.Abort: |
|
2262 | raise error.Abort( | 

2273 | raise error.StateError( | 
|
2263 | 2274 | _( |
|
2264 | 2275 | b"untracked files in working directory conflict with files in %s" |
|
2265 | 2276 | ) |
|
2266 | 2277 | % c |
|
2267 | 2278 | ) |
|
2268 | 2279 | |
|
2269 | 2280 | if not rules: |
|
2270 | 2281 | comment = geteditcomment(ui, short(root), short(topmost)) |
|
2271 | 2282 | actions = [pick(state, r) for r in revs] |
|
2272 | 2283 | rules = ruleeditor(repo, ui, actions, comment) |
|
2273 | 2284 | else: |
|
2274 | 2285 | rules = _readfile(ui, rules) |
|
2275 | 2286 | actions = parserules(rules, state) |
|
2276 | 2287 | warnverifyactions(ui, repo, actions, state, ctxs) |
|
2277 | 2288 | |
|
2278 | 2289 | parentctxnode = repo[root].p1().node() |
|
2279 | 2290 | |
|
2280 | 2291 | state.parentctxnode = parentctxnode |
|
2281 | 2292 | state.actions = actions |
|
2282 | 2293 | state.topmost = topmost |
|
2283 | 2294 | state.replacements = [] |
|
2284 | 2295 | |
|
2285 | 2296 | ui.log( |
|
2286 | 2297 | b"histedit", |
|
2287 | 2298 | b"%d actions to histedit\n", |
|
2288 | 2299 | len(actions), |
|
2289 | 2300 | histedit_num_actions=len(actions), |
|
2290 | 2301 | ) |
|
2291 | 2302 | |
|
2292 | 2303 | # Create a backup so we can always abort completely. |
|
2293 | 2304 | backupfile = None |
|
2294 | 2305 | if not obsolete.isenabled(repo, obsolete.createmarkersopt): |
|
2295 | 2306 | backupfile = repair.backupbundle( |
|
2296 | 2307 | repo, [parentctxnode], [topmost], root, b'histedit' |
|
2297 | 2308 | ) |
|
2298 | 2309 | state.backupfile = backupfile |
|
2299 | 2310 | |
|
2300 | 2311 | |
|
2301 | 2312 | def _getsummary(ctx): |
|
2302 | 2313 | # a common pattern is to extract the summary but default to the empty |
|
2303 | 2314 | # string |
|
2304 | 2315 | summary = ctx.description() or b'' |
|
2305 | 2316 | if summary: |
|
2306 | 2317 | summary = summary.splitlines()[0] |
|
2307 | 2318 | return summary |
|
2308 | 2319 | |
|
2309 | 2320 | |
|
2310 | 2321 | def bootstrapcontinue(ui, state, opts): |
|
2311 | 2322 | repo = state.repo |
|
2312 | 2323 | |
|
2313 | 2324 | ms = mergestatemod.mergestate.read(repo) |
|
2314 | 2325 | mergeutil.checkunresolved(ms) |
|
2315 | 2326 | |
|
2316 | 2327 | if state.actions: |
|
2317 | 2328 | actobj = state.actions.pop(0) |
|
2318 | 2329 | |
|
2319 | 2330 | if _isdirtywc(repo): |
|
2320 | 2331 | actobj.continuedirty() |
|
2321 | 2332 | if _isdirtywc(repo): |
|
2322 | 2333 | abortdirty() |
|
2323 | 2334 | |
|
2324 | 2335 | parentctx, replacements = actobj.continueclean() |
|
2325 | 2336 | |
|
2326 | 2337 | state.parentctxnode = parentctx.node() |
|
2327 | 2338 | state.replacements.extend(replacements) |
|
2328 | 2339 | |
|
2329 | 2340 | return state |
|
2330 | 2341 | |
|
2331 | 2342 | |
|
2332 | 2343 | def between(repo, old, new, keep): |
|
2333 | 2344 | """select and validate the set of revision to edit |
|
2334 | 2345 | |
|
2335 | 2346 | When keep is false, the specified set can't have children.""" |
|
2336 | 2347 | revs = repo.revs(b'%n::%n', old, new) |
|
2337 | 2348 | if revs and not keep: |
|
2338 | 2349 | rewriteutil.precheck(repo, revs, b'edit') |
|
2339 | 2350 | if repo.revs(b'(%ld) and merge()', revs): |
|
2340 | raise error.Abort(_(b'cannot edit history that contains merges')) | |
|
2351 | raise error.StateError( | |
|
2352 | _(b'cannot edit history that contains merges') | |
|
2353 | ) | |
|
2341 | 2354 | return pycompat.maplist(repo.changelog.node, revs) |
|
2342 | 2355 | |
|
2343 | 2356 | |
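The revset `b'%n::%n'` used in `between` above selects the DAG range old::new: nodes reachable from `old` that can also reach `new`, endpoints included. A toy, pure-Python illustration of that semantics (hypothetical graph, not Mercurial's implementation):

    parents = {1: [0], 2: [1], 3: [1], 4: [2]}  # child -> parents

    def ancestors(n):
        # every node reachable by walking parent edges from n, n included
        seen, stack = {n}, [n]
        while stack:
            for p in parents.get(stack.pop(), []):
                if p not in seen:
                    seen.add(p)
                    stack.append(p)
        return seen

    old, new = 1, 4
    rng = sorted(x for x in ancestors(new) if old in ancestors(x))
    assert rng == [1, 2, 4]  # the side branch at 3 is excluded
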
|
2344 | 2357 | def ruleeditor(repo, ui, actions, editcomment=b""): |
|
2345 | 2358 | """open an editor to edit rules |
|
2346 | 2359 | |
|
2347 | 2360 | rules are in the format [ [act, ctx], ...] like in state.rules |
|
2348 | 2361 | """ |
|
2349 | 2362 | if repo.ui.configbool(b"experimental", b"histedit.autoverb"): |
|
2350 | 2363 | newact = util.sortdict() |
|
2351 | 2364 | for act in actions: |
|
2352 | 2365 | ctx = repo[act.node] |
|
2353 | 2366 | summary = _getsummary(ctx) |
|
2354 | 2367 | fword = summary.split(b' ', 1)[0].lower() |
|
2355 | 2368 | added = False |
|
2356 | 2369 | |
|
2357 | 2370 | # if it doesn't end with the special character '!' just skip this |
|
2358 | 2371 | if fword.endswith(b'!'): |
|
2359 | 2372 | fword = fword[:-1] |
|
2360 | 2373 | if fword in primaryactions | secondaryactions | tertiaryactions: |
|
2361 | 2374 | act.verb = fword |
|
2362 | 2375 | # get the target summary |
|
2363 | 2376 | tsum = summary[len(fword) + 1 :].lstrip() |
|
2364 | 2377 | # safe but slow: reverse iterate over the actions so we |
|
2365 | 2378 | # don't clash on two commits having the same summary |
|
2366 | 2379 | for na, l in reversed(list(pycompat.iteritems(newact))): |
|
2367 | 2380 | actx = repo[na.node] |
|
2368 | 2381 | asum = _getsummary(actx) |
|
2369 | 2382 | if asum == tsum: |
|
2370 | 2383 | added = True |
|
2371 | 2384 | l.append(act) |
|
2372 | 2385 | break |
|
2373 | 2386 | |
|
2374 | 2387 | if not added: |
|
2375 | 2388 | newact[act] = [] |
|
2376 | 2389 | |
|
2377 | 2390 | # copy over and flatten the new list |
|
2378 | 2391 | actions = [] |
|
2379 | 2392 | for na, l in pycompat.iteritems(newact): |
|
2380 | 2393 | actions.append(na) |
|
2381 | 2394 | actions += l |
|
2382 | 2395 | |
|
2383 | 2396 | rules = b'\n'.join([act.torule() for act in actions]) |
|
2384 | 2397 | rules += b'\n\n' |
|
2385 | 2398 | rules += editcomment |
|
2386 | 2399 | rules = ui.edit( |
|
2387 | 2400 | rules, |
|
2388 | 2401 | ui.username(), |
|
2389 | 2402 | {b'prefix': b'histedit'}, |
|
2390 | 2403 | repopath=repo.path, |
|
2391 | 2404 | action=b'histedit', |
|
2392 | 2405 | ) |
|
2393 | 2406 | |
|
2394 | 2407 | # Save edit rules in .hg/histedit-last-edit.txt in case |
|
2395 | 2408 | # the user needs to ask for help after something |
|
2396 | 2409 | # surprising happens. |
|
2397 | 2410 | with repo.vfs(b'histedit-last-edit.txt', b'wb') as f: |
|
2398 | 2411 | f.write(rules) |
|
2399 | 2412 | |
|
2400 | 2413 | return rules |
|
2401 | 2414 | |
|
2402 | 2415 | |
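The autoverb loop above keys entirely off the first word of each commit summary. A standalone sketch of that detection (hypothetical helper, not part of the extension; the verb set is assumed from histedit's standard actions):

    KNOWN_VERBS = {b'pick', b'edit', b'fold', b'roll', b'drop', b'mess', b'base'}

    def autoverb(summary):
        # b"fold! fix typo" pre-selects 'fold' and targets the earlier
        # commit whose summary is b"fix typo"
        fword = summary.split(b' ', 1)[0].lower()
        if fword.endswith(b'!') and fword[:-1] in KNOWN_VERBS:
            return fword[:-1], summary[len(fword) + 1 :].lstrip()
        return b'pick', summary

    assert autoverb(b'fold! fix typo') == (b'fold', b'fix typo')
    assert autoverb(b'fix typo') == (b'pick', b'fix typo')
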
|
2403 | 2416 | def parserules(rules, state): |
|
2404 | 2417 | """Read the histedit rules string and return list of action objects""" |
|
2405 | 2418 | rules = [ |
|
2406 | 2419 | l |
|
2407 | 2420 | for l in (r.strip() for r in rules.splitlines()) |
|
2408 | 2421 | if l and not l.startswith(b'#') |
|
2409 | 2422 | ] |
|
2410 | 2423 | actions = [] |
|
2411 | 2424 | for r in rules: |
|
2412 | 2425 | if b' ' not in r: |
|
2413 | 2426 | raise error.ParseError(_(b'malformed line "%s"') % r) |
|
2414 | 2427 | verb, rest = r.split(b' ', 1) |
|
2415 | 2428 | |
|
2416 | 2429 | if verb not in actiontable: |
|
2417 | 2430 | raise error.ParseError(_(b'unknown action "%s"') % verb) |
|
2418 | 2431 | |
|
2419 | 2432 | action = actiontable[verb].fromrule(state, rest) |
|
2420 | 2433 | actions.append(action) |
|
2421 | 2434 | return actions |
|
2422 | 2435 | |
|
2423 | 2436 | |
|
2424 | 2437 | def warnverifyactions(ui, repo, actions, state, ctxs): |
|
2425 | 2438 | try: |
|
2426 | 2439 | verifyactions(actions, state, ctxs) |
|
2427 | 2440 | except error.ParseError: |
|
2428 | 2441 | if repo.vfs.exists(b'histedit-last-edit.txt'): |
|
2429 | 2442 | ui.warn( |
|
2430 | 2443 | _( |
|
2431 | 2444 | b'warning: histedit rules saved ' |
|
2432 | 2445 | b'to: .hg/histedit-last-edit.txt\n' |
|
2433 | 2446 | ) |
|
2434 | 2447 | ) |
|
2435 | 2448 | raise |
|
2436 | 2449 | |
|
2437 | 2450 | |
|
2438 | 2451 | def verifyactions(actions, state, ctxs): |
|
2439 | 2452 | """Verify that there exists exactly one action per given changeset and |
|
2440 | 2453 | other constraints. |
|
2441 | 2454 | |
|
2442 | 2455 | Will abort if there are too many or too few rules, a malformed rule,
|
2443 | 2456 | or a rule on a changeset outside of the user-given range. |
|
2444 | 2457 | """ |
|
2445 | 2458 | expected = {c.node() for c in ctxs} |
|
2446 | 2459 | seen = set() |
|
2447 | 2460 | prev = None |
|
2448 | 2461 | |
|
2449 | 2462 | if actions and actions[0].verb in [b'roll', b'fold']: |
|
2450 | 2463 | raise error.ParseError( |
|
2451 | 2464 | _(b'first changeset cannot use verb "%s"') % actions[0].verb |
|
2452 | 2465 | ) |
|
2453 | 2466 | |
|
2454 | 2467 | for action in actions: |
|
2455 | 2468 | action.verify(prev, expected, seen) |
|
2456 | 2469 | prev = action |
|
2457 | 2470 | if action.node is not None: |
|
2458 | 2471 | seen.add(action.node) |
|
2459 | 2472 | missing = sorted(expected - seen) # sort to stabilize output |
|
2460 | 2473 | |
|
2461 | 2474 | if state.repo.ui.configbool(b'histedit', b'dropmissing'): |
|
2462 | 2475 | if len(actions) == 0: |
|
2463 | 2476 | raise error.ParseError( |
|
2464 | 2477 | _(b'no rules provided'), |
|
2465 | 2478 | hint=_(b'use strip extension to remove commits'), |
|
2466 | 2479 | ) |
|
2467 | 2480 | |
|
2468 | 2481 | drops = [drop(state, n) for n in missing] |
|
2469 | 2482 | # put them in the beginning so they execute immediately and
|
2470 | 2483 | # don't show in the edit-plan in the future |
|
2471 | 2484 | actions[:0] = drops |
|
2472 | 2485 | elif missing: |
|
2473 | 2486 | raise error.ParseError( |
|
2474 | 2487 | _(b'missing rules for changeset %s') % short(missing[0]), |
|
2475 | 2488 | hint=_( |
|
2476 | 2489 | b'use "drop %s" to discard, see also: ' |
|
2477 | 2490 | b"'hg help -e histedit.config'" |
|
2478 | 2491 | ) |
|
2479 | 2492 | % short(missing[0]), |
|
2480 | 2493 | ) |
|
2481 | 2494 | |
|
2482 | 2495 | |
|
2483 | 2496 | def adjustreplacementsfrommarkers(repo, oldreplacements): |
|
2484 | 2497 | """Adjust replacements from obsolescence markers |
|
2485 | 2498 | |
|
2486 | 2499 | The replacements structure is originally generated based on
|
2487 | 2500 | histedit's state and does not account for changes that are |
|
2488 | 2501 | not recorded there. This function fixes that by adding |
|
2489 | 2502 | data read from obsolescence markers""" |
|
2490 | 2503 | if not obsolete.isenabled(repo, obsolete.createmarkersopt): |
|
2491 | 2504 | return oldreplacements |
|
2492 | 2505 | |
|
2493 | 2506 | unfi = repo.unfiltered() |
|
2494 | 2507 | get_rev = unfi.changelog.index.get_rev |
|
2495 | 2508 | obsstore = repo.obsstore |
|
2496 | 2509 | newreplacements = list(oldreplacements) |
|
2497 | 2510 | oldsuccs = [r[1] for r in oldreplacements] |
|
2498 | 2511 | # successors that have already been added to succstocheck once |
|
2499 | 2512 | seensuccs = set().union( |
|
2500 | 2513 | *oldsuccs |
|
2501 | 2514 | ) # create a set from an iterable of tuples |
|
2502 | 2515 | succstocheck = list(seensuccs) |
|
2503 | 2516 | while succstocheck: |
|
2504 | 2517 | n = succstocheck.pop() |
|
2505 | 2518 | missing = get_rev(n) is None |
|
2506 | 2519 | markers = obsstore.successors.get(n, ()) |
|
2507 | 2520 | if missing and not markers: |
|
2508 | 2521 | # dead end, mark it as such |
|
2509 | 2522 | newreplacements.append((n, ())) |
|
2510 | 2523 | for marker in markers: |
|
2511 | 2524 | nsuccs = marker[1] |
|
2512 | 2525 | newreplacements.append((n, nsuccs)) |
|
2513 | 2526 | for nsucc in nsuccs: |
|
2514 | 2527 | if nsucc not in seensuccs: |
|
2515 | 2528 | seensuccs.add(nsucc) |
|
2516 | 2529 | succstocheck.append(nsucc) |
|
2517 | 2530 | |
|
2518 | 2531 | return newreplacements |
|
2519 | 2532 | |
|
2520 | 2533 | |
|
2521 | 2534 | def processreplacement(state): |
|
2522 | 2535 | """process the list of replacements to return |
|
2523 | 2536 | |
|
2524 | 2537 | 1) the final mapping between original and created nodes |
|
2525 | 2538 | 2) the list of temporary nodes created by histedit

2526 | 2539 | 3) the list of new commits created by histedit"""
|
2527 | 2540 | replacements = adjustreplacementsfrommarkers(state.repo, state.replacements) |
|
2528 | 2541 | allsuccs = set() |
|
2529 | 2542 | replaced = set() |
|
2530 | 2543 | fullmapping = {} |
|
2531 | 2544 | # initialize basic set |
|
2532 | 2545 | # fullmapping records all operations recorded in replacement |
|
2533 | 2546 | for rep in replacements: |
|
2534 | 2547 | allsuccs.update(rep[1]) |
|
2535 | 2548 | replaced.add(rep[0]) |
|
2536 | 2549 | fullmapping.setdefault(rep[0], set()).update(rep[1]) |
|
2537 | 2550 | new = allsuccs - replaced |
|
2538 | 2551 | tmpnodes = allsuccs & replaced |
|
2539 | 2552 | # Reduce fullmapping to a direct relation between original nodes

2540 | 2553 | # and the final nodes created during history editing.

2541 | 2554 | # Dropped changesets are replaced by an empty list.
|
2542 | 2555 | toproceed = set(fullmapping) |
|
2543 | 2556 | final = {} |
|
2544 | 2557 | while toproceed: |
|
2545 | 2558 | for x in list(toproceed): |
|
2546 | 2559 | succs = fullmapping[x] |
|
2547 | 2560 | for s in list(succs): |
|
2548 | 2561 | if s in toproceed: |
|
2549 | 2562 | # non final node with unknown closure |
|
2550 | 2563 | # We can't process this now |
|
2551 | 2564 | break |
|
2552 | 2565 | elif s in final: |
|
2553 | 2566 | # non final node, replace with closure |
|
2554 | 2567 | succs.remove(s) |
|
2555 | 2568 | succs.update(final[s]) |
|
2556 | 2569 | else: |
|
2557 | 2570 | final[x] = succs |
|
2558 | 2571 | toproceed.remove(x) |
|
2559 | 2572 | # remove tmpnodes from final mapping |
|
2560 | 2573 | for n in tmpnodes: |
|
2561 | 2574 | del final[n] |
|
2562 | 2575 | # we expect all changes involved in final to exist in the repo |
|
2563 | 2576 | # turn `final` into list (topologically sorted) |
|
2564 | 2577 | get_rev = state.repo.changelog.index.get_rev |
|
2565 | 2578 | for prec, succs in final.items(): |
|
2566 | 2579 | final[prec] = sorted(succs, key=get_rev) |
|
2567 | 2580 | |
|
2568 | 2581 | # compute the topmost element (necessary for bookmarks)
|
2569 | 2582 | if new: |
|
2570 | 2583 | newtopmost = sorted(new, key=state.repo.changelog.rev)[-1] |
|
2571 | 2584 | elif not final: |
|
2572 | 2585 | # Nothing was rewritten at all. We won't need `newtopmost`;

2573 | 2586 | # it is the same as `oldtopmost` and `processreplacement` knows it.
|
2574 | 2587 | newtopmost = None |
|
2575 | 2588 | else: |
|
2576 | 2589 | # everybody died. The newtopmost is the parent of the root.
|
2577 | 2590 | r = state.repo.changelog.rev |
|
2578 | 2591 | newtopmost = state.repo[sorted(final, key=r)[0]].p1().node() |
|
2579 | 2592 | |
|
2580 | 2593 | return final, tmpnodes, new, newtopmost |
|
2581 | 2594 | |
|
2582 | 2595 | |
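The while-loop above computes, for every rewritten node, the set of its *final* successors by repeatedly splicing in already-known closures. A toy re-implementation of just that reduction (hypothetical standalone function, simplified from the code above):

    def reduce_replacements(fullmapping):
        # collapse chains: {a: {b}, b: {c}} becomes {a: {c}, b: {c}},
        # where b is a temporary node (both a successor and replaced)
        toproceed = set(fullmapping)
        final = {}
        while toproceed:
            for x in list(toproceed):
                succs = set(fullmapping[x])
                if any(s in toproceed for s in succs):
                    continue  # some successor's closure is still unknown
                for s in list(succs):
                    if s in final:
                        succs.remove(s)
                        succs.update(final[s])  # splice in known closure
                final[x] = succs
                toproceed.remove(x)
        return final

    assert reduce_replacements({'a': {'b'}, 'b': {'c'}}) == {'a': {'c'}, 'b': {'c'}}
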
|
2583 | 2596 | def movetopmostbookmarks(repo, oldtopmost, newtopmost): |
|
2584 | 2597 | """Move bookmark from oldtopmost to newly created topmost |
|
2585 | 2598 | |
|
2586 | 2599 | This is arguably a feature and we may only want that for the active |
|
2587 | 2600 | bookmark. But the behavior is kept compatible with the old version for now. |
|
2588 | 2601 | """ |
|
2589 | 2602 | if not oldtopmost or not newtopmost: |
|
2590 | 2603 | return |
|
2591 | 2604 | oldbmarks = repo.nodebookmarks(oldtopmost) |
|
2592 | 2605 | if oldbmarks: |
|
2593 | 2606 | with repo.lock(), repo.transaction(b'histedit') as tr: |
|
2594 | 2607 | marks = repo._bookmarks |
|
2595 | 2608 | changes = [] |
|
2596 | 2609 | for name in oldbmarks: |
|
2597 | 2610 | changes.append((name, newtopmost)) |
|
2598 | 2611 | marks.applychanges(repo, tr, changes) |
|
2599 | 2612 | |
|
2600 | 2613 | |
|
2601 | 2614 | def cleanupnode(ui, repo, nodes, nobackup=False): |
|
2602 | 2615 | """strip a group of nodes from the repository |
|
2603 | 2616 | |
|
2604 | 2617 | The set of nodes to strip may contain unknown nodes."""
|
2605 | 2618 | with repo.lock(): |
|
2606 | 2619 | # do not let filtering get in the way of the cleanse |
|
2607 | 2620 | # we should probably get rid of obsolescence marker created during the |
|
2608 | 2621 | # histedit, but we currently do not have such information. |
|
2609 | 2622 | repo = repo.unfiltered() |
|
2610 | 2623 | # Find all nodes that need to be stripped |
|
2611 | 2624 | # (we use %lr instead of %ln to silently ignore unknown items) |
|
2612 | 2625 | has_node = repo.changelog.index.has_node |
|
2613 | 2626 | nodes = sorted(n for n in nodes if has_node(n)) |
|
2614 | 2627 | roots = [c.node() for c in repo.set(b"roots(%ln)", nodes)] |
|
2615 | 2628 | if roots: |
|
2616 | 2629 | backup = not nobackup |
|
2617 | 2630 | repair.strip(ui, repo, roots, backup=backup) |
|
2618 | 2631 | |
|
2619 | 2632 | |
|
2620 | 2633 | def stripwrapper(orig, ui, repo, nodelist, *args, **kwargs): |
|
2621 | 2634 | if isinstance(nodelist, bytes): |
|
2622 | 2635 | nodelist = [nodelist] |
|
2623 | 2636 | state = histeditstate(repo) |
|
2624 | 2637 | if state.inprogress(): |
|
2625 | 2638 | state.read() |
|
2626 | 2639 | histedit_nodes = { |
|
2627 | 2640 | action.node for action in state.actions if action.node |
|
2628 | 2641 | } |
|
2629 | 2642 | common_nodes = histedit_nodes & set(nodelist) |
|
2630 | 2643 | if common_nodes: |
|
2631 | 2644 | raise error.Abort( |
|
2632 | 2645 | _(b"histedit in progress, can't strip %s") |
|
2633 | 2646 | % b', '.join(short(x) for x in common_nodes) |
|
2634 | 2647 | ) |
|
2635 | 2648 | return orig(ui, repo, nodelist, *args, **kwargs) |
|
2636 | 2649 | |
|
2637 | 2650 | |
|
2638 | 2651 | extensions.wrapfunction(repair, b'strip', stripwrapper) |
|
2639 | 2652 | |
|
2640 | 2653 | |
|
2641 | 2654 | def summaryhook(ui, repo): |
|
2642 | 2655 | state = histeditstate(repo) |
|
2643 | 2656 | if not state.inprogress(): |
|
2644 | 2657 | return |
|
2645 | 2658 | state.read() |
|
2646 | 2659 | if state.actions: |
|
2647 | 2660 | # i18n: column positioning for "hg summary" |
|
2648 | 2661 | ui.write( |
|
2649 | 2662 | _(b'hist: %s (histedit --continue)\n') |
|
2650 | 2663 | % ( |
|
2651 | 2664 | ui.label(_(b'%d remaining'), b'histedit.remaining') |
|
2652 | 2665 | % len(state.actions) |
|
2653 | 2666 | ) |
|
2654 | 2667 | ) |
|
2655 | 2668 | |
|
2656 | 2669 | |
|
2657 | 2670 | def extsetup(ui): |
|
2658 | 2671 | cmdutil.summaryhooks.add(b'histedit', summaryhook) |
|
2659 | 2672 | statemod.addunfinished( |
|
2660 | 2673 | b'histedit', |
|
2661 | 2674 | fname=b'histedit-state', |
|
2662 | 2675 | allowcommit=True, |
|
2663 | 2676 | continueflag=True, |
|
2664 | 2677 | abortfunc=hgaborthistedit, |
|
2665 | 2678 | ) |
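A note on the pattern running through this hunk: bare `error.Abort` calls are narrowed to `error.InputError` (the user asked for something invalid, such as a revset with multiple roots) and `error.StateError` (a valid request blocked by repository or working-directory state). Under Mercurial's detailed exit codes these classes map, as far as I recall, to exit statuses 10 and 20 respectively, so scripts can tell the failure modes apart. An illustrative sketch of the distinction (not the actual `error.py` hierarchy):

    class Abort(Exception):
        exit_code = 255          # generic failure

    class InputError(Abort):
        exit_code = 10           # assumed detailed exit code for bad input

    class StateError(Abort):
        exit_code = 20           # assumed detailed exit code for blocked state

    def check(roots, untracked_conflicts):
        if len(roots) != 1:
            raise InputError('revisions must have exactly one common root')
        if untracked_conflicts:
            raise StateError('untracked files conflict with the edit')
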
@@ -1,1389 +1,1390 b'' | |||
|
1 | 1 | # Infinite push |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2016 Facebook, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | """ store some pushes in a remote blob store on the server (EXPERIMENTAL) |
|
8 | 8 | |
|
9 | 9 | IMPORTANT: if you use this extension, please contact |
|
10 | 10 | mercurial-devel@mercurial-scm.org ASAP. This extension is believed to |
|
11 | 11 | be unused and barring learning of users of this functionality, we will |
|
12 | 12 | delete this code at the end of 2020. |
|
13 | 13 | |
|
14 | 14 | [infinitepush] |
|
15 | 15 | # Server-side and client-side option. Pattern of the infinitepush bookmark |
|
16 | 16 | branchpattern = PATTERN |
|
17 | 17 | |
|
18 | 18 | # Server or client |
|
19 | 19 | server = False |
|
20 | 20 | |
|
21 | 21 | # Server-side option. Possible values: 'disk' or 'sql'. Fails if not set |
|
22 | 22 | indextype = disk |
|
23 | 23 | |
|
24 | 24 | # Server-side option. Used only if indextype=sql. |
|
25 | 25 | # Format: 'IP:PORT:DB_NAME:USER:PASSWORD' |
|
26 | 26 | sqlhost = IP:PORT:DB_NAME:USER:PASSWORD |
|
27 | 27 | |
|
28 | 28 | # Server-side option. Used only if indextype=disk. |
|
29 | 29 | # Filesystem path to the index store |
|
30 | 30 | indexpath = PATH |
|
31 | 31 | |
|
32 | 32 | # Server-side option. Possible values: 'disk' or 'external' |
|
33 | 33 | # Fails if not set |
|
34 | 34 | storetype = disk |
|
35 | 35 | |
|
36 | 36 | # Server-side option. |
|
37 | 37 | # Path to the binary that will save bundles to the bundlestore
|
38 | 38 | # Formatted cmd line will be passed to it (see `put_args`) |
|
39 | 39 | put_binary = put |
|
40 | 40 | |
|
41 | 41 | # Server-side option. Used only if storetype=external.
|
42 | 42 | # Format cmd-line string for put binary. Placeholder: {filename} |
|
43 | 43 | put_args = {filename} |
|
44 | 44 | |
|
45 | 45 | # Server-side option. |
|
46 | 46 | # Path to the binary that get bundle from the bundlestore. |
|
47 | 47 | # Formatted cmd line will be passed to it (see `get_args`) |
|
48 | 48 | get_binary = get |
|
49 | 49 | |
|
50 | 50 | # Server-side option. Used only if storetype=external.
|
51 | 51 | # Format cmd-line string for get binary. Placeholders: {filename} {handle} |
|
52 | 52 | get_args = {filename} {handle} |
|
53 | 53 | |
|
54 | 54 | # Server-side option |
|
55 | 55 | logfile = FILE
|
56 | 56 | |
|
57 | 57 | # Server-side option |
|
58 | 58 | loglevel = DEBUG |
|
59 | 59 | |
|
60 | 60 | # Server-side option. Used only if indextype=sql. |
|
61 | 61 | # Sets mysql wait_timeout option. |
|
62 | 62 | waittimeout = 300 |
|
63 | 63 | |
|
64 | 64 | # Server-side option. Used only if indextype=sql. |
|
65 | 65 | # Sets mysql innodb_lock_wait_timeout option. |
|
66 | 66 | locktimeout = 120 |
|
67 | 67 | |
|
68 | 68 | # Server-side option. Used only if indextype=sql. |
|
69 | 69 | # Name of the repository |
|
70 | 70 | reponame = '' |
|
71 | 71 | |
|
72 | 72 | # Client-side option. Used by --list-remote option. List of remote scratch |
|
73 | 73 | # patterns to list if no patterns are specified. |
|
74 | 74 | defaultremotepatterns = ['*'] |
|
75 | 75 | |
|
76 | 76 | # Instructs infinitepush to forward all received bundle2 parts to the |
|
77 | 77 | # bundle for storage. Defaults to False. |
|
78 | 78 | storeallparts = True |
|
79 | 79 | |
|
80 | 80 | # routes each incoming push to the bundlestore. defaults to False |
|
81 | 81 | pushtobundlestore = True |
|
82 | 82 | |
|
83 | 83 | [remotenames] |
|
84 | 84 | # Client-side option |
|
85 | 85 | # This option should be set only if remotenames extension is enabled. |
|
86 | 86 | # Whether remote bookmarks are tracked by remotenames extension. |
|
87 | 87 | bookmarks = True |
|
88 | 88 | """ |
|
89 | 89 | |
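Tying the documented options together, a minimal disk-backed setup might look like the following (a sketch only, in the same hgrc format as the docstring; the paths and branch pattern are placeholders):

    [infinitepush]
    # on the server
    server = True
    indextype = disk
    indexpath = /var/hg/infinitepush-index
    storetype = disk
    branchpattern = re:scratch/.+

    # on each client only the pattern is needed (server = False is the
    # default); pushes to matching bookmarks become scratch pushes
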
|
90 | 90 | from __future__ import absolute_import |
|
91 | 91 | |
|
92 | 92 | import collections |
|
93 | 93 | import contextlib |
|
94 | 94 | import errno |
|
95 | 95 | import functools |
|
96 | 96 | import logging |
|
97 | 97 | import os |
|
98 | 98 | import random |
|
99 | 99 | import re |
|
100 | 100 | import socket |
|
101 | 101 | import subprocess |
|
102 | 102 | import time |
|
103 | 103 | |
|
104 | 104 | from mercurial.node import ( |
|
105 | 105 | bin, |
|
106 | 106 | hex, |
|
107 | 107 | ) |
|
108 | 108 | |
|
109 | 109 | from mercurial.i18n import _ |
|
110 | 110 | |
|
111 | 111 | from mercurial.pycompat import ( |
|
112 | 112 | getattr, |
|
113 | 113 | open, |
|
114 | 114 | ) |
|
115 | 115 | |
|
116 | 116 | from mercurial.utils import ( |
|
117 | 117 | procutil, |
|
118 | 118 | stringutil, |
|
119 | 119 | urlutil, |
|
120 | 120 | ) |
|
121 | 121 | |
|
122 | 122 | from mercurial import ( |
|
123 | 123 | bundle2, |
|
124 | 124 | changegroup, |
|
125 | 125 | commands, |
|
126 | 126 | discovery, |
|
127 | 127 | encoding, |
|
128 | 128 | error, |
|
129 | 129 | exchange, |
|
130 | 130 | extensions, |
|
131 | 131 | hg, |
|
132 | 132 | localrepo, |
|
133 | 133 | phases, |
|
134 | 134 | pushkey, |
|
135 | 135 | pycompat, |
|
136 | 136 | registrar, |
|
137 | 137 | util, |
|
138 | 138 | wireprototypes, |
|
139 | 139 | wireprotov1peer, |
|
140 | 140 | wireprotov1server, |
|
141 | 141 | ) |
|
142 | 142 | |
|
143 | 143 | from . import ( |
|
144 | 144 | bundleparts, |
|
145 | 145 | common, |
|
146 | 146 | ) |
|
147 | 147 | |
|
148 | 148 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
149 | 149 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
150 | 150 | # be specifying the version(s) of Mercurial they are tested with, or |
|
151 | 151 | # leave the attribute unspecified. |
|
152 | 152 | testedwith = b'ships-with-hg-core' |
|
153 | 153 | |
|
154 | 154 | configtable = {} |
|
155 | 155 | configitem = registrar.configitem(configtable) |
|
156 | 156 | |
|
157 | 157 | configitem( |
|
158 | 158 | b'infinitepush', |
|
159 | 159 | b'server', |
|
160 | 160 | default=False, |
|
161 | 161 | ) |
|
162 | 162 | configitem( |
|
163 | 163 | b'infinitepush', |
|
164 | 164 | b'storetype', |
|
165 | 165 | default=b'', |
|
166 | 166 | ) |
|
167 | 167 | configitem( |
|
168 | 168 | b'infinitepush', |
|
169 | 169 | b'indextype', |
|
170 | 170 | default=b'', |
|
171 | 171 | ) |
|
172 | 172 | configitem( |
|
173 | 173 | b'infinitepush', |
|
174 | 174 | b'indexpath', |
|
175 | 175 | default=b'', |
|
176 | 176 | ) |
|
177 | 177 | configitem( |
|
178 | 178 | b'infinitepush', |
|
179 | 179 | b'storeallparts', |
|
180 | 180 | default=False, |
|
181 | 181 | ) |
|
182 | 182 | configitem( |
|
183 | 183 | b'infinitepush', |
|
184 | 184 | b'reponame', |
|
185 | 185 | default=b'', |
|
186 | 186 | ) |
|
187 | 187 | configitem( |
|
188 | 188 | b'scratchbranch', |
|
189 | 189 | b'storepath', |
|
190 | 190 | default=b'', |
|
191 | 191 | ) |
|
192 | 192 | configitem( |
|
193 | 193 | b'infinitepush', |
|
194 | 194 | b'branchpattern', |
|
195 | 195 | default=b'', |
|
196 | 196 | ) |
|
197 | 197 | configitem( |
|
198 | 198 | b'infinitepush', |
|
199 | 199 | b'pushtobundlestore', |
|
200 | 200 | default=False, |
|
201 | 201 | ) |
|
202 | 202 | configitem( |
|
203 | 203 | b'experimental', |
|
204 | 204 | b'server-bundlestore-bookmark', |
|
205 | 205 | default=b'', |
|
206 | 206 | ) |
|
207 | 207 | configitem( |
|
208 | 208 | b'experimental', |
|
209 | 209 | b'infinitepush-scratchpush', |
|
210 | 210 | default=False, |
|
211 | 211 | ) |
|
212 | 212 | |
|
213 | 213 | experimental = b'experimental' |
|
214 | 214 | configbookmark = b'server-bundlestore-bookmark' |
|
215 | 215 | configscratchpush = b'infinitepush-scratchpush' |
|
216 | 216 | |
|
217 | 217 | scratchbranchparttype = bundleparts.scratchbranchparttype |
|
218 | 218 | revsetpredicate = registrar.revsetpredicate() |
|
219 | 219 | templatekeyword = registrar.templatekeyword() |
|
220 | 220 | _scratchbranchmatcher = lambda x: False |
|
221 | 221 | _maybehash = re.compile('^[a-f0-9]+$').search |
|
222 | 222 | |
|
223 | 223 | |
|
224 | 224 | def _buildexternalbundlestore(ui): |
|
225 | 225 | put_args = ui.configlist(b'infinitepush', b'put_args', []) |
|
226 | 226 | put_binary = ui.config(b'infinitepush', b'put_binary') |
|
227 | 227 | if not put_binary: |
|
228 | 228 | raise error.Abort(b'put binary is not specified') |
|
229 | 229 | get_args = ui.configlist(b'infinitepush', b'get_args', []) |
|
230 | 230 | get_binary = ui.config(b'infinitepush', b'get_binary') |
|
231 | 231 | if not get_binary: |
|
232 | 232 | raise error.Abort(b'get binary is not specified') |
|
233 | 233 | from . import store |
|
234 | 234 | |
|
235 | 235 | return store.externalbundlestore(put_binary, put_args, get_binary, get_args) |
|
236 | 236 | |
|
237 | 237 | |
|
238 | 238 | def _buildsqlindex(ui): |
|
239 | 239 | sqlhost = ui.config(b'infinitepush', b'sqlhost') |
|
240 | 240 | if not sqlhost: |
|
241 | 241 | raise error.Abort(_(b'please set infinitepush.sqlhost')) |
|
242 | 242 | host, port, db, user, password = sqlhost.split(b':') |
|
243 | 243 | reponame = ui.config(b'infinitepush', b'reponame') |
|
244 | 244 | if not reponame: |
|
245 | 245 | raise error.Abort(_(b'please set infinitepush.reponame')) |
|
246 | 246 | |
|
247 | 247 | logfile = ui.config(b'infinitepush', b'logfile', b'') |
|
248 | 248 | waittimeout = ui.configint(b'infinitepush', b'waittimeout', 300) |
|
249 | 249 | locktimeout = ui.configint(b'infinitepush', b'locktimeout', 120) |
|
250 | 250 | from . import sqlindexapi |
|
251 | 251 | |
|
252 | 252 | return sqlindexapi.sqlindexapi( |
|
253 | 253 | reponame, |
|
254 | 254 | host, |
|
255 | 255 | port, |
|
256 | 256 | db, |
|
257 | 257 | user, |
|
258 | 258 | password, |
|
259 | 259 | logfile, |
|
260 | 260 | _getloglevel(ui), |
|
261 | 261 | waittimeout=waittimeout, |
|
262 | 262 | locktimeout=locktimeout, |
|
263 | 263 | ) |
|
264 | 264 | |
|
265 | 265 | |
|
266 | 266 | def _getloglevel(ui): |
|
267 | 267 | loglevel = ui.config(b'infinitepush', b'loglevel', b'DEBUG') |
|
268 | 268 | numeric_loglevel = getattr(logging, loglevel.upper(), None) |
|
269 | 269 | if not isinstance(numeric_loglevel, int): |
|
270 | 270 | raise error.Abort(_(b'invalid log level %s') % loglevel) |
|
271 | 271 | return numeric_loglevel |
|
272 | 272 | |
|
273 | 273 | |
|
274 | 274 | def _tryhoist(ui, remotebookmark): |
|
275 | 275 | """returns a bookmarks with hoisted part removed |
|
276 | 276 | |
|
277 | 277 | Remotenames extension has a 'hoist' config that allows using remote
|
278 | 278 | bookmarks without specifying remote path. For example, 'hg update master' |
|
279 | 279 | works as well as 'hg update remote/master'. We want to allow the same in |
|
280 | 280 | infinitepush. |
|
281 | 281 | """ |
|
282 | 282 | |
|
283 | 283 | if common.isremotebooksenabled(ui): |
|
284 | 284 | hoist = ui.config(b'remotenames', b'hoistedpeer') + b'/' |
|
285 | 285 | if remotebookmark.startswith(hoist): |
|
286 | 286 | return remotebookmark[len(hoist) :] |
|
287 | 287 | return remotebookmark |
|
288 | 288 | |
|
289 | 289 | |
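A quick illustration of the hoisting behaviour above (a sketch that re-implements only the string logic, assuming `remotenames.hoistedpeer` is `default` and remote bookmarks are enabled):

    def demo_tryhoist(remotebookmark, hoistedpeer=b'default'):
        # strip the hoisted peer prefix, mirroring _tryhoist above
        hoist = hoistedpeer + b'/'
        if remotebookmark.startswith(hoist):
            return remotebookmark[len(hoist) :]
        return remotebookmark

    assert demo_tryhoist(b'default/master') == b'master'
    assert demo_tryhoist(b'other/master') == b'other/master'  # unchanged
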
|
290 | 290 | class bundlestore(object): |
|
291 | 291 | def __init__(self, repo): |
|
292 | 292 | self._repo = repo |
|
293 | 293 | storetype = self._repo.ui.config(b'infinitepush', b'storetype') |
|
294 | 294 | if storetype == b'disk': |
|
295 | 295 | from . import store |
|
296 | 296 | |
|
297 | 297 | self.store = store.filebundlestore(self._repo.ui, self._repo) |
|
298 | 298 | elif storetype == b'external': |
|
299 | 299 | self.store = _buildexternalbundlestore(self._repo.ui) |
|
300 | 300 | else: |
|
301 | 301 | raise error.Abort( |
|
302 | 302 | _(b'unknown infinitepush store type specified %s') % storetype |
|
303 | 303 | ) |
|
304 | 304 | |
|
305 | 305 | indextype = self._repo.ui.config(b'infinitepush', b'indextype') |
|
306 | 306 | if indextype == b'disk': |
|
307 | 307 | from . import fileindexapi |
|
308 | 308 | |
|
309 | 309 | self.index = fileindexapi.fileindexapi(self._repo) |
|
310 | 310 | elif indextype == b'sql': |
|
311 | 311 | self.index = _buildsqlindex(self._repo.ui) |
|
312 | 312 | else: |
|
313 | 313 | raise error.Abort( |
|
314 | 314 | _(b'unknown infinitepush index type specified %s') % indextype |
|
315 | 315 | ) |
|
316 | 316 | |
|
317 | 317 | |
|
318 | 318 | def _isserver(ui): |
|
319 | 319 | return ui.configbool(b'infinitepush', b'server') |
|
320 | 320 | |
|
321 | 321 | |
|
322 | 322 | def reposetup(ui, repo): |
|
323 | 323 | if _isserver(ui) and repo.local(): |
|
324 | 324 | repo.bundlestore = bundlestore(repo) |
|
325 | 325 | |
|
326 | 326 | |
|
327 | 327 | def extsetup(ui): |
|
328 | 328 | commonsetup(ui) |
|
329 | 329 | if _isserver(ui): |
|
330 | 330 | serverextsetup(ui) |
|
331 | 331 | else: |
|
332 | 332 | clientextsetup(ui) |
|
333 | 333 | |
|
334 | 334 | |
|
335 | 335 | def commonsetup(ui): |
|
336 | 336 | wireprotov1server.commands[b'listkeyspatterns'] = ( |
|
337 | 337 | wireprotolistkeyspatterns, |
|
338 | 338 | b'namespace patterns', |
|
339 | 339 | ) |
|
340 | 340 | scratchbranchpat = ui.config(b'infinitepush', b'branchpattern') |
|
341 | 341 | if scratchbranchpat: |
|
342 | 342 | global _scratchbranchmatcher |
|
343 | 343 | kind, pat, _scratchbranchmatcher = stringutil.stringmatcher( |
|
344 | 344 | scratchbranchpat |
|
345 | 345 | ) |
|
346 | 346 | |
|
347 | 347 | |
|
348 | 348 | def serverextsetup(ui): |
|
349 | 349 | origpushkeyhandler = bundle2.parthandlermapping[b'pushkey'] |
|
350 | 350 | |
|
351 | 351 | def newpushkeyhandler(*args, **kwargs): |
|
352 | 352 | bundle2pushkey(origpushkeyhandler, *args, **kwargs) |
|
353 | 353 | |
|
354 | 354 | newpushkeyhandler.params = origpushkeyhandler.params |
|
355 | 355 | bundle2.parthandlermapping[b'pushkey'] = newpushkeyhandler |
|
356 | 356 | |
|
357 | 357 | orighandlephasehandler = bundle2.parthandlermapping[b'phase-heads'] |
|
358 | 358 | newphaseheadshandler = lambda *args, **kwargs: bundle2handlephases( |
|
359 | 359 | orighandlephasehandler, *args, **kwargs |
|
360 | 360 | ) |
|
361 | 361 | newphaseheadshandler.params = orighandlephasehandler.params |
|
362 | 362 | bundle2.parthandlermapping[b'phase-heads'] = newphaseheadshandler |
|
363 | 363 | |
|
364 | 364 | extensions.wrapfunction( |
|
365 | 365 | localrepo.localrepository, b'listkeys', localrepolistkeys |
|
366 | 366 | ) |
|
367 | 367 | wireprotov1server.commands[b'lookup'] = ( |
|
368 | 368 | _lookupwrap(wireprotov1server.commands[b'lookup'][0]), |
|
369 | 369 | b'key', |
|
370 | 370 | ) |
|
371 | 371 | extensions.wrapfunction(exchange, b'getbundlechunks', getbundlechunks) |
|
372 | 372 | |
|
373 | 373 | extensions.wrapfunction(bundle2, b'processparts', processparts) |
|
374 | 374 | |
|
375 | 375 | |
|
376 | 376 | def clientextsetup(ui): |
|
377 | 377 | entry = extensions.wrapcommand(commands.table, b'push', _push) |
|
378 | 378 | |
|
379 | 379 | entry[1].append( |
|
380 | 380 | ( |
|
381 | 381 | b'', |
|
382 | 382 | b'bundle-store', |
|
383 | 383 | None, |
|
384 | 384 | _(b'force push to go to bundle store (EXPERIMENTAL)'), |
|
385 | 385 | ) |
|
386 | 386 | ) |
|
387 | 387 | |
|
388 | 388 | extensions.wrapcommand(commands.table, b'pull', _pull) |
|
389 | 389 | |
|
390 | 390 | extensions.wrapfunction(discovery, b'checkheads', _checkheads) |
|
391 | 391 | |
|
392 | 392 | wireprotov1peer.wirepeer.listkeyspatterns = listkeyspatterns |
|
393 | 393 | |
|
394 | 394 | partorder = exchange.b2partsgenorder |
|
395 | 395 | index = partorder.index(b'changeset') |
|
396 | 396 | partorder.insert( |
|
397 | 397 | index, partorder.pop(partorder.index(scratchbranchparttype)) |
|
398 | 398 | ) |
|
399 | 399 | |
|
400 | 400 | |
|
401 | 401 | def _checkheads(orig, pushop): |
|
402 | 402 | if pushop.ui.configbool(experimental, configscratchpush, False): |
|
403 | 403 | return |
|
404 | 404 | return orig(pushop) |
|
405 | 405 | |
|
406 | 406 | |
|
407 | 407 | def wireprotolistkeyspatterns(repo, proto, namespace, patterns): |
|
408 | 408 | patterns = wireprototypes.decodelist(patterns) |
|
409 | 409 | d = pycompat.iteritems(repo.listkeys(encoding.tolocal(namespace), patterns)) |
|
410 | 410 | return pushkey.encodekeys(d) |
|
411 | 411 | |
|
412 | 412 | |
|
413 | 413 | def localrepolistkeys(orig, self, namespace, patterns=None): |
|
414 | 414 | if namespace == b'bookmarks' and patterns: |
|
415 | 415 | index = self.bundlestore.index |
|
416 | 416 | results = {} |
|
417 | 417 | bookmarks = orig(self, namespace) |
|
418 | 418 | for pattern in patterns: |
|
419 | 419 | results.update(index.getbookmarks(pattern)) |
|
420 | 420 | if pattern.endswith(b'*'): |
|
421 | 421 | pattern = b're:^' + pattern[:-1] + b'.*' |
|
422 | 422 | kind, pat, matcher = stringutil.stringmatcher(pattern) |
|
423 | 423 | for bookmark, node in pycompat.iteritems(bookmarks): |
|
424 | 424 | if matcher(bookmark): |
|
425 | 425 | results[bookmark] = node |
|
426 | 426 | return results |
|
427 | 427 | else: |
|
428 | 428 | return orig(self, namespace) |
|
429 | 429 | |
|
430 | 430 | |
|
431 | 431 | @wireprotov1peer.batchable |
|
432 | 432 | def listkeyspatterns(self, namespace, patterns): |
|
433 | 433 | if not self.capable(b'pushkey'): |
|
434 | yield {}, None | 

435 | f = wireprotov1peer.future() | 

434 | return {}, None | 
|
436 | 435 | self.ui.debug(b'preparing listkeys for "%s"\n' % namespace) |
|
437 | yield { | |
|
436 | ||
|
437 | def decode(d): | |
|
438 | self.ui.debug( | |
|
439 | b'received listkey for "%s": %i bytes\n' % (namespace, len(d)) | |
|
440 | ) | |
|
441 | return pushkey.decodekeys(d) | |
|
442 | ||
|
443 | return { | |
|
438 | 444 | b'namespace': encoding.fromlocal(namespace), |
|
439 | 445 | b'patterns': wireprototypes.encodelist(patterns), |
|
440 | }, f | 

441 | d = f.value | 
|
442 | self.ui.debug( | |
|
443 | b'received listkey for "%s": %i bytes\n' % (namespace, len(d)) | |
|
444 | ) | |
|
445 | yield pushkey.decodekeys(d) | |
|
446 | }, decode | |
|
446 | 447 | |
|
447 | 448 | |
|
448 | 449 | def _readbundlerevs(bundlerepo): |
|
449 | 450 | return list(bundlerepo.revs(b'bundle()')) |
|
450 | 451 | |
|
451 | 452 | |
|
452 | 453 | def _includefilelogstobundle(bundlecaps, bundlerepo, bundlerevs, ui): |
|
453 | 454 | """Tells remotefilelog to include all changed files to the changegroup |
|
454 | 455 | |
|
455 | 456 | By default remotefilelog doesn't include file content to the changegroup. |
|
456 | 457 | But we need to include it if we are fetching from bundlestore. |
|
457 | 458 | """ |
|
458 | 459 | changedfiles = set() |
|
459 | 460 | cl = bundlerepo.changelog |
|
460 | 461 | for r in bundlerevs: |
|
461 | 462 | # [3] means changed files |
|
462 | 463 | changedfiles.update(cl.read(r)[3]) |
|
463 | 464 | if not changedfiles: |
|
464 | 465 | return bundlecaps |
|
465 | 466 | |
|
466 | 467 | changedfiles = b'\0'.join(changedfiles) |
|
467 | 468 | newcaps = [] |
|
468 | 469 | appended = False |
|
469 | 470 | for cap in bundlecaps or []: |
|
470 | 471 | if cap.startswith(b'excludepattern='): |
|
471 | 472 | newcaps.append(b'\0'.join((cap, changedfiles))) |
|
472 | 473 | appended = True |
|
473 | 474 | else: |
|
474 | 475 | newcaps.append(cap) |
|
475 | 476 | if not appended: |
|
476 | 477 | # Not found excludepattern cap. Just append it |
|
477 | 478 | newcaps.append(b'excludepattern=' + changedfiles) |
|
478 | 479 | |
|
479 | 480 | return newcaps |
|
480 | 481 | |
|
481 | 482 | |
|
482 | 483 | def _rebundle(bundlerepo, bundleroots, unknownhead): |
|
483 | 484 | """ |
|
484 | 485 | The bundle may include more revisions than the user requested; for

485 | 486 | example, if the user asks for one revision but the bundle also contains

486 | 487 | its descendants. This function filters out the revisions the user did not request.
|
487 | 488 | """ |
|
488 | 489 | parts = [] |
|
489 | 490 | |
|
490 | 491 | version = b'02' |
|
491 | 492 | outgoing = discovery.outgoing( |
|
492 | 493 | bundlerepo, commonheads=bundleroots, ancestorsof=[unknownhead] |
|
493 | 494 | ) |
|
494 | 495 | cgstream = changegroup.makestream(bundlerepo, outgoing, version, b'pull') |
|
495 | 496 | cgstream = util.chunkbuffer(cgstream).read() |
|
496 | 497 | cgpart = bundle2.bundlepart(b'changegroup', data=cgstream) |
|
497 | 498 | cgpart.addparam(b'version', version) |
|
498 | 499 | parts.append(cgpart) |
|
499 | 500 | |
|
500 | 501 | return parts |
|
501 | 502 | |
|
502 | 503 | |
|
503 | 504 | def _getbundleroots(oldrepo, bundlerepo, bundlerevs): |
|
504 | 505 | cl = bundlerepo.changelog |
|
505 | 506 | bundleroots = [] |
|
506 | 507 | for rev in bundlerevs: |
|
507 | 508 | node = cl.node(rev) |
|
508 | 509 | parents = cl.parents(node) |
|
509 | 510 | for parent in parents: |
|
510 | 511 | # include all revs that exist in the main repo |
|
511 | 512 | # to make sure that bundle may apply client-side |
|
512 | 513 | if parent in oldrepo: |
|
513 | 514 | bundleroots.append(parent) |
|
514 | 515 | return bundleroots |
|
515 | 516 | |
|
516 | 517 | |
|
517 | 518 | def _needsrebundling(head, bundlerepo): |
|
518 | 519 | bundleheads = list(bundlerepo.revs(b'heads(bundle())')) |
|
519 | 520 | return not ( |
|
520 | 521 | len(bundleheads) == 1 and bundlerepo[bundleheads[0]].node() == head |
|
521 | 522 | ) |
|
522 | 523 | |
|
523 | 524 | |
|
524 | 525 | def _generateoutputparts(head, bundlerepo, bundleroots, bundlefile): |
|
525 | 526 | """generates bundle that will be send to the user |
|
526 | 527 | |
|
527 | 528 | returns tuple with raw bundle string and bundle type |
|
528 | 529 | """ |
|
529 | 530 | parts = [] |
|
530 | 531 | if not _needsrebundling(head, bundlerepo): |
|
531 | 532 | with util.posixfile(bundlefile, b"rb") as f: |
|
532 | 533 | unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile) |
|
533 | 534 | if isinstance(unbundler, changegroup.cg1unpacker): |
|
534 | 535 | part = bundle2.bundlepart( |
|
535 | 536 | b'changegroup', data=unbundler._stream.read() |
|
536 | 537 | ) |
|
537 | 538 | part.addparam(b'version', b'01') |
|
538 | 539 | parts.append(part) |
|
539 | 540 | elif isinstance(unbundler, bundle2.unbundle20): |
|
540 | 541 | haschangegroup = False |
|
541 | 542 | for part in unbundler.iterparts(): |
|
542 | 543 | if part.type == b'changegroup': |
|
543 | 544 | haschangegroup = True |
|
544 | 545 | newpart = bundle2.bundlepart(part.type, data=part.read()) |
|
545 | 546 | for key, value in pycompat.iteritems(part.params): |
|
546 | 547 | newpart.addparam(key, value) |
|
547 | 548 | parts.append(newpart) |
|
548 | 549 | |
|
549 | 550 | if not haschangegroup: |
|
550 | 551 | raise error.Abort( |
|
551 | 552 | b'unexpected bundle without changegroup part, ' |
|
552 | 553 | + b'head: %s' % hex(head), |
|
553 | 554 | hint=b'report to administrator', |
|
554 | 555 | ) |
|
555 | 556 | else: |
|
556 | 557 | raise error.Abort(b'unknown bundle type') |
|
557 | 558 | else: |
|
558 | 559 | parts = _rebundle(bundlerepo, bundleroots, head) |
|
559 | 560 | |
|
560 | 561 | return parts |
|
561 | 562 | |
|
562 | 563 | |
|
563 | 564 | def getbundlechunks(orig, repo, source, heads=None, bundlecaps=None, **kwargs): |
|
564 | 565 | heads = heads or [] |
|
565 | 566 | # newheads are parents of roots of scratch bundles that were requested |
|
566 | 567 | newphases = {} |
|
567 | 568 | scratchbundles = [] |
|
568 | 569 | newheads = [] |
|
569 | 570 | scratchheads = [] |
|
570 | 571 | nodestobundle = {} |
|
571 | 572 | allbundlestocleanup = [] |
|
572 | 573 | try: |
|
573 | 574 | for head in heads: |
|
574 | 575 | if not repo.changelog.index.has_node(head): |
|
575 | 576 | if head not in nodestobundle: |
|
576 | 577 | newbundlefile = common.downloadbundle(repo, head) |
|
577 | 578 | bundlepath = b"bundle:%s+%s" % (repo.root, newbundlefile) |
|
578 | 579 | bundlerepo = hg.repository(repo.ui, bundlepath) |
|
579 | 580 | |
|
580 | 581 | allbundlestocleanup.append((bundlerepo, newbundlefile)) |
|
581 | 582 | bundlerevs = set(_readbundlerevs(bundlerepo)) |
|
582 | 583 | bundlecaps = _includefilelogstobundle( |
|
583 | 584 | bundlecaps, bundlerepo, bundlerevs, repo.ui |
|
584 | 585 | ) |
|
585 | 586 | cl = bundlerepo.changelog |
|
586 | 587 | bundleroots = _getbundleroots(repo, bundlerepo, bundlerevs) |
|
587 | 588 | for rev in bundlerevs: |
|
588 | 589 | node = cl.node(rev) |
|
589 | 590 | newphases[hex(node)] = str(phases.draft) |
|
590 | 591 | nodestobundle[node] = ( |
|
591 | 592 | bundlerepo, |
|
592 | 593 | bundleroots, |
|
593 | 594 | newbundlefile, |
|
594 | 595 | ) |
|
595 | 596 | |
|
596 | 597 | scratchbundles.append( |
|
597 | 598 | _generateoutputparts(head, *nodestobundle[head]) |
|
598 | 599 | ) |
|
599 | 600 | newheads.extend(bundleroots) |
|
600 | 601 | scratchheads.append(head) |
|
601 | 602 | finally: |
|
602 | 603 | for bundlerepo, bundlefile in allbundlestocleanup: |
|
603 | 604 | bundlerepo.close() |
|
604 | 605 | try: |
|
605 | 606 | os.unlink(bundlefile) |
|
606 | 607 | except (IOError, OSError): |
|
607 | 608 | # if we can't cleanup the file then just ignore the error, |
|
608 | 609 | # no need to fail |
|
609 | 610 | pass |
|
610 | 611 | |
|
611 | 612 | pullfrombundlestore = bool(scratchbundles) |
|
612 | 613 | wrappedchangegrouppart = False |
|
613 | 614 | wrappedlistkeys = False |
|
614 | 615 | oldchangegrouppart = exchange.getbundle2partsmapping[b'changegroup'] |
|
615 | 616 | try: |
|
616 | 617 | |
|
617 | 618 | def _changegrouppart(bundler, *args, **kwargs): |
|
618 | 619 | # Order is important here. First add non-scratch part |
|
619 | 620 | # and only then add parts with scratch bundles because |
|
620 | 621 | # non-scratch part contains parents of roots of scratch bundles. |
|
621 | 622 | result = oldchangegrouppart(bundler, *args, **kwargs) |
|
622 | 623 | for bundle in scratchbundles: |
|
623 | 624 | for part in bundle: |
|
624 | 625 | bundler.addpart(part) |
|
625 | 626 | return result |
|
626 | 627 | |
|
627 | 628 | exchange.getbundle2partsmapping[b'changegroup'] = _changegrouppart |
|
628 | 629 | wrappedchangegrouppart = True |
|
629 | 630 | |
|
630 | 631 | def _listkeys(orig, self, namespace): |
|
631 | 632 | origvalues = orig(self, namespace) |
|
632 | 633 | if namespace == b'phases' and pullfrombundlestore: |
|
633 | 634 | if origvalues.get(b'publishing') == b'True': |
|
634 | 635 | # Make repo non-publishing to preserve draft phase |
|
635 | 636 | del origvalues[b'publishing'] |
|
636 | 637 | origvalues.update(newphases) |
|
637 | 638 | return origvalues |
|
638 | 639 | |
|
639 | 640 | extensions.wrapfunction( |
|
640 | 641 | localrepo.localrepository, b'listkeys', _listkeys |
|
641 | 642 | ) |
|
642 | 643 | wrappedlistkeys = True |
|
643 | 644 | heads = list((set(newheads) | set(heads)) - set(scratchheads)) |
|
644 | 645 | result = orig( |
|
645 | 646 | repo, source, heads=heads, bundlecaps=bundlecaps, **kwargs |
|
646 | 647 | ) |
|
647 | 648 | finally: |
|
648 | 649 | if wrappedchangegrouppart: |
|
649 | 650 | exchange.getbundle2partsmapping[b'changegroup'] = oldchangegrouppart |
|
650 | 651 | if wrappedlistkeys: |
|
651 | 652 | extensions.unwrapfunction( |
|
652 | 653 | localrepo.localrepository, b'listkeys', _listkeys |
|
653 | 654 | ) |
|
654 | 655 | return result |
|
655 | 656 | |
|
656 | 657 | |
|
657 | 658 | def _lookupwrap(orig): |
|
658 | 659 | def _lookup(repo, proto, key): |
|
659 | 660 | localkey = encoding.tolocal(key) |
|
660 | 661 | |
|
661 | 662 | if isinstance(localkey, str) and _scratchbranchmatcher(localkey): |
|
662 | 663 | scratchnode = repo.bundlestore.index.getnode(localkey) |
|
663 | 664 | if scratchnode: |
|
664 | 665 | return b"%d %s\n" % (1, scratchnode) |
|
665 | 666 | else: |
|
666 | 667 | return b"%d %s\n" % ( |
|
667 | 668 | 0, |
|
668 | 669 | b'scratch branch %s not found' % localkey, |
|
669 | 670 | ) |
|
670 | 671 | else: |
|
671 | 672 | try: |
|
672 | 673 | r = hex(repo.lookup(localkey)) |
|
673 | 674 | return b"%d %s\n" % (1, r) |
|
674 | 675 | except Exception as inst: |
|
675 | 676 | if repo.bundlestore.index.getbundle(localkey): |
|
676 | 677 | return b"%d %s\n" % (1, localkey) |
|
677 | 678 | else: |
|
678 | 679 | r = stringutil.forcebytestr(inst) |
|
679 | 680 | return b"%d %s\n" % (0, r) |
|
680 | 681 | |
|
681 | 682 | return _lookup |
|
682 | 683 | |
|
683 | 684 | |
|
684 | 685 | def _pull(orig, ui, repo, source=b"default", **opts): |
|
685 | 686 | opts = pycompat.byteskwargs(opts) |
|
686 | 687 | # Copy paste from `pull` command |
|
687 | 688 | source, branches = urlutil.get_unique_pull_path( |
|
688 | 689 | b"infinite-push's pull", |
|
689 | 690 | repo, |
|
690 | 691 | ui, |
|
691 | 692 | source, |
|
692 | 693 | default_branches=opts.get(b'branch'), |
|
693 | 694 | ) |
|
694 | 695 | |
|
695 | 696 | scratchbookmarks = {} |
|
696 | 697 | unfi = repo.unfiltered() |
|
697 | 698 | unknownnodes = [] |
|
698 | 699 | for rev in opts.get(b'rev', []): |
|
699 | 700 | if rev not in unfi: |
|
700 | 701 | unknownnodes.append(rev) |
|
701 | 702 | if opts.get(b'bookmark'): |
|
702 | 703 | bookmarks = [] |
|
703 | 704 | revs = opts.get(b'rev') or [] |
|
704 | 705 | for bookmark in opts.get(b'bookmark'): |
|
705 | 706 | if _scratchbranchmatcher(bookmark): |
|
706 | 707 | # rev is not known yet |
|
707 | 708 | # it will be fetched with listkeyspatterns next |
|
708 | 709 | scratchbookmarks[bookmark] = b'REVTOFETCH' |
|
709 | 710 | else: |
|
710 | 711 | bookmarks.append(bookmark) |
|
711 | 712 | |
|
712 | 713 | if scratchbookmarks: |
|
713 | 714 | other = hg.peer(repo, opts, source) |
|
714 | 715 | try: |
|
715 | 716 | fetchedbookmarks = other.listkeyspatterns( |
|
716 | 717 | b'bookmarks', patterns=scratchbookmarks |
|
717 | 718 | ) |
|
718 | 719 | for bookmark in scratchbookmarks: |
|
719 | 720 | if bookmark not in fetchedbookmarks: |
|
720 | 721 | raise error.Abort( |
|
721 | 722 | b'remote bookmark %s not found!' % bookmark |
|
722 | 723 | ) |
|
723 | 724 | scratchbookmarks[bookmark] = fetchedbookmarks[bookmark] |
|
724 | 725 | revs.append(fetchedbookmarks[bookmark]) |
|
725 | 726 | finally: |
|
726 | 727 | other.close() |
|
727 | 728 | opts[b'bookmark'] = bookmarks |
|
728 | 729 | opts[b'rev'] = revs |
|
729 | 730 | |
|
730 | 731 | if scratchbookmarks or unknownnodes: |
|
731 | 732 | # Set anyincoming to True |
|
732 | 733 | extensions.wrapfunction( |
|
733 | 734 | discovery, b'findcommonincoming', _findcommonincoming |
|
734 | 735 | ) |
|
735 | 736 | try: |
|
736 | 737 | # Remote scratch bookmarks will be deleted because remotenames doesn't |
|
737 | 738 | # know about them. Let's save them before the pull and restore them after
|
738 | 739 | remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, source) |
|
739 | 740 | result = orig(ui, repo, source, **pycompat.strkwargs(opts)) |
|
740 | 741 | # TODO(stash): race condition is possible |
|
741 | 742 | # if scratch bookmarks was updated right after orig. |
|
742 | 743 | # But that's unlikely and shouldn't be harmful. |
|
743 | 744 | if common.isremotebooksenabled(ui): |
|
744 | 745 | remotescratchbookmarks.update(scratchbookmarks) |
|
745 | 746 | _saveremotebookmarks(repo, remotescratchbookmarks, source) |
|
746 | 747 | else: |
|
747 | 748 | _savelocalbookmarks(repo, scratchbookmarks) |
|
748 | 749 | return result |
|
749 | 750 | finally: |
|
750 | 751 | if scratchbookmarks: |
|
751 | 752 | extensions.unwrapfunction(discovery, b'findcommonincoming') |
|
752 | 753 | |
|
753 | 754 | |
|
754 | 755 | def _readscratchremotebookmarks(ui, repo, other): |
|
755 | 756 | if common.isremotebooksenabled(ui): |
|
756 | 757 | remotenamesext = extensions.find(b'remotenames') |
|
757 | 758 | remotepath = remotenamesext.activepath(repo.ui, other) |
|
758 | 759 | result = {} |
|
759 | 760 | # Let's refresh remotenames to make sure we have it up to date |
|
760 | 761 | # Seems that `repo.names['remotebookmarks']` may return stale bookmarks |
|
761 | 762 | # and it results in deleting scratch bookmarks. Our best guess at how to
|
762 | 763 | # fix it is to use `clearnames()` |
|
763 | 764 | repo._remotenames.clearnames() |
|
764 | 765 | for remotebookmark in repo.names[b'remotebookmarks'].listnames(repo): |
|
765 | 766 | path, bookname = remotenamesext.splitremotename(remotebookmark) |
|
766 | 767 | if path == remotepath and _scratchbranchmatcher(bookname): |
|
767 | 768 | nodes = repo.names[b'remotebookmarks'].nodes( |
|
768 | 769 | repo, remotebookmark |
|
769 | 770 | ) |
|
770 | 771 | if nodes: |
|
771 | 772 | result[bookname] = hex(nodes[0]) |
|
772 | 773 | return result |
|
773 | 774 | else: |
|
774 | 775 | return {} |
|
775 | 776 | |
|
776 | 777 | |
|
777 | 778 | def _saveremotebookmarks(repo, newbookmarks, remote): |
|
778 | 779 | remotenamesext = extensions.find(b'remotenames') |
|
779 | 780 | remotepath = remotenamesext.activepath(repo.ui, remote) |
|
780 | 781 | branches = collections.defaultdict(list) |
|
781 | 782 | bookmarks = {} |
|
782 | 783 | remotenames = remotenamesext.readremotenames(repo) |
|
783 | 784 | for hexnode, nametype, remote, rname in remotenames: |
|
784 | 785 | if remote != remotepath: |
|
785 | 786 | continue |
|
786 | 787 | if nametype == b'bookmarks': |
|
787 | 788 | if rname in newbookmarks: |
|
788 | 789 | # This can happen if we have a normal bookmark that matches the
|
789 | 790 | # scratch branch pattern. In this case just use the current |
|
790 | 791 | # bookmark node |
|
791 | 792 | del newbookmarks[rname] |
|
792 | 793 | bookmarks[rname] = hexnode |
|
793 | 794 | elif nametype == b'branches': |
|
794 | 795 | # saveremotenames expects 20 byte binary nodes for branches |
|
795 | 796 | branches[rname].append(bin(hexnode)) |
|
796 | 797 | |
|
797 | 798 | for bookmark, hexnode in pycompat.iteritems(newbookmarks): |
|
798 | 799 | bookmarks[bookmark] = hexnode |
|
799 | 800 | remotenamesext.saveremotenames(repo, remotepath, branches, bookmarks) |
|
800 | 801 | |
|
801 | 802 | |
|
802 | 803 | def _savelocalbookmarks(repo, bookmarks): |
|
803 | 804 | if not bookmarks: |
|
804 | 805 | return |
|
805 | 806 | with repo.wlock(), repo.lock(), repo.transaction(b'bookmark') as tr: |
|
806 | 807 | changes = [] |
|
807 | 808 | for scratchbook, node in pycompat.iteritems(bookmarks): |
|
808 | 809 | changectx = repo[node] |
|
809 | 810 | changes.append((scratchbook, changectx.node())) |
|
810 | 811 | repo._bookmarks.applychanges(repo, tr, changes) |
|
811 | 812 | |
|
812 | 813 | |
|
813 | 814 | def _findcommonincoming(orig, *args, **kwargs): |
|
814 | 815 | common, inc, remoteheads = orig(*args, **kwargs) |
|
815 | 816 | return common, True, remoteheads |
|
816 | 817 | |
|
817 | 818 | |
|
818 | 819 | def _push(orig, ui, repo, *dests, **opts): |
|
819 | 820 | opts = pycompat.byteskwargs(opts) |
|
820 | 821 | bookmark = opts.get(b'bookmark') |
|
821 | 822 | # we only support pushing one infinitepush bookmark at once |
|
822 | 823 | if len(bookmark) == 1: |
|
823 | 824 | bookmark = bookmark[0] |
|
824 | 825 | else: |
|
825 | 826 | bookmark = b'' |
|
826 | 827 | |
|
827 | 828 | oldphasemove = None |
|
828 | 829 | overrides = {(experimental, configbookmark): bookmark} |
|
829 | 830 | |
|
830 | 831 | with ui.configoverride(overrides, b'infinitepush'): |
|
831 | 832 | scratchpush = opts.get(b'bundle_store') |
|
832 | 833 | if _scratchbranchmatcher(bookmark): |
|
833 | 834 | scratchpush = True |
|
834 | 835 | # bundle2 can be sent back after push (for example, bundle2 |
|
835 | 836 | # containing `pushkey` part to update bookmarks) |
|
836 | 837 | ui.setconfig(experimental, b'bundle2.pushback', True) |
|
837 | 838 | |
|
838 | 839 | if scratchpush: |
|
839 | 840 | # this is an infinitepush; we don't want the bookmark to be applied

840 | 841 | # rather, it should be stored in the bundlestore
|
841 | 842 | opts[b'bookmark'] = [] |
|
842 | 843 | ui.setconfig(experimental, configscratchpush, True) |
|
843 | 844 | oldphasemove = extensions.wrapfunction( |
|
844 | 845 | exchange, b'_localphasemove', _phasemove |
|
845 | 846 | ) |
|
846 | 847 | |
|
847 | 848 | paths = list(urlutil.get_push_paths(repo, ui, dests)) |
|
848 | 849 | if len(paths) > 1: |
|
849 | 850 | msg = _(b'cannot push to multiple paths with infinitepush')
|
850 | 851 | raise error.Abort(msg) |
|
851 | 852 | |
|
852 | 853 | path = paths[0] |
|
853 | 854 | destpath = path.pushloc or path.loc |
|
854 | 855 | # Remote scratch bookmarks will be deleted because remotenames doesn't |
|
855 | 856 | # know about them. Let's save it before push and restore after |
|
856 | 857 | remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, destpath) |
|
857 | 858 | result = orig(ui, repo, *dests, **pycompat.strkwargs(opts)) |
|
858 | 859 | if common.isremotebooksenabled(ui): |
|
859 | 860 | if bookmark and scratchpush: |
|
860 | 861 | other = hg.peer(repo, opts, destpath) |
|
861 | 862 | try: |
|
862 | 863 | fetchedbookmarks = other.listkeyspatterns( |
|
863 | 864 | b'bookmarks', patterns=[bookmark] |
|
864 | 865 | ) |
|
865 | 866 | remotescratchbookmarks.update(fetchedbookmarks) |
|
866 | 867 | finally: |
|
867 | 868 | other.close() |
|
868 | 869 | _saveremotebookmarks(repo, remotescratchbookmarks, destpath) |
|
869 | 870 | if oldphasemove: |
|
870 | 871 | exchange._localphasemove = oldphasemove |
|
871 | 872 | return result |
|
872 | 873 | |
|
873 | 874 | |
|
874 | 875 | def _deleteinfinitepushbookmarks(ui, repo, path, names): |
|
875 | 876 | """Prune remote names by removing the bookmarks we don't want anymore, |
|
876 | 877 | then writing the result back to disk |
|
877 | 878 | """ |
|
878 | 879 | remotenamesext = extensions.find(b'remotenames') |
|
879 | 880 | |
|
880 | 881 | # remotename format is: |
|
881 | 882 | # (node, nametype ("branches" or "bookmarks"), remote, name) |
|
882 | 883 | nametype_idx = 1 |
|
883 | 884 | remote_idx = 2 |
|
884 | 885 | name_idx = 3 |
|
885 | 886 | remotenames = [ |
|
886 | 887 | remotename |
|
887 | 888 | for remotename in remotenamesext.readremotenames(repo) |
|
888 | 889 | if remotename[remote_idx] == path |
|
889 | 890 | ] |
|
890 | 891 | remote_bm_names = [ |
|
891 | 892 | remotename[name_idx] |
|
892 | 893 | for remotename in remotenames |
|
893 | 894 | if remotename[nametype_idx] == b"bookmarks" |
|
894 | 895 | ] |
|
895 | 896 | |
|
896 | 897 | for name in names: |
|
897 | 898 | if name not in remote_bm_names: |
|
898 | 899 | raise error.Abort( |
|
899 | 900 | _( |
|
900 | 901 | b"infinitepush bookmark '{}' does not exist " |
|
901 | 902 | b"in path '{}'" |
|
902 | 903 | ).format(name, path) |
|
903 | 904 | ) |
|
904 | 905 | |
|
905 | 906 | bookmarks = {} |
|
906 | 907 | branches = collections.defaultdict(list) |
|
907 | 908 | for node, nametype, remote, name in remotenames: |
|
908 | 909 | if nametype == b"bookmarks" and name not in names: |
|
909 | 910 | bookmarks[name] = node |
|
910 | 911 | elif nametype == b"branches": |
|
911 | 912 | # saveremotenames wants binary nodes for branches |
|
912 | 913 | branches[name].append(bin(node)) |
|
913 | 914 | |
|
914 | 915 | remotenamesext.saveremotenames(repo, path, branches, bookmarks) |
|
915 | 916 | |
|
916 | 917 | |
|
917 | 918 | def _phasemove(orig, pushop, nodes, phase=phases.public): |
|
918 | 919 | """prevent commits from being marked public |
|
919 | 920 | |
|
920 | 921 | Since these are going to a scratch branch, they aren't really being |
|
921 | 922 | published.""" |
|
922 | 923 | |
|
923 | 924 | if phase != phases.public: |
|
924 | 925 | orig(pushop, nodes, phase) |
|
925 | 926 | |
|
926 | 927 | |
|
927 | 928 | @exchange.b2partsgenerator(scratchbranchparttype) |
|
928 | 929 | def partgen(pushop, bundler): |
|
929 | 930 | bookmark = pushop.ui.config(experimental, configbookmark) |
|
930 | 931 | scratchpush = pushop.ui.configbool(experimental, configscratchpush) |
|
931 | 932 | if b'changesets' in pushop.stepsdone or not scratchpush: |
|
932 | 933 | return |
|
933 | 934 | |
|
934 | 935 | if scratchbranchparttype not in bundle2.bundle2caps(pushop.remote): |
|
935 | 936 | return |
|
936 | 937 | |
|
937 | 938 | pushop.stepsdone.add(b'changesets') |
|
938 | 939 | if not pushop.outgoing.missing: |
|
939 | 940 | pushop.ui.status(_(b'no changes found\n')) |
|
940 | 941 | pushop.cgresult = 0 |
|
941 | 942 | return |
|
942 | 943 | |
|
943 | 944 | # This parameter tells the server that the following bundle is an |
|
944 | 945 | # infinitepush. This lets it switch the part processing to our infinitepush
|
945 | 946 | # code path. |
|
946 | 947 | bundler.addparam(b"infinitepush", b"True") |
|
947 | 948 | |
|
948 | 949 | scratchparts = bundleparts.getscratchbranchparts( |
|
949 | 950 | pushop.repo, pushop.remote, pushop.outgoing, pushop.ui, bookmark |
|
950 | 951 | ) |
|
951 | 952 | |
|
952 | 953 | for scratchpart in scratchparts: |
|
953 | 954 | bundler.addpart(scratchpart) |
|
954 | 955 | |
|
955 | 956 | def handlereply(op): |
|
956 | 957 | # server either succeeds or aborts; no code to read |
|
957 | 958 | pushop.cgresult = 1 |
|
958 | 959 | |
|
959 | 960 | return handlereply |
|
960 | 961 | |
|
961 | 962 | |
|
962 | 963 | bundle2.capabilities[bundleparts.scratchbranchparttype] = () |
|
963 | 964 | |
|
964 | 965 | |
|
965 | 966 | def _getrevs(bundle, oldnode, force, bookmark): |
|
966 | 967 | b'extracts and validates the revs to be imported' |
|
967 | 968 | revs = [bundle[r] for r in bundle.revs(b'sort(bundle())')] |
|
968 | 969 | |
|
969 | 970 | # new bookmark |
|
970 | 971 | if oldnode is None: |
|
971 | 972 | return revs |
|
972 | 973 | |
|
973 | 974 | # Fast forward update |
|
974 | 975 | if oldnode in bundle and list(bundle.set(b'bundle() & %s::', oldnode)): |
|
975 | 976 | return revs |
|
976 | 977 | |
|
977 | 978 | return revs |
|
978 | 979 | |
|
979 | 980 | |
|
980 | 981 | @contextlib.contextmanager |
|
981 | 982 | def logservicecall(logger, service, **kwargs): |
|
982 | 983 | start = time.time() |
|
983 | 984 | logger(service, eventtype=b'start', **kwargs) |
|
984 | 985 | try: |
|
985 | 986 | yield |
|
986 | 987 | logger( |
|
987 | 988 | service, |
|
988 | 989 | eventtype=b'success', |
|
989 | 990 | elapsedms=(time.time() - start) * 1000, |
|
990 | 991 | **kwargs |
|
991 | 992 | ) |
|
992 | 993 | except Exception as e: |
|
993 | 994 | logger( |
|
994 | 995 | service, |
|
995 | 996 | eventtype=b'failure', |
|
996 | 997 | elapsedms=(time.time() - start) * 1000, |
|
997 | 998 | errormsg=stringutil.forcebytestr(e), |
|
998 | 999 | **kwargs |
|
999 | 1000 | ) |
|
1000 | 1001 | raise |
|
1001 | 1002 | |
|
1002 | 1003 | |
|
1003 | 1004 | def _getorcreateinfinitepushlogger(op): |
|
1004 | 1005 | logger = op.records[b'infinitepushlogger'] |
|
1005 | 1006 | if not logger: |
|
1006 | 1007 | ui = op.repo.ui |
|
1007 | 1008 | try: |
|
1008 | 1009 | username = procutil.getuser() |
|
1009 | 1010 | except Exception: |
|
1010 | 1011 | username = b'unknown' |
|
1011 | 1012 | # Generate random request id to be able to find all logged entries |
|
1012 | 1013 | # for the same request. Since requestid is pseudo-generated it may |
|
1013 | 1014 | # not be unique, but we assume that (hostname, username, requestid) |
|
1014 | 1015 | # is unique. |
|
1015 | 1016 | random.seed() |
|
1016 | 1017 | requestid = random.randint(0, 2000000000) |
|
1017 | 1018 | hostname = socket.gethostname() |
|
1018 | 1019 | logger = functools.partial( |
|
1019 | 1020 | ui.log, |
|
1020 | 1021 | b'infinitepush', |
|
1021 | 1022 | user=username, |
|
1022 | 1023 | requestid=requestid, |
|
1023 | 1024 | hostname=hostname, |
|
1024 | 1025 | reponame=ui.config(b'infinitepush', b'reponame'), |
|
1025 | 1026 | ) |
|
1026 | 1027 | op.records.add(b'infinitepushlogger', logger) |
|
1027 | 1028 | else: |
|
1028 | 1029 | logger = logger[0] |
|
1029 | 1030 | return logger |
|
1030 | 1031 | |
|
1031 | 1032 | |
|
1032 | 1033 | def storetobundlestore(orig, repo, op, unbundler): |
|
1033 | 1034 | """stores the incoming bundle coming from push command to the bundlestore |
|
1034 | 1035 | instead of applying on the revlogs""" |
|
1035 | 1036 | |
|
1036 | 1037 | repo.ui.status(_(b"storing changesets on the bundlestore\n")) |
|
1037 | 1038 | bundler = bundle2.bundle20(repo.ui) |
|
1038 | 1039 | |
|
1039 | 1040 | # processing each part and storing it in bundler |
|
1040 | 1041 | with bundle2.partiterator(repo, op, unbundler) as parts: |
|
1041 | 1042 | for part in parts: |
|
1042 | 1043 | bundlepart = None |
|
1043 | 1044 | if part.type == b'replycaps': |
|
1044 | 1045 | # This configures the current operation to allow reply parts. |
|
1045 | 1046 | bundle2._processpart(op, part) |
|
1046 | 1047 | else: |
|
1047 | 1048 | bundlepart = bundle2.bundlepart(part.type, data=part.read()) |
|
1048 | 1049 | for key, value in pycompat.iteritems(part.params): |
|
1049 | 1050 | bundlepart.addparam(key, value) |
|
1050 | 1051 | |
|
1051 | 1052 | # Certain parts require a response |
|
1052 | 1053 | if part.type in (b'pushkey', b'changegroup'): |
|
1053 | 1054 | if op.reply is not None: |
|
1054 | 1055 | rpart = op.reply.newpart(b'reply:%s' % part.type) |
|
1055 | 1056 | rpart.addparam( |
|
1056 | 1057 | b'in-reply-to', b'%d' % part.id, mandatory=False |
|
1057 | 1058 | ) |
|
1058 | 1059 | rpart.addparam(b'return', b'1', mandatory=False) |
|
1059 | 1060 | |
|
1060 | 1061 | op.records.add( |
|
1061 | 1062 | part.type, |
|
1062 | 1063 | { |
|
1063 | 1064 | b'return': 1, |
|
1064 | 1065 | }, |
|
1065 | 1066 | ) |
|
1066 | 1067 | if bundlepart: |
|
1067 | 1068 | bundler.addpart(bundlepart) |
|
1068 | 1069 | |
|
1069 | 1070 | # storing the bundle in the bundlestore |
|
1070 | 1071 | buf = util.chunkbuffer(bundler.getchunks()) |
|
1071 | 1072 | fd, bundlefile = pycompat.mkstemp() |
|
1072 | 1073 | try: |
|
1073 | 1074 | try: |
|
1074 | 1075 | fp = os.fdopen(fd, 'wb') |
|
1075 | 1076 | fp.write(buf.read()) |
|
1076 | 1077 | finally: |
|
1077 | 1078 | fp.close() |
|
1078 | 1079 | storebundle(op, {}, bundlefile) |
|
1079 | 1080 | finally: |
|
1080 | 1081 | try: |
|
1081 | 1082 | os.unlink(bundlefile) |
|
1082 | 1083 | except Exception: |
|
1083 | 1084 | # we would rather see the original exception |
|
1084 | 1085 | pass |
|
1085 | 1086 | |
|
1086 | 1087 | |
|
1087 | 1088 | def processparts(orig, repo, op, unbundler): |
|
1088 | 1089 | |
|
1089 | 1090 | # make sure we don't wrap processparts in case of `hg unbundle` |
|
1090 | 1091 | if op.source == b'unbundle': |
|
1091 | 1092 | return orig(repo, op, unbundler) |
|
1092 | 1093 | |
|
1093 | 1094 | # this server routes each push to the bundle store
|
1094 | 1095 | if repo.ui.configbool(b'infinitepush', b'pushtobundlestore'): |
|
1095 | 1096 | return storetobundlestore(orig, repo, op, unbundler) |
|
1096 | 1097 | |
|
1097 | 1098 | if unbundler.params.get(b'infinitepush') != b'True': |
|
1098 | 1099 | return orig(repo, op, unbundler) |
|
1099 | 1100 | |
|
1100 | 1101 | handleallparts = repo.ui.configbool(b'infinitepush', b'storeallparts') |
|
1101 | 1102 | |
|
1102 | 1103 | bundler = bundle2.bundle20(repo.ui) |
|
1103 | 1104 | cgparams = None |
|
1104 | 1105 | with bundle2.partiterator(repo, op, unbundler) as parts: |
|
1105 | 1106 | for part in parts: |
|
1106 | 1107 | bundlepart = None |
|
1107 | 1108 | if part.type == b'replycaps': |
|
1108 | 1109 | # This configures the current operation to allow reply parts. |
|
1109 | 1110 | bundle2._processpart(op, part) |
|
1110 | 1111 | elif part.type == bundleparts.scratchbranchparttype: |
|
1111 | 1112 | # Scratch branch parts need to be converted to normal |
|
1112 | 1113 | # changegroup parts, and the extra parameters stored for later |
|
1113 | 1114 | # when we upload to the store. Eventually those parameters will |
|
1114 | 1115 | # be put on the actual bundle instead of this part, then we can |
|
1115 | 1116 | # send a vanilla changegroup instead of the scratchbranch part. |
|
1116 | 1117 | cgversion = part.params.get(b'cgversion', b'01') |
|
1117 | 1118 | bundlepart = bundle2.bundlepart( |
|
1118 | 1119 | b'changegroup', data=part.read() |
|
1119 | 1120 | ) |
|
1120 | 1121 | bundlepart.addparam(b'version', cgversion) |
|
1121 | 1122 | cgparams = part.params |
|
1122 | 1123 | |
|
1123 | 1124 | # If we're not dumping all parts into the new bundle, we need to |
|
1124 | 1125 | # alert the future pushkey and phase-heads handler to skip |
|
1125 | 1126 | # the part. |
|
1126 | 1127 | if not handleallparts: |
|
1127 | 1128 | op.records.add( |
|
1128 | 1129 | scratchbranchparttype + b'_skippushkey', True |
|
1129 | 1130 | ) |
|
1130 | 1131 | op.records.add( |
|
1131 | 1132 | scratchbranchparttype + b'_skipphaseheads', True |
|
1132 | 1133 | ) |
|
1133 | 1134 | else: |
|
1134 | 1135 | if handleallparts: |
|
1135 | 1136 | # Ideally we would not process any parts, and instead just |
|
1136 | 1137 | # forward them to the bundle for storage, but since this |
|
1137 | 1138 | # differs from previous behavior, we need to put it behind a |
|
1138 | 1139 | # config flag for incremental rollout. |
|
1139 | 1140 | bundlepart = bundle2.bundlepart(part.type, data=part.read()) |
|
1140 | 1141 | for key, value in pycompat.iteritems(part.params): |
|
1141 | 1142 | bundlepart.addparam(key, value) |
|
1142 | 1143 | |
|
1143 | 1144 | # Certain parts require a response |
|
1144 | 1145 | if part.type == b'pushkey': |
|
1145 | 1146 | if op.reply is not None: |
|
1146 | 1147 | rpart = op.reply.newpart(b'reply:pushkey') |
|
1147 | 1148 | rpart.addparam( |
|
1148 | 1149 | b'in-reply-to', str(part.id), mandatory=False |
|
1149 | 1150 | ) |
|
1150 | 1151 | rpart.addparam(b'return', b'1', mandatory=False) |
|
1151 | 1152 | else: |
|
1152 | 1153 | bundle2._processpart(op, part) |
|
1153 | 1154 | |
|
1154 | 1155 | if handleallparts: |
|
1155 | 1156 | op.records.add( |
|
1156 | 1157 | part.type, |
|
1157 | 1158 | { |
|
1158 | 1159 | b'return': 1, |
|
1159 | 1160 | }, |
|
1160 | 1161 | ) |
|
1161 | 1162 | if bundlepart: |
|
1162 | 1163 | bundler.addpart(bundlepart) |
|
1163 | 1164 | |
|
1164 | 1165 | # If commits were sent, store them |
|
1165 | 1166 | if cgparams: |
|
1166 | 1167 | buf = util.chunkbuffer(bundler.getchunks()) |
|
1167 | 1168 | fd, bundlefile = pycompat.mkstemp() |
|
1168 | 1169 | try: |
|
1169 | 1170 | try: |
|
1170 | 1171 | fp = os.fdopen(fd, 'wb') |
|
1171 | 1172 | fp.write(buf.read()) |
|
1172 | 1173 | finally: |
|
1173 | 1174 | fp.close() |
|
1174 | 1175 | storebundle(op, cgparams, bundlefile) |
|
1175 | 1176 | finally: |
|
1176 | 1177 | try: |
|
1177 | 1178 | os.unlink(bundlefile) |
|
1178 | 1179 | except Exception: |
|
1179 | 1180 | # we would rather see the original exception |
|
1180 | 1181 | pass |
|
1181 | 1182 | |
|
1182 | 1183 | |
|
1183 | 1184 | def storebundle(op, params, bundlefile): |
|
1184 | 1185 | log = _getorcreateinfinitepushlogger(op) |
|
1185 | 1186 | parthandlerstart = time.time() |
|
1186 | 1187 | log(scratchbranchparttype, eventtype=b'start') |
|
1187 | 1188 | index = op.repo.bundlestore.index |
|
1188 | 1189 | store = op.repo.bundlestore.store |
|
1189 | 1190 | op.records.add(scratchbranchparttype + b'_skippushkey', True) |
|
1190 | 1191 | |
|
1191 | 1192 | bundle = None |
|
1192 | 1193 | try: # guards bundle |
|
1193 | 1194 | bundlepath = b"bundle:%s+%s" % (op.repo.root, bundlefile) |
|
1194 | 1195 | bundle = hg.repository(op.repo.ui, bundlepath) |
|
1195 | 1196 | |
|
1196 | 1197 | bookmark = params.get(b'bookmark') |
|
1197 | 1198 | bookprevnode = params.get(b'bookprevnode', b'') |
|
1198 | 1199 | force = params.get(b'force') |
|
1199 | 1200 | |
|
1200 | 1201 | if bookmark: |
|
1201 | 1202 | oldnode = index.getnode(bookmark) |
|
1202 | 1203 | else: |
|
1203 | 1204 | oldnode = None |
|
1204 | 1205 | bundleheads = bundle.revs(b'heads(bundle())') |
|
1205 | 1206 | if bookmark and len(bundleheads) > 1: |
|
1206 | 1207 | raise error.Abort( |
|
1207 | 1208 | _(b'cannot push more than one head to a scratch branch') |
|
1208 | 1209 | ) |
|
1209 | 1210 | |
|
1210 | 1211 | revs = _getrevs(bundle, oldnode, force, bookmark) |
|
1211 | 1212 | |
|
1212 | 1213 | # Notify the user of what is being pushed |
|
1213 | 1214 | plural = b's' if len(revs) > 1 else b'' |
|
1214 | 1215 | op.repo.ui.warn(_(b"pushing %d commit%s:\n") % (len(revs), plural)) |
|
1215 | 1216 | maxoutput = 10 |
|
1216 | 1217 | for i in range(0, min(len(revs), maxoutput)): |
|
1217 | 1218 | firstline = bundle[revs[i]].description().split(b'\n')[0][:50] |
|
1218 | 1219 | op.repo.ui.warn(b" %s %s\n" % (revs[i], firstline)) |
|
1219 | 1220 | |
|
1220 | 1221 | if len(revs) > maxoutput + 1: |
|
1221 | 1222 | op.repo.ui.warn(b" ...\n") |
|
1222 | 1223 | firstline = bundle[revs[-1]].description().split(b'\n')[0][:50] |
|
1223 | 1224 | op.repo.ui.warn(b" %s %s\n" % (revs[-1], firstline)) |
|
1224 | 1225 | |
|
1225 | 1226 | nodesctx = [bundle[rev] for rev in revs] |
|
1226 | 1227 | inindex = lambda rev: bool(index.getbundle(bundle[rev].hex())) |
|
1227 | 1228 | if bundleheads: |
|
1228 | 1229 | newheadscount = sum(not inindex(rev) for rev in bundleheads) |
|
1229 | 1230 | else: |
|
1230 | 1231 | newheadscount = 0 |
|
1231 | 1232 | # If there's a bookmark specified, there should be only one head, |
|
1232 | 1233 | # so we choose the last node, which will be that head. |
|
1233 | 1234 | # If a bug or malicious client allows there to be a bookmark |
|
1234 | 1235 | # with multiple heads, we will place the bookmark on the last head. |
|
1235 | 1236 | bookmarknode = nodesctx[-1].hex() if nodesctx else None |
|
1236 | 1237 | key = None |
|
1237 | 1238 | if newheadscount: |
|
1238 | 1239 | with open(bundlefile, b'rb') as f: |
|
1239 | 1240 | bundledata = f.read() |
|
1240 | 1241 | with logservicecall( |
|
1241 | 1242 | log, b'bundlestore', bundlesize=len(bundledata) |
|
1242 | 1243 | ): |
|
1243 | 1244 | bundlesizelimit = 100 * 1024 * 1024 # 100 MB |
|
1244 | 1245 | if len(bundledata) > bundlesizelimit: |
|
1245 | 1246 | error_msg = ( |
|
1246 | 1247 | b'bundle is too big: %d bytes. ' |
|
1247 | 1248 | + b'max allowed size is 100 MB' |
|
1248 | 1249 | ) |
|
1249 | 1250 | raise error.Abort(error_msg % (len(bundledata),)) |
|
1250 | 1251 | key = store.write(bundledata) |
|
1251 | 1252 | |
|
1252 | 1253 | with logservicecall(log, b'index', newheadscount=newheadscount), index: |
|
1253 | 1254 | if key: |
|
1254 | 1255 | index.addbundle(key, nodesctx) |
|
1255 | 1256 | if bookmark: |
|
1256 | 1257 | index.addbookmark(bookmark, bookmarknode) |
|
1257 | 1258 | _maybeaddpushbackpart( |
|
1258 | 1259 | op, bookmark, bookmarknode, bookprevnode, params |
|
1259 | 1260 | ) |
|
1260 | 1261 | log( |
|
1261 | 1262 | scratchbranchparttype, |
|
1262 | 1263 | eventtype=b'success', |
|
1263 | 1264 | elapsedms=(time.time() - parthandlerstart) * 1000, |
|
1264 | 1265 | ) |
|
1265 | 1266 | |
|
1266 | 1267 | except Exception as e: |
|
1267 | 1268 | log( |
|
1268 | 1269 | scratchbranchparttype, |
|
1269 | 1270 | eventtype=b'failure', |
|
1270 | 1271 | elapsedms=(time.time() - parthandlerstart) * 1000, |
|
1271 | 1272 | errormsg=stringutil.forcebytestr(e), |
|
1272 | 1273 | ) |
|
1273 | 1274 | raise |
|
1274 | 1275 | finally: |
|
1275 | 1276 | if bundle: |
|
1276 | 1277 | bundle.close() |
|
1277 | 1278 | |
|
1278 | 1279 | |
|
1279 | 1280 | @bundle2.parthandler( |
|
1280 | 1281 | scratchbranchparttype, |
|
1281 | 1282 | ( |
|
1282 | 1283 | b'bookmark', |
|
1283 | 1284 | b'bookprevnode', |
|
1284 | 1285 | b'force', |
|
1285 | 1286 | b'pushbackbookmarks', |
|
1286 | 1287 | b'cgversion', |
|
1287 | 1288 | ), |
|
1288 | 1289 | ) |
|
1289 | 1290 | def bundle2scratchbranch(op, part): |
|
1290 | 1291 | '''unbundle a bundle2 part containing a changegroup to store''' |
|
1291 | 1292 | |
|
1292 | 1293 | bundler = bundle2.bundle20(op.repo.ui) |
|
1293 | 1294 | cgversion = part.params.get(b'cgversion', b'01') |
|
1294 | 1295 | cgpart = bundle2.bundlepart(b'changegroup', data=part.read()) |
|
1295 | 1296 | cgpart.addparam(b'version', cgversion) |
|
1296 | 1297 | bundler.addpart(cgpart) |
|
1297 | 1298 | buf = util.chunkbuffer(bundler.getchunks()) |
|
1298 | 1299 | |
|
1299 | 1300 | fd, bundlefile = pycompat.mkstemp() |
|
1300 | 1301 | try: |
|
1301 | 1302 | try: |
|
1302 | 1303 | fp = os.fdopen(fd, 'wb') |
|
1303 | 1304 | fp.write(buf.read()) |
|
1304 | 1305 | finally: |
|
1305 | 1306 | fp.close() |
|
1306 | 1307 | storebundle(op, part.params, bundlefile) |
|
1307 | 1308 | finally: |
|
1308 | 1309 | try: |
|
1309 | 1310 | os.unlink(bundlefile) |
|
1310 | 1311 | except OSError as e: |
|
1311 | 1312 | if e.errno != errno.ENOENT: |
|
1312 | 1313 | raise |
|
1313 | 1314 | |
|
1314 | 1315 | return 1 |
|
1315 | 1316 | |
|
1316 | 1317 | |
|
1317 | 1318 | def _maybeaddpushbackpart(op, bookmark, newnode, oldnode, params): |
|
1318 | 1319 | if params.get(b'pushbackbookmarks'): |
|
1319 | 1320 | if op.reply and b'pushback' in op.reply.capabilities: |
|
1320 | 1321 | params = { |
|
1321 | 1322 | b'namespace': b'bookmarks', |
|
1322 | 1323 | b'key': bookmark, |
|
1323 | 1324 | b'new': newnode, |
|
1324 | 1325 | b'old': oldnode, |
|
1325 | 1326 | } |
|
1326 | 1327 | op.reply.newpart( |
|
1327 | 1328 | b'pushkey', mandatoryparams=pycompat.iteritems(params) |
|
1328 | 1329 | ) |
|
1329 | 1330 | |
|
1330 | 1331 | |
|
1331 | 1332 | def bundle2pushkey(orig, op, part): |
|
1332 | 1333 | """Wrapper of bundle2.handlepushkey() |
|
1333 | 1334 | |
|
1334 | 1335 | The only goal is to skip calling the original function if flag is set. |
|
1335 | 1336 | It's set if infinitepush push is happening. |
|
1336 | 1337 | """ |
|
1337 | 1338 | if op.records[scratchbranchparttype + b'_skippushkey']: |
|
1338 | 1339 | if op.reply is not None: |
|
1339 | 1340 | rpart = op.reply.newpart(b'reply:pushkey') |
|
1340 | 1341 | rpart.addparam(b'in-reply-to', str(part.id), mandatory=False) |
|
1341 | 1342 | rpart.addparam(b'return', b'1', mandatory=False) |
|
1342 | 1343 | return 1 |
|
1343 | 1344 | |
|
1344 | 1345 | return orig(op, part) |
|
1345 | 1346 | |
|
1346 | 1347 | |
|
1347 | 1348 | def bundle2handlephases(orig, op, part): |
|
1348 | 1349 | """Wrapper of bundle2.handlephases() |
|
1349 | 1350 | |
|
1350 | 1351 | The only goal is to skip calling the original function if flag is set. |
|
1351 | 1352 | It's set if infinitepush push is happening. |
|
1352 | 1353 | """ |
|
1353 | 1354 | |
|
1354 | 1355 | if op.records[scratchbranchparttype + b'_skipphaseheads']: |
|
1355 | 1356 | return |
|
1356 | 1357 | |
|
1357 | 1358 | return orig(op, part) |
|
1358 | 1359 | |
|
1359 | 1360 | |
|
1360 | 1361 | def _asyncsavemetadata(root, nodes): |
|
1361 | 1362 | """starts a separate process that fills metadata for the nodes |
|
1362 | 1363 | |
|
1363 | 1364 | This function creates a separate process and doesn't wait for its
|
1364 | 1365 | completion. This was done to avoid slowing down pushes |
|
1365 | 1366 | """ |
|
1366 | 1367 | |
|
1367 | 1368 | maxnodes = 50 |
|
1368 | 1369 | if len(nodes) > maxnodes: |
|
1369 | 1370 | return |
|
1370 | 1371 | nodesargs = [] |
|
1371 | 1372 | for node in nodes: |
|
1372 | 1373 | nodesargs.append(b'--node') |
|
1373 | 1374 | nodesargs.append(node) |
|
1374 | 1375 | with open(os.devnull, b'w+b') as devnull: |
|
1375 | 1376 | cmdline = [ |
|
1376 | 1377 | util.hgexecutable(), |
|
1377 | 1378 | b'debugfillinfinitepushmetadata', |
|
1378 | 1379 | b'-R', |
|
1379 | 1380 | root, |
|
1380 | 1381 | ] + nodesargs |
|
1381 | 1382 | # Process will run in background. We don't care about the return code |
|
1382 | 1383 | subprocess.Popen( |
|
1383 | 1384 | pycompat.rapply(procutil.tonativestr, cmdline), |
|
1384 | 1385 | close_fds=True, |
|
1385 | 1386 | shell=False, |
|
1386 | 1387 | stdin=devnull, |
|
1387 | 1388 | stdout=devnull, |
|
1388 | 1389 | stderr=devnull, |
|
1389 | 1390 | ) |
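
A quick aside on the `logservicecall` context manager defined above: it wraps a service call in a start event, then a success or failure event carrying the elapsed time in milliseconds, re-raising on failure. A minimal usage sketch follows; `printlogger` here is a hypothetical stand-in for the functools.partial-wrapped `ui.log` callable that `_getorcreateinfinitepushlogger()` returns, and is not part of the source.

    def printlogger(service, **kwargs):
        # hypothetical logger; the real code logs via ui.log(b'infinitepush', ...)
        print(service, kwargs)

    # Emits eventtype=b'start', then b'success' with elapsedms on normal exit.
    # An exception inside the block emits b'failure' with errormsg and re-raises.
    with logservicecall(printlogger, b'bundlestore', bundlesize=1024):
        pass
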
@@ -1,675 +1,676 b'' | |||
|
1 | 1 | # Copyright 2009-2010 Gregory P. Ward |
|
2 | 2 | # Copyright 2009-2010 Intelerad Medical Systems Incorporated |
|
3 | 3 | # Copyright 2010-2011 Fog Creek Software |
|
4 | 4 | # Copyright 2010-2011 Unity Technologies |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | '''High-level command function for lfconvert, plus the cmdtable.''' |
|
10 | 10 | from __future__ import absolute_import |
|
11 | 11 | |
|
12 | 12 | import errno |
|
13 | 13 | import os |
|
14 | 14 | import shutil |
|
15 | 15 | |
|
16 | 16 | from mercurial.i18n import _ |
|
17 | 17 | from mercurial.node import ( |
|
18 | 18 | bin, |
|
19 | 19 | hex, |
|
20 | 20 | ) |
|
21 | 21 | |
|
22 | 22 | from mercurial import ( |
|
23 | 23 | cmdutil, |
|
24 | 24 | context, |
|
25 | 25 | error, |
|
26 | 26 | exthelper, |
|
27 | 27 | hg, |
|
28 | 28 | lock, |
|
29 | logcmdutil, | |
|
29 | 30 | match as matchmod, |
|
30 | 31 | pycompat, |
|
31 | 32 | scmutil, |
|
32 | 33 | util, |
|
33 | 34 | ) |
|
34 | 35 | from mercurial.utils import hashutil |
|
35 | 36 | |
|
36 | 37 | from ..convert import ( |
|
37 | 38 | convcmd, |
|
38 | 39 | filemap, |
|
39 | 40 | ) |
|
40 | 41 | |
|
41 | 42 | from . import lfutil, storefactory |
|
42 | 43 | |
|
43 | 44 | release = lock.release |
|
44 | 45 | |
|
45 | 46 | # -- Commands ---------------------------------------------------------- |
|
46 | 47 | |
|
47 | 48 | eh = exthelper.exthelper() |
|
48 | 49 | |
|
49 | 50 | |
|
50 | 51 | @eh.command( |
|
51 | 52 | b'lfconvert', |
|
52 | 53 | [ |
|
53 | 54 | ( |
|
54 | 55 | b's', |
|
55 | 56 | b'size', |
|
56 | 57 | b'', |
|
57 | 58 | _(b'minimum size (MB) for files to be converted as largefiles'), |
|
58 | 59 | b'SIZE', |
|
59 | 60 | ), |
|
60 | 61 | ( |
|
61 | 62 | b'', |
|
62 | 63 | b'to-normal', |
|
63 | 64 | False, |
|
64 | 65 | _(b'convert from a largefiles repo to a normal repo'), |
|
65 | 66 | ), |
|
66 | 67 | ], |
|
67 | 68 | _(b'hg lfconvert SOURCE DEST [FILE ...]'), |
|
68 | 69 | norepo=True, |
|
69 | 70 | inferrepo=True, |
|
70 | 71 | ) |
|
71 | 72 | def lfconvert(ui, src, dest, *pats, **opts): |
|
72 | 73 | """convert a normal repository to a largefiles repository |
|
73 | 74 | |
|
74 | 75 | Convert repository SOURCE to a new repository DEST, identical to |
|
75 | 76 | SOURCE except that certain files will be converted as largefiles: |
|
76 | 77 | specifically, any file that matches any PATTERN *or* whose size is |
|
77 | 78 | above the minimum size threshold is converted as a largefile. The |
|
78 | 79 | size used to determine whether or not to track a file as a |
|
79 | 80 | largefile is the size of the first version of the file. The |
|
80 | 81 | minimum size can be specified either with --size or in |
|
81 | 82 | configuration as ``largefiles.size``. |
|
82 | 83 | |
|
83 | 84 | After running this command you will need to make sure that |
|
84 | 85 | largefiles is enabled anywhere you intend to push the new |
|
85 | 86 | repository. |
|
86 | 87 | |
|
87 | 88 | Use --to-normal to convert largefiles back to normal files; after |
|
88 | 89 | this, the DEST repository can be used without largefiles at all.""" |
|
89 | 90 | |
|
90 | 91 | opts = pycompat.byteskwargs(opts) |
|
91 | 92 | if opts[b'to_normal']: |
|
92 | 93 | tolfile = False |
|
93 | 94 | else: |
|
94 | 95 | tolfile = True |
|
95 | 96 | size = lfutil.getminsize(ui, True, opts.get(b'size'), default=None) |
|
96 | 97 | |
|
97 | 98 | if not hg.islocal(src): |
|
98 | 99 | raise error.Abort(_(b'%s is not a local Mercurial repo') % src) |
|
99 | 100 | if not hg.islocal(dest): |
|
100 | 101 | raise error.Abort(_(b'%s is not a local Mercurial repo') % dest) |
|
101 | 102 | |
|
102 | 103 | rsrc = hg.repository(ui, src) |
|
103 | 104 | ui.status(_(b'initializing destination %s\n') % dest) |
|
104 | 105 | rdst = hg.repository(ui, dest, create=True) |
|
105 | 106 | |
|
106 | 107 | success = False |
|
107 | 108 | dstwlock = dstlock = None |
|
108 | 109 | try: |
|
109 | 110 | # Get a list of all changesets in the source. The easy way to do this |
|
110 | 111 | # is to simply walk the changelog, using changelog.nodesbetween(). |
|
111 | 112 | # Take a look at mercurial/revlog.py:639 for more details. |
|
112 | 113 | # Use a generator instead of a list to decrease memory usage |
|
113 | 114 | ctxs = ( |
|
114 | 115 | rsrc[ctx] |
|
115 | 116 | for ctx in rsrc.changelog.nodesbetween(None, rsrc.heads())[0] |
|
116 | 117 | ) |
|
117 | 118 | revmap = {rsrc.nullid: rdst.nullid} |
|
118 | 119 | if tolfile: |
|
119 | 120 | # Lock destination to prevent modification while it is being converted.
|
120 | 121 | # Don't need to lock src because we are just reading from its |
|
121 | 122 | # history which can't change. |
|
122 | 123 | dstwlock = rdst.wlock() |
|
123 | 124 | dstlock = rdst.lock() |
|
124 | 125 | |
|
125 | 126 | lfiles = set() |
|
126 | 127 | normalfiles = set() |
|
127 | 128 | if not pats: |
|
128 | 129 | pats = ui.configlist(lfutil.longname, b'patterns') |
|
129 | 130 | if pats: |
|
130 | 131 | matcher = matchmod.match(rsrc.root, b'', list(pats)) |
|
131 | 132 | else: |
|
132 | 133 | matcher = None |
|
133 | 134 | |
|
134 | 135 | lfiletohash = {} |
|
135 | 136 | with ui.makeprogress( |
|
136 | 137 | _(b'converting revisions'), |
|
137 | 138 | unit=_(b'revisions'), |
|
138 | 139 | total=rsrc[b'tip'].rev(), |
|
139 | 140 | ) as progress: |
|
140 | 141 | for ctx in ctxs: |
|
141 | 142 | progress.update(ctx.rev()) |
|
142 | 143 | _lfconvert_addchangeset( |
|
143 | 144 | rsrc, |
|
144 | 145 | rdst, |
|
145 | 146 | ctx, |
|
146 | 147 | revmap, |
|
147 | 148 | lfiles, |
|
148 | 149 | normalfiles, |
|
149 | 150 | matcher, |
|
150 | 151 | size, |
|
151 | 152 | lfiletohash, |
|
152 | 153 | ) |
|
153 | 154 | |
|
154 | 155 | if rdst.wvfs.exists(lfutil.shortname): |
|
155 | 156 | rdst.wvfs.rmtree(lfutil.shortname) |
|
156 | 157 | |
|
157 | 158 | for f in lfiletohash.keys(): |
|
158 | 159 | if rdst.wvfs.isfile(f): |
|
159 | 160 | rdst.wvfs.unlink(f) |
|
160 | 161 | try: |
|
161 | 162 | rdst.wvfs.removedirs(rdst.wvfs.dirname(f)) |
|
162 | 163 | except OSError: |
|
163 | 164 | pass |
|
164 | 165 | |
|
165 | 166 | # If there were any files converted to largefiles, add largefiles |
|
166 | 167 | # to the destination repository's requirements. |
|
167 | 168 | if lfiles: |
|
168 | 169 | rdst.requirements.add(b'largefiles') |
|
169 | 170 | scmutil.writereporequirements(rdst) |
|
170 | 171 | else: |
|
171 | 172 | |
|
172 | 173 | class lfsource(filemap.filemap_source): |
|
173 | 174 | def __init__(self, ui, source): |
|
174 | 175 | super(lfsource, self).__init__(ui, source, None) |
|
175 | 176 | self.filemapper.rename[lfutil.shortname] = b'.' |
|
176 | 177 | |
|
177 | 178 | def getfile(self, name, rev): |
|
178 | 179 | realname, realrev = rev |
|
179 | 180 | f = super(lfsource, self).getfile(name, rev) |
|
180 | 181 | |
|
181 | 182 | if ( |
|
182 | 183 | not realname.startswith(lfutil.shortnameslash) |
|
183 | 184 | or f[0] is None |
|
184 | 185 | ): |
|
185 | 186 | return f |
|
186 | 187 | |
|
187 | 188 | # Substitute in the largefile data for the hash |
|
188 | 189 | hash = f[0].strip() |
|
189 | 190 | path = lfutil.findfile(rsrc, hash) |
|
190 | 191 | |
|
191 | 192 | if path is None: |
|
192 | 193 | raise error.Abort( |
|
193 | 194 | _(b"missing largefile for '%s' in %s") |
|
194 | 195 | % (realname, realrev) |
|
195 | 196 | ) |
|
196 | 197 | return util.readfile(path), f[1] |
|
197 | 198 | |
|
198 | 199 | class converter(convcmd.converter): |
|
199 | 200 | def __init__(self, ui, source, dest, revmapfile, opts): |
|
200 | 201 | src = lfsource(ui, source) |
|
201 | 202 | |
|
202 | 203 | super(converter, self).__init__( |
|
203 | 204 | ui, src, dest, revmapfile, opts |
|
204 | 205 | ) |
|
205 | 206 | |
|
206 | 207 | found, missing = downloadlfiles(ui, rsrc) |
|
207 | 208 | if missing != 0: |
|
208 | 209 | raise error.Abort(_(b"all largefiles must be present locally")) |
|
209 | 210 | |
|
210 | 211 | orig = convcmd.converter |
|
211 | 212 | convcmd.converter = converter |
|
212 | 213 | |
|
213 | 214 | try: |
|
214 | 215 | convcmd.convert( |
|
215 | 216 | ui, src, dest, source_type=b'hg', dest_type=b'hg' |
|
216 | 217 | ) |
|
217 | 218 | finally: |
|
218 | 219 | convcmd.converter = orig |
|
219 | 220 | success = True |
|
220 | 221 | finally: |
|
221 | 222 | if tolfile: |
|
222 | 223 | rdst.dirstate.clear() |
|
223 | 224 | release(dstlock, dstwlock) |
|
224 | 225 | if not success: |
|
225 | 226 | # we failed, remove the new directory |
|
226 | 227 | shutil.rmtree(rdst.root) |
|
227 | 228 | |
|
228 | 229 | |
|
229 | 230 | def _lfconvert_addchangeset( |
|
230 | 231 | rsrc, rdst, ctx, revmap, lfiles, normalfiles, matcher, size, lfiletohash |
|
231 | 232 | ): |
|
232 | 233 | # Convert src parents to dst parents |
|
233 | 234 | parents = _convertparents(ctx, revmap) |
|
234 | 235 | |
|
235 | 236 | # Generate list of changed files |
|
236 | 237 | files = _getchangedfiles(ctx, parents) |
|
237 | 238 | |
|
238 | 239 | dstfiles = [] |
|
239 | 240 | for f in files: |
|
240 | 241 | if f not in lfiles and f not in normalfiles: |
|
241 | 242 | islfile = _islfile(f, ctx, matcher, size) |
|
242 | 243 | # If this file was renamed or copied then copy |
|
243 | 244 | # the largefile-ness of its predecessor |
|
244 | 245 | if f in ctx.manifest(): |
|
245 | 246 | fctx = ctx.filectx(f) |
|
246 | 247 | renamed = fctx.copysource() |
|
247 | 248 | if renamed is None: |
|
248 | 249 | # the code below assumes renamed to be a boolean or a list |
|
249 | 250 | # and won't quite work with the value None |
|
250 | 251 | renamed = False |
|
251 | 252 | renamedlfile = renamed and renamed in lfiles |
|
252 | 253 | islfile |= renamedlfile |
|
253 | 254 | if b'l' in fctx.flags(): |
|
254 | 255 | if renamedlfile: |
|
255 | 256 | raise error.Abort( |
|
256 | 257 | _(b'renamed/copied largefile %s becomes symlink') |
|
257 | 258 | % f |
|
258 | 259 | ) |
|
259 | 260 | islfile = False |
|
260 | 261 | if islfile: |
|
261 | 262 | lfiles.add(f) |
|
262 | 263 | else: |
|
263 | 264 | normalfiles.add(f) |
|
264 | 265 | |
|
265 | 266 | if f in lfiles: |
|
266 | 267 | fstandin = lfutil.standin(f) |
|
267 | 268 | dstfiles.append(fstandin) |
|
268 | 269 | # largefile in manifest if it has not been removed/renamed |
|
269 | 270 | if f in ctx.manifest(): |
|
270 | 271 | fctx = ctx.filectx(f) |
|
271 | 272 | if b'l' in fctx.flags(): |
|
272 | 273 | renamed = fctx.copysource() |
|
273 | 274 | if renamed and renamed in lfiles: |
|
274 | 275 | raise error.Abort( |
|
275 | 276 | _(b'largefile %s becomes symlink') % f |
|
276 | 277 | ) |
|
277 | 278 | |
|
278 | 279 | # largefile was modified, update standins |
|
279 | 280 | m = hashutil.sha1(b'') |
|
280 | 281 | m.update(ctx[f].data()) |
|
281 | 282 | hash = hex(m.digest()) |
|
282 | 283 | if f not in lfiletohash or lfiletohash[f] != hash: |
|
283 | 284 | rdst.wwrite(f, ctx[f].data(), ctx[f].flags()) |
|
284 | 285 | executable = b'x' in ctx[f].flags() |
|
285 | 286 | lfutil.writestandin(rdst, fstandin, hash, executable) |
|
286 | 287 | lfiletohash[f] = hash |
|
287 | 288 | else: |
|
288 | 289 | # normal file |
|
289 | 290 | dstfiles.append(f) |
|
290 | 291 | |
|
291 | 292 | def getfilectx(repo, memctx, f): |
|
292 | 293 | srcfname = lfutil.splitstandin(f) |
|
293 | 294 | if srcfname is not None: |
|
294 | 295 | # if the file isn't in the manifest then it was removed |
|
295 | 296 | # or renamed, return None to indicate this |
|
296 | 297 | try: |
|
297 | 298 | fctx = ctx.filectx(srcfname) |
|
298 | 299 | except error.LookupError: |
|
299 | 300 | return None |
|
300 | 301 | renamed = fctx.copysource() |
|
301 | 302 | if renamed: |
|
302 | 303 | # standin is always a largefile because largefile-ness |
|
303 | 304 | # doesn't change after rename or copy |
|
304 | 305 | renamed = lfutil.standin(renamed) |
|
305 | 306 | |
|
306 | 307 | return context.memfilectx( |
|
307 | 308 | repo, |
|
308 | 309 | memctx, |
|
309 | 310 | f, |
|
310 | 311 | lfiletohash[srcfname] + b'\n', |
|
311 | 312 | b'l' in fctx.flags(), |
|
312 | 313 | b'x' in fctx.flags(), |
|
313 | 314 | renamed, |
|
314 | 315 | ) |
|
315 | 316 | else: |
|
316 | 317 | return _getnormalcontext(repo, ctx, f, revmap) |
|
317 | 318 | |
|
318 | 319 | # Commit |
|
319 | 320 | _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap) |
|
320 | 321 | |
|
321 | 322 | |
|
322 | 323 | def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap): |
|
323 | 324 | mctx = context.memctx( |
|
324 | 325 | rdst, |
|
325 | 326 | parents, |
|
326 | 327 | ctx.description(), |
|
327 | 328 | dstfiles, |
|
328 | 329 | getfilectx, |
|
329 | 330 | ctx.user(), |
|
330 | 331 | ctx.date(), |
|
331 | 332 | ctx.extra(), |
|
332 | 333 | ) |
|
333 | 334 | ret = rdst.commitctx(mctx) |
|
334 | 335 | lfutil.copyalltostore(rdst, ret) |
|
335 | 336 | rdst.setparents(ret) |
|
336 | 337 | revmap[ctx.node()] = rdst.changelog.tip() |
|
337 | 338 | |
|
338 | 339 | |
|
339 | 340 | # Generate list of changed files |
|
340 | 341 | def _getchangedfiles(ctx, parents): |
|
341 | 342 | files = set(ctx.files()) |
|
342 | 343 | if ctx.repo().nullid not in parents: |
|
343 | 344 | mc = ctx.manifest() |
|
344 | 345 | for pctx in ctx.parents(): |
|
345 | 346 | for fn in pctx.manifest().diff(mc): |
|
346 | 347 | files.add(fn) |
|
347 | 348 | return files |
|
348 | 349 | |
|
349 | 350 | |
|
350 | 351 | # Convert src parents to dst parents |
|
351 | 352 | def _convertparents(ctx, revmap): |
|
352 | 353 | parents = [] |
|
353 | 354 | for p in ctx.parents(): |
|
354 | 355 | parents.append(revmap[p.node()]) |
|
355 | 356 | while len(parents) < 2: |
|
356 | 357 | parents.append(ctx.repo().nullid) |
|
357 | 358 | return parents |
|
358 | 359 | |
|
359 | 360 | |
|
360 | 361 | # Get memfilectx for a normal file |
|
361 | 362 | def _getnormalcontext(repo, ctx, f, revmap): |
|
362 | 363 | try: |
|
363 | 364 | fctx = ctx.filectx(f) |
|
364 | 365 | except error.LookupError: |
|
365 | 366 | return None |
|
366 | 367 | renamed = fctx.copysource() |
|
367 | 368 | |
|
368 | 369 | data = fctx.data() |
|
369 | 370 | if f == b'.hgtags': |
|
370 | 371 | data = _converttags(repo.ui, revmap, data) |
|
371 | 372 | return context.memfilectx( |
|
372 | 373 | repo, ctx, f, data, b'l' in fctx.flags(), b'x' in fctx.flags(), renamed |
|
373 | 374 | ) |
|
374 | 375 | |
|
375 | 376 | |
|
376 | 377 | # Remap tag data using a revision map |
|
377 | 378 | def _converttags(ui, revmap, data): |
|
378 | 379 | newdata = [] |
|
379 | 380 | for line in data.splitlines(): |
|
380 | 381 | try: |
|
381 | 382 | id, name = line.split(b' ', 1) |
|
382 | 383 | except ValueError: |
|
383 | 384 | ui.warn(_(b'skipping incorrectly formatted tag %s\n') % line) |
|
384 | 385 | continue |
|
385 | 386 | try: |
|
386 | 387 | newid = bin(id) |
|
387 | 388 | except TypeError: |
|
388 | 389 | ui.warn(_(b'skipping incorrectly formatted id %s\n') % id) |
|
389 | 390 | continue |
|
390 | 391 | try: |
|
391 | 392 | newdata.append(b'%s %s\n' % (hex(revmap[newid]), name)) |
|
392 | 393 | except KeyError: |
|
393 | 394 | ui.warn(_(b'no mapping for id %s\n') % id) |
|
394 | 395 | continue |
|
395 | 396 | return b''.join(newdata) |
|
396 | 397 | |
|
397 | 398 | |
|
398 | 399 | def _islfile(file, ctx, matcher, size): |
|
399 | 400 | """Return true if file should be considered a largefile, i.e. |
|
400 | 401 | matcher matches it or it is larger than size.""" |
|
401 | 402 | # never store special .hg* files as largefiles |
|
402 | 403 | if file == b'.hgtags' or file == b'.hgignore' or file == b'.hgsigs': |
|
403 | 404 | return False |
|
404 | 405 | if matcher and matcher(file): |
|
405 | 406 | return True |
|
406 | 407 | try: |
|
407 | 408 | return ctx.filectx(file).size() >= size * 1024 * 1024 |
|
408 | 409 | except error.LookupError: |
|
409 | 410 | return False |
|
410 | 411 | |
|
411 | 412 | |
|
412 | 413 | def uploadlfiles(ui, rsrc, rdst, files): |
|
413 | 414 | '''upload largefiles to the central store''' |
|
414 | 415 | |
|
415 | 416 | if not files: |
|
416 | 417 | return |
|
417 | 418 | |
|
418 | 419 | store = storefactory.openstore(rsrc, rdst, put=True) |
|
419 | 420 | |
|
420 | 421 | at = 0 |
|
421 | 422 | ui.debug(b"sending statlfile command for %d largefiles\n" % len(files)) |
|
422 | 423 | retval = store.exists(files) |
|
423 | 424 | files = [h for h in files if not retval[h]] |
|
424 | 425 | ui.debug(b"%d largefiles need to be uploaded\n" % len(files)) |
|
425 | 426 | |
|
426 | 427 | with ui.makeprogress( |
|
427 | 428 | _(b'uploading largefiles'), unit=_(b'files'), total=len(files) |
|
428 | 429 | ) as progress: |
|
429 | 430 | for hash in files: |
|
430 | 431 | progress.update(at) |
|
431 | 432 | source = lfutil.findfile(rsrc, hash) |
|
432 | 433 | if not source: |
|
433 | 434 | raise error.Abort( |
|
434 | 435 | _( |
|
435 | 436 | b'largefile %s missing from store' |
|
436 | 437 | b' (needs to be uploaded)' |
|
437 | 438 | ) |
|
438 | 439 | % hash |
|
439 | 440 | ) |
|
440 | 441 | # XXX check for errors here |
|
441 | 442 | store.put(source, hash) |
|
442 | 443 | at += 1 |
|
443 | 444 | |
|
444 | 445 | |
|
445 | 446 | def verifylfiles(ui, repo, all=False, contents=False): |
|
446 | 447 | """Verify that every largefile revision in the current changeset |
|
447 | 448 | exists in the central store. With --contents, also verify that |
|
448 | 449 | the contents of each local largefile file revision are correct (SHA-1 hash |
|
449 | 450 | matches the revision ID). With --all, check every changeset in |
|
450 | 451 | this repository.""" |
|
451 | 452 | if all: |
|
452 | 453 | revs = repo.revs(b'all()') |
|
453 | 454 | else: |
|
454 | 455 | revs = [b'.'] |
|
455 | 456 | |
|
456 | 457 | store = storefactory.openstore(repo) |
|
457 | 458 | return store.verify(revs, contents=contents) |
|
458 | 459 | |
|
459 | 460 | |
|
460 | 461 | def cachelfiles(ui, repo, node, filelist=None): |
|
461 | 462 | """cachelfiles ensures that all largefiles needed by the specified revision |
|
462 | 463 | are present in the repository's largefile cache. |
|
463 | 464 | |
|
464 | 465 | returns a tuple (cached, missing). cached is the list of files downloaded |
|
465 | 466 | by this operation; missing is the list of files that were needed but could |
|
466 | 467 | not be found.""" |
|
467 | 468 | lfiles = lfutil.listlfiles(repo, node) |
|
468 | 469 | if filelist: |
|
469 | 470 | lfiles = set(lfiles) & set(filelist) |
|
470 | 471 | toget = [] |
|
471 | 472 | |
|
472 | 473 | ctx = repo[node] |
|
473 | 474 | for lfile in lfiles: |
|
474 | 475 | try: |
|
475 | 476 | expectedhash = lfutil.readasstandin(ctx[lfutil.standin(lfile)]) |
|
476 | 477 | except IOError as err: |
|
477 | 478 | if err.errno == errno.ENOENT: |
|
478 | 479 | continue # node must be None and standin wasn't found in wctx |
|
479 | 480 | raise |
|
480 | 481 | if not lfutil.findfile(repo, expectedhash): |
|
481 | 482 | toget.append((lfile, expectedhash)) |
|
482 | 483 | |
|
483 | 484 | if toget: |
|
484 | 485 | store = storefactory.openstore(repo) |
|
485 | 486 | ret = store.get(toget) |
|
486 | 487 | return ret |
|
487 | 488 | |
|
488 | 489 | return ([], []) |
|
489 | 490 | |
|
490 | 491 | |
|
491 | 492 | def downloadlfiles(ui, repo): |
|
492 | 493 | tonode = repo.changelog.node |
|
493 | 494 | totalsuccess = 0 |
|
494 | 495 | totalmissing = 0 |
|
495 | 496 | for rev in repo.revs(b'file(%s)', b'path:' + lfutil.shortname): |
|
496 | 497 | success, missing = cachelfiles(ui, repo, tonode(rev)) |
|
497 | 498 | totalsuccess += len(success) |
|
498 | 499 | totalmissing += len(missing) |
|
499 | 500 | ui.status(_(b"%d additional largefiles cached\n") % totalsuccess) |
|
500 | 501 | if totalmissing > 0: |
|
501 | 502 | ui.status(_(b"%d largefiles failed to download\n") % totalmissing) |
|
502 | 503 | return totalsuccess, totalmissing |
|
503 | 504 | |
|
504 | 505 | |
|
505 | 506 | def updatelfiles( |
|
506 | 507 | ui, repo, filelist=None, printmessage=None, normallookup=False |
|
507 | 508 | ): |
|
508 | 509 | """Update largefiles according to standins in the working directory |
|
509 | 510 | |
|
510 | 511 | If ``printmessage`` is other than ``None``, it means "print (or |
|
511 | 512 | ignore, for false) message forcibly". |
|
512 | 513 | """ |
|
513 | 514 | statuswriter = lfutil.getstatuswriter(ui, repo, printmessage) |
|
514 | 515 | with repo.wlock(): |
|
515 | 516 | lfdirstate = lfutil.openlfdirstate(ui, repo) |
|
516 | 517 | lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate) |
|
517 | 518 | |
|
518 | 519 | if filelist is not None: |
|
519 | 520 | filelist = set(filelist) |
|
520 | 521 | lfiles = [f for f in lfiles if f in filelist] |
|
521 | 522 | |
|
522 | 523 | with lfdirstate.parentchange(): |
|
523 | 524 | update = {} |
|
524 | 525 | dropped = set() |
|
525 | 526 | updated, removed = 0, 0 |
|
526 | 527 | wvfs = repo.wvfs |
|
527 | 528 | wctx = repo[None] |
|
528 | 529 | for lfile in lfiles: |
|
529 | 530 | lfileorig = os.path.relpath( |
|
530 | 531 | scmutil.backuppath(ui, repo, lfile), start=repo.root |
|
531 | 532 | ) |
|
532 | 533 | standin = lfutil.standin(lfile) |
|
533 | 534 | standinorig = os.path.relpath( |
|
534 | 535 | scmutil.backuppath(ui, repo, standin), start=repo.root |
|
535 | 536 | ) |
|
536 | 537 | if wvfs.exists(standin): |
|
537 | 538 | if wvfs.exists(standinorig) and wvfs.exists(lfile): |
|
538 | 539 | shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig)) |
|
539 | 540 | wvfs.unlinkpath(standinorig) |
|
540 | 541 | expecthash = lfutil.readasstandin(wctx[standin]) |
|
541 | 542 | if expecthash != b'': |
|
542 | 543 | if lfile not in wctx: # not switched to normal file |
|
543 | if repo.dirstate[standin] != b'?': | 

544 | if repo.dirstate.get_entry(standin).any_tracked: | 
|
544 | 545 | wvfs.unlinkpath(lfile, ignoremissing=True) |
|
545 | 546 | else: |
|
546 | 547 | dropped.add(lfile) |
|
547 | 548 | |
|
548 | 549 | # use normallookup() to allocate an entry in largefiles |
|
549 | 550 | # dirstate to prevent lfilesrepo.status() from reporting |
|
550 | 551 | # missing files as removed. |
|
551 | 552 | lfdirstate.update_file( |
|
552 | 553 | lfile, |
|
553 | 554 | p1_tracked=True, |
|
554 | 555 | wc_tracked=True, |
|
555 | 556 | possibly_dirty=True, |
|
556 | 557 | ) |
|
557 | 558 | update[lfile] = expecthash |
|
558 | 559 | else: |
|
559 | 560 | # Remove lfiles for which the standin is deleted, unless the |
|
560 | 561 | # lfile is added to the repository again. This happens when a |
|
561 | 562 | # largefile is converted back to a normal file: the standin |
|
562 | 563 | # disappears, but a new (normal) file appears as the lfile. |
|
563 | 564 | if ( |
|
564 | 565 | wvfs.exists(lfile) |
|
565 | 566 | and repo.dirstate.normalize(lfile) not in wctx |
|
566 | 567 | ): |
|
567 | 568 | wvfs.unlinkpath(lfile) |
|
568 | 569 | removed += 1 |
|
569 | 570 | |
|
570 | 571 | # largefile processing might be slow and be interrupted - be prepared |
|
571 | lfdirstate.write() | |
|
572 | lfdirstate.write(repo.currenttransaction()) | |
|
572 | 573 | |
|
573 | 574 | if lfiles: |
|
574 | 575 | lfiles = [f for f in lfiles if f not in dropped] |
|
575 | 576 | |
|
576 | 577 | for f in dropped: |
|
577 | 578 | repo.wvfs.unlinkpath(lfutil.standin(f)) |
|
578 | 579 | # This needs to happen for dropped files, otherwise they stay in |
|
579 | 580 | # the M state. |
|
580 | lfdirstate._drop(f) | 

581 | lfdirstate._map.reset_state(f) | 
|
581 | 582 | |
|
582 | 583 | statuswriter(_(b'getting changed largefiles\n')) |
|
583 | 584 | cachelfiles(ui, repo, None, lfiles) |
|
584 | 585 | |
|
585 | 586 | with lfdirstate.parentchange(): |
|
586 | 587 | for lfile in lfiles: |
|
587 | 588 | update1 = 0 |
|
588 | 589 | |
|
589 | 590 | expecthash = update.get(lfile) |
|
590 | 591 | if expecthash: |
|
591 | 592 | if not lfutil.copyfromcache(repo, expecthash, lfile): |
|
592 | 593 | # failed ... but already removed and set to normallookup |
|
593 | 594 | continue |
|
594 | 595 | # Synchronize largefile dirstate to the last modified |
|
595 | 596 | # time of the file |
|
596 | 597 | lfdirstate.update_file( |
|
597 | 598 | lfile, p1_tracked=True, wc_tracked=True |
|
598 | 599 | ) |
|
599 | 600 | update1 = 1 |
|
600 | 601 | |
|
601 | 602 | # copy the exec mode of largefile standin from the repository's |
|
602 | 603 | # dirstate to its state in the lfdirstate. |
|
603 | 604 | standin = lfutil.standin(lfile) |
|
604 | 605 | if wvfs.exists(standin): |
|
605 | 606 | # exec is decided by the user's permissions using mask 0o100
|
606 | 607 | standinexec = wvfs.stat(standin).st_mode & 0o100 |
|
607 | 608 | st = wvfs.stat(lfile) |
|
608 | 609 | mode = st.st_mode |
|
609 | 610 | if standinexec != mode & 0o100: |
|
610 | 611 | # first remove all X bits, then shift all R bits to X |
|
611 | 612 | mode &= ~0o111 |
|
612 | 613 | if standinexec: |
|
613 | 614 | mode |= (mode >> 2) & 0o111 & ~util.umask |
|
614 | 615 | wvfs.chmod(lfile, mode) |
|
615 | 616 | update1 = 1 |
|
616 | 617 | |
|
617 | 618 | updated += update1 |
|
618 | 619 | |
|
619 | 620 | lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup) |
|
620 | 621 | |
|
621 | lfdirstate.write() | |
|
622 | lfdirstate.write(repo.currenttransaction()) | |
|
622 | 623 | if lfiles: |
|
623 | 624 | statuswriter( |
|
624 | 625 | _(b'%d largefiles updated, %d removed\n') % (updated, removed) |
|
625 | 626 | ) |
|
626 | 627 | |
|
627 | 628 | |
|
628 | 629 | @eh.command( |
|
629 | 630 | b'lfpull', |
|
630 | 631 | [(b'r', b'rev', [], _(b'pull largefiles for these revisions'))] |
|
631 | 632 | + cmdutil.remoteopts, |
|
632 | 633 | _(b'-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'), |
|
633 | 634 | ) |
|
634 | 635 | def lfpull(ui, repo, source=b"default", **opts): |
|
635 | 636 | """pull largefiles for the specified revisions from the specified source |
|
636 | 637 | |
|
637 | 638 | Pull largefiles that are referenced from local changesets but missing |
|
638 | 639 | locally, pulling from a remote repository to the local cache. |
|
639 | 640 | |
|
640 | 641 | If SOURCE is omitted, the 'default' path will be used. |
|
641 | 642 | See :hg:`help urls` for more information. |
|
642 | 643 | |
|
643 | 644 | .. container:: verbose |
|
644 | 645 | |
|
645 | 646 | Some examples: |
|
646 | 647 | |
|
647 | 648 | - pull largefiles for all branch heads:: |
|
648 | 649 | |
|
649 | 650 | hg lfpull -r "head() and not closed()" |
|
650 | 651 | |
|
651 | 652 | - pull largefiles on the default branch:: |
|
652 | 653 | |
|
653 | 654 | hg lfpull -r "branch(default)" |
|
654 | 655 | """ |
|
655 | 656 | repo.lfpullsource = source |
|
656 | 657 | |
|
657 | 658 | revs = opts.get('rev', []) |
|
658 | 659 | if not revs: |
|
659 | 660 | raise error.Abort(_(b'no revisions specified')) |
|
660 | revs = scmutil.revrange(repo, revs) | 

661 | revs = logcmdutil.revrange(repo, revs) | 
|
661 | 662 | |
|
662 | 663 | numcached = 0 |
|
663 | 664 | for rev in revs: |
|
664 | 665 | ui.note(_(b'pulling largefiles for revision %d\n') % rev) |
|
665 | 666 | (cached, missing) = cachelfiles(ui, repo, rev) |
|
666 | 667 | numcached += len(cached) |
|
667 | 668 | ui.status(_(b"%d largefiles cached\n") % numcached) |
|
668 | 669 | |
|
669 | 670 | |
|
670 | 671 | @eh.command(b'debuglfput', [] + cmdutil.remoteopts, _(b'FILE')) |
|
671 | 672 | def debuglfput(ui, repo, filepath, **kwargs): |
|
672 | 673 | hash = lfutil.hashfile(filepath) |
|
673 | 674 | storefactory.openstore(repo).put(filepath, hash) |
|
674 | 675 | ui.write(b'%s\n' % hash) |
|
675 | 676 | return 0 |
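
One behavioral change in the hunks above is easy to miss: both `lfdirstate.write()` call sites now pass `repo.currenttransaction()`, so when a transaction is open the largefiles dirstate file gets backed up before being written (see the `largefilesdirstate.write()` change in the next file). A rough sketch of the calling pattern, under assumed simplified conditions; not a drop-in for the code above:

    # Sketch only: update the largefiles dirstate under the wlock and hand
    # the current transaction (None when no transaction is open) to write().
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        # ... update entries via lfdirstate.update_file(...) ...
        lfdirstate.write(repo.currenttransaction())
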
@@ -1,798 +1,790 b'' | |||
|
1 | 1 | # Copyright 2009-2010 Gregory P. Ward |
|
2 | 2 | # Copyright 2009-2010 Intelerad Medical Systems Incorporated |
|
3 | 3 | # Copyright 2010-2011 Fog Creek Software |
|
4 | 4 | # Copyright 2010-2011 Unity Technologies |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | '''largefiles utility code: must not import other modules in this package.''' |
|
10 | 10 | from __future__ import absolute_import |
|
11 | 11 | |
|
12 | 12 | import contextlib |
|
13 | 13 | import copy |
|
14 | 14 | import os |
|
15 | 15 | import stat |
|
16 | 16 | |
|
17 | 17 | from mercurial.i18n import _ |
|
18 | 18 | from mercurial.node import hex |
|
19 | 19 | from mercurial.pycompat import open |
|
20 | 20 | |
|
21 | 21 | from mercurial import ( |
|
22 | 22 | dirstate, |
|
23 | 23 | encoding, |
|
24 | 24 | error, |
|
25 | 25 | httpconnection, |
|
26 | 26 | match as matchmod, |
|
27 | 27 | pycompat, |
|
28 | 28 | requirements, |
|
29 | 29 | scmutil, |
|
30 | 30 | sparse, |
|
31 | 31 | util, |
|
32 | 32 | vfs as vfsmod, |
|
33 | 33 | ) |
|
34 | 34 | from mercurial.utils import hashutil |
|
35 | 35 | |
|
36 | 36 | shortname = b'.hglf' |
|
37 | 37 | shortnameslash = shortname + b'/' |
|
38 | 38 | longname = b'largefiles' |
|
39 | 39 | |
|
40 | 40 | # -- Private worker functions ------------------------------------------ |
|
41 | 41 | |
|
42 | 42 | |
|
43 | 43 | @contextlib.contextmanager |
|
44 | 44 | def lfstatus(repo, value=True): |
|
45 | 45 | oldvalue = getattr(repo, 'lfstatus', False) |
|
46 | 46 | repo.lfstatus = value |
|
47 | 47 | try: |
|
48 | 48 | yield |
|
49 | 49 | finally: |
|
50 | 50 | repo.lfstatus = oldvalue |
|
51 | 51 | |
|
52 | 52 | |
|
53 | 53 | def getminsize(ui, assumelfiles, opt, default=10): |
|
54 | 54 | lfsize = opt |
|
55 | 55 | if not lfsize and assumelfiles: |
|
56 | 56 | lfsize = ui.config(longname, b'minsize', default=default) |
|
57 | 57 | if lfsize: |
|
58 | 58 | try: |
|
59 | 59 | lfsize = float(lfsize) |
|
60 | 60 | except ValueError: |
|
61 | 61 | raise error.Abort( |
|
62 | 62 | _(b'largefiles: size must be a number (not %s)\n') % lfsize
|
63 | 63 | ) |
|
64 | 64 | if lfsize is None: |
|
65 | 65 | raise error.Abort(_(b'minimum size for largefiles must be specified')) |
|
66 | 66 | return lfsize |
|
67 | 67 | |
|
68 | 68 | |
|
69 | 69 | def link(src, dest): |
|
70 | 70 | """Try to create hardlink - if that fails, efficiently make a copy.""" |
|
71 | 71 | util.makedirs(os.path.dirname(dest)) |
|
72 | 72 | try: |
|
73 | 73 | util.oslink(src, dest) |
|
74 | 74 | except OSError: |
|
75 | 75 | # if hardlinks fail, fallback on atomic copy |
|
76 | 76 | with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf: |
|
77 | 77 | for chunk in util.filechunkiter(srcf): |
|
78 | 78 | dstf.write(chunk) |
|
79 | 79 | os.chmod(dest, os.stat(src).st_mode) |
|
80 | 80 | |
|
81 | 81 | |
|
82 | 82 | def usercachepath(ui, hash): |
|
83 | 83 | """Return the correct location in the "global" largefiles cache for a file |
|
84 | 84 | with the given hash. |
|
85 | 85 | This cache is used for sharing of largefiles across repositories - both |
|
86 | 86 | to preserve download bandwidth and storage space.""" |
|
87 | 87 | return os.path.join(_usercachedir(ui), hash) |
|
88 | 88 | |
|
89 | 89 | |
|
90 | 90 | def _usercachedir(ui, name=longname): |
|
91 | 91 | '''Return the location of the "global" largefiles cache.''' |
|
92 | 92 | path = ui.configpath(name, b'usercache') |
|
93 | 93 | if path: |
|
94 | 94 | return path |
|
95 | 95 | |
|
96 | 96 | hint = None |
|
97 | 97 | |
|
98 | 98 | if pycompat.iswindows: |
|
99 | 99 | appdata = encoding.environ.get( |
|
100 | 100 | b'LOCALAPPDATA', encoding.environ.get(b'APPDATA') |
|
101 | 101 | ) |
|
102 | 102 | if appdata: |
|
103 | 103 | return os.path.join(appdata, name) |
|
104 | 104 | |
|
105 | 105 | hint = _(b"define %s or %s in the environment, or set %s.usercache") % ( |
|
106 | 106 | b"LOCALAPPDATA", |
|
107 | 107 | b"APPDATA", |
|
108 | 108 | name, |
|
109 | 109 | ) |
|
110 | 110 | elif pycompat.isdarwin: |
|
111 | 111 | home = encoding.environ.get(b'HOME') |
|
112 | 112 | if home: |
|
113 | 113 | return os.path.join(home, b'Library', b'Caches', name) |
|
114 | 114 | |
|
115 | 115 | hint = _(b"define %s in the environment, or set %s.usercache") % ( |
|
116 | 116 | b"HOME", |
|
117 | 117 | name, |
|
118 | 118 | ) |
|
119 | 119 | elif pycompat.isposix: |
|
120 | 120 | path = encoding.environ.get(b'XDG_CACHE_HOME') |
|
121 | 121 | if path: |
|
122 | 122 | return os.path.join(path, name) |
|
123 | 123 | home = encoding.environ.get(b'HOME') |
|
124 | 124 | if home: |
|
125 | 125 | return os.path.join(home, b'.cache', name) |
|
126 | 126 | |
|
127 | 127 | hint = _(b"define %s or %s in the environment, or set %s.usercache") % ( |
|
128 | 128 | b"XDG_CACHE_HOME", |
|
129 | 129 | b"HOME", |
|
130 | 130 | name, |
|
131 | 131 | ) |
|
132 | 132 | else: |
|
133 | 133 | raise error.Abort( |
|
134 | 134 | _(b'unknown operating system: %s\n') % pycompat.osname |
|
135 | 135 | ) |
|
136 | 136 | |
|
137 | 137 | raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint) |
|
138 | 138 | |
|
139 | 139 | |
|
140 | 140 | def inusercache(ui, hash): |
|
141 | 141 | path = usercachepath(ui, hash) |
|
142 | 142 | return os.path.exists(path) |
|
143 | 143 | |
|
144 | 144 | |
|
145 | 145 | def findfile(repo, hash): |
|
146 | 146 | """Return store path of the largefile with the specified hash. |
|
147 | 147 | As a side effect, the file might be linked from user cache. |
|
148 | 148 | Return None if the file can't be found locally.""" |
|
149 | 149 | path, exists = findstorepath(repo, hash) |
|
150 | 150 | if exists: |
|
151 | 151 | repo.ui.note(_(b'found %s in store\n') % hash) |
|
152 | 152 | return path |
|
153 | 153 | elif inusercache(repo.ui, hash): |
|
154 | 154 | repo.ui.note(_(b'found %s in system cache\n') % hash) |
|
155 | 155 | path = storepath(repo, hash) |
|
156 | 156 | link(usercachepath(repo.ui, hash), path) |
|
157 | 157 | return path |
|
158 | 158 | return None |
|
159 | 159 | |
|
160 | 160 | |
|
161 | 161 | class largefilesdirstate(dirstate.dirstate): |
|
162 | 162 | def __getitem__(self, key): |
|
163 | 163 | return super(largefilesdirstate, self).__getitem__(unixpath(key)) |
|
164 | 164 | |
|
165 | 165 | def set_tracked(self, f): |
|
166 | 166 | return super(largefilesdirstate, self).set_tracked(unixpath(f)) |
|
167 | 167 | |
|
168 | 168 | def set_untracked(self, f): |
|
169 | 169 | return super(largefilesdirstate, self).set_untracked(unixpath(f)) |
|
170 | 170 | |
|
171 | 171 | def normal(self, f, parentfiledata=None): |
|
172 | 172 | # not sure if we should pass the `parentfiledata` down or throw it |
|
173 | 173 | # away. So throwing it away to stay on the safe side. |
|
174 | 174 | return super(largefilesdirstate, self).normal(unixpath(f)) |
|
175 | 175 | |
|
176 | 176 | def remove(self, f): |
|
177 | 177 | return super(largefilesdirstate, self).remove(unixpath(f)) |
|
178 | 178 | |
|
179 | 179 | def add(self, f): |
|
180 | 180 | return super(largefilesdirstate, self).add(unixpath(f)) |
|
181 | 181 | |
|
182 | 182 | def drop(self, f): |
|
183 | 183 | return super(largefilesdirstate, self).drop(unixpath(f)) |
|
184 | 184 | |
|
185 | 185 | def forget(self, f): |
|
186 | 186 | return super(largefilesdirstate, self).forget(unixpath(f)) |
|
187 | 187 | |
|
188 | 188 | def normallookup(self, f): |
|
189 | 189 | return super(largefilesdirstate, self).normallookup(unixpath(f)) |
|
190 | 190 | |
|
191 | 191 | def _ignore(self, f): |
|
192 | 192 | return False |
|
193 | 193 | |
|
194 | def write(self, tr=False): | 

194 | def write(self, tr): | 
|
195 | 195 | # (1) disable PENDING mode always |
|
196 | 196 | # (lfdirstate isn't yet managed as a part of the transaction) |
|
197 | 197 | # (2) avoid develwarn 'use dirstate.write with ....' |
|
198 | if tr: | |
|
199 | tr.addbackup(b'largefiles/dirstate', location=b'plain') | |
|
198 | 200 | super(largefilesdirstate, self).write(None) |
|
199 | 201 | |
|
200 | 202 | |
|
201 | 203 | def openlfdirstate(ui, repo, create=True): |
|
202 | 204 | """ |
|
203 | 205 | Return a dirstate object that tracks largefiles: i.e. its root is |
|
204 | 206 | the repo root, but it is saved in .hg/largefiles/dirstate. |
|
205 | 207 | """ |
|
206 | 208 | vfs = repo.vfs |
|
207 | 209 | lfstoredir = longname |
|
208 | 210 | opener = vfsmod.vfs(vfs.join(lfstoredir)) |
|
209 | 211 | use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements |
|
210 | 212 | lfdirstate = largefilesdirstate( |
|
211 | 213 | opener, |
|
212 | 214 | ui, |
|
213 | 215 | repo.root, |
|
214 | 216 | repo.dirstate._validate, |
|
215 | 217 | lambda: sparse.matcher(repo), |
|
216 | 218 | repo.nodeconstants, |
|
217 | 219 | use_dirstate_v2, |
|
218 | 220 | ) |
|
219 | 221 | |
|
220 | 222 | # If the largefiles dirstate does not exist, populate and create |
|
221 | 223 | # it. This ensures that we create it on the first meaningful |
|
222 | 224 | # largefiles operation in a new clone. |
|
223 | 225 | if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')): |
|
224 | 226 | matcher = getstandinmatcher(repo) |
|
225 | 227 | standins = repo.dirstate.walk( |
|
226 | 228 | matcher, subrepos=[], unknown=False, ignored=False |
|
227 | 229 | ) |
|
228 | 230 | |
|
229 | 231 | if len(standins) > 0: |
|
230 | 232 | vfs.makedirs(lfstoredir) |
|
231 | 233 | |
|
232 | 234 | with lfdirstate.parentchange(): |
|
233 | 235 | for standin in standins: |
|
234 | 236 | lfile = splitstandin(standin) |
|
235 | 237 | lfdirstate.update_file( |
|
236 | 238 | lfile, p1_tracked=True, wc_tracked=True, possibly_dirty=True |
|
237 | 239 | ) |
|
238 | 240 | return lfdirstate |
|
239 | 241 | |
|
240 | 242 | |
|
241 | 243 | def lfdirstatestatus(lfdirstate, repo): |
|
242 | 244 | pctx = repo[b'.'] |
|
243 | 245 | match = matchmod.always() |
|
244 | 246 | unsure, s = lfdirstate.status( |
|
245 | 247 | match, subrepos=[], ignored=False, clean=False, unknown=False |
|
246 | 248 | ) |
|
247 | 249 | modified, clean = s.modified, s.clean |
|
248 | 250 | for lfile in unsure: |
|
249 | 251 | try: |
|
250 | 252 | fctx = pctx[standin(lfile)] |
|
251 | 253 | except LookupError: |
|
252 | 254 | fctx = None |
|
253 | 255 | if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)): |
|
254 | 256 | modified.append(lfile) |
|
255 | 257 | else: |
|
256 | 258 | clean.append(lfile) |
|
257 | 259 | lfdirstate.set_clean(lfile) |
|
258 | 260 | return s |
|
259 | 261 | |
|
260 | 262 | |
|
261 | 263 | def listlfiles(repo, rev=None, matcher=None): |
|
262 | 264 | """return a list of largefiles in the working copy or the |
|
263 | 265 | specified changeset""" |
|
264 | 266 | |
|
265 | 267 | if matcher is None: |
|
266 | 268 | matcher = getstandinmatcher(repo) |
|
267 | 269 | |
|
268 | 270 | # ignore unknown files in working directory |
|
269 | 271 | return [ |
|
270 | 272 | splitstandin(f) |
|
271 | 273 | for f in repo[rev].walk(matcher) |
|
272 | if rev is not None or repo.dirstate[f] != b'?' |

274 | if rev is not None or repo.dirstate.get_entry(f).any_tracked |
|
273 | 275 | ] |
|
274 | 276 | |
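The predicate change above is one instance of a wider migration in this commit from one-letter dirstate states to entry objects. A rough sketch of the equivalence, where FakeEntry is an illustrative stand-in for what repo.dirstate.get_entry() returns:

    class FakeEntry:
        def __init__(self, state):
            self._state = state

        @property
        def any_tracked(self):
            # the old API exposed one-letter states; b'?' meant "unknown to
            # the dirstate", while b'n', b'a', b'r', b'm' are all tracked in
            # some way
            return self._state != b'?'


    # old style: repo.dirstate[f] != b'?'
    # new style: repo.dirstate.get_entry(f).any_tracked
    assert FakeEntry(b'n').any_tracked
    assert not FakeEntry(b'?').any_tracked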
|
275 | 277 | |
|
276 | 278 | def instore(repo, hash, forcelocal=False): |
|
277 | 279 | '''Return true if a largefile with the given hash exists in the store''' |
|
278 | 280 | return os.path.exists(storepath(repo, hash, forcelocal)) |
|
279 | 281 | |
|
280 | 282 | |
|
281 | 283 | def storepath(repo, hash, forcelocal=False): |
|
282 | 284 | """Return the correct location in the repository largefiles store for a |
|
283 | 285 | file with the given hash.""" |
|
284 | 286 | if not forcelocal and repo.shared(): |
|
285 | 287 | return repo.vfs.reljoin(repo.sharedpath, longname, hash) |
|
286 | 288 | return repo.vfs.join(longname, hash) |
|
287 | 289 | |
|
288 | 290 | |
|
289 | 291 | def findstorepath(repo, hash): |
|
290 | 292 | """Search through the local store path(s) to find the file for the given |
|
291 | 293 | hash. If the file is not found, its path in the primary store is returned. |
|
292 | 294 | The return value is a tuple of (path, exists(path)). |
|
293 | 295 | """ |
|
294 | 296 | # For shared repos, the primary store is in the share source. But for |
|
295 | 297 | # backward compatibility, force a lookup in the local store if it wasn't |
|
296 | 298 | # found in the share source. |
|
297 | 299 | path = storepath(repo, hash, False) |
|
298 | 300 | |
|
299 | 301 | if instore(repo, hash): |
|
300 | 302 | return (path, True) |
|
301 | 303 | elif repo.shared() and instore(repo, hash, True): |
|
302 | 304 | return storepath(repo, hash, True), True |
|
303 | 305 | |
|
304 | 306 | return (path, False) |
|
305 | 307 | |
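The precedence implemented by findstorepath() can be summarized with a small sketch; booleans stand in for the actual instore() checks and storelookup is an illustrative name:

    def storelookup(in_primary, in_local, shared):
        # primary store (the share source, for shared repos) wins; a shared
        # repo falls back to its local store for backward compatibility
        if in_primary:
            return ('primary', True)
        if shared and in_local:
            return ('local', True)
        return ('primary', False)  # path where the file *would* live


    assert storelookup(True, False, shared=True) == ('primary', True)
    assert storelookup(False, True, shared=True) == ('local', True)
    assert storelookup(False, True, shared=False) == ('primary', False)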
|
306 | 308 | |
|
307 | 309 | def copyfromcache(repo, hash, filename): |
|
308 | 310 | """Copy the specified largefile from the repo or system cache to |
|
309 | 311 | filename in the repository. Return true on success or false if the |
|
310 | 312 | file was not found in either cache (which should not happen:
|
311 | 313 | this is meant to be called only after ensuring that the needed |
|
312 | 314 | largefile exists in the cache).""" |
|
313 | 315 | wvfs = repo.wvfs |
|
314 | 316 | path = findfile(repo, hash) |
|
315 | 317 | if path is None: |
|
316 | 318 | return False |
|
317 | 319 | wvfs.makedirs(wvfs.dirname(wvfs.join(filename))) |
|
318 | 320 | # The write may fail before the file is fully written, but we |
|
319 | 321 | # don't use atomic writes in the working copy. |
|
320 | 322 | with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd: |
|
321 | 323 | gothash = copyandhash(util.filechunkiter(srcfd), destfd) |
|
322 | 324 | if gothash != hash: |
|
323 | 325 | repo.ui.warn( |
|
324 | 326 | _(b'%s: data corruption in %s with hash %s\n') |
|
325 | 327 | % (filename, path, gothash) |
|
326 | 328 | ) |
|
327 | 329 | wvfs.unlink(filename) |
|
328 | 330 | return False |
|
329 | 331 | return True |
|
330 | 332 | |
|
331 | 333 | |
|
332 | 334 | def copytostore(repo, ctx, file, fstandin): |
|
333 | 335 | wvfs = repo.wvfs |
|
334 | 336 | hash = readasstandin(ctx[fstandin]) |
|
335 | 337 | if instore(repo, hash): |
|
336 | 338 | return |
|
337 | 339 | if wvfs.exists(file): |
|
338 | 340 | copytostoreabsolute(repo, wvfs.join(file), hash) |
|
339 | 341 | else: |
|
340 | 342 | repo.ui.warn( |
|
341 | 343 | _(b"%s: largefile %s not available from local store\n") |
|
342 | 344 | % (file, hash) |
|
343 | 345 | ) |
|
344 | 346 | |
|
345 | 347 | |
|
346 | 348 | def copyalltostore(repo, node): |
|
347 | 349 | '''Copy all largefiles in a given revision to the store''' |
|
348 | 350 | |
|
349 | 351 | ctx = repo[node] |
|
350 | 352 | for filename in ctx.files(): |
|
351 | 353 | realfile = splitstandin(filename) |
|
352 | 354 | if realfile is not None and filename in ctx.manifest(): |
|
353 | 355 | copytostore(repo, ctx, realfile, filename) |
|
354 | 356 | |
|
355 | 357 | |
|
356 | 358 | def copytostoreabsolute(repo, file, hash): |
|
357 | 359 | if inusercache(repo.ui, hash): |
|
358 | 360 | link(usercachepath(repo.ui, hash), storepath(repo, hash)) |
|
359 | 361 | else: |
|
360 | 362 | util.makedirs(os.path.dirname(storepath(repo, hash))) |
|
361 | 363 | with open(file, b'rb') as srcf: |
|
362 | 364 | with util.atomictempfile( |
|
363 | 365 | storepath(repo, hash), createmode=repo.store.createmode |
|
364 | 366 | ) as dstf: |
|
365 | 367 | for chunk in util.filechunkiter(srcf): |
|
366 | 368 | dstf.write(chunk) |
|
367 | 369 | linktousercache(repo, hash) |
|
368 | 370 | |
|
369 | 371 | |
|
370 | 372 | def linktousercache(repo, hash): |
|
371 | 373 | """Link / copy the largefile with the specified hash from the store |
|
372 | 374 | to the cache.""" |
|
373 | 375 | path = usercachepath(repo.ui, hash) |
|
374 | 376 | link(storepath(repo, hash), path) |
|
375 | 377 | |
|
376 | 378 | |
|
377 | 379 | def getstandinmatcher(repo, rmatcher=None): |
|
378 | 380 | '''Return a match object that applies rmatcher to the standin directory''' |
|
379 | 381 | wvfs = repo.wvfs |
|
380 | 382 | standindir = shortname |
|
381 | 383 | |
|
382 | 384 | # no warnings about missing files or directories |
|
383 | 385 | badfn = lambda f, msg: None |
|
384 | 386 | |
|
385 | 387 | if rmatcher and not rmatcher.always(): |
|
386 | 388 | pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()] |
|
387 | 389 | if not pats: |
|
388 | 390 | pats = [wvfs.join(standindir)] |
|
389 | 391 | match = scmutil.match(repo[None], pats, badfn=badfn) |
|
390 | 392 | else: |
|
391 | 393 | # no patterns: relative to repo root |
|
392 | 394 | match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn) |
|
393 | 395 | return match |
|
394 | 396 | |
|
395 | 397 | |
|
396 | 398 | def composestandinmatcher(repo, rmatcher): |
|
397 | 399 | """Return a matcher that accepts standins corresponding to the |
|
398 | 400 | files accepted by rmatcher. Pass the list of files in the matcher |
|
399 | 401 | as the paths specified by the user.""" |
|
400 | 402 | smatcher = getstandinmatcher(repo, rmatcher) |
|
401 | 403 | isstandin = smatcher.matchfn |
|
402 | 404 | |
|
403 | 405 | def composedmatchfn(f): |
|
404 | 406 | return isstandin(f) and rmatcher.matchfn(splitstandin(f)) |
|
405 | 407 | |
|
406 | 408 | smatcher.matchfn = composedmatchfn |
|
407 | 409 | |
|
408 | 410 | return smatcher |
|
409 | 411 | |
|
410 | 412 | |
|
411 | 413 | def standin(filename): |
|
412 | 414 | """Return the repo-relative path to the standin for the specified big |
|
413 | 415 | file.""" |
|
414 | 416 | # Notes: |
|
415 | 417 | # 1) Some callers want an absolute path, but for instance addlargefiles |
|
416 | 418 | # needs it repo-relative so it can be passed to repo[None].add(). So |
|
417 | 419 | # leave it up to the caller to use repo.wjoin() to get an absolute path. |
|
418 | 420 | # 2) Join with '/' because that's what dirstate always uses, even on |
|
419 | 421 | # Windows. Change existing separator to '/' first in case we are |
|
420 | 422 | # passed filenames from an external source (like the command line). |
|
421 | 423 | return shortnameslash + util.pconvert(filename) |
|
422 | 424 | |
|
423 | 425 | |
|
424 | 426 | def isstandin(filename): |
|
425 | 427 | """Return true if filename is a big file standin. filename must be |
|
426 | 428 | in Mercurial's internal form (slash-separated).""" |
|
427 | 429 | return filename.startswith(shortnameslash) |
|
428 | 430 | |
|
429 | 431 | |
|
430 | 432 | def splitstandin(filename): |
|
431 | 433 | # Split on / because that's what dirstate always uses, even on Windows. |
|
432 | 434 | # Change local separator to / first just in case we are passed filenames |
|
433 | 435 | # from an external source (like the command line). |
|
434 | 436 | bits = util.pconvert(filename).split(b'/', 1) |
|
435 | 437 | if len(bits) == 2 and bits[0] == shortname: |
|
436 | 438 | return bits[1] |
|
437 | 439 | else: |
|
438 | 440 | return None |
|
439 | 441 | |
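A round-trip sketch of standin() and splitstandin() as defined above, assuming the extension's usual shortname of b'.hglf' and substituting a plain backslash-to-slash replacement for util.pconvert():

    shortname = b'.hglf'  # assumed value used by the largefiles extension
    shortnameslash = shortname + b'/'


    def standin(filename):
        return shortnameslash + filename.replace(b'\\', b'/')


    def splitstandin(filename):
        bits = filename.replace(b'\\', b'/').split(b'/', 1)
        if len(bits) == 2 and bits[0] == shortname:
            return bits[1]
        return None


    assert standin(b'data/big.bin') == b'.hglf/data/big.bin'
    assert splitstandin(standin(b'data/big.bin')) == b'data/big.bin'
    assert splitstandin(b'regular.txt') is None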
|
440 | 442 | |
|
441 | 443 | def updatestandin(repo, lfile, standin): |
|
442 | 444 | """Re-calculate hash value of lfile and write it into standin |
|
443 | 445 | |
|
444 | 446 | This assumes that "lfutil.standin(lfile) == standin", for efficiency. |
|
445 | 447 | """ |
|
446 | 448 | file = repo.wjoin(lfile) |
|
447 | 449 | if repo.wvfs.exists(lfile): |
|
448 | 450 | hash = hashfile(file) |
|
449 | 451 | executable = getexecutable(file) |
|
450 | 452 | writestandin(repo, standin, hash, executable) |
|
451 | 453 | else: |
|
452 | 454 | raise error.Abort(_(b'%s: file not found!') % lfile) |
|
453 | 455 | |
|
454 | 456 | |
|
455 | 457 | def readasstandin(fctx): |
|
456 | 458 | """read hex hash from given filectx of standin file |
|
457 | 459 | |
|
458 | 460 | This encapsulates how "standin" data is stored into storage layer.""" |
|
459 | 461 | return fctx.data().strip() |
|
460 | 462 | |
|
461 | 463 | |
|
462 | 464 | def writestandin(repo, standin, hash, executable): |
|
463 | 465 | '''write hash to <repo.root>/<standin>''' |
|
464 | 466 | repo.wwrite(standin, hash + b'\n', executable and b'x' or b'') |
|
465 | 467 | |
|
466 | 468 | |
|
467 | 469 | def copyandhash(instream, outfile): |
|
468 | 470 | """Read bytes from instream (iterable) and write them to outfile, |
|
469 | 471 | computing the SHA-1 hash of the data along the way. Return the hash.""" |
|
470 | 472 | hasher = hashutil.sha1(b'') |
|
471 | 473 | for data in instream: |
|
472 | 474 | hasher.update(data) |
|
473 | 475 | outfile.write(data) |
|
474 | 476 | return hex(hasher.digest()) |
|
475 | 477 | |
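A self-contained usage sketch of the copy-and-hash pattern above, substituting hashlib for Mercurial's hashutil wrapper and io.BytesIO for real file objects:

    import hashlib
    import io


    def copyandhash(instream, outfile):
        # same shape as the function above: hash each chunk as it is copied
        hasher = hashlib.sha1(b'')
        for data in instream:
            hasher.update(data)
            outfile.write(data)
        return hasher.hexdigest()


    dst = io.BytesIO()
    digest = copyandhash([b'large', b'file', b'payload'], dst)
    assert dst.getvalue() == b'largefilepayload'
    assert digest == hashlib.sha1(b'largefilepayload').hexdigest()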
|
476 | 478 | |
|
477 | 479 | def hashfile(file): |
|
478 | 480 | if not os.path.exists(file): |
|
479 | 481 | return b'' |
|
480 | 482 | with open(file, b'rb') as fd: |
|
481 | 483 | return hexsha1(fd) |
|
482 | 484 | |
|
483 | 485 | |
|
484 | 486 | def getexecutable(filename): |
|
485 | 487 | mode = os.stat(filename).st_mode |
|
486 | 488 | return ( |
|
487 | 489 | (mode & stat.S_IXUSR) |
|
488 | 490 | and (mode & stat.S_IXGRP) |
|
489 | 491 | and (mode & stat.S_IXOTH) |
|
490 | 492 | ) |
|
491 | 493 | |
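Note that getexecutable() is truthy only when the user, group, and other execute bits are all set. A quick stand-alone check of that behavior on raw mode values:

    import stat


    def executable_for_all(mode):
        # same condition as getexecutable(), applied to a raw st_mode value
        return (
            (mode & stat.S_IXUSR)
            and (mode & stat.S_IXGRP)
            and (mode & stat.S_IXOTH)
        )


    assert executable_for_all(0o755)
    assert not executable_for_all(0o744)  # group/other execute bits missing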
|
492 | 494 | |
|
493 | 495 | def urljoin(first, second, *arg): |
|
494 | 496 | def join(left, right): |
|
495 | 497 | if not left.endswith(b'/'): |
|
496 | 498 | left += b'/' |
|
497 | 499 | if right.startswith(b'/'): |
|
498 | 500 | right = right[1:] |
|
499 | 501 | return left + right |
|
500 | 502 | |
|
501 | 503 | url = join(first, second) |
|
502 | 504 | for a in arg: |
|
503 | 505 | url = join(url, a) |
|
504 | 506 | return url |
|
505 | 507 | |
|
506 | 508 | |
|
507 | 509 | def hexsha1(fileobj): |
|
508 | 510 | """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like |
|
509 | 511 | object data""" |
|
510 | 512 | h = hashutil.sha1() |
|
511 | 513 | for chunk in util.filechunkiter(fileobj): |
|
512 | 514 | h.update(chunk) |
|
513 | 515 | return hex(h.digest()) |
|
514 | 516 | |
|
515 | 517 | |
|
516 | 518 | def httpsendfile(ui, filename): |
|
517 | 519 | return httpconnection.httpsendfile(ui, filename, b'rb') |
|
518 | 520 | |
|
519 | 521 | |
|
520 | 522 | def unixpath(path): |
|
521 | 523 | '''Return a version of path normalized for use with the lfdirstate.''' |
|
522 | 524 | return util.pconvert(os.path.normpath(path)) |
|
523 | 525 | |
|
524 | 526 | |
|
525 | 527 | def islfilesrepo(repo): |
|
526 | 528 | '''Return true if the repo is a largefile repo.''' |
|
527 | 529 | if b'largefiles' in repo.requirements and any( |
|
528 | 530 | shortnameslash in f[1] for f in repo.store.datafiles() |
|
529 | 531 | ): |
|
530 | 532 | return True |
|
531 | 533 | |
|
532 | 534 | return any(openlfdirstate(repo.ui, repo, False)) |
|
533 | 535 | |
|
534 | 536 | |
|
535 | 537 | class storeprotonotcapable(Exception): |
|
536 | 538 | def __init__(self, storetypes): |
|
537 | 539 | self.storetypes = storetypes |
|
538 | 540 | |
|
539 | 541 | |
|
540 | 542 | def getstandinsstate(repo): |
|
541 | 543 | standins = [] |
|
542 | 544 | matcher = getstandinmatcher(repo) |
|
543 | 545 | wctx = repo[None] |
|
544 | 546 | for standin in repo.dirstate.walk( |
|
545 | 547 | matcher, subrepos=[], unknown=False, ignored=False |
|
546 | 548 | ): |
|
547 | 549 | lfile = splitstandin(standin) |
|
548 | 550 | try: |
|
549 | 551 | hash = readasstandin(wctx[standin]) |
|
550 | 552 | except IOError: |
|
551 | 553 | hash = None |
|
552 | 554 | standins.append((lfile, hash)) |
|
553 | 555 | return standins |
|
554 | 556 | |
|
555 | 557 | |
|
556 | 558 | def synclfdirstate(repo, lfdirstate, lfile, normallookup): |
|
557 | 559 | lfstandin = standin(lfile) |
|
558 | 560 | if lfstandin not in repo.dirstate: |
|
559 | 561 | lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=False) |
|
560 | 562 | else: |
|
561 | stat = repo.dirstate._map[lfstandin] |

562 | state, mtime = stat.state, stat.mtime |
|
563 | if state == b'n': | |
|
564 | if normallookup or mtime < 0 or not repo.wvfs.exists(lfile): | |
|
565 | # state 'n' doesn't ensure 'clean' in this case | |
|
566 | lfdirstate.update_file( | |
|
567 | lfile, p1_tracked=True, wc_tracked=True, possibly_dirty=True | |
|
568 | ) |

569 | else: |
|
570 | lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True) | |
|
571 | elif state == b'm': | |
|
572 | lfdirstate.update_file( | |
|
573 | lfile, p1_tracked=True, wc_tracked=True, merged=True | |
|
574 | ) | |
|
575 | elif state == b'r': | |
|
576 | lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=False) | |
|
577 | elif state == b'a': | |
|
578 | lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True) | |
|
563 | entry = repo.dirstate.get_entry(lfstandin) | |
|
564 | lfdirstate.update_file( | |
|
565 | lfile, | |
|
566 | wc_tracked=entry.tracked, | |
|
567 | p1_tracked=entry.p1_tracked, | |
|
568 | p2_info=entry.p2_info, | |
|
569 | possibly_dirty=True, | |
|
570 | ) | |
|
579 | 571 | |
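The rewritten else-branch above collapses the old per-state branches (b'n', b'm', b'r', b'a') into a single update_file() call that copies the standin entry's flags and marks the file possibly dirty. A sketch of that shape, with FakeEntry and sync as illustrative stand-ins:

    class FakeEntry:
        # stand-in for what repo.dirstate.get_entry() returns
        def __init__(self, tracked, p1_tracked, p2_info=False):
            self.tracked = tracked        # present in the working copy
            self.p1_tracked = p1_tracked  # present in the first parent
            self.p2_info = p2_info        # merge / second-parent information


    def sync(update_file, entry):
        # copy the flags verbatim; possibly_dirty forces a later status run
        # to re-check the file's content instead of trusting cached state
        update_file(
            wc_tracked=entry.tracked,
            p1_tracked=entry.p1_tracked,
            p2_info=entry.p2_info,
            possibly_dirty=True,
        )


    calls = []
    sync(lambda **kw: calls.append(kw), FakeEntry(True, True))
    assert calls[0]['possibly_dirty'] and calls[0]['wc_tracked']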
|
580 | 572 | |
|
581 | 573 | def markcommitted(orig, ctx, node): |
|
582 | 574 | repo = ctx.repo() |
|
583 | 575 | |
|
584 | 576 | lfdirstate = openlfdirstate(repo.ui, repo) |
|
585 | 577 | with lfdirstate.parentchange(): |
|
586 | 578 | orig(node) |
|
587 | 579 | |
|
588 | 580 | # ATTENTION: "ctx.files()" may differ from "repo[node].files()" |
|
589 | 581 | # because files coming from the 2nd parent are omitted in the latter. |
|
590 | 582 | # |
|
591 | 583 | # The former should be used to get targets of "synclfdirstate", |
|
592 | 584 | # because such files: |
|
593 | 585 | # - are marked as "a" by "patch.patch()" (e.g. via transplant), and |
|
594 | 586 | # - have to be marked as "n" after commit, but |
|
595 | 587 | # - aren't listed in "repo[node].files()" |
|
596 | 588 | |
|
597 | 589 | for f in ctx.files(): |
|
598 | 590 | lfile = splitstandin(f) |
|
599 | 591 | if lfile is not None: |
|
600 | 592 | synclfdirstate(repo, lfdirstate, lfile, False) |
|
601 | lfdirstate.write() | |
|
593 | lfdirstate.write(repo.currenttransaction()) | |
|
602 | 594 | |
|
603 | 595 | # As part of committing, copy all of the largefiles into the cache. |
|
604 | 596 | # |
|
605 | 597 | # Using "node" instead of "ctx" implies additional "repo[node]" |
|
606 | 598 | # lookup while copyalltostore(), but can omit redundant check for |
|
607 | 599 | # files coming from the 2nd parent, which should exist in store
|
608 | 600 | # at merging. |
|
609 | 601 | copyalltostore(repo, node) |
|
610 | 602 | |
|
611 | 603 | |
|
612 | 604 | def getlfilestoupdate(oldstandins, newstandins): |
|
613 | 605 | changedstandins = set(oldstandins).symmetric_difference(set(newstandins)) |
|
614 | 606 | filelist = [] |
|
615 | 607 | for f in changedstandins: |
|
616 | 608 | if f[0] not in filelist: |
|
617 | 609 | filelist.append(f[0]) |
|
618 | 610 | return filelist |
|
619 | 611 | |
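Usage sketch for getlfilestoupdate(): the standins are (lfile, hash) pairs, so the symmetric difference picks up files whose hash changed or that appeared or disappeared between the two snapshots:

    old = [(b'a.bin', b'111'), (b'b.bin', b'222')]
    new = [(b'a.bin', b'111'), (b'b.bin', b'333'), (b'c.bin', b'444')]

    changed = set(old).symmetric_difference(set(new))
    filelist = []
    for f in changed:
        if f[0] not in filelist:  # b'b.bin' occurs twice, keep it once
            filelist.append(f[0])

    assert sorted(filelist) == [b'b.bin', b'c.bin']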
|
620 | 612 | |
|
621 | 613 | def getlfilestoupload(repo, missing, addfunc): |
|
622 | 614 | makeprogress = repo.ui.makeprogress |
|
623 | 615 | with makeprogress( |
|
624 | 616 | _(b'finding outgoing largefiles'), |
|
625 | 617 | unit=_(b'revisions'), |
|
626 | 618 | total=len(missing), |
|
627 | 619 | ) as progress: |
|
628 | 620 | for i, n in enumerate(missing): |
|
629 | 621 | progress.update(i) |
|
630 | 622 | parents = [p for p in repo[n].parents() if p != repo.nullid] |
|
631 | 623 | |
|
632 | 624 | with lfstatus(repo, value=False): |
|
633 | 625 | ctx = repo[n] |
|
634 | 626 | |
|
635 | 627 | files = set(ctx.files()) |
|
636 | 628 | if len(parents) == 2: |
|
637 | 629 | mc = ctx.manifest() |
|
638 | 630 | mp1 = ctx.p1().manifest() |
|
639 | 631 | mp2 = ctx.p2().manifest() |
|
640 | 632 | for f in mp1: |
|
641 | 633 | if f not in mc: |
|
642 | 634 | files.add(f) |
|
643 | 635 | for f in mp2: |
|
644 | 636 | if f not in mc: |
|
645 | 637 | files.add(f) |
|
646 | 638 | for f in mc: |
|
647 | 639 | if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None): |
|
648 | 640 | files.add(f) |
|
649 | 641 | for fn in files: |
|
650 | 642 | if isstandin(fn) and fn in ctx: |
|
651 | 643 | addfunc(fn, readasstandin(ctx[fn])) |
|
652 | 644 | |
|
653 | 645 | |
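For merge commits, the loop above widens ctx.files() to any file that is absent from, or differs in, either parent manifest. A sketch with plain dicts standing in for manifests:

    mc = {b'same': b'n0', b'changed': b'n2'}  # the merge's own manifest
    mp1 = {b'same': b'n0', b'changed': b'n1', b'gone': b'nx'}  # parent 1
    mp2 = {b'same': b'n0', b'changed': b'n1'}  # parent 2

    files = set()
    for f in mp1:
        if f not in mc:
            files.add(f)  # removed relative to parent 1
    for f in mp2:
        if f not in mc:
            files.add(f)  # removed relative to parent 2
    for f in mc:
        if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
            files.add(f)  # content differs from at least one parent

    assert files == {b'gone', b'changed'}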
|
654 | 646 | def updatestandinsbymatch(repo, match): |
|
655 | 647 | """Update standins in the working directory according to specified match |
|
656 | 648 | |
|
657 | 649 | This returns (possibly modified) ``match`` object to be used for |
|
658 | 650 | subsequent commit process. |
|
659 | 651 | """ |
|
660 | 652 | |
|
661 | 653 | ui = repo.ui |
|
662 | 654 | |
|
663 | 655 | # Case 1: user calls commit with no specific files or |
|
664 | 656 | # include/exclude patterns: refresh and commit all files that |
|
665 | 657 | # are "dirty". |
|
666 | 658 | if match is None or match.always(): |
|
667 | 659 | # Spend a bit of time here to get a list of files we know |
|
668 | 660 | # are modified so we can compare only against those. |
|
669 | 661 | # It can cost a lot of time (several seconds) |
|
670 | 662 | # otherwise to update all standins if the largefiles are |
|
671 | 663 | # large. |
|
672 | 664 | lfdirstate = openlfdirstate(ui, repo) |
|
673 | 665 | dirtymatch = matchmod.always() |
|
674 | 666 | unsure, s = lfdirstate.status( |
|
675 | 667 | dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False |
|
676 | 668 | ) |
|
677 | 669 | modifiedfiles = unsure + s.modified + s.added + s.removed |
|
678 | 670 | lfiles = listlfiles(repo) |
|
679 | 671 | # this only loops through largefiles that exist (not |
|
680 | 672 | # removed/renamed) |
|
681 | 673 | for lfile in lfiles: |
|
682 | 674 | if lfile in modifiedfiles: |
|
683 | 675 | fstandin = standin(lfile) |
|
684 | 676 | if repo.wvfs.exists(fstandin): |
|
685 | 677 | # this handles the case where a rebase is being |
|
686 | 678 | # performed and the working copy is not updated |
|
687 | 679 | # yet. |
|
688 | 680 | if repo.wvfs.exists(lfile): |
|
689 | 681 | updatestandin(repo, lfile, fstandin) |
|
690 | 682 | |
|
691 | 683 | return match |
|
692 | 684 | |
|
693 | 685 | lfiles = listlfiles(repo) |
|
694 | 686 | match._files = repo._subdirlfs(match.files(), lfiles) |
|
695 | 687 | |
|
696 | 688 | # Case 2: user calls commit with specified patterns: refresh |
|
697 | 689 | # any matching big files. |
|
698 | 690 | smatcher = composestandinmatcher(repo, match) |
|
699 | 691 | standins = repo.dirstate.walk( |
|
700 | 692 | smatcher, subrepos=[], unknown=False, ignored=False |
|
701 | 693 | ) |
|
702 | 694 | |
|
703 | 695 | # No matching big files: get out of the way and pass control to |
|
704 | 696 | # the usual commit() method. |
|
705 | 697 | if not standins: |
|
706 | 698 | return match |
|
707 | 699 | |
|
708 | 700 | # Refresh all matching big files. It's possible that the |
|
709 | 701 | # commit will end up failing, in which case the big files will |
|
710 | 702 | # stay refreshed. No harm done: the user modified them and |
|
711 | 703 | # asked to commit them, so sooner or later we're going to |
|
712 | 704 | # refresh the standins. Might as well leave them refreshed. |
|
713 | 705 | lfdirstate = openlfdirstate(ui, repo) |
|
714 | 706 | for fstandin in standins: |
|
715 | 707 | lfile = splitstandin(fstandin) |
|
716 | if lfdirstate[lfile] != b'r': |

708 | if lfdirstate.get_entry(lfile).tracked: |
|
717 | 709 | updatestandin(repo, lfile, fstandin) |
|
718 | 710 | |
|
719 | 711 | # Cook up a new matcher that only matches regular files or |
|
720 | 712 | # standins corresponding to the big files requested by the |
|
721 | 713 | # user. Have to modify _files to prevent commit() from |
|
722 | 714 | # complaining "not tracked" for big files. |
|
723 | 715 | match = copy.copy(match) |
|
724 | 716 | origmatchfn = match.matchfn |
|
725 | 717 | |
|
726 | 718 | # Check both the list of largefiles and the list of |
|
727 | 719 | # standins because if a largefile was removed, it |
|
728 | 720 | # won't be in the list of largefiles at this point |
|
729 | 721 | match._files += sorted(standins) |
|
730 | 722 | |
|
731 | 723 | actualfiles = [] |
|
732 | 724 | for f in match._files: |
|
733 | 725 | fstandin = standin(f) |
|
734 | 726 | |
|
735 | 727 | # For largefiles, only one of the normal and standin should be |
|
736 | 728 | # committed (except if one of them is a remove). In the case of a |
|
737 | 729 | # standin removal, drop the normal file if it is unknown to dirstate. |
|
738 | 730 | # Thus, skip plain largefile names but keep the standin. |
|
739 | 731 | if f in lfiles or fstandin in standins: |
|
740 | if repo.dirstate[fstandin] != b'r': |

741 | if repo.dirstate[f] != b'r': |

732 | if not repo.dirstate.get_entry(fstandin).removed: |

733 | if not repo.dirstate.get_entry(f).removed: |
|
742 | 734 | continue |
|
743 | elif repo.dirstate[f] != b'?': |

735 | elif not repo.dirstate.get_entry(f).any_tracked: |
|
744 | 736 | continue |
|
745 | 737 | |
|
746 | 738 | actualfiles.append(f) |
|
747 | 739 | match._files = actualfiles |
|
748 | 740 | |
|
749 | 741 | def matchfn(f): |
|
750 | 742 | if origmatchfn(f): |
|
751 | 743 | return f not in lfiles |
|
752 | 744 | else: |
|
753 | 745 | return f in standins |
|
754 | 746 | |
|
755 | 747 | match.matchfn = matchfn |
|
756 | 748 | |
|
757 | 749 | return match |
|
758 | 750 | |
|
759 | 751 | |
|
760 | 752 | class automatedcommithook(object): |
|
761 | 753 | """Stateful hook to update standins at the 1st commit of resuming |
|
762 | 754 | |
|
763 | 755 | For efficiency, updating standins in the working directory should |
|
764 | 756 | be avoided while automated committing (like rebase, transplant and |
|
765 | 757 | so on), because they should be updated before committing. |
|
766 | 758 | |
|
767 | 759 | But the first commit after resuming automated committing (e.g. ``rebase

768 | 760 | --continue``) should update them, because largefiles may have been

769 | 761 | modified manually.
|
770 | 762 | """ |
|
771 | 763 | |
|
772 | 764 | def __init__(self, resuming): |
|
773 | 765 | self.resuming = resuming |
|
774 | 766 | |
|
775 | 767 | def __call__(self, repo, match): |
|
776 | 768 | if self.resuming: |
|
777 | 769 | self.resuming = False # avoids updating at subsequent commits |
|
778 | 770 | return updatestandinsbymatch(repo, match) |
|
779 | 771 | else: |
|
780 | 772 | return match |
|
781 | 773 | |
|
782 | 774 | |
|
783 | 775 | def getstatuswriter(ui, repo, forcibly=None): |
|
784 | 776 | """Return the function to write largefiles specific status out |
|
785 | 777 | |
|
786 | 778 | If ``forcibly`` is ``None``, this returns the last element of |
|
787 | 779 | ``repo._lfstatuswriters`` as "default" writer function. |
|
788 | 780 | |
|
789 | 781 | Otherwise, this returns the function to always write out (or |
|
790 | 782 | ignore if ``not forcibly``) status. |
|
791 | 783 | """ |
|
792 | 784 | if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'): |
|
793 | 785 | return repo._lfstatuswriters[-1] |
|
794 | 786 | else: |
|
795 | 787 | if forcibly: |
|
796 | 788 | return ui.status # forcibly WRITE OUT |
|
797 | 789 | else: |
|
798 | 790 | return lambda *msg, **opts: None # forcibly IGNORE |
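A behavior sketch of getstatuswriter()'s ``forcibly`` contract, with plain callables standing in for ui.status and the repo-provided writer (illustrative names, not Mercurial API):

    def getstatuswriter(status, default_writer, forcibly=None, enabled=True):
        if forcibly is None and enabled:
            return default_writer            # last repo._lfstatuswriters entry
        if forcibly:
            return status                    # forcibly WRITE OUT
        return lambda *msg, **opts: None     # forcibly IGNORE


    messages = []
    writer = getstatuswriter(messages.append, None, forcibly=True)
    writer(b'3 largefiles updated\n')
    assert messages == [b'3 largefiles updated\n']

    quiet = getstatuswriter(messages.append, None, forcibly=False)
    quiet(b'this is dropped\n')
    assert messages == [b'3 largefiles updated\n']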
|
1 | NO CONTENT: file renamed from rust/hg-cpython/src/dirstate/owning.rs to rust/hg-core/src/dirstate_tree/owning.rs | |
1 | NO CONTENT: file renamed from tests/test-clone-uncompressed.t to tests/test-clone-stream.t |
|