new file: contrib/automation/linux-requirements-py3.5.txt
@@ -0,0 +1,194 @@
#
# This file is autogenerated by pip-compile
# To update, run:
#
#    pip-compile --generate-hashes --output-file=contrib/automation/linux-requirements-py3.5.txt contrib/automation/linux-requirements.txt.in
#
astroid==2.4.2 \
    --hash=sha256:2f4078c2a41bf377eea06d71c9d2ba4eb8f6b1af2135bec27bbbb7d8f12bb703 \
    --hash=sha256:bc58d83eb610252fd8de6363e39d4f1d0619c894b0ed24603b881c02e64c7386
    # via pylint
docutils==0.17.1 \
    --hash=sha256:686577d2e4c32380bb50cbb22f575ed742d58168cee37e99117a854bcd88f125 \
    --hash=sha256:cf316c8370a737a022b72b56874f6602acf974a37a9fba42ec2876387549fc61
    # via -r contrib/automation/linux-requirements.txt.in
fuzzywuzzy==0.18.0 \
    --hash=sha256:45016e92264780e58972dca1b3d939ac864b78437422beecebb3095f8efd00e8 \
    --hash=sha256:928244b28db720d1e0ee7587acf660ea49d7e4c632569cad4f1cd7e68a5f0993
    # via -r contrib/automation/linux-requirements.txt.in
idna==3.1 \
    --hash=sha256:5205d03e7bcbb919cc9c19885f9920d622ca52448306f2377daede5cf3faac16 \
    --hash=sha256:c5b02147e01ea9920e6b0a3f1f7bb833612d507592c837a6c49552768f4054e1
    # via yarl
isort==4.3.21 \
    --hash=sha256:54da7e92468955c4fceacd0c86bd0ec997b0e1ee80d97f67c35a78b719dccab1 \
    --hash=sha256:6e811fcb295968434526407adb8796944f1988c5b65e8139058f2014cbe100fd
    # via
    #   -r contrib/automation/linux-requirements.txt.in
    #   pylint
lazy-object-proxy==1.4.3 \
    --hash=sha256:0c4b206227a8097f05c4dbdd323c50edf81f15db3b8dc064d08c62d37e1a504d \
    --hash=sha256:194d092e6f246b906e8f70884e620e459fc54db3259e60cf69a4d66c3fda3449 \
    --hash=sha256:1be7e4c9f96948003609aa6c974ae59830a6baecc5376c25c92d7d697e684c08 \
    --hash=sha256:4677f594e474c91da97f489fea5b7daa17b5517190899cf213697e48d3902f5a \
    --hash=sha256:48dab84ebd4831077b150572aec802f303117c8cc5c871e182447281ebf3ac50 \
    --hash=sha256:5541cada25cd173702dbd99f8e22434105456314462326f06dba3e180f203dfd \
    --hash=sha256:59f79fef100b09564bc2df42ea2d8d21a64fdcda64979c0fa3db7bdaabaf6239 \
    --hash=sha256:8d859b89baf8ef7f8bc6b00aa20316483d67f0b1cbf422f5b4dc56701c8f2ffb \
    --hash=sha256:9254f4358b9b541e3441b007a0ea0764b9d056afdeafc1a5569eee1cc6c1b9ea \
    --hash=sha256:9651375199045a358eb6741df3e02a651e0330be090b3bc79f6d0de31a80ec3e \
    --hash=sha256:97bb5884f6f1cdce0099f86b907aa41c970c3c672ac8b9c8352789e103cf3156 \
    --hash=sha256:9b15f3f4c0f35727d3a0fba4b770b3c4ebbb1fa907dbcc046a1d2799f3edd142 \
    --hash=sha256:a2238e9d1bb71a56cd710611a1614d1194dc10a175c1e08d75e1a7bcc250d442 \
    --hash=sha256:a6ae12d08c0bf9909ce12385803a543bfe99b95fe01e752536a60af2b7797c62 \
    --hash=sha256:ca0a928a3ddbc5725be2dd1cf895ec0a254798915fb3a36af0964a0a4149e3db \
    --hash=sha256:cb2c7c57005a6804ab66f106ceb8482da55f5314b7fcb06551db1edae4ad1531 \
    --hash=sha256:d74bb8693bf9cf75ac3b47a54d716bbb1a92648d5f781fc799347cfc95952383 \
    --hash=sha256:d945239a5639b3ff35b70a88c5f2f491913eb94871780ebfabb2568bd58afc5a \
    --hash=sha256:eba7011090323c1dadf18b3b689845fd96a61ba0a1dfbd7f24b921398affc357 \
    --hash=sha256:efa1909120ce98bbb3777e8b6f92237f5d5c8ea6758efea36a473e1d38f7d3e4 \
    --hash=sha256:f3900e8a5de27447acbf900b4750b0ddfd7ec1ea7fbaf11dfa911141bc522af0
    # via astroid
mccabe==0.6.1 \
    --hash=sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42 \
    --hash=sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f
    # via pylint
multidict==5.0.2 \
    --hash=sha256:060d68ae3e674c913ec41a464916f12c4d7ff17a3a9ebbf37ba7f2c681c2b33e \
    --hash=sha256:06f39f0ddc308dab4e5fa282d145f90cd38d7ed75390fc83335636909a9ec191 \
    --hash=sha256:17847fede1aafdb7e74e01bb34ab47a1a1ea726e8184c623c45d7e428d2d5d34 \
    --hash=sha256:1cd102057b09223b919f9447c669cf2efabeefb42a42ae6233f25ffd7ee31a79 \
    --hash=sha256:20cc9b2dd31761990abff7d0e63cd14dbfca4ebb52a77afc917b603473951a38 \
    --hash=sha256:2576e30bbec004e863d87216bc34abe24962cc2e964613241a1c01c7681092ab \
    --hash=sha256:2ab9cad4c5ef5c41e1123ed1f89f555aabefb9391d4e01fd6182de970b7267ed \
    --hash=sha256:359ea00e1b53ceef282232308da9d9a3f60d645868a97f64df19485c7f9ef628 \
    --hash=sha256:3e61cc244fd30bd9fdfae13bdd0c5ec65da51a86575ff1191255cae677045ffe \
    --hash=sha256:43c7a87d8c31913311a1ab24b138254a0ee89142983b327a2c2eab7a7d10fea9 \
    --hash=sha256:4a3f19da871befa53b48dd81ee48542f519beffa13090dc135fffc18d8fe36db \
    --hash=sha256:4df708ef412fd9b59b7e6c77857e64c1f6b4c0116b751cb399384ec9a28baa66 \
    --hash=sha256:59182e975b8c197d0146a003d0f0d5dc5487ce4899502061d8df585b0f51fba2 \
    --hash=sha256:6128d2c0956fd60e39ec7d1c8f79426f0c915d36458df59ddd1f0cff0340305f \
    --hash=sha256:6168839491a533fa75f3f5d48acbb829475e6c7d9fa5c6e245153b5f79b986a3 \
    --hash=sha256:62abab8088704121297d39c8f47156cb8fab1da731f513e59ba73946b22cf3d0 \
    --hash=sha256:653b2bbb0bbf282c37279dd04f429947ac92713049e1efc615f68d4e64b1dbc2 \
    --hash=sha256:6566749cd78cb37cbf8e8171b5cd2cbfc03c99f0891de12255cf17a11c07b1a3 \
    --hash=sha256:76cbdb22f48de64811f9ce1dd4dee09665f84f32d6a26de249a50c1e90e244e0 \
    --hash=sha256:8efcf070d60fd497db771429b1c769a3783e3a0dd96c78c027e676990176adc5 \
    --hash=sha256:8fa4549f341a057feec4c3139056ba73e17ed03a506469f447797a51f85081b5 \
    --hash=sha256:9380b3f2b00b23a4106ba9dd022df3e6e2e84e1788acdbdd27603b621b3288df \
    --hash=sha256:9ed9b280f7778ad6f71826b38a73c2fdca4077817c64bc1102fdada58e75c03c \
    --hash=sha256:a7b8b5bd16376c8ac2977748bd978a200326af5145d8d0e7f799e2b355d425b6 \
    --hash=sha256:af271c2540d1cd2a137bef8d95a8052230aa1cda26dd3b2c73d858d89993d518 \
    --hash=sha256:b561e76c9e21402d9a446cdae13398f9942388b9bff529f32dfa46220af54d00 \
    --hash=sha256:b82400ef848bbac6b9035a105ac6acaa1fb3eea0d164e35bbb21619b88e49fed \
    --hash=sha256:b98af08d7bb37d3456a22f689819ea793e8d6961b9629322d7728c4039071641 \
    --hash=sha256:c58e53e1c73109fdf4b759db9f2939325f510a8a5215135330fe6755921e4886 \
    --hash=sha256:cbabfc12b401d074298bfda099c58dfa5348415ae2e4ec841290627cb7cb6b2e \
    --hash=sha256:d4a6fb98e9e9be3f7d70fd3e852369c00a027bd5ed0f3e8ade3821bcad257408 \
    --hash=sha256:d99da85d6890267292065e654a329e1d2f483a5d2485e347383800e616a8c0b1 \
    --hash=sha256:e58db0e0d60029915f7fc95a8683fa815e204f2e1990f1fb46a7778d57ca8c35 \
    --hash=sha256:e5bf89fe57f702a046c7ec718fe330ed50efd4bcf74722940db2eb0919cddb1c \
    --hash=sha256:f612e8ef8408391a4a3366e3508bab8ef97b063b4918a317cb6e6de4415f01af \
    --hash=sha256:f65a2442c113afde52fb09f9a6276bbc31da71add99dc76c3adf6083234e07c6 \
    --hash=sha256:fa0503947a99a1be94f799fac89d67a5e20c333e78ddae16e8534b151cdc588a
    # via yarl
pyflakes==2.3.1 \
    --hash=sha256:7893783d01b8a89811dd72d7dfd4d84ff098e5eed95cfa8905b22bbffe52efc3 \
    --hash=sha256:f5bc8ecabc05bb9d291eb5203d6810b49040f6ff446a756326104746cc00c1db
    # via -r contrib/automation/linux-requirements.txt.in
pygments==2.9.0 \
    --hash=sha256:a18f47b506a429f6f4b9df81bb02beab9ca21d0a5fee38ed15aef65f0545519f \
    --hash=sha256:d66e804411278594d764fc69ec36ec13d9ae9147193a1740cd34d272ca383b8e
    # via -r contrib/automation/linux-requirements.txt.in
pylint==2.6.2 \
    --hash=sha256:718b74786ea7ed07aa0c58bf572154d4679f960d26e9641cc1de204a30b87fc9 \
    --hash=sha256:e71c2e9614a4f06e36498f310027942b0f4f2fde20aebb01655b31edc63b9eaf
    # via -r contrib/automation/linux-requirements.txt.in
python-levenshtein==0.12.2 \
    --hash=sha256:dc2395fbd148a1ab31090dd113c366695934b9e85fe5a4b2a032745efd0346f6
    # via -r contrib/automation/linux-requirements.txt.in
pyyaml==5.3.1 \
    --hash=sha256:06a0d7ba600ce0b2d2fe2e78453a470b5a6e000a985dd4a4e54e436cc36b0e97 \
    --hash=sha256:240097ff019d7c70a4922b6869d8a86407758333f02203e0fc6ff79c5dcede76 \
    --hash=sha256:4f4b913ca1a7319b33cfb1369e91e50354d6f07a135f3b901aca02aa95940bd2 \
    --hash=sha256:6034f55dab5fea9e53f436aa68fa3ace2634918e8b5994d82f3621c04ff5ed2e \
    --hash=sha256:69f00dca373f240f842b2931fb2c7e14ddbacd1397d57157a9b005a6a9942648 \
    --hash=sha256:73f099454b799e05e5ab51423c7bcf361c58d3206fa7b0d555426b1f4d9a3eaf \
    --hash=sha256:74809a57b329d6cc0fdccee6318f44b9b8649961fa73144a98735b0aaf029f1f \
    --hash=sha256:7739fc0fa8205b3ee8808aea45e968bc90082c10aef6ea95e855e10abf4a37b2 \
    --hash=sha256:95f71d2af0ff4227885f7a6605c37fd53d3a106fcab511b8860ecca9fcf400ee \
    --hash=sha256:ad9c67312c84def58f3c04504727ca879cb0013b2517c85a9a253f0cb6380c0a \
    --hash=sha256:b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d \
    --hash=sha256:cc8955cfbfc7a115fa81d85284ee61147059a753344bc51098f3ccd69b0d7e0c \
    --hash=sha256:d13155f591e6fcc1ec3b30685d50bf0711574e2c0dfffd7644babf8b5102ca1a
    # via vcrpy
six==1.16.0 \
    --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \
    --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254
    # via
    #   astroid
    #   vcrpy
toml==0.10.2 \
    --hash=sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b \
    --hash=sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f
    # via pylint
typed-ast==1.4.3 ; python_version >= "3.0" and platform_python_implementation != "PyPy" \
    --hash=sha256:01ae5f73431d21eead5015997ab41afa53aa1fbe252f9da060be5dad2c730ace \
    --hash=sha256:067a74454df670dcaa4e59349a2e5c81e567d8d65458d480a5b3dfecec08c5ff \
    --hash=sha256:0fb71b8c643187d7492c1f8352f2c15b4c4af3f6338f21681d3681b3dc31a266 \
    --hash=sha256:1b3ead4a96c9101bef08f9f7d1217c096f31667617b58de957f690c92378b528 \
    --hash=sha256:2068531575a125b87a41802130fa7e29f26c09a2833fea68d9a40cf33902eba6 \
    --hash=sha256:209596a4ec71d990d71d5e0d312ac935d86930e6eecff6ccc7007fe54d703808 \
    --hash=sha256:2c726c276d09fc5c414693a2de063f521052d9ea7c240ce553316f70656c84d4 \
    --hash=sha256:398e44cd480f4d2b7ee8d98385ca104e35c81525dd98c519acff1b79bdaac363 \
    --hash=sha256:52b1eb8c83f178ab787f3a4283f68258525f8d70f778a2f6dd54d3b5e5fb4341 \
    --hash=sha256:5feca99c17af94057417d744607b82dd0a664fd5e4ca98061480fd8b14b18d04 \
    --hash=sha256:7538e495704e2ccda9b234b82423a4038f324f3a10c43bc088a1636180f11a41 \
    --hash=sha256:760ad187b1041a154f0e4d0f6aae3e40fdb51d6de16e5c99aedadd9246450e9e \
    --hash=sha256:777a26c84bea6cd934422ac2e3b78863a37017618b6e5c08f92ef69853e765d3 \
    --hash=sha256:95431a26309a21874005845c21118c83991c63ea800dd44843e42a916aec5899 \
    --hash=sha256:9ad2c92ec681e02baf81fdfa056fe0d818645efa9af1f1cd5fd6f1bd2bdfd805 \
    --hash=sha256:9c6d1a54552b5330bc657b7ef0eae25d00ba7ffe85d9ea8ae6540d2197a3788c \
    --hash=sha256:aee0c1256be6c07bd3e1263ff920c325b59849dc95392a05f258bb9b259cf39c \
    --hash=sha256:af3d4a73793725138d6b334d9d247ce7e5f084d96284ed23f22ee626a7b88e39 \
    --hash=sha256:b36b4f3920103a25e1d5d024d155c504080959582b928e91cb608a65c3a49e1a \
    --hash=sha256:b9574c6f03f685070d859e75c7f9eeca02d6933273b5e69572e5ff9d5e3931c3 \
    --hash=sha256:bff6ad71c81b3bba8fa35f0f1921fb24ff4476235a6e94a26ada2e54370e6da7 \
    --hash=sha256:c190f0899e9f9f8b6b7863debfb739abcb21a5c054f911ca3596d12b8a4c4c7f \
    --hash=sha256:c907f561b1e83e93fad565bac5ba9c22d96a54e7ea0267c708bffe863cbe4075 \
    --hash=sha256:cae53c389825d3b46fb37538441f75d6aecc4174f615d048321b716df2757fb0 \
    --hash=sha256:dd4a21253f42b8d2b48410cb31fe501d32f8b9fbeb1f55063ad102fe9c425e40 \
    --hash=sha256:dde816ca9dac1d9c01dd504ea5967821606f02e510438120091b84e852367428 \
    --hash=sha256:f2362f3cb0f3172c42938946dbc5b7843c2a28aec307c49100c8b38764eb6927 \
    --hash=sha256:f328adcfebed9f11301eaedfa48e15bdece9b519fb27e6a8c01aa52a17ec31b3 \
    --hash=sha256:f8afcf15cc511ada719a88e013cec87c11aff7b91f019295eb4530f96fe5ef2f \
    --hash=sha256:fb1bbeac803adea29cedd70781399c99138358c26d05fcbd23c13016b7f5ec65
    # via
    #   -r contrib/automation/linux-requirements.txt.in
    #   astroid
vcrpy==4.1.1 \
    --hash=sha256:12c3fcdae7b88ecf11fc0d3e6d77586549d4575a2ceee18e82eee75c1f626162 \
    --hash=sha256:57095bf22fc0a2d99ee9674cdafebed0f3ba763018582450706f7d3a74fff599
    # via -r contrib/automation/linux-requirements.txt.in
wrapt==1.12.1 \
    --hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7
    # via
    #   astroid
    #   vcrpy
yarl==1.3.0 \
    --hash=sha256:024ecdc12bc02b321bc66b41327f930d1c2c543fa9a561b39861da9388ba7aa9 \
    --hash=sha256:2f3010703295fbe1aec51023740871e64bb9664c789cba5a6bdf404e93f7568f \
    --hash=sha256:3890ab952d508523ef4881457c4099056546593fa05e93da84c7250516e632eb \
    --hash=sha256:3e2724eb9af5dc41648e5bb304fcf4891adc33258c6e14e2a7414ea32541e320 \
    --hash=sha256:5badb97dd0abf26623a9982cd448ff12cb39b8e4c94032ccdedf22ce01a64842 \
    --hash=sha256:73f447d11b530d860ca1e6b582f947688286ad16ca42256413083d13f260b7a0 \
    --hash=sha256:7ab825726f2940c16d92aaec7d204cfc34ac26c0040da727cf8ba87255a33829 \
    --hash=sha256:b25de84a8c20540531526dfbb0e2d2b648c13fd5dd126728c496d7c3fea33310 \
    --hash=sha256:c6e341f5a6562af74ba55205dbd56d248daf1b5748ec48a0200ba227bb9e33f4 \
    --hash=sha256:c9bb7c249c4432cd47e75af3864bc02d26c9594f49c82e2a28624417f0ae63b8 \
    --hash=sha256:e060906c0c585565c718d1c3841747b61c5439af2211e185f6739a9412dfbde1
    # via vcrpy

# WARNING: The following packages were not pinned, but pip requires them to be
# pinned when the requirements file includes hashes. Consider using the --allow-unsafe flag.
# setuptools
new file, mode 100644 (created empty)
new file: dirstatedocket.py
@@ -0,0 +1,75 @@
# dirstatedocket.py - docket file for dirstate-v2
#
# Copyright Mercurial Contributors
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import struct

from ..revlogutils import docket as docket_mod


V2_FORMAT_MARKER = b"dirstate-v2\n"

# Must match the constant of the same name in
# `rust/hg-core/src/dirstate_tree/on_disk.rs`
TREE_METADATA_SIZE = 44

# * 12 bytes: format marker
# * 32 bytes: node ID of the working directory's first parent
# * 32 bytes: node ID of the working directory's second parent
# * 4 bytes: big-endian used size of the data file
# * {TREE_METADATA_SIZE} bytes: tree metadata, parsed separately
# * 1 byte: length of the data file's UUID
# * variable: data file's UUID
#
# Node IDs are null-padded if shorter than 32 bytes.
# A data file shorter than the specified used size is corrupted (truncated)
HEADER = struct.Struct(
    ">{}s32s32sL{}sB".format(len(V2_FORMAT_MARKER), TREE_METADATA_SIZE)
)


class DirstateDocket(object):
    data_filename_pattern = b'dirstate.%s.d'

    def __init__(self, parents, data_size, tree_metadata, uuid):
        self.parents = parents
        self.data_size = data_size
        self.tree_metadata = tree_metadata
        self.uuid = uuid

    @classmethod
    def with_new_uuid(cls, parents, data_size, tree_metadata):
        return cls(parents, data_size, tree_metadata, docket_mod.make_uid())

    @classmethod
    def parse(cls, data, nodeconstants):
        if not data:
            parents = (nodeconstants.nullid, nodeconstants.nullid)
            return cls(parents, 0, b'', None)
        marker, p1, p2, data_size, meta, uuid_size = HEADER.unpack_from(data)
        if marker != V2_FORMAT_MARKER:
            raise ValueError("expected dirstate-v2 marker")
        uuid = data[HEADER.size : HEADER.size + uuid_size]
        p1 = p1[: nodeconstants.nodelen]
        p2 = p2[: nodeconstants.nodelen]
        return cls((p1, p2), data_size, meta, uuid)

    def serialize(self):
        p1, p2 = self.parents
        header = HEADER.pack(
            V2_FORMAT_MARKER,
            p1,
            p2,
            self.data_size,
            self.tree_metadata,
            len(self.uuid),
        )
        return header + self.uuid

    def data_filename(self):
        return self.data_filename_pattern % self.uuid
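To make the docket layout described above concrete, here is a minimal round-trip sketch (not part of the commit) using only the standard library; the parent node IDs, tree metadata, and UUID below are invented for illustration:

import struct

V2_FORMAT_MARKER = b"dirstate-v2\n"  # 12 bytes
TREE_METADATA_SIZE = 44

HEADER = struct.Struct(
    ">{}s32s32sL{}sB".format(len(V2_FORMAT_MARKER), TREE_METADATA_SIZE)
)
# Fixed-size prefix: 12 + 32 + 32 + 4 + 44 + 1 = 125 bytes.
assert HEADER.size == 125

p1 = b"\x11" * 20 + b"\x00" * 12   # 20-byte node ID, null-padded to 32 bytes
p2 = b"\x00" * 32                  # null second parent
meta = b"\x00" * TREE_METADATA_SIZE  # opaque tree metadata
uuid = b"0123456789abcdef"         # variable-length tail

# serialize: fixed header followed by the UUID
blob = HEADER.pack(V2_FORMAT_MARKER, p1, p2, 4096, meta, len(uuid)) + uuid

# parse reverses the operation: unpack the fixed header, then slice the tail
marker, rp1, rp2, data_size, rmeta, uuid_size = HEADER.unpack_from(blob)
assert marker == V2_FORMAT_MARKER and data_size == 4096
assert blob[HEADER.size : HEADER.size + uuid_size] == uuid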
new file: help text on obsolescence markers
@@ -0,0 +1,56 @@
Obsolescence markers make it possible to mark changesets that have been
deleted or superseded in a new version of the changeset.

Unlike the previous way of handling such changes, by stripping the old
changesets from the repository, obsolescence markers can be propagated
between repositories. This allows for a safe and simple way of exchanging
mutable history and altering it after the fact. Changeset phases are
respected, such that only draft and secret changesets can be altered (see
:hg:`help phases` for details).

Obsolescence is tracked using "obsolescence markers", a piece of metadata
tracking which changesets have been made obsolete, potential successors for
a given changeset, the moment the changeset was marked as obsolete, and the
user who performed the rewriting operation. The markers are stored
separately from standard changeset data and can be exchanged without any of
the precursor changesets, preventing unnecessary exchange of obsolescence
data.

The complete set of obsolescence markers describes a history of changeset
modifications that is orthogonal to the repository history of file
modifications. This changeset history allows for detection and automatic
resolution of edge cases arising from multiple users rewriting the same part
of history concurrently.

Current feature status
======================

This feature is still in development.

Instability
===========

Rewriting changesets might introduce instability.

There are two main kinds of instability: orphaning and diverging.

Orphans are changesets left behind when their ancestors are rewritten.
Divergence has two variants:

* Content-divergence occurs when independent rewrites of the same changesets
  lead to different results.

* Phase-divergence occurs when the old (obsolete) version of a changeset
  becomes public.

It is possible to prevent local creation of orphans by using the following config::

  [experimental]
  evolution.createmarkers = true
  evolution.exchange = true

You can also enable that option explicitly::

  [experimental]
  evolution.createmarkers = true
  evolution.exchange = true
  evolution.allowunstable = true
[16 further new files (mode 100644) and 2 new executable files (mode 100755) are part of this commit; their contents were truncated in the source view.]
@@ -925,10 +925,15 @@ def ensure_linux_dev_ami(c: AWSConnection…
     requirements3_path = (
         pathlib.Path(__file__).parent.parent / 'linux-requirements-py3.txt'
     )
+    requirements35_path = (
+        pathlib.Path(__file__).parent.parent / 'linux-requirements-py3.5.txt'
+    )
     with requirements2_path.open('r', encoding='utf-8') as fh:
         requirements2 = fh.read()
     with requirements3_path.open('r', encoding='utf-8') as fh:
         requirements3 = fh.read()
+    with requirements35_path.open('r', encoding='utf-8') as fh:
+        requirements35 = fh.read()
 
     # Compute a deterministic fingerprint to determine whether image needs to
     # be regenerated.
@@ -938,6 +943,7 @@ def ensure_linux_dev_ami(c: AWSConnection…
             'bootstrap_script': BOOTSTRAP_DEBIAN,
             'requirements_py2': requirements2,
             'requirements_py3': requirements3,
+            'requirements_py35': requirements35,
         }
     )
 
@@ -979,6 +985,10 @@ def ensure_linux_dev_ami(c: AWSConnection…
         fh.write(requirements3)
         fh.chmod(0o0700)
 
+    with sftp.open('%s/requirements-py3.5.txt' % home, 'wb') as fh:
+        fh.write(requirements35)
+        fh.chmod(0o0700)
+
     print('executing bootstrap')
     chan, stdin, stdout = ssh_exec_command(
         client, '%s/bootstrap' % home
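The hunks above only show the fingerprint's inputs; the hashing helper itself sits outside this diff. A sketch of how such a deterministic fingerprint can be computed (the function name and inputs here are illustrative, not the module's actual helper):

import hashlib
import json

def fingerprint(inputs: dict) -> str:
    # sort_keys yields a canonical byte stream regardless of dict insertion
    # order, so the digest changes only when an input actually changes.
    canonical = json.dumps(inputs, sort_keys=True).encode('utf-8')
    return hashlib.sha256(canonical).hexdigest()

# Any edit to a requirements file (including the new py3.5 pin set)
# changes the digest, signaling that the dev AMI must be rebuilt.
fp = fingerprint(
    {
        'bootstrap_script': '...contents of BOOTSTRAP_DEBIAN...',
        'requirements_py2': '...py2 requirements contents...',
        'requirements_py3': '...py3 requirements contents...',
        'requirements_py35': '...py3.5 requirements contents...',
    }
)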
@@ -26,11 +26,11 @@ DISTROS = {
 
 INSTALL_PYTHONS = r'''
 PYENV2_VERSIONS="2.7.17 pypy2.7-7.2.0"
-PYENV3_VERSIONS="3.5.10 3.6.1…
+PYENV3_VERSIONS="3.5.10 3.6.13 3.7.10 3.8.10 3.9.5 pypy3.5-7.0.0 pypy3.6-7.3.3 pypy3.7-7.3.3"
 
 git clone https://github.com/pyenv/pyenv.git /hgdev/pyenv
 pushd /hgdev/pyenv
-git checkout 8ac91b4fd678a8c04356f5ec85cfcd565c265e9a
+git checkout 328fd42c3a2fbf14ae46dae2021a087fe27ba7e2
 popd
 
 export PYENV_ROOT="/hgdev/pyenv"
@@ -56,7 +56,20 @@ done
 for v in ${PYENV3_VERSIONS}; do
     pyenv install -v ${v}
     ${PYENV_ROOT}/versions/${v}/bin/python get-pip.py
-    ${PYENV_ROOT}/versions/${v}/bin/pip install -r /hgdev/requirements-py3.txt
+
+    case ${v} in
+        3.5.*)
+            REQUIREMENTS=requirements-py3.5.txt
+            ;;
+        pypy3.5*)
+            REQUIREMENTS=requirements-py3.5.txt
+            ;;
+        *)
+            REQUIREMENTS=requirements-py3.txt
+            ;;
+    esac
+
+    ${PYENV_ROOT}/versions/${v}/bin/pip install -r /hgdev/${REQUIREMENTS}
 done
 
 pyenv global ${PYENV2_VERSIONS} ${PYENV3_VERSIONS} system
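The case statement encodes a simple rule: interpreters matching 3.5.* or pypy3.5* get the frozen py3.5 pin set, everything else the main py3 set. The same rule expressed in Python, for clarity (a sketch, not part of the commit):

def requirements_for(version):
    # Mirrors the shell `case` above: only CPython 3.5.x and pypy3.5
    # builds use the dedicated requirements-py3.5.txt pin set.
    if version.startswith('3.5.') or version.startswith('pypy3.5'):
        return 'requirements-py3.5.txt'
    return 'requirements-py3.txt'

assert requirements_for('3.5.10') == 'requirements-py3.5.txt'
assert requirements_for('pypy3.5-7.0.0') == 'requirements-py3.5.txt'
assert requirements_for('3.9.5') == 'requirements-py3.txt'
assert requirements_for('pypy3.6-7.3.3') == 'requirements-py3.txt'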
@@ -64,6 +77,18 @@ pyenv global ${PYENV2_VERSIONS} ${PYENV3…
     '\r\n', '\n'
 )
 
+INSTALL_PYOXIDIZER = r'''
+PYOXIDIZER_VERSION=0.16.0
+PYOXIDIZER_SHA256=8875471c270312fbb934007fd30f65f1904cc0f5da6188d61c90ed2129b9f9c1
+PYOXIDIZER_URL=https://github.com/indygreg/PyOxidizer/releases/download/pyoxidizer%2F${PYOXIDIZER_VERSION}/pyoxidizer-${PYOXIDIZER_VERSION}-linux_x86_64.zip
+
+wget -O pyoxidizer.zip --progress dot:mega ${PYOXIDIZER_URL}
+echo "${PYOXIDIZER_SHA256} pyoxidizer.zip" | sha256sum --check -
+
+unzip pyoxidizer.zip
+chmod +x pyoxidizer
+sudo mv pyoxidizer /usr/local/bin/pyoxidizer
+'''
 
 INSTALL_RUST = r'''
 RUSTUP_INIT_SHA256=a46fe67199b7bcbbde2dcbc23ae08db6f29883e260e23899a88b9073effc9076
@@ -72,10 +97,8 @@ echo "${RUSTUP_INIT_SHA256} rustup-init"
 
 chmod +x rustup-init
 sudo -H -u hg -g hg ./rustup-init -y
-sudo -H -u hg -g hg /home/hg/.cargo/bin/rustup install 1.…
+sudo -H -u hg -g hg /home/hg/.cargo/bin/rustup install 1.41.1 1.52.0
 sudo -H -u hg -g hg /home/hg/.cargo/bin/rustup component add clippy
-
-sudo -H -u hg -g hg /home/hg/.cargo/bin/cargo install --version 0.10.3 pyoxidizer
 '''
 
 
@@ -306,9 +329,9 @@ sudo mkdir /hgdev
 sudo chown `whoami` /hgdev
 
 {install_rust}
+{install_pyoxidizer}
 
-cp requirements-…
-cp requirements-py3.txt /hgdev/requirements-py3.txt
+cp requirements-*.txt /hgdev/
 
 # Disable the pip version check because it uses the network and can
 # be annoying.
@@ -332,6 +355,7 @@ sudo chown -R hg:hg /hgdev
 '''.lstrip()
     .format(
         install_rust=INSTALL_RUST,
+        install_pyoxidizer=INSTALL_PYOXIDIZER,
         install_pythons=INSTALL_PYTHONS,
         bootstrap_virtualenv=BOOTSTRAP_VIRTUALENV,
     )
modified: linux-requirements-py3.txt
@@ -6,208 +6,299 @@
 #
 appdirs==1.4.4 \
     --hash=sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41 \
-    --hash=sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128 \
+    --hash=sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128
     # via black
-astroid==2.4.2 \
-    --hash=sha256:2f4078c2a41bf377eea06d71c9d2ba4eb8f6b1af2135bec27bbbb7d8f12bb703 \
-    --hash=sha256:bc58d83eb610252fd8de6363e39d4f1d0619c894b0ed24603b881c02e64c7386 \
+astroid==2.5.6 \
+    --hash=sha256:4db03ab5fc3340cf619dbc25e42c2cc3755154ce6009469766d7143d1fc2ee4e \
+    --hash=sha256:8a398dfce302c13f14bab13e2b14fe385d32b73f4e4853b9bdfb64598baa1975
     # via pylint
-attrs==2… \
-    --hash=sha256:26b54ddbbb9ee1d34d5d3668dd37d6cf74990ab23c828c2888dccdceee395594 \
-    --hash=sha256:fce7fc47dfc976152e82d53ff92fa0407700c21acd20886a13777a0d20e655dc \
+attrs==21.1.0 \
+    --hash=sha256:3901be1cb7c2a780f14668691474d9252c070a756be0a9ead98cfeabfa11aeb8 \
+    --hash=sha256:8ee1e5f5a1afc5b19bdfae4fdf0c35ed324074bdce3500c939842c8f818645d9
     # via black
 black==19.10b0 ; python_version >= "3.6" and platform_python_implementation != "PyPy" \
     --hash=sha256:1b30e59be925fafc1ee4565e5e08abef6b03fe455102883820fe5ee2e4734e0b \
-    --hash=sha256:c2edb73a08e9e0e6f65a0e6af18b059b8b1cdd5bef997d7a0b181df93dc81539 \
+    --hash=sha256:c2edb73a08e9e0e6f65a0e6af18b059b8b1cdd5bef997d7a0b181df93dc81539
     # via -r contrib/automation/linux-requirements.txt.in
 click==7.1.2 \
     --hash=sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a \
-    --hash=sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc \
+    --hash=sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc
     # via black
-docutils==0.1… \
-    --hash=sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af \
-    --hash=sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc \
+docutils==0.17.1 \
+    --hash=sha256:686577d2e4c32380bb50cbb22f575ed742d58168cee37e99117a854bcd88f125 \
+    --hash=sha256:cf316c8370a737a022b72b56874f6602acf974a37a9fba42ec2876387549fc61
     # via -r contrib/automation/linux-requirements.txt.in
 fuzzywuzzy==0.18.0 \
     --hash=sha256:45016e92264780e58972dca1b3d939ac864b78437422beecebb3095f8efd00e8 \
-    --hash=sha256:928244b28db720d1e0ee7587acf660ea49d7e4c632569cad4f1cd7e68a5f0993 \
+    --hash=sha256:928244b28db720d1e0ee7587acf660ea49d7e4c632569cad4f1cd7e68a5f0993
     # via -r contrib/automation/linux-requirements.txt.in
-idna==… \
-    --hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 \
-    --hash=sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0 \
+idna==3.1 \
+    --hash=sha256:5205d03e7bcbb919cc9c19885f9920d622ca52448306f2377daede5cf3faac16 \
+    --hash=sha256:c5b02147e01ea9920e6b0a3f1f7bb833612d507592c837a6c49552768f4054e1
     # via yarl
 isort==4.3.21 \
     --hash=sha256:54da7e92468955c4fceacd0c86bd0ec997b0e1ee80d97f67c35a78b719dccab1 \
-    --hash=sha256:6e811fcb295968434526407adb8796944f1988c5b65e8139058f2014cbe100fd \
-    # via -r contrib/automation/linux-requirements.txt.in, pylint
-lazy-object-proxy==1.4.3 \
-    --hash=sha256:0c4b206227a8097f05c4dbdd323c50edf81f15db3b8dc064d08c62d37e1a504d \
-    --hash=sha256:194d092e6f246b906e8f70884e620e459fc54db3259e60cf69a4d66c3fda3449 \
-    --hash=sha256:1be7e4c9f96948003609aa6c974ae59830a6baecc5376c25c92d7d697e684c08 \
-    --hash=sha256:4677f594e474c91da97f489fea5b7daa17b5517190899cf213697e48d3902f5a \
-    --hash=sha256:48dab84ebd4831077b150572aec802f303117c8cc5c871e182447281ebf3ac50 \
-    --hash=sha256:5541cada25cd173702dbd99f8e22434105456314462326f06dba3e180f203dfd \
-    --hash=sha256:59f79fef100b09564bc2df42ea2d8d21a64fdcda64979c0fa3db7bdaabaf6239 \
-    --hash=sha256:8d859b89baf8ef7f8bc6b00aa20316483d67f0b1cbf422f5b4dc56701c8f2ffb \
-    --hash=sha256:9254f4358b9b541e3441b007a0ea0764b9d056afdeafc1a5569eee1cc6c1b9ea \
-    --hash=sha256:9651375199045a358eb6741df3e02a651e0330be090b3bc79f6d0de31a80ec3e \
-    --hash=sha256:97bb5884f6f1cdce0099f86b907aa41c970c3c672ac8b9c8352789e103cf3156 \
-    --hash=sha256:9b15f3f4c0f35727d3a0fba4b770b3c4ebbb1fa907dbcc046a1d2799f3edd142 \
-    --hash=sha256:a2238e9d1bb71a56cd710611a1614d1194dc10a175c1e08d75e1a7bcc250d442 \
-    --hash=sha256:a6ae12d08c0bf9909ce12385803a543bfe99b95fe01e752536a60af2b7797c62 \
-    --hash=sha256:ca0a928a3ddbc5725be2dd1cf895ec0a254798915fb3a36af0964a0a4149e3db \
-    --hash=sha256:cb2c7c57005a6804ab66f106ceb8482da55f5314b7fcb06551db1edae4ad1531 \
-    --hash=sha256:d74bb8693bf9cf75ac3b47a54d716bbb1a92648d5f781fc799347cfc95952383 \
-    --hash=sha256:d945239a5639b3ff35b70a88c5f2f491913eb94871780ebfabb2568bd58afc5a \
-    --hash=sha256:eba7011090323c1dadf18b3b689845fd96a61ba0a1dfbd7f24b921398affc357 \
-    --hash=sha256:efa1909120ce98bbb3777e8b6f92237f5d5c8ea6758efea36a473e1d38f7d3e4 \
-    --hash=sha256:f3900e8a5de27447acbf900b4750b0ddfd7ec1ea7fbaf11dfa911141bc522af0 \
+    --hash=sha256:6e811fcb295968434526407adb8796944f1988c5b65e8139058f2014cbe100fd
+    # via
+    #   -r contrib/automation/linux-requirements.txt.in
+    #   pylint
+lazy-object-proxy==1.6.0 \
+    --hash=sha256:17e0967ba374fc24141738c69736da90e94419338fd4c7c7bef01ee26b339653 \
+    --hash=sha256:1fee665d2638491f4d6e55bd483e15ef21f6c8c2095f235fef72601021e64f61 \
+    --hash=sha256:22ddd618cefe54305df49e4c069fa65715be4ad0e78e8d252a33debf00f6ede2 \
+    --hash=sha256:24a5045889cc2729033b3e604d496c2b6f588c754f7a62027ad4437a7ecc4837 \
+    --hash=sha256:410283732af311b51b837894fa2f24f2c0039aa7f220135192b38fcc42bd43d3 \
+    --hash=sha256:4732c765372bd78a2d6b2150a6e99d00a78ec963375f236979c0626b97ed8e43 \
+    --hash=sha256:489000d368377571c6f982fba6497f2aa13c6d1facc40660963da62f5c379726 \
+    --hash=sha256:4f60460e9f1eb632584c9685bccea152f4ac2130e299784dbaf9fae9f49891b3 \
+    --hash=sha256:5743a5ab42ae40caa8421b320ebf3a998f89c85cdc8376d6b2e00bd12bd1b587 \
+    --hash=sha256:85fb7608121fd5621cc4377a8961d0b32ccf84a7285b4f1d21988b2eae2868e8 \
+    --hash=sha256:9698110e36e2df951c7c36b6729e96429c9c32b3331989ef19976592c5f3c77a \
+    --hash=sha256:9d397bf41caad3f489e10774667310d73cb9c4258e9aed94b9ec734b34b495fd \
+    --hash=sha256:b579f8acbf2bdd9ea200b1d5dea36abd93cabf56cf626ab9c744a432e15c815f \
+    --hash=sha256:b865b01a2e7f96db0c5d12cfea590f98d8c5ba64ad222300d93ce6ff9138bcad \
+    --hash=sha256:bf34e368e8dd976423396555078def5cfc3039ebc6fc06d1ae2c5a65eebbcde4 \
+    --hash=sha256:c6938967f8528b3668622a9ed3b31d145fab161a32f5891ea7b84f6b790be05b \
+    --hash=sha256:d1c2676e3d840852a2de7c7d5d76407c772927addff8d742b9808fe0afccebdf \
+    --hash=sha256:d7124f52f3bd259f510651450e18e0fd081ed82f3c08541dffc7b94b883aa981 \
+    --hash=sha256:d900d949b707778696fdf01036f58c9876a0d8bfe116e8d220cfd4b15f14e741 \
+    --hash=sha256:ebfd274dcd5133e0afae738e6d9da4323c3eb021b3e13052d8cbd0e457b1256e \
+    --hash=sha256:ed361bb83436f117f9917d282a456f9e5009ea12fd6de8742d1a4752c3017e93 \
+    --hash=sha256:f5144c75445ae3ca2057faac03fda5a902eff196702b0a24daf1d6ce0650514b
     # via astroid
 mccabe==0.6.1 \
     --hash=sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42 \
-    --hash=sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f \
+    --hash=sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f
     # via pylint
-multidict==… \
-    --hash=sha256:1ece5a3369835c20ed57adadc663400b5525904e53bae59ec854a5d36b39b21a \
-    --hash=sha256:275ca32383bc5d1894b6975bb4ca6a7ff16ab76fa622967625baeebcf8079000 \
-    --hash=sha256:3750f2205b800aac4bb03b5ae48025a64e474d2c6cc79547988ba1d4122a09e2 \
-    --hash=sha256:4538273208e7294b2659b1602490f4ed3ab1c8cf9dbdd817e0e9db8e64be2507 \
-    --hash=sha256:5141c13374e6b25fe6bf092052ab55c0c03d21bd66c94a0e3ae371d3e4d865a5 \
-    --hash=sha256:51a4d210404ac61d32dada00a50ea7ba412e6ea945bbe992e4d7a595276d2ec7 \
-    --hash=sha256:5cf311a0f5ef80fe73e4f4c0f0998ec08f954a6ec72b746f3c179e37de1d210d \
-    --hash=sha256:6513728873f4326999429a8b00fc7ceddb2509b01d5fd3f3be7881a257b8d463 \
-    --hash=sha256:7388d2ef3c55a8ba80da62ecfafa06a1c097c18032a501ffd4cabbc52d7f2b19 \
-    --hash=sha256:9456e90649005ad40558f4cf51dbb842e32807df75146c6d940b6f5abb4a78f3 \
-    --hash=sha256:c026fe9a05130e44157b98fea3ab12969e5b60691a276150db9eda71710cd10b \
-    --hash=sha256:d14842362ed4cf63751648e7672f7174c9818459d169231d03c56e84daf90b7c \
-    --hash=sha256:e0d072ae0f2a179c375f67e3da300b47e1a83293c554450b29c900e50afaae87 \
-    --hash=sha256:f07acae137b71af3bb548bd8da720956a3bc9f9a0b87733e0899226a2317aeb7 \
-    --hash=sha256:fbb77a75e529021e7c4a8d4e823d88ef4d23674a202be4f5addffc72cbb91430 \
-    --hash=sha256:fcfbb44c59af3f8ea984de67ec7c306f618a3ec771c2843804069917a8f2e255 \
-    --hash=sha256:feed85993dbdb1dbc29102f50bca65bdc68f2c0c8d352468c25b54874f23c39d \
+multidict==5.1.0 \
+    --hash=sha256:018132dbd8688c7a69ad89c4a3f39ea2f9f33302ebe567a879da8f4ca73f0d0a \
+    --hash=sha256:051012ccee979b2b06be928a6150d237aec75dd6bf2d1eeeb190baf2b05abc93 \
+    --hash=sha256:05c20b68e512166fddba59a918773ba002fdd77800cad9f55b59790030bab632 \
+    --hash=sha256:07b42215124aedecc6083f1ce6b7e5ec5b50047afa701f3442054373a6deb656 \
+    --hash=sha256:0e3c84e6c67eba89c2dbcee08504ba8644ab4284863452450520dad8f1e89b79 \
+    --hash=sha256:0e929169f9c090dae0646a011c8b058e5e5fb391466016b39d21745b48817fd7 \
+    --hash=sha256:1ab820665e67373de5802acae069a6a05567ae234ddb129f31d290fc3d1aa56d \
+    --hash=sha256:25b4e5f22d3a37ddf3effc0710ba692cfc792c2b9edfb9c05aefe823256e84d5 \
+    --hash=sha256:2e68965192c4ea61fff1b81c14ff712fc7dc15d2bd120602e4a3494ea6584224 \
+    --hash=sha256:2f1a132f1c88724674271d636e6b7351477c27722f2ed789f719f9e3545a3d26 \
+    --hash=sha256:37e5438e1c78931df5d3c0c78ae049092877e5e9c02dd1ff5abb9cf27a5914ea \
+    --hash=sha256:3a041b76d13706b7fff23b9fc83117c7b8fe8d5fe9e6be45eee72b9baa75f348 \
+    --hash=sha256:3a4f32116f8f72ecf2a29dabfb27b23ab7cdc0ba807e8459e59a93a9be9506f6 \
+    --hash=sha256:46c73e09ad374a6d876c599f2328161bcd95e280f84d2060cf57991dec5cfe76 \
+    --hash=sha256:46dd362c2f045095c920162e9307de5ffd0a1bfbba0a6e990b344366f55a30c1 \
+    --hash=sha256:4b186eb7d6ae7c06eb4392411189469e6a820da81447f46c0072a41c748ab73f \
+    --hash=sha256:54fd1e83a184e19c598d5e70ba508196fd0bbdd676ce159feb412a4a6664f952 \
+    --hash=sha256:585fd452dd7782130d112f7ddf3473ffdd521414674c33876187e101b588738a \
+    --hash=sha256:5cf3443199b83ed9e955f511b5b241fd3ae004e3cb81c58ec10f4fe47c7dce37 \
+    --hash=sha256:6a4d5ce640e37b0efcc8441caeea8f43a06addace2335bd11151bc02d2ee31f9 \
+    --hash=sha256:7df80d07818b385f3129180369079bd6934cf70469f99daaebfac89dca288359 \
+    --hash=sha256:806068d4f86cb06af37cd65821554f98240a19ce646d3cd24e1c33587f313eb8 \
+    --hash=sha256:830f57206cc96ed0ccf68304141fec9481a096c4d2e2831f311bde1c404401da \
+    --hash=sha256:929006d3c2d923788ba153ad0de8ed2e5ed39fdbe8e7be21e2f22ed06c6783d3 \
+    --hash=sha256:9436dc58c123f07b230383083855593550c4d301d2532045a17ccf6eca505f6d \
+    --hash=sha256:9dd6e9b1a913d096ac95d0399bd737e00f2af1e1594a787e00f7975778c8b2bf \
+    --hash=sha256:ace010325c787c378afd7f7c1ac66b26313b3344628652eacd149bdd23c68841 \
+    --hash=sha256:b47a43177a5e65b771b80db71e7be76c0ba23cc8aa73eeeb089ed5219cdbe27d \
+    --hash=sha256:b797515be8743b771aa868f83563f789bbd4b236659ba52243b735d80b29ed93 \
+    --hash=sha256:b7993704f1a4b204e71debe6095150d43b2ee6150fa4f44d6d966ec356a8d61f \
+    --hash=sha256:d5c65bdf4484872c4af3150aeebe101ba560dcfb34488d9a8ff8dbcd21079647 \
+    --hash=sha256:d81eddcb12d608cc08081fa88d046c78afb1bf8107e6feab5d43503fea74a635 \
+    --hash=sha256:dc862056f76443a0db4509116c5cd480fe1b6a2d45512a653f9a855cc0517456 \
+    --hash=sha256:ecc771ab628ea281517e24fd2c52e8f31c41e66652d07599ad8818abaad38cda \
+    --hash=sha256:f200755768dc19c6f4e2b672421e0ebb3dd54c38d5a4f262b872d8cfcc9e93b5 \
+    --hash=sha256:f21756997ad8ef815d8ef3d34edd98804ab5ea337feedcd62fb52d22bf531281 \
+    --hash=sha256:fc13a9524bc18b6fb6e0dbec3533ba0496bbed167c56d0aabefd965584557d80
     # via yarl
-pathspec==0.8.… \
-    --hash=sha256:7d91249d21749788d07a2d0f94147accd8f845507400749ea19c1ec9054a12b0 \
-    --hash=sha256:da45173eb3a6f2a5a487efba21f050af2b41948be6ab52b6a1e3ff22bb8b7061 \
+pathspec==0.8.1 \
+    --hash=sha256:86379d6b86d75816baba717e64b1a3a3469deb93bb76d613c9ce79edc5cb68fd \
+    --hash=sha256:aa0cb481c4041bf52ffa7b0d8fa6cd3e88a2ca4879c533c9153882ee2556790d
     # via black
-pyflakes==2.… \
-    --hash=sha256:0d94e0e05a19e57a99444b6ddcf9a6eb2e5c68d3ca1e98e90707af8152c90a92 \
-    --hash=sha256:35b2d75ee967ea93b55750aa9edbbf72813e06a66ba54438df2cfac9e3c27fc8 \
+pyflakes==2.3.1 \
+    --hash=sha256:7893783d01b8a89811dd72d7dfd4d84ff098e5eed95cfa8905b22bbffe52efc3 \
+    --hash=sha256:f5bc8ecabc05bb9d291eb5203d6810b49040f6ff446a756326104746cc00c1db
     # via -r contrib/automation/linux-requirements.txt.in
-pygments==2.… \
-    --hash=sha256:307543fe65c0947b126e83dd5a61bd8acbd84abec11f43caebaf5534cbc17998 \
-    --hash=sha256:926c3f319eda178d1bd90851e4317e6d8cdb5e292a3386aac9bd75eca29cf9c7 \
+pygments==2.9.0 \
+    --hash=sha256:a18f47b506a429f6f4b9df81bb02beab9ca21d0a5fee38ed15aef65f0545519f \
+    --hash=sha256:d66e804411278594d764fc69ec36ec13d9ae9147193a1740cd34d272ca383b8e
     # via -r contrib/automation/linux-requirements.txt.in
-pylint==2.… \
-    --hash=sha256:bb4a908c9dadbc3aac18860550e870f58e1a02c9f2c204fdf5693d73be061210 \
-    --hash=sha256:bfe68f020f8a0fece830a22dd4d5dddb4ecc6137db04face4c3420a46a52239f \
+pylint==2.8.2 \
+    --hash=sha256:586d8fa9b1891f4b725f587ef267abe2a1bad89d6b184520c7f07a253dd6e217 \
+    --hash=sha256:f7e2072654a6b6afdf5e2fb38147d3e2d2d43c89f648637baab63e026481279b
+    # via -r contrib/automation/linux-requirements.txt.in
+python-levenshtein==0.12.2 \
+    --hash=sha256:dc2395fbd148a1ab31090dd113c366695934b9e85fe5a4b2a032745efd0346f6
     # via -r contrib/automation/linux-requirements.txt.in
-python-levenshtein==0.12.0 \
-    --hash=sha256:033a11de5e3d19ea25c9302d11224e1a1898fe5abd23c61c7c360c25195e3eb1 \
-    # via -r contrib/automation/linux-requirements.txt.in
-pyyaml==5.3.1 \
-    --hash=sha256:06a0d7ba600ce0b2d2fe2e78453a470b5a6e000a985dd4a4e54e436cc36b0e97 \
-    --hash=sha256:240097ff019d7c70a4922b6869d8a86407758333f02203e0fc6ff79c5dcede76 \
-    --hash=sha256:4f4b913ca1a7319b33cfb1369e91e50354d6f07a135f3b901aca02aa95940bd2 \
-    --hash=sha256:69f00dca373f240f842b2931fb2c7e14ddbacd1397d57157a9b005a6a9942648 \
-    --hash=sha256:73f099454b799e05e5ab51423c7bcf361c58d3206fa7b0d555426b1f4d9a3eaf \
-    --hash=sha256:74809a57b329d6cc0fdccee6318f44b9b8649961fa73144a98735b0aaf029f1f \
-    --hash=sha256:7739fc0fa8205b3ee8808aea45e968bc90082c10aef6ea95e855e10abf4a37b2 \
-    --hash=sha256:95f71d2af0ff4227885f7a6605c37fd53d3a106fcab511b8860ecca9fcf400ee \
-    --hash=sha256:b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d \
-    --hash=sha256:cc8955cfbfc7a115fa81d85284ee61147059a753344bc51098f3ccd69b0d7e0c \
-    --hash=sha256:d13155f591e6fcc1ec3b30685d50bf0711574e2c0dfffd7644babf8b5102ca1a \
+pyyaml==5.4.1 \
+    --hash=sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf \
+    --hash=sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696 \
+    --hash=sha256:129def1b7c1bf22faffd67b8f3724645203b79d8f4cc81f674654d9902cb4393 \
+    --hash=sha256:294db365efa064d00b8d1ef65d8ea2c3426ac366c0c4368d930bf1c5fb497f77 \
+    --hash=sha256:3b2b1824fe7112845700f815ff6a489360226a5609b96ec2190a45e62a9fc922 \
+    --hash=sha256:3bd0e463264cf257d1ffd2e40223b197271046d09dadf73a0fe82b9c1fc385a5 \
+    --hash=sha256:4465124ef1b18d9ace298060f4eccc64b0850899ac4ac53294547536533800c8 \
+    --hash=sha256:49d4cdd9065b9b6e206d0595fee27a96b5dd22618e7520c33204a4a3239d5b10 \
+    --hash=sha256:4e0583d24c881e14342eaf4ec5fbc97f934b999a6828693a99157fde912540cc \
+    --hash=sha256:5accb17103e43963b80e6f837831f38d314a0495500067cb25afab2e8d7a4018 \
+    --hash=sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e \
+    --hash=sha256:6c78645d400265a062508ae399b60b8c167bf003db364ecb26dcab2bda048253 \
+    --hash=sha256:72a01f726a9c7851ca9bfad6fd09ca4e090a023c00945ea05ba1638c09dc3347 \
+    --hash=sha256:74c1485f7707cf707a7aef42ef6322b8f97921bd89be2ab6317fd782c2d53183 \
+    --hash=sha256:895f61ef02e8fed38159bb70f7e100e00f471eae2bc838cd0f4ebb21e28f8541 \
+    --hash=sha256:8c1be557ee92a20f184922c7b6424e8ab6691788e6d86137c5d93c1a6ec1b8fb \
+    --hash=sha256:bb4191dfc9306777bc594117aee052446b3fa88737cd13b7188d0e7aa8162185 \
+    --hash=sha256:bfb51918d4ff3d77c1c856a9699f8492c612cde32fd3bcd344af9be34999bfdc \
+    --hash=sha256:c20cfa2d49991c8b4147af39859b167664f2ad4561704ee74c1de03318e898db \
+    --hash=sha256:cb333c16912324fd5f769fff6bc5de372e9e7a202247b48870bc251ed40239aa \
+    --hash=sha256:d2d9808ea7b4af864f35ea216be506ecec180628aced0704e34aca0b040ffe46 \
+    --hash=sha256:d483ad4e639292c90170eb6f7783ad19490e7a8defb3e46f97dfe4bacae89122 \
+    --hash=sha256:dd5de0646207f053eb0d6c74ae45ba98c3395a571a2891858e87df7c9b9bd51b \
+    --hash=sha256:e1d4970ea66be07ae37a3c2e48b5ec63f7ba6804bdddfdbd3cfd954d25a82e63 \
+    --hash=sha256:e4fac90784481d221a8e4b1162afa7c47ed953be40d31ab4629ae917510051df \
+    --hash=sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc \
+    --hash=sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247 \
+    --hash=sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6 \
+    --hash=sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0
     # via vcrpy
-regex==202… \
-    --hash=sha256:088afc8c63e7bd187a3c70a94b9e50ab3f17e1d3f52a32750b5b77dbe99ef5ef \
-    --hash=sha256:1fe0a41437bbd06063aa184c34804efa886bcc128222e9916310c92cd54c3b4c \
-    --hash=sha256:3d20024a70b97b4f9546696cbf2fd30bae5f42229fbddf8661261b1eaff0deb7 \
-    --hash=sha256:41bb65f54bba392643557e617316d0d899ed5b4946dccee1cb6696152b29844b \
-    --hash=sha256:4318d56bccfe7d43e5addb272406ade7a2274da4b70eb15922a071c58ab0108c \
-    --hash=sha256:4707f3695b34335afdfb09be3802c87fa0bc27030471dbc082f815f23688bc63 \
-    --hash=sha256:49f23ebd5ac073765ecbcf046edc10d63dcab2f4ae2bce160982cb30df0c0302 \
-    --hash=sha256:5533a959a1748a5c042a6da71fe9267a908e21eded7a4f373efd23a2cbdb0ecc \
-    --hash=sha256:5d892a4f1c999834eaa3c32bc9e8b976c5825116cde553928c4c8e7e48ebda67 \
-    --hash=sha256:5f18875ac23d9aa2f060838e8b79093e8bb2313dbaaa9f54c6d8e52a5df097be \
-    --hash=sha256:60b0e9e6dc45683e569ec37c55ac20c582973841927a85f2d8a7d20ee80216ab \
-    --hash=sha256:816064fc915796ea1f26966163f6845de5af78923dfcecf6551e095f00983650 \
-    --hash=sha256:84cada8effefe9a9f53f9b0d2ba9b7b6f5edf8d2155f9fdbe34616e06ececf81 \
-    --hash=sha256:84e9407db1b2eb368b7ecc283121b5e592c9aaedbe8c78b1a2f1102eb2e21d19 \
-    --hash=sha256:8d69cef61fa50c8133382e61fd97439de1ae623fe943578e477e76a9d9471637 \
-    --hash=sha256:9a02d0ae31d35e1ec12a4ea4d4cca990800f66a917d0fb997b20fbc13f5321fc \
-    --hash=sha256:9bc13e0d20b97ffb07821aa3e113f9998e84994fe4d159ffa3d3a9d1b805043b \
-    --hash=sha256:a6f32aea4260dfe0e55dc9733ea162ea38f0ea86aa7d0f77b15beac5bf7b369d \
-    --hash=sha256:ae91972f8ac958039920ef6e8769277c084971a142ce2b660691793ae44aae6b \
-    --hash=sha256:c570f6fa14b9c4c8a4924aaad354652366577b4f98213cf76305067144f7b100 \
-    --hash=sha256:c9443124c67b1515e4fe0bb0aa18df640965e1030f468a2a5dc2589b26d130ad \
-    --hash=sha256:d23a18037313714fb3bb5a94434d3151ee4300bae631894b1ac08111abeaa4a3 \
-    --hash=sha256:eaf548d117b6737df379fdd53bdde4f08870e66d7ea653e230477f071f861121 \
-    --hash=sha256:ebbe29186a3d9b0c591e71b7393f1ae08c83cb2d8e517d2a822b8f7ec99dfd8b \
-    --hash=sha256:eda4771e0ace7f67f58bc5b560e27fb20f32a148cbc993b0c3835970935c2707 \
-    --hash=sha256:f1b3afc574a3db3b25c89161059d857bd4909a1269b0b3cb3c904677c8c4a3f7 \
-    --hash=sha256:f2388013e68e750eaa16ccbea62d4130180c26abb1d8e5d584b9baf69672b30f \
+regex==2021.4.4 \
+    --hash=sha256:01afaf2ec48e196ba91b37451aa353cb7eda77efe518e481707e0515025f0cd5 \
+    --hash=sha256:11d773d75fa650cd36f68d7ca936e3c7afaae41b863b8c387a22aaa78d3c5c79 \
+    --hash=sha256:18c071c3eb09c30a264879f0d310d37fe5d3a3111662438889ae2eb6fc570c31 \
+    --hash=sha256:1e1c20e29358165242928c2de1482fb2cf4ea54a6a6dea2bd7a0e0d8ee321500 \
+    --hash=sha256:281d2fd05555079448537fe108d79eb031b403dac622621c78944c235f3fcf11 \
+    --hash=sha256:314d66636c494ed9c148a42731b3834496cc9a2c4251b1661e40936814542b14 \
+    --hash=sha256:32e65442138b7b76dd8173ffa2cf67356b7bc1768851dded39a7a13bf9223da3 \
+    --hash=sha256:339456e7d8c06dd36a22e451d58ef72cef293112b559010db3d054d5560ef439 \
+    --hash=sha256:3916d08be28a1149fb97f7728fca1f7c15d309a9f9682d89d79db75d5e52091c \
+    --hash=sha256:3a9cd17e6e5c7eb328517969e0cb0c3d31fd329298dd0c04af99ebf42e904f82 \
+    --hash=sha256:47bf5bf60cf04d72bf6055ae5927a0bd9016096bf3d742fa50d9bf9f45aa0711 \
+    --hash=sha256:4c46e22a0933dd783467cf32b3516299fb98cfebd895817d685130cc50cd1093 \
+    --hash=sha256:4c557a7b470908b1712fe27fb1ef20772b78079808c87d20a90d051660b1d69a \
+    --hash=sha256:52ba3d3f9b942c49d7e4bc105bb28551c44065f139a65062ab7912bef10c9afb \
+    --hash=sha256:563085e55b0d4fb8f746f6a335893bda5c2cef43b2f0258fe1020ab1dd874df8 \
+    --hash=sha256:598585c9f0af8374c28edd609eb291b5726d7cbce16be6a8b95aa074d252ee17 \
+    --hash=sha256:619d71c59a78b84d7f18891fe914446d07edd48dc8328c8e149cbe0929b4e000 \
+    --hash=sha256:67bdb9702427ceddc6ef3dc382455e90f785af4c13d495f9626861763ee13f9d \
+    --hash=sha256:6d1b01031dedf2503631d0903cb563743f397ccaf6607a5e3b19a3d76fc10480 \
+    --hash=sha256:741a9647fcf2e45f3a1cf0e24f5e17febf3efe8d4ba1281dcc3aa0459ef424dc \
+    --hash=sha256:7c2a1af393fcc09e898beba5dd59196edaa3116191cc7257f9224beaed3e1aa0 \
+    --hash=sha256:7d9884d86dd4dd489e981d94a65cd30d6f07203d90e98f6f657f05170f6324c9 \
+    --hash=sha256:90f11ff637fe8798933fb29f5ae1148c978cccb0452005bf4c69e13db951e765 \
+    --hash=sha256:919859aa909429fb5aa9cf8807f6045592c85ef56fdd30a9a3747e513db2536e \
+    --hash=sha256:96fcd1888ab4d03adfc9303a7b3c0bd78c5412b2bfbe76db5b56d9eae004907a \
+    --hash=sha256:97f29f57d5b84e73fbaf99ab3e26134e6687348e95ef6b48cfd2c06807005a07 \
+    --hash=sha256:980d7be47c84979d9136328d882f67ec5e50008681d94ecc8afa8a65ed1f4a6f \
+    --hash=sha256:a91aa8619b23b79bcbeb37abe286f2f408d2f2d6f29a17237afda55bb54e7aac \
+    --hash=sha256:ade17eb5d643b7fead300a1641e9f45401c98eee23763e9ed66a43f92f20b4a7 \
+    --hash=sha256:b9c3db21af35e3b3c05764461b262d6f05bbca08a71a7849fd79d47ba7bc33ed \
+    --hash=sha256:bd28bc2e3a772acbb07787c6308e00d9626ff89e3bfcdebe87fa5afbfdedf968 \
+    --hash=sha256:bf5824bfac591ddb2c1f0a5f4ab72da28994548c708d2191e3b87dd207eb3ad7 \
+    --hash=sha256:c0502c0fadef0d23b128605d69b58edb2c681c25d44574fc673b0e52dce71ee2 \
+    --hash=sha256:c38c71df845e2aabb7fb0b920d11a1b5ac8526005e533a8920aea97efb8ec6a4 \
+    --hash=sha256:ce15b6d103daff8e9fee13cf7f0add05245a05d866e73926c358e871221eae87 \
+    --hash=sha256:d3029c340cfbb3ac0a71798100ccc13b97dddf373a4ae56b6a72cf70dfd53bc8 \
+    --hash=sha256:e512d8ef5ad7b898cdb2d8ee1cb09a8339e4f8be706d27eaa180c2f177248a10 \
+    --hash=sha256:e8e5b509d5c2ff12f8418006d5a90e9436766133b564db0abaec92fd27fcee29 \
+    --hash=sha256:ee54ff27bf0afaf4c3b3a62bcd016c12c3fdb4ec4f413391a90bd38bc3624605 \
+    --hash=sha256:fa4537fb4a98fe8fde99626e4681cc644bdcf2a795038533f9f711513a862ae6 \
+    --hash=sha256:fd45ff9293d9274c5008a2054ecef86a9bfe819a67c7be1afb65e69b405b3042
     # via black
-six==1.1… \
-    --hash=sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259 \
-    --hash=sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced \
-    # via …
-toml==0.10.… \
-    --hash=sha256:926b612be1e5ce0634a2ca03470f95169cf16f939018233a670519cb4ac58b0f \
-    --hash=sha256:bda89d5935c2eac546d648028b9901107a595863cb36bae0c73ac804a9b4ce88 \
-    # via black, pylint
-typed-ast==1.4.1 ; python_version >= "3.0" and platform_python_implementation != "PyPy" \
-    --hash=sha256:0666aa36131496aed8f7be0410ff974562ab7eeac11ef351def9ea6fa28f6355 \
-    --hash=sha256:0c2c07682d61a629b68433afb159376e24e5b2fd4641d35424e462169c0a7919 \
-    --hash=sha256:249862707802d40f7f29f6e1aad8d84b5aa9e44552d2cc17384b209f091276aa \
-    --hash=sha256:24995c843eb0ad11a4527b026b4dde3da70e1f2d8806c99b7b4a7cf491612652 \
-    --hash=sha256:269151951236b0f9a6f04015a9004084a5ab0d5f19b57de779f908621e7d8b75 \
-    --hash=sha256:4083861b0aa07990b619bd7ddc365eb7fa4b817e99cf5f8d9cf21a42780f6e01 \
-    --hash=sha256:498b0f36cc7054c1fead3d7fc59d2150f4d5c6c56ba7fb150c013fbc683a8d2d \
-    --hash=sha256:4e3e5da80ccbebfff202a67bf900d081906c358ccc3d5e3c8aea42fdfdfd51c1 \
-    --hash=sha256:6daac9731f172c2a22ade6ed0c00197ee7cc1221aa84cfdf9c31defeb059a907 \
-    --hash=sha256:715ff2f2df46121071622063fc7543d9b1fd19ebfc4f5c8895af64a77a8c852c \
-    --hash=sha256:73d785a950fc82dd2a25897d525d003f6378d1cb23ab305578394694202a58c3 \
-    --hash=sha256:8c8aaad94455178e3187ab22c8b01a3837f8ee50e09cf31f1ba129eb293ec30b \
-    --hash=sha256:8ce678dbaf790dbdb3eba24056d5364fb45944f33553dd5869b7580cdbb83614 \
-    --hash=sha256:aaee9905aee35ba5905cfb3c62f3e83b3bec7b39413f0a7f19be4e547ea01ebb \
-    --hash=sha256:bcd3b13b56ea479b3650b82cabd6b5343a625b0ced5429e4ccad28a8973f301b \
-    --hash=sha256:c9e348e02e4d2b4a8b2eedb48210430658df6951fa484e59de33ff773fbd4b41 \
-    --hash=sha256:d205b1b46085271b4e15f670058ce182bd1199e56b317bf2ec004b6a44f911f6 \
-    --hash=sha256:d43943ef777f9a1c42bf4e552ba23ac77a6351de620aa9acf64ad54933ad4d34 \
-    --hash=sha256:d5d33e9e7af3b34a40dc05f498939f0ebf187f07c385fd58d591c533ad8562fe \
-    --hash=sha256:fc0fea399acb12edbf8a628ba8d2312f583bdbdb3335635db062fa98cf71fca4 \
-    --hash=sha256:fe460b922ec15dd205595c9b5b99e2f056fd98ae8f9f56b888e7a17dc2b757e7 \
-    # via -r contrib/automation/linux-requirements.txt.in, astroid, black
-typing-extensions==3.7.4.3 \
-    --hash=sha256:7cb407020f00f7bfc3cb3e7881628838e69d8f3fcab2f64742a5e76b2f841918 \
-    --hash=sha256:99d4073b617d30288f569d3f13d2bd7548c3a7e4c8de87db09a9d29bb3a4a60c \
-    --hash=sha256:dafc7639cde7f1b6e1acc0f457842a83e722ccca8eef5270af2d74792619a89f \
+six==1.16.0 \
+    --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \
+    --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254
+    # via vcrpy
+toml==0.10.2 \
+    --hash=sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b \
+    --hash=sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f
+    # via
+    #   black
+    #   pylint
+typed-ast==1.4.3 ; python_version >= "3.0" and platform_python_implementation != "PyPy" \
+    --hash=sha256:01ae5f73431d21eead5015997ab41afa53aa1fbe252f9da060be5dad2c730ace \
+    --hash=sha256:067a74454df670dcaa4e59349a2e5c81e567d8d65458d480a5b3dfecec08c5ff \
+    --hash=sha256:0fb71b8c643187d7492c1f8352f2c15b4c4af3f6338f21681d3681b3dc31a266 \
+    --hash=sha256:1b3ead4a96c9101bef08f9f7d1217c096f31667617b58de957f690c92378b528 \
+    --hash=sha256:2068531575a125b87a41802130fa7e29f26c09a2833fea68d9a40cf33902eba6 \
+    --hash=sha256:209596a4ec71d990d71d5e0d312ac935d86930e6eecff6ccc7007fe54d703808 \
+    --hash=sha256:2c726c276d09fc5c414693a2de063f521052d9ea7c240ce553316f70656c84d4 \
+    --hash=sha256:398e44cd480f4d2b7ee8d98385ca104e35c81525dd98c519acff1b79bdaac363 \
+    --hash=sha256:52b1eb8c83f178ab787f3a4283f68258525f8d70f778a2f6dd54d3b5e5fb4341 \
+    --hash=sha256:5feca99c17af94057417d744607b82dd0a664fd5e4ca98061480fd8b14b18d04 \
+    --hash=sha256:7538e495704e2ccda9b234b82423a4038f324f3a10c43bc088a1636180f11a41 \
+    --hash=sha256:760ad187b1041a154f0e4d0f6aae3e40fdb51d6de16e5c99aedadd9246450e9e \
+    --hash=sha256:777a26c84bea6cd934422ac2e3b78863a37017618b6e5c08f92ef69853e765d3 \
+    --hash=sha256:95431a26309a21874005845c21118c83991c63ea800dd44843e42a916aec5899 \
+    --hash=sha256:9ad2c92ec681e02baf81fdfa056fe0d818645efa9af1f1cd5fd6f1bd2bdfd805 \
+    --hash=sha256:9c6d1a54552b5330bc657b7ef0eae25d00ba7ffe85d9ea8ae6540d2197a3788c \
+    --hash=sha256:aee0c1256be6c07bd3e1263ff920c325b59849dc95392a05f258bb9b259cf39c \
+    --hash=sha256:af3d4a73793725138d6b334d9d247ce7e5f084d96284ed23f22ee626a7b88e39 \
+    --hash=sha256:b36b4f3920103a25e1d5d024d155c504080959582b928e91cb608a65c3a49e1a \
+    --hash=sha256:b9574c6f03f685070d859e75c7f9eeca02d6933273b5e69572e5ff9d5e3931c3 \
+    --hash=sha256:bff6ad71c81b3bba8fa35f0f1921fb24ff4476235a6e94a26ada2e54370e6da7 \
+    --hash=sha256:c190f0899e9f9f8b6b7863debfb739abcb21a5c054f911ca3596d12b8a4c4c7f \
+    --hash=sha256:c907f561b1e83e93fad565bac5ba9c22d96a54e7ea0267c708bffe863cbe4075 \
+    --hash=sha256:cae53c389825d3b46fb37538441f75d6aecc4174f615d048321b716df2757fb0 \
+    --hash=sha256:dd4a21253f42b8d2b48410cb31fe501d32f8b9fbeb1f55063ad102fe9c425e40 \
+    --hash=sha256:dde816ca9dac1d9c01dd504ea5967821606f02e510438120091b84e852367428 \
+    --hash=sha256:f2362f3cb0f3172c42938946dbc5b7843c2a28aec307c49100c8b38764eb6927 \
+    --hash=sha256:f328adcfebed9f11301eaedfa48e15bdece9b519fb27e6a8c01aa52a17ec31b3 \
+    --hash=sha256:f8afcf15cc511ada719a88e013cec87c11aff7b91f019295eb4530f96fe5ef2f \
+    --hash=sha256:fb1bbeac803adea29cedd70781399c99138358c26d05fcbd23c13016b7f5ec65
+    # via
+    #   -r contrib/automation/linux-requirements.txt.in
+    #   astroid
+    #   black
+typing-extensions==3.10.0.0 \
+    --hash=sha256:0ac0f89795dd19de6b97debb0c6af1c70987fd80a2d62d1958f7e56fcc31b497 \
+    --hash=sha256:50b6f157849174217d0656f99dc82fe932884fb250826c18350e159ec6cdf342 \
+    --hash=sha256:779383f6086d90c99ae41cf0ff39aac8a7937a9283ce0a414e5dd782f4c94a84
[remainder of this hunk was truncated in the source view]
|
185 | 254 | # via yarl |
|
186 | vcrpy==4.1.0 \ | |
|
187 | --hash=sha256:4138e79eb35981ad391406cbb7227bce7eba8bad788dcf1a89c2e4a8b740debe \ | |
|
188 | --hash=sha256:d833248442bbc560599add895c9ab0ef518676579e8dc72d8b0933bdb3880253 \ | |
|
255 | vcrpy==4.1.1 \ | |
|
256 | --hash=sha256:12c3fcdae7b88ecf11fc0d3e6d77586549d4575a2ceee18e82eee75c1f626162 \ | |
|
257 | --hash=sha256:57095bf22fc0a2d99ee9674cdafebed0f3ba763018582450706f7d3a74fff599 | |
|
189 | 258 | # via -r contrib/automation/linux-requirements.txt.in |
|
190 | 259 | wrapt==1.12.1 \ |
|
191 | --hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7 \ | |
|
192 | # via astroid, vcrpy | |
|
193 | yarl==1.6.0 \ | |
|
194 | --hash=sha256:04a54f126a0732af75e5edc9addeaa2113e2ca7c6fce8974a63549a70a25e50e \ | |
|
195 | --hash=sha256:3cc860d72ed989f3b1f3abbd6ecf38e412de722fb38b8f1b1a086315cf0d69c5 \ | |
|
196 | --hash=sha256:5d84cc36981eb5a8533be79d6c43454c8e6a39ee3118ceaadbd3c029ab2ee580 \ | |
|
197 | --hash=sha256:5e447e7f3780f44f890360ea973418025e8c0cdcd7d6a1b221d952600fd945dc \ | |
|
198 | --hash=sha256:61d3ea3c175fe45f1498af868879c6ffeb989d4143ac542163c45538ba5ec21b \ | |
|
199 | --hash=sha256:67c5ea0970da882eaf9efcf65b66792557c526f8e55f752194eff8ec722c75c2 \ | |
|
200 | --hash=sha256:6f6898429ec3c4cfbef12907047136fd7b9e81a6ee9f105b45505e633427330a \ | |
|
201 | --hash=sha256:7ce35944e8e61927a8f4eb78f5bc5d1e6da6d40eadd77e3f79d4e9399e263921 \ | |
|
202 | --hash=sha256:b7c199d2cbaf892ba0f91ed36d12ff41ecd0dde46cbf64ff4bfe997a3ebc925e \ | |
|
203 | --hash=sha256:c15d71a640fb1f8e98a1423f9c64d7f1f6a3a168f803042eaf3a5b5022fde0c1 \ | |
|
204 | --hash=sha256:c22607421f49c0cb6ff3ed593a49b6a99c6ffdeaaa6c944cdda83c2393c8864d \ | |
|
205 | --hash=sha256:c604998ab8115db802cc55cb1b91619b2831a6128a62ca7eea577fc8ea4d3131 \ | |
|
206 | --hash=sha256:d088ea9319e49273f25b1c96a3763bf19a882cff774d1792ae6fba34bd40550a \ | |
|
207 | --hash=sha256:db9eb8307219d7e09b33bcb43287222ef35cbcf1586ba9472b0a4b833666ada1 \ | |
|
208 | --hash=sha256:e31fef4e7b68184545c3d68baec7074532e077bd1906b040ecfba659737df188 \ | |
|
209 | --hash=sha256:e32f0fb443afcfe7f01f95172b66f279938fbc6bdaebe294b0ff6747fb6db020 \ | |
|
210 | --hash=sha256:fcbe419805c9b20db9a51d33b942feddbf6e7fb468cb20686fd7089d4164c12a \ | |
|
260 | --hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7 | |
|
261 | # via | |
|
262 | # astroid | |
|
263 | # vcrpy | |
|
264 | yarl==1.6.3 \ | |
|
265 | --hash=sha256:00d7ad91b6583602eb9c1d085a2cf281ada267e9a197e8b7cae487dadbfa293e \ | |
|
266 | --hash=sha256:0355a701b3998dcd832d0dc47cc5dedf3874f966ac7f870e0f3a6788d802d434 \ | |
|
267 | --hash=sha256:15263c3b0b47968c1d90daa89f21fcc889bb4b1aac5555580d74565de6836366 \ | |
|
268 | --hash=sha256:2ce4c621d21326a4a5500c25031e102af589edb50c09b321049e388b3934eec3 \ | |
|
269 | --hash=sha256:31ede6e8c4329fb81c86706ba8f6bf661a924b53ba191b27aa5fcee5714d18ec \ | |
|
270 | --hash=sha256:324ba3d3c6fee56e2e0b0d09bf5c73824b9f08234339d2b788af65e60040c959 \ | |
|
271 | --hash=sha256:329412812ecfc94a57cd37c9d547579510a9e83c516bc069470db5f75684629e \ | |
|
272 | --hash=sha256:4736eaee5626db8d9cda9eb5282028cc834e2aeb194e0d8b50217d707e98bb5c \ | |
|
273 | --hash=sha256:4953fb0b4fdb7e08b2f3b3be80a00d28c5c8a2056bb066169de00e6501b986b6 \ | |
|
274 | --hash=sha256:4c5bcfc3ed226bf6419f7a33982fb4b8ec2e45785a0561eb99274ebbf09fdd6a \ | |
|
275 | --hash=sha256:547f7665ad50fa8563150ed079f8e805e63dd85def6674c97efd78eed6c224a6 \ | |
|
276 | --hash=sha256:5b883e458058f8d6099e4420f0cc2567989032b5f34b271c0827de9f1079a424 \ | |
|
277 | --hash=sha256:63f90b20ca654b3ecc7a8d62c03ffa46999595f0167d6450fa8383bab252987e \ | |
|
278 | --hash=sha256:68dc568889b1c13f1e4745c96b931cc94fdd0defe92a72c2b8ce01091b22e35f \ | |
|
279 | --hash=sha256:69ee97c71fee1f63d04c945f56d5d726483c4762845400a6795a3b75d56b6c50 \ | |
|
280 | --hash=sha256:6d6283d8e0631b617edf0fd726353cb76630b83a089a40933043894e7f6721e2 \ | |
|
281 | --hash=sha256:72a660bdd24497e3e84f5519e57a9ee9220b6f3ac4d45056961bf22838ce20cc \ | |
|
282 | --hash=sha256:73494d5b71099ae8cb8754f1df131c11d433b387efab7b51849e7e1e851f07a4 \ | |
|
283 | --hash=sha256:7356644cbed76119d0b6bd32ffba704d30d747e0c217109d7979a7bc36c4d970 \ | |
|
284 | --hash=sha256:8a9066529240171b68893d60dca86a763eae2139dd42f42106b03cf4b426bf10 \ | |
|
285 | --hash=sha256:8aa3decd5e0e852dc68335abf5478a518b41bf2ab2f330fe44916399efedfae0 \ | |
|
286 | --hash=sha256:97b5bdc450d63c3ba30a127d018b866ea94e65655efaf889ebeabc20f7d12406 \ | |
|
287 | --hash=sha256:9ede61b0854e267fd565e7527e2f2eb3ef8858b301319be0604177690e1a3896 \ | |
|
288 | --hash=sha256:b2e9a456c121e26d13c29251f8267541bd75e6a1ccf9e859179701c36a078643 \ | |
|
289 | --hash=sha256:b5dfc9a40c198334f4f3f55880ecf910adebdcb2a0b9a9c23c9345faa9185721 \ | |
|
290 | --hash=sha256:bafb450deef6861815ed579c7a6113a879a6ef58aed4c3a4be54400ae8871478 \ | |
|
291 | --hash=sha256:c49ff66d479d38ab863c50f7bb27dee97c6627c5fe60697de15529da9c3de724 \ | |
|
292 | --hash=sha256:ce3beb46a72d9f2190f9e1027886bfc513702d748047b548b05dab7dfb584d2e \ | |
|
293 | --hash=sha256:d26608cf178efb8faa5ff0f2d2e77c208f471c5a3709e577a7b3fd0445703ac8 \ | |
|
294 | --hash=sha256:d597767fcd2c3dc49d6eea360c458b65643d1e4dbed91361cf5e36e53c1f8c96 \ | |
|
295 | --hash=sha256:d5c32c82990e4ac4d8150fd7652b972216b204de4e83a122546dce571c1bdf25 \ | |
|
296 | --hash=sha256:d8d07d102f17b68966e2de0e07bfd6e139c7c02ef06d3a0f8d2f0f055e13bb76 \ | |
|
297 | --hash=sha256:e46fba844f4895b36f4c398c5af062a9808d1f26b2999c58909517384d5deda2 \ | |
|
298 | --hash=sha256:e6b5460dc5ad42ad2b36cca524491dfcaffbfd9c8df50508bddc354e787b8dc2 \ | |
|
299 | --hash=sha256:f040bcc6725c821a4c0665f3aa96a4d0805a7aaf2caf266d256b8ed71b9f041c \ | |
|
300 | --hash=sha256:f0b059678fd549c66b89bed03efcabb009075bd131c248ecdf087bdb6faba24a \ | |
|
301 | --hash=sha256:fcbb48a93e8699eae920f8d92f7160c03567b421bc17362a9ffbbd706a816f71 | |
|
211 | 302 | # via vcrpy |
|
212 | 303 | |
|
213 | 304 | # WARNING: The following packages were not pinned, but pip requires them to be |
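Note: every entry above is pinned with `--generate-hashes`, so pip's hash-checking mode rejects any downloaded artifact whose SHA-256 digest is not listed. A minimal standalone sketch of that verification step, using only the Python standard library; the filename and digest in the usage comment are hypothetical placeholders, not entries from this file:

    import hashlib

    def verify_sha256(path, expected_hex):
        """Return True if the file's SHA-256 digest matches the pinned value."""
        digest = hashlib.sha256()
        with open(path, 'rb') as fh:
            # Stream in chunks so large wheels need not fit in memory.
            for chunk in iter(lambda: fh.read(1 << 20), b''):
                digest.update(chunk)
        return digest.hexdigest() == expected_hex

    # Hypothetical usage:
    # verify_sha256('six-1.16.0-py2.py3-none-any.whl', '8abb2f1d86890a2d...')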
@@ -215,7 +215,6 b' utestpats = [' | |||
|
215 | 215 | "use regex test output patterns instead of sed", |
|
216 | 216 | ), |
|
217 | 217 | (uprefix + r'(true|exit 0)', "explicit zero exit unnecessary"), |
|
218 | (uprefix + r'.*(?<!\[)\$\?', "explicit exit code checks unnecessary"), | |
|
219 | 218 | ( |
|
220 | 219 | uprefix + r'.*\|\| echo.*(fail|error)', |
|
221 | 220 | "explicit exit code checks unnecessary", |
@@ -546,6 +545,22 b' commonpyfilters = [' | |||
|
546 | 545 | ), |
|
547 | 546 | ] |
|
548 | 547 | |
|
548 | # pattern only for mercurial and extensions | |
|
549 | core_py_pats = [ | |
|
550 | [ | |
|
551 | # Windows tend to get confused about capitalization of the drive letter | |
|
552 | # | |
|
553 | # see mercurial.windows.abspath for details | |
|
554 | ( | |
|
555 | r'os\.path\.abspath', | |
|
556 | "use util.abspath instead (windows)", | |
|
557 | r'#.*re-exports', | |
|
558 | ), | |
|
559 | ], | |
|
560 | # warnings | |
|
561 | [], | |
|
562 | ] | |
|
563 | ||
|
549 | 564 | # filters to convert normal *.py files |
|
550 | 565 | pyfilters = [] + commonpyfilters |
|
551 | 566 | |
@@ -701,6 +716,13 b' checks = [' | |||
|
701 | 716 | pyfilters, |
|
702 | 717 | py3pats, |
|
703 | 718 | ), |
|
719 | ( | |
|
720 | 'core files', | |
|
721 | r'.*(hgext|mercurial)/(?!demandimport|policy|pycompat).*\.py', | |
|
722 | '', | |
|
723 | pyfilters, | |
|
724 | core_py_pats, | |
|
725 | ), | |
|
704 | 726 | ('test script', r'(.*/)?test-[^.~]*$', '', testfilters, testpats), |
|
705 | 727 | ('c', r'.*\.[ch]$', '', cfilters, cpats), |
|
706 | 728 | ('unified test', r'.*\.t$', '', utestfilters, utestpats), |
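Note: the check-code hunks above follow the file's table-driven shape: each pattern entry pairs a regex with a message (core_py_pats adds a third element, an escape-hatch regex such as `#.*re-exports`), and each entry in `checks` binds a file-name regex to filters and a pattern table. A rough self-contained sketch of how such a table can be driven; the driver below is illustrative, not check-code's actual one:

    import re

    # (pattern, message) pairs, in the spirit of core_py_pats above.
    PATS = [
        (r'os\.path\.abspath', "use util.abspath instead (windows)"),
    ]

    def check_text(filename, text, pats=PATS):
        """Yield (filename, line number, message) for each pattern hit."""
        for lineno, line in enumerate(text.splitlines(), 1):
            for pat, msg in pats:
                if re.search(pat, line):
                    yield filename, lineno, msg

    for hit in check_text('example.py', "p = os.path.abspath(p)\n"):
        print('%s:%d: %s' % hit)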
@@ -240,13 +240,8 b' static void execcmdserver(const struct c' | |||
|
240 | 240 | const char *hgcmd = gethgcmd(); |
|
241 | 241 | |
|
242 | 242 | const char *baseargv[] = { |
|
243 | hgcmd, | |
|
244 | "serve", | |
|
245 | "--cmdserver", | |
|
246 | "chgunix", | |
|
247 | "--address", | |
|
248 | opts->initsockname, | |
|
249 | "--daemon-postexec", | |
|
243 | hgcmd, "serve", "--no-profile", "--cmdserver", | |
|
244 | "chgunix", "--address", opts->initsockname, "--daemon-postexec", | |
|
250 | 245 | "chdir:/", |
|
251 | 246 | }; |
|
252 | 247 | size_t baseargvsize = sizeof(baseargv) / sizeof(baseargv[0]); |
@@ -11,6 +11,7 b' from __future__ import absolute_import' | |||
|
11 | 11 | from mercurial import ( |
|
12 | 12 | dirstate, |
|
13 | 13 | extensions, |
|
14 | pycompat, | |
|
14 | 15 | ) |
|
15 | 16 | |
|
16 | 17 | |
@@ -18,7 +19,7 b' def nonnormalentries(dmap):' | |||
|
18 | 19 | """Compute nonnormal entries from dirstate's dmap""" |
|
19 | 20 | res = set() |
|
20 | 21 | for f, e in dmap.iteritems(): |
|
21 | if e[0] != b'n' or e[3] == -1: | |
|
22 | if e.state != b'n' or e.mtime == -1: | |
|
22 | 23 | res.add(f) |
|
23 | 24 | return res |
|
24 | 25 | |
@@ -27,18 +28,21 b' def checkconsistency(ui, orig, dmap, _no' | |||
|
27 | 28 | """Compute nonnormalset from dmap, check that it matches _nonnormalset""" |
|
28 | 29 | nonnormalcomputedmap = nonnormalentries(dmap) |
|
29 | 30 | if _nonnormalset != nonnormalcomputedmap: |
|
30 | ui.develwarn(b"%s call to %s\n" % (label, orig), config=b'dirstate') | |
|
31 | b_orig = pycompat.sysbytes(repr(orig)) | |
|
32 | ui.develwarn(b"%s call to %s\n" % (label, b_orig), config=b'dirstate') | |
|
31 | 33 | ui.develwarn(b"inconsistency in nonnormalset\n", config=b'dirstate') |
|
32 | ui.develwarn(b"[nonnormalset] %s\n" % _nonnormalset, config=b'dirstate') | |
|
33 | ui.develwarn(b"[map] %s\n" % nonnormalcomputedmap, config=b'dirstate') | |
|
34 | b_nonnormal = pycompat.sysbytes(repr(_nonnormalset)) | |
|
35 | ui.develwarn(b"[nonnormalset] %s\n" % b_nonnormal, config=b'dirstate') | |
|
36 | b_nonnormalcomputed = pycompat.sysbytes(repr(nonnormalcomputedmap)) | |
|
37 | ui.develwarn(b"[map] %s\n" % b_nonnormalcomputed, config=b'dirstate') | |
|
34 | 38 | |
|
35 | 39 | |
|
36 | def _checkdirstate(orig, self, arg): | |
|
40 | def _checkdirstate(orig, self, *args, **kwargs): | |
|
37 | 41 | """Check nonnormal set consistency before and after the call to orig""" |
|
38 | 42 | checkconsistency( |
|
39 | 43 | self._ui, orig, self._map, self._map.nonnormalset, b"before" |
|
40 | 44 | ) |
|
41 | r = orig(self, arg) | |
|
45 | r = orig(self, *args, **kwargs) | |
|
42 | 46 | checkconsistency( |
|
43 | 47 | self._ui, orig, self._map, self._map.nonnormalset, b"after" |
|
44 | 48 | ) |
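Note: widening `_checkdirstate(orig, self, arg)` to `*args, **kwargs` makes the monkeypatch wrapper signature-agnostic, so it keeps working when the wrapped dirstate method gains or loses parameters. The general shape of that pattern as a plain-Python sketch, with hypothetical names:

    import functools

    def checked(orig):
        """Wrap orig, forwarding any call signature unchanged."""
        @functools.wraps(orig)
        def wrapper(*args, **kwargs):
            # a pre-call consistency check would go here
            result = orig(*args, **kwargs)
            # a post-call consistency check would go here
            return result
        return wrapper

    @checked
    def setparents(p1, p2=None):
        return (p1, p2)

    assert setparents(1) == (1, None)
    assert setparents(1, p2=2) == (1, 2)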
@@ -13,6 +13,10 b' from mercurial import (' | |||
|
13 | 13 | ) |
|
14 | 14 | from mercurial.utils import procutil |
|
15 | 15 | |
|
16 | from mercurial.revlogutils import ( | |
|
17 | constants as revlog_constants, | |
|
18 | ) | |
|
19 | ||
|
16 | 20 | for fp in (sys.stdin, sys.stdout, sys.stderr): |
|
17 | 21 | procutil.setbinary(fp) |
|
18 | 22 | |
@@ -32,7 +36,16 b" def printb(data, end=b'\\n'):" | |||
|
32 | 36 | |
|
33 | 37 | |
|
34 | 38 | for f in sys.argv[1:]: |
|
35 | r = revlog.revlog(binopen, encoding.strtolocal(f)) | |
|
39 | localf = encoding.strtolocal(f) | |
|
40 | if not localf.endswith(b'.i'): | |
|
41 | print("file:", f, file=sys.stderr) | |
|
42 | print(" invalid filename", file=sys.stderr) | |
|
43 | ||
|
44 | r = revlog.revlog( | |
|
45 | binopen, | |
|
46 | target=(revlog_constants.KIND_OTHER, b'dump-revlog'), | |
|
47 | radix=localf[:-2], | |
|
48 | ) | |
|
36 | 49 | print("file:", f) |
|
37 | 50 | for i in r: |
|
38 | 51 | n = r.node(i) |
@@ -1,10 +1,15 b'' | |||
|
1 | 1 | from __future__ import absolute_import, print_function |
|
2 | 2 | |
|
3 | 3 | import argparse |
|
4 | import os | |
|
4 | 5 | import struct |
|
5 | 6 | import sys |
|
6 | 7 | import zipfile |
|
7 | 8 | |
|
9 | # Add ../.. to sys.path as an absolute path so we can import hg modules | |
|
10 | hgloc = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')) | |
|
11 | sys.path[0:0] = [hgloc] | |
|
12 | ||
|
8 | 13 | from mercurial import ( |
|
9 | 14 | hg, |
|
10 | 15 | ui as uimod, |
@@ -139,3 +139,36 b' check-pytype-py3:' | |||
|
139 | 139 | RUNTEST_ARGS: " --allow-slow-tests tests/test-check-pytype.t" |
|
140 | 140 | PYTHON: python3 |
|
141 | 141 | TEST_HGMODULEPOLICY: "c" |
|
142 | ||
|
143 | # `sh.exe --login` sets a couple of extra environment variables that are defined | |
|
144 | # in the MinGW shell, but switches CWD to /home/$username. The previous value | |
|
145 | # is stored in OLDPWD. Of the added variables, MSYSTEM is crucial to running | |
|
146 | # run-tests.py- it is needed to make run-tests.py generate a `python3` script | |
|
147 | # that satisfies the various shebang lines and delegates to `py -3`. | |
|
148 | .window_runtests_template: &windows_runtests | |
|
149 | stage: tests | |
|
150 | before_script: | |
|
151 | # Temporary until this is adjusted in the environment | |
|
152 | - $Env:TEMP="C:/Temp" | |
|
153 | - $Env:TMP="C:/Temp" | |
|
154 | # TODO: find/install cvs, bzr, perforce, gpg, sqlite3 | |
|
155 | ||
|
156 | script: | |
|
157 | - echo "Entering script section" | |
|
158 | - echo "python used, $Env:PYTHON" | |
|
159 | - Invoke-Expression "$Env:PYTHON -V" | |
|
160 | - Invoke-Expression "$Env:PYTHON -m black --version" | |
|
161 | - echo "$Env:RUNTEST_ARGS" | |
|
162 | ||
|
163 | - C:/MinGW/msys/1.0/bin/sh.exe --login -c 'cd "$OLDPWD" && HGTESTS_ALLOW_NETIO="$TEST_HGTESTS_ALLOW_NETIO" HGMODULEPOLICY="$TEST_HGMODULEPOLICY" $PYTHON tests/run-tests.py --color=always $RUNTEST_ARGS' | |
|
164 | ||
|
165 | windows-py3: | |
|
166 | <<: *windows_runtests | |
|
167 | when: manual | |
|
168 | tags: | |
|
169 | - windows | |
|
170 | timeout: 2h | |
|
171 | variables: | |
|
172 | TEST_HGMODULEPOLICY: "c" | |
|
173 | RUNTEST_ARGS: "--blacklist /tmp/check-tests.txt" | |
|
174 | PYTHON: py -3 |
@@ -31,6 +31,7 b' command="hg-ssh --read-only repos/*"' | |||
|
31 | 31 | from __future__ import absolute_import |
|
32 | 32 | |
|
33 | 33 | import os |
|
34 | import re | |
|
34 | 35 | import shlex |
|
35 | 36 | import sys |
|
36 | 37 | |
@@ -51,6 +52,12 b' def main():' | |||
|
51 | 52 | dispatch.initstdio() |
|
52 | 53 | |
|
53 | 54 | cwd = os.getcwd() |
|
55 | if os.name == 'nt': | |
|
56 | # os.getcwd() is inconsistent on the capitalization of the drive | |
|
57 | # letter, so adjust it. see https://bugs.python.org/issue40368 | |
|
58 | if re.match('^[a-z]:', cwd): | |
|
59 | cwd = cwd[0:1].upper() + cwd[1:] | |
|
60 | ||
|
54 | 61 | readonly = False |
|
55 | 62 | args = sys.argv[1:] |
|
56 | 63 | while len(args): |
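Note: the hg-ssh hunk works around https://bugs.python.org/issue40368, where os.getcwd() on Windows can report the drive letter in lower case depending on how the process was started. The same normalization as a standalone, testable helper:

    import re

    def normalize_drive(cwd):
        """Upper-case a leading 'x:' drive letter; leave other paths alone."""
        if re.match('^[a-z]:', cwd):
            cwd = cwd[0:1].upper() + cwd[1:]
        return cwd

    assert normalize_drive('c:\\Users\\hg') == 'C:\\Users\\hg'
    assert normalize_drive('/home/hg') == '/home/hg'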
@@ -23,7 +23,7 b' import testparseutil' | |||
|
23 | 23 | # Whitelist of modules that symbols can be directly imported from. |
|
24 | 24 | allowsymbolimports = ( |
|
25 | 25 | '__future__', |
|
26 | 'bzrlib', | |
|
26 | 'breezy', | |
|
27 | 27 | 'hgclient', |
|
28 | 28 | 'mercurial', |
|
29 | 29 | 'mercurial.hgweb.common', |
@@ -32,15 +32,15 b'' | |||
|
32 | 32 | $PYTHON37_X64_URL = "https://www.python.org/ftp/python/3.7.9/python-3.7.9-amd64.exe" |
|
33 | 33 | $PYTHON37_x64_SHA256 = "e69ed52afb5a722e5c56f6c21d594e85c17cb29f12f18bb69751cf1714e0f987" |
|
34 | 34 | |
|
35 | $PYTHON38_x86_URL = "https://www.python.org/ftp/python/3.8.…" | |
|
36 | $PYTHON38_x86_SHA256 = "287d5df01ff22ff09e6a487ae018603ee19eade71d462ec703850c96f1d5e8a0" | |
|
37 | $PYTHON38_x64_URL = "https://www.python.org/ftp/python/3.8.…" | |
|
38 | $PYTHON38_x64_SHA256 = "328a257f189cb500606bb26ab0fbdd298ed0e05d8c36540a322a1744f489a0a0" | |
|
35 | $PYTHON38_x86_URL = "https://www.python.org/ftp/python/3.8.10/python-3.8.10.exe" | |
|
36 | $PYTHON38_x86_SHA256 = "ad07633a1f0cd795f3bf9da33729f662281df196b4567fa795829f3bb38a30ac" | |
|
37 | $PYTHON38_x64_URL = "https://www.python.org/ftp/python/3.8.10/python-3.8.10-amd64.exe" | |
|
38 | $PYTHON38_x64_SHA256 = "7628244cb53408b50639d2c1287c659f4e29d3dfdb9084b11aed5870c0c6a48a" | |
|
39 | 39 | |
|
40 | $PYTHON39_x86_URL = "https://www.python.org/ftp/python/3.9.…" | |
|
41 | $PYTHON39_x86_SHA256 = "a4c65917f4225d1543959342f0615c813a4e9e7ff1137c4394ff6a5290ac1913" | |
|
42 | $PYTHON39_x64_URL = "https://www.python.org/ftp/python/3.9.…" | |
|
43 | $PYTHON39_x64_SHA256 = "fd2e2c6612d43bb6b213b72fc53f07d73d99059fa72c96e44bde12e7815073ae" | |
|
40 | $PYTHON39_x86_URL = "https://www.python.org/ftp/python/3.9.5/python-3.9.5.exe" | |
|
41 | $PYTHON39_x86_SHA256 = "505129081a839b699a6ab9064b441ad922ef03767b5dd4241fd0c2166baf64de" | |
|
42 | $PYTHON39_x64_URL = "https://www.python.org/ftp/python/3.9.5/python-3.9.5-amd64.exe" | |
|
43 | $PYTHON39_x64_SHA256 = "84d5243088ba00c11e51905c704dbe041040dfff044f4e1ce5476844ee2e6eac" | |
|
44 | 44 | |
|
45 | 45 | # PIP 19.2.3. |
|
46 | 46 | $PIP_URL = "https://github.com/pypa/get-pip/raw/309a56c5fd94bd1134053a541cb4657a4e47e09d/get-pip.py" |
@@ -62,6 +62,9 b'' | |||
|
62 | 62 | $RUSTUP_INIT_URL = "https://static.rust-lang.org/rustup/archive/1.21.1/x86_64-pc-windows-gnu/rustup-init.exe" |
|
63 | 63 | $RUSTUP_INIT_SHA256 = "d17df34ba974b9b19cf5c75883a95475aa22ddc364591d75d174090d55711c72" |
|
64 | 64 | |
|
65 | $PYOXIDIZER_URL = "https://github.com/indygreg/PyOxidizer/releases/download/pyoxidizer%2F0.16.0/PyOxidizer-0.16.0-x64.msi" | |
|
66 | $PYOXIDIZER_SHA256 = "2a9c58add9161c272c418d5e6dec13fbe648f624b5d26770190357e4d664f24e" | |
|
67 | ||
|
65 | 68 | # Writing progress slows down downloads substantially. So disable it. |
|
66 | 69 | $progressPreference = 'silentlyContinue' |
|
67 | 70 | |
@@ -121,11 +124,8 b' function Install-Rust($prefix) {' | |||
|
121 | 124 | |
|
122 | 125 | Invoke-Process "${prefix}\assets\rustup-init.exe" "-y --default-host x86_64-pc-windows-msvc" |
|
123 | 126 | Invoke-Process "${prefix}\cargo\bin\rustup.exe" "target add i686-pc-windows-msvc" |
|
124 | Invoke-Process "${prefix}\cargo\bin\rustup.exe" "install 1.…" | |
|
127 | Invoke-Process "${prefix}\cargo\bin\rustup.exe" "install 1.52.0" | |
|
125 | 128 | Invoke-Process "${prefix}\cargo\bin\rustup.exe" "component add clippy" |
|
126 | ||
|
127 | # Install PyOxidizer for packaging. | |
|
128 | Invoke-Process "${prefix}\cargo\bin\cargo.exe" "install --version 0.10.3 pyoxidizer" | |
|
129 | 129 | } |
|
130 | 130 | |
|
131 | 131 | function Install-Dependencies($prefix) { |
@@ -151,6 +151,7 b' function Install-Dependencies($prefix) {' | |||
|
151 | 151 | Secure-Download $MINGW_BIN_URL ${prefix}\assets\mingw-get-bin.zip $MINGW_BIN_SHA256 |
|
152 | 152 | Secure-Download $MERCURIAL_WHEEL_URL ${prefix}\assets\${MERCURIAL_WHEEL_FILENAME} $MERCURIAL_WHEEL_SHA256 |
|
153 | 153 | Secure-Download $RUSTUP_INIT_URL ${prefix}\assets\rustup-init.exe $RUSTUP_INIT_SHA256 |
|
154 | Secure-Download $PYOXIDIZER_URL ${prefix}\assets\PyOxidizer.msi $PYOXIDIZER_SHA256 | |
|
154 | 155 | |
|
155 | 156 | Write-Output "installing Python 2.7 32-bit" |
|
156 | 157 | Invoke-Process msiexec.exe "/i ${prefix}\assets\python27-x86.msi /l* ${prefix}\assets\python27-x86.log /q TARGETDIR=${prefix}\python27-x86 ALLUSERS=" |
@@ -172,6 +173,9 b' function Install-Dependencies($prefix) {' | |||
|
172 | 173 | Write-Output "installing Visual Studio 2017 Build Tools and SDKs" |
|
173 | 174 | Invoke-Process ${prefix}\assets\vs_buildtools.exe "--quiet --wait --norestart --nocache --channelUri https://aka.ms/vs/15/release/channel --add Microsoft.VisualStudio.Workload.MSBuildTools --add Microsoft.VisualStudio.Component.Windows10SDK.17763 --add Microsoft.VisualStudio.Workload.VCTools --add Microsoft.VisualStudio.Component.Windows10SDK --add Microsoft.VisualStudio.Component.VC.140" |
|
174 | 175 | |
|
176 | Write-Output "installing PyOxidizer" | |
|
177 | Invoke-Process msiexec.exe "/i ${prefix}\assets\PyOxidizer.msi /l* ${prefix}\assets\PyOxidizer.log /quiet" | |
|
178 | ||
|
175 | 179 | Install-Rust ${prefix} |
|
176 | 180 | |
|
177 | 181 | Write-Output "installing Visual C++ 9.0 for Python 2.7" |
@@ -64,6 +64,7 b' def build_wix(' | |||
|
64 | 64 | extra_packages_script=None, |
|
65 | 65 | extra_wxs=None, |
|
66 | 66 | extra_features=None, |
|
67 | extra_pyoxidizer_vars=None, | |
|
67 | 68 | ): |
|
68 | 69 | if not pyoxidizer_target and not python: |
|
69 | 70 | raise Exception("--python required unless building with PyOxidizer") |
@@ -105,7 +106,7 b' def build_wix(' | |||
|
105 | 106 | "timestamp_url": sign_timestamp_url, |
|
106 | 107 | } |
|
107 | 108 | |
|
108 | fn(**kwargs) | |
|
109 | fn(**kwargs, extra_pyoxidizer_vars=extra_pyoxidizer_vars) | |
|
109 | 110 | |
|
110 | 111 | |
|
111 | 112 | def get_parser(): |
@@ -168,6 +169,12 b' def get_parser():' | |||
|
168 | 169 | "in the installer from the extra wxs files" |
|
169 | 170 | ), |
|
170 | 171 | ) |
|
172 | ||
|
173 | sp.add_argument( | |
|
174 | "--extra-pyoxidizer-vars", | |
|
175 | help="json map of extra variables to pass to pyoxidizer", | |
|
176 | ) | |
|
177 | ||
|
171 | 178 | sp.set_defaults(func=build_wix) |
|
172 | 179 | |
|
173 | 180 | return parser |
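Note: `--extra-pyoxidizer-vars` arrives as a single JSON string and, further down, is folded into the build variables with json.loads. That parsing step in isolation, as a hedged sketch (the function name is illustrative):

    import json

    def parse_extra_vars(raw):
        """Parse a '{"KEY": "VALUE"}'-style CLI argument into a dict."""
        if not raw:
            return {}
        mapping = json.loads(raw)
        if not isinstance(mapping, dict):
            raise ValueError('expected a JSON object mapping names to values')
        return mapping

    assert parse_extra_vars('{"MSI_NAME": "mercurial"}') == {'MSI_NAME': 'mercurial'}
    assert parse_extra_vars(None) == {}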
@@ -18,7 +18,7 b' from .py2exe import (' | |||
|
18 | 18 | build_py2exe, |
|
19 | 19 | stage_install, |
|
20 | 20 | ) |
|
21 | from .pyoxidizer import run_pyoxidizer | |
|
21 | from .pyoxidizer import create_pyoxidizer_install_layout | |
|
22 | 22 | from .util import ( |
|
23 | 23 | find_legacy_vc_runtime_files, |
|
24 | 24 | normalize_windows_version, |
@@ -136,7 +136,9 b' def build_with_pyoxidizer(' | |||
|
136 | 136 | staging_dir = inno_build_dir / "stage" |
|
137 | 137 | |
|
138 | 138 | inno_build_dir.mkdir(parents=True, exist_ok=True) |
|
139 | run_pyoxidizer(source_dir, inno_build_dir, staging_dir, target_triple) | |
|
139 | create_pyoxidizer_install_layout( | |
|
140 | source_dir, inno_build_dir, staging_dir, target_triple | |
|
141 | ) | |
|
140 | 142 | |
|
141 | 143 | process_install_rules(EXTRA_INSTALL_RULES, source_dir, staging_dir) |
|
142 | 144 |
@@ -12,6 +12,7 b' import pathlib' | |||
|
12 | 12 | import shutil |
|
13 | 13 | import subprocess |
|
14 | 14 | import sys |
|
15 | import typing | |
|
15 | 16 | |
|
16 | 17 | from .downloads import download_entry |
|
17 | 18 | from .util import ( |
@@ -53,17 +54,36 b' STAGING_EXCLUDES_WINDOWS = [' | |||
|
53 | 54 | ] |
|
54 | 55 | |
|
55 | 56 | |
|
57 | def build_docs_html(source_dir: pathlib.Path): | |
|
58 | """Ensures HTML documentation is built. | |
|
59 | ||
|
60 | This will fail if docutils isn't available. | |
|
61 | ||
|
62 | (The HTML docs aren't built as part of `pip install` so we need to build them | |
|
63 | out of band.) | |
|
64 | """ | |
|
65 | subprocess.run( | |
|
66 | [sys.executable, str(source_dir / "setup.py"), "build_doc", "--html"], | |
|
67 | cwd=str(source_dir), | |
|
68 | check=True, | |
|
69 | ) | |
|
70 | ||
|
71 | ||
|
56 | 72 | def run_pyoxidizer( |
|
57 | 73 | source_dir: pathlib.Path, |
|
58 | 74 | build_dir: pathlib.Path, |
|
59 | out_dir: pathlib.Path, | |
|
60 | 75 | target_triple: str, |
|
61 | ): | |
|
62 | """Build Mercurial with PyOxidizer and copy additional files into place. | |
|
76 | build_vars: typing.Optional[typing.Dict[str, str]] = None, | |
|
77 | target: typing.Optional[str] = None, | |
|
78 | ) -> pathlib.Path: | |
|
79 | """Run `pyoxidizer` in an environment with access to build dependencies. | |
|
63 | 80 | |
|
64 | After successful completion, ``out_dir`` contains files constituting a | |
|
65 | Mercurial install. | |
|
81 | Returns the output directory that pyoxidizer would have used for build | |
|
82 | artifacts. Actual build artifacts are likely in a sub-directory with the | |
|
83 | name of the pyoxidizer build target that was built. | |
|
66 | 84 | """ |
|
85 | build_vars = build_vars or {} | |
|
86 | ||
|
67 | 87 | # We need to make gettext binaries available for compiling i18n files. |
|
68 | 88 | gettext_pkg, gettext_entry = download_entry('gettext', build_dir) |
|
69 | 89 | gettext_dep_pkg = download_entry('gettext-dep', build_dir)[0] |
@@ -91,8 +111,31 b' def run_pyoxidizer(' | |||
|
91 | 111 | target_triple, |
|
92 | 112 | ] |
|
93 | 113 | |
|
114 | for k, v in sorted(build_vars.items()): | |
|
115 | args.extend(["--var", k, v]) | |
|
116 | ||
|
117 | if target: | |
|
118 | args.append(target) | |
|
119 | ||
|
94 | 120 | subprocess.run(args, env=env, check=True) |
|
95 | 121 | |
|
122 | return source_dir / "build" / "pyoxidizer" / target_triple / "release" | |
|
123 | ||
|
124 | ||
|
125 | def create_pyoxidizer_install_layout( | |
|
126 | source_dir: pathlib.Path, | |
|
127 | build_dir: pathlib.Path, | |
|
128 | out_dir: pathlib.Path, | |
|
129 | target_triple: str, | |
|
130 | ): | |
|
131 | """Build Mercurial with PyOxidizer and copy additional files into place. | |
|
132 | ||
|
133 | After successful completion, ``out_dir`` contains files constituting a | |
|
134 | Mercurial install. | |
|
135 | """ | |
|
136 | ||
|
137 | run_pyoxidizer(source_dir, build_dir, target_triple) | |
|
138 | ||
|
96 | 139 | if "windows" in target_triple: |
|
97 | 140 | target = "app_windows" |
|
98 | 141 | else: |
@@ -113,14 +156,7 b' def run_pyoxidizer(' | |||
|
113 | 156 | # is taught to use the importlib APIs for reading resources. |
|
114 | 157 | process_install_rules(STAGING_RULES_APP, build_dir, out_dir) |
|
115 | 158 | |
|
116 | # We also need to run setup.py build_doc to produce html files, | |
|
117 | # as they aren't built as part of ``pip install``. | |
|
118 | # This will fail if docutils isn't installed. | |
|
119 | subprocess.run( | |
|
120 | [sys.executable, str(source_dir / "setup.py"), "build_doc", "--html"], | |
|
121 | cwd=str(source_dir), | |
|
122 | check=True, | |
|
123 | ) | |
|
159 | build_docs_html(source_dir) | |
|
124 | 160 | |
|
125 | 161 | if "windows" in target_triple: |
|
126 | 162 | process_install_rules(STAGING_RULES_WINDOWS, source_dir, out_dir) |
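Note: run_pyoxidizer now threads `build_vars` to the CLI as repeated `--var NAME VALUE` pairs, with an optional build target appended last. A minimal sketch of just that argument construction; it builds argv only and invokes nothing, and the leading command portion is assumed rather than copied from the file:

    def pyoxidizer_args(target_triple, build_vars=None, target=None):
        """Assemble a pyoxidizer build argv tail, mirroring the loop above."""
        args = ['pyoxidizer', 'build', '--target-triple', target_triple]
        for k, v in sorted((build_vars or {}).items()):
            args.extend(['--var', k, v])
        if target:
            args.append(target)
        return args

    assert pyoxidizer_args('x86_64-pc-windows-msvc', {'VERSION': '5.9'}, 'msi')[-3:] == [
        'VERSION', '5.9', 'msi']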
@@ -8,6 +8,7 b'' | |||
|
8 | 8 | # no-check-code because Python 3 native. |
|
9 | 9 | |
|
10 | 10 | import collections |
|
11 | import json | |
|
11 | 12 | import os |
|
12 | 13 | import pathlib |
|
13 | 14 | import re |
@@ -22,7 +23,11 b' from .py2exe import (' | |||
|
22 | 23 | build_py2exe, |
|
23 | 24 | stage_install, |
|
24 | 25 | ) |
|
25 | from .pyoxidizer import run_pyoxidizer | |
|
26 | from .pyoxidizer import ( | |
|
27 | build_docs_html, | |
|
28 | create_pyoxidizer_install_layout, | |
|
29 | run_pyoxidizer, | |
|
30 | ) | |
|
26 | 31 | from .util import ( |
|
27 | 32 | extract_zip_to_directory, |
|
28 | 33 | normalize_windows_version, |
@@ -382,40 +387,74 b' def build_installer_pyoxidizer(' | |||
|
382 | 387 | extra_wxs: typing.Optional[typing.Dict[str, str]] = None, |
|
383 | 388 | extra_features: typing.Optional[typing.List[str]] = None, |
|
384 | 389 | signing_info: typing.Optional[typing.Dict[str, str]] = None, |
|
390 | extra_pyoxidizer_vars=None, | |
|
385 | 391 | ): |
|
386 | 392 | """Build a WiX MSI installer using PyOxidizer.""" |
|
387 | 393 | hg_build_dir = source_dir / "build" |
|
388 | 394 | build_dir = hg_build_dir / ("wix-%s" % target_triple) |
|
389 | staging_dir = build_dir / "stage" | |
|
390 | ||
|
391 | arch = "x64" if "x86_64" in target_triple else "x86" | |
|
392 | 395 | |
|
393 | 396 | build_dir.mkdir(parents=True, exist_ok=True) |
|
394 | run_pyoxidizer(source_dir, build_dir, staging_dir, target_triple) | |
|
397 | ||
|
398 | # Need to ensure docs HTML is built because this isn't done as part of | |
|
399 | # `pip install Mercurial`. | |
|
400 | build_docs_html(source_dir) | |
|
401 | ||
|
402 | build_vars = {} | |
|
395 | 403 | |
|
396 | # We also install some extra files. | |
|
397 | process_install_rules(EXTRA_INSTALL_RULES, source_dir, staging_dir) | |
|
404 | if msi_name: | |
|
405 | build_vars["MSI_NAME"] = msi_name | |
|
406 | ||
|
407 | if version: | |
|
408 | build_vars["VERSION"] = version | |
|
409 | ||
|
410 | if extra_features: | |
|
411 | build_vars["EXTRA_MSI_FEATURES"] = ";".join(extra_features) | |
|
398 | 412 | |
|
399 | # And remove some files we don't want. | |
|
400 | for f in STAGING_REMOVE_FILES: | |
|
401 | p = staging_dir / f | |
|
402 | if p.exists(): | |
|
403 | print('removing %s' % p) | |
|
404 | p.unlink() | |
|
413 | if signing_info: | |
|
414 | if signing_info["cert_path"]: | |
|
415 | build_vars["SIGNING_PFX_PATH"] = signing_info["cert_path"] | |
|
416 | if signing_info["cert_password"]: | |
|
417 | build_vars["SIGNING_PFX_PASSWORD"] = signing_info["cert_password"] | |
|
418 | if signing_info["subject_name"]: | |
|
419 | build_vars["SIGNING_SUBJECT_NAME"] = signing_info["subject_name"] | |
|
420 | if signing_info["timestamp_url"]: | |
|
421 | build_vars["TIME_STAMP_SERVER_URL"] = signing_info["timestamp_url"] | |
|
405 | 422 | |
|
406 | return run_wix_packaging( | |
|
423 | if extra_pyoxidizer_vars: | |
|
424 | build_vars.update(json.loads(extra_pyoxidizer_vars)) | |
|
425 | ||
|
426 | if extra_wxs: | |
|
427 | raise Exception( | |
|
428 | "support for extra .wxs files has been temporarily dropped" | |
|
429 | ) | |
|
430 | ||
|
431 | out_dir = run_pyoxidizer( | |
|
407 | 432 | source_dir, |
|
408 | 433 | build_dir, |
|
409 | staging_dir, | |
|
410 | arch, | |
|
411 | version=version, | |
|
412 | python2=False, | |
|
413 | msi_name=msi_name, | |
|
414 | extra_wxs=extra_wxs, | |
|
415 | extra_features=extra_features, | |
|
416 | signing_info=signing_info, | |
|
434 | target_triple, | |
|
435 | build_vars=build_vars, | |
|
436 | target="msi", | |
|
417 | 437 | ) |
|
418 | 438 | |
|
439 | msi_dir = out_dir / "msi" | |
|
440 | msi_files = [f for f in os.listdir(msi_dir) if f.endswith(".msi")] | |
|
441 | ||
|
442 | if len(msi_files) != 1: | |
|
443 | raise Exception("expected exactly 1 .msi file; got %d" % len(msi_files)) | |
|
444 | ||
|
445 | msi_filename = msi_files[0] | |
|
446 | ||
|
447 | msi_path = msi_dir / msi_filename | |
|
448 | dist_path = source_dir / "dist" / msi_filename | |
|
449 | ||
|
450 | dist_path.parent.mkdir(parents=True, exist_ok=True) | |
|
451 | ||
|
452 | shutil.copyfile(msi_path, dist_path) | |
|
453 | ||
|
454 | return { | |
|
455 | "msi_path": dist_path, | |
|
456 | } | |
|
457 | ||
|
419 | 458 | |
|
420 | 459 | def run_wix_packaging( |
|
421 | 460 | source_dir: pathlib.Path, |
@@ -135,9 +135,13 b'' | |||
|
135 | 135 | <UIRef Id="WixUI_FeatureTree" /> |
|
136 | 136 | <UIRef Id="WixUI_ErrorProgressText" /> |
|
137 | 137 | |
|
138 | <?ifdef PyOxidizer?> | |
|
139 | <WixVariable Id="WixUILicenseRtf" Value="COPYING.rtf" /> | |
|
140 | <Icon Id="hgIcon.ico" SourceFile="mercurial.ico" /> | |
|
141 | <?else?> | |
|
138 | 142 | <WixVariable Id="WixUILicenseRtf" Value="contrib\packaging\wix\COPYING.rtf" /> |
|
139 | ||
|
140 | 143 | <Icon Id="hgIcon.ico" SourceFile="contrib/win32/mercurial.ico" /> |
|
144 | <?endif?> | |
|
141 | 145 | |
|
142 | 146 | <Upgrade Id='$(var.ProductUpgradeCode)'> |
|
143 | 147 | <UpgradeVersion |
@@ -66,6 +66,8 b' import sys' | |||
|
66 | 66 | import tempfile |
|
67 | 67 | import threading |
|
68 | 68 | import time |
|
69 | ||
|
70 | import mercurial.revlog | |
|
69 | 71 | from mercurial import ( |
|
70 | 72 | changegroup, |
|
71 | 73 | cmdutil, |
@@ -76,7 +78,6 b' from mercurial import (' | |||
|
76 | 78 | hg, |
|
77 | 79 | mdiff, |
|
78 | 80 | merge, |
|
79 | revlog, | |
|
80 | 81 | util, |
|
81 | 82 | ) |
|
82 | 83 | |
@@ -119,6 +120,21 b' try:' | |||
|
119 | 120 | except ImportError: |
|
120 | 121 | profiling = None |
|
121 | 122 | |
|
123 | try: | |
|
124 | from mercurial.revlogutils import constants as revlog_constants | |
|
125 | ||
|
126 | perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf') | |
|
127 | ||
|
128 | def revlog(opener, *args, **kwargs): | |
|
129 | return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs) | |
|
130 | ||
|
131 | ||
|
132 | except (ImportError, AttributeError): | |
|
133 | perf_rl_kind = None | |
|
134 | ||
|
135 | def revlog(opener, *args, **kwargs): | |
|
136 | return mercurial.revlog.revlog(opener, *args, **kwargs) | |
|
137 | ||
|
122 | 138 | |
|
123 | 139 | def identity(a): |
|
124 | 140 | return a |
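Note: perf.py has to import cleanly on many Mercurial versions, so it feature-detects at import time: try the modern revlogutils constants and define a revlog() shim that injects the new target argument, else fall back to a shim with the old call shape. The same detect-once-at-import idiom in a self-contained form (the APIs below are stand-ins, not Mercurial's):

    # Feature detection: pick one implementation when the module loads.
    try:
        from math import isqrt as _isqrt  # modern API (Python >= 3.8)

        def intsqrt(n):
            return _isqrt(n)

    except ImportError:  # legacy fallback with the same observable behavior

        def intsqrt(n):
            return int(n ** 0.5)

    assert intsqrt(16) == 4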
@@ -1131,7 +1147,10 b' def perfdirs(ui, repo, **opts):' | |||
|
1131 | 1147 | |
|
1132 | 1148 | def d(): |
|
1133 | 1149 | dirstate.hasdir(b'a') |
|
1134 | del dirstate._map._dirs | |
|
1150 | try: | |
|
1151 | del dirstate._map._dirs | |
|
1152 | except AttributeError: | |
|
1153 | pass | |
|
1135 | 1154 | |
|
1136 | 1155 | timer(d) |
|
1137 | 1156 | fm.end() |
@@ -1209,7 +1228,10 b' def perfdirstatedirs(ui, repo, **opts):' | |||
|
1209 | 1228 | repo.dirstate.hasdir(b"a") |
|
1210 | 1229 | |
|
1211 | 1230 | def setup(): |
|
1212 | del repo.dirstate._map._dirs | |
|
1231 | try: | |
|
1232 | del repo.dirstate._map._dirs | |
|
1233 | except AttributeError: | |
|
1234 | pass | |
|
1213 | 1235 | |
|
1214 | 1236 | def d(): |
|
1215 | 1237 | repo.dirstate.hasdir(b"a") |
@@ -1252,7 +1274,10 b' def perfdirfoldmap(ui, repo, **opts):' | |||
|
1252 | 1274 | |
|
1253 | 1275 | def setup(): |
|
1254 | 1276 | del dirstate._map.dirfoldmap |
|
1255 | del dirstate._map._dirs | |
|
1277 | try: | |
|
1278 | del dirstate._map._dirs | |
|
1279 | except AttributeError: | |
|
1280 | pass | |
|
1256 | 1281 | |
|
1257 | 1282 | def d(): |
|
1258 | 1283 | dirstate._map.dirfoldmap.get(b'a') |
@@ -1809,7 +1834,11 b' def perfnodelookup(ui, repo, rev, **opts' | |||
|
1809 | 1834 | |
|
1810 | 1835 | mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg |
|
1811 | 1836 | n = scmutil.revsingle(repo, rev).node() |
|
1812 | cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i") | |
|
1837 | ||
|
1838 | try: | |
|
1839 | cl = revlog(getsvfs(repo), radix=b"00changelog") | |
|
1840 | except TypeError: | |
|
1841 | cl = revlog(getsvfs(repo), indexfile=b"00changelog.i") | |
|
1813 | 1842 | |
|
1814 | 1843 | def d(): |
|
1815 | 1844 | cl.rev(n) |
@@ -2592,17 +2621,25 b' def perfrevlogindex(ui, repo, file_=None' | |||
|
2592 | 2621 | rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts) |
|
2593 | 2622 | |
|
2594 | 2623 | opener = getattr(rl, 'opener') # trick linter |
|
2595 | indexfile = rl.indexfile | |
|
2624 | # compat with hg <= 5.8 | |
|
2625 | radix = getattr(rl, 'radix', None) | |
|
2626 | indexfile = getattr(rl, '_indexfile', None) | |
|
2627 | if indexfile is None: | |
|
2628 | # compatibility with <= hg-5.8 | |
|
2629 | indexfile = getattr(rl, 'indexfile') | |
|
2596 | 2630 | data = opener.read(indexfile) |
|
2597 | 2631 | |
|
2598 | 2632 | header = struct.unpack(b'>I', data[0:4])[0] |
|
2599 | 2633 | version = header & 0xFFFF |
|
2600 | 2634 | if version == 1: |
|
2601 | revlogio = revlog.revlogio() | |
|
2602 | 2635 | inline = header & (1 << 16) |
|
2603 | 2636 | else: |
|
2604 | 2637 | raise error.Abort(b'unsupported revlog version: %d' % version) |
|
2605 | 2638 | |
|
2639 | parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None) | |
|
2640 | if parse_index_v1 is None: | |
|
2641 | parse_index_v1 = mercurial.revlog.revlogio().parseindex | |
|
2642 | ||
|
2606 | 2643 | rllen = len(rl) |
|
2607 | 2644 | |
|
2608 | 2645 | node0 = rl.node(0) |
@@ -2617,33 +2654,35 b' def perfrevlogindex(ui, repo, file_=None' | |||
|
2617 | 2654 | allnodesrev = list(reversed(allnodes)) |
|
2618 | 2655 | |
|
2619 | 2656 | def constructor(): |
|
2620 | revlog.revlog(opener, indexfile) | |
|
2657 | if radix is not None: | |
|
2658 | revlog(opener, radix=radix) | |
|
2659 | else: | |
|
2660 | # hg <= 5.8 | |
|
2661 | revlog(opener, indexfile=indexfile) | |
|
2621 | 2662 | |
|
2622 | 2663 | def read(): |
|
2623 | 2664 | with opener(indexfile) as fh: |
|
2624 | 2665 | fh.read() |
|
2625 | 2666 | |
|
2626 | 2667 | def parseindex(): |
|
2627 | revlogio.parseindex(data, inline) | |
|
2668 | parse_index_v1(data, inline) | |
|
2628 | 2669 | |
|
2629 | 2670 | def getentry(revornode): |
|
2630 | index = revlogio.parseindex(data, inline)[0] | |
|
2671 | index = parse_index_v1(data, inline)[0] | |
|
2631 | 2672 | index[revornode] |
|
2632 | 2673 | |
|
2633 | 2674 | def getentries(revs, count=1): |
|
2634 | index = revlogio.parseindex(data, inline)[0] | |
|
2675 | index = parse_index_v1(data, inline)[0] | |
|
2635 | 2676 | |
|
2636 | 2677 | for i in range(count): |
|
2637 | 2678 | for rev in revs: |
|
2638 | 2679 | index[rev] |
|
2639 | 2680 | |
|
2640 | 2681 | def resolvenode(node): |
|
2641 | index = revlogio.parseindex(data, inline)[0] | |
|
2682 | index = parse_index_v1(data, inline)[0] | |
|
2642 | 2683 | rev = getattr(index, 'rev', None) |
|
2643 | 2684 | if rev is None: |
|
2644 | nodemap = getattr( | |
|
2645 | revlogio.parseindex(data, inline)[0], 'nodemap', None | |
|
2646 | ) | |
|
2685 | nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None) | |
|
2647 | 2686 | # This only works for the C code. |
|
2648 | 2687 | if nodemap is None: |
|
2649 | 2688 | return |
@@ -2655,12 +2694,10 b' def perfrevlogindex(ui, repo, file_=None' | |||
|
2655 | 2694 | pass |
|
2656 | 2695 | |
|
2657 | 2696 | def resolvenodes(nodes, count=1): |
|
2658 | index = revlogio.parseindex(data, inline)[0] | |
|
2697 | index = parse_index_v1(data, inline)[0] | |
|
2659 | 2698 | rev = getattr(index, 'rev', None) |
|
2660 | 2699 | if rev is None: |
|
2661 | nodemap = getattr( | |
|
2662 | revlogio.parseindex(data, inline)[0], 'nodemap', None | |
|
2663 | ) | |
|
2700 | nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None) | |
|
2664 | 2701 | # This only works for the C code. |
|
2665 | 2702 | if nodemap is None: |
|
2666 | 2703 | return |
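Note: instead of comparing version numbers, perfrevlogindex probes the object itself: try the modern private attribute with a three-argument getattr, then fall back to the hg <= 5.8 name. That probing idiom reduced to a runnable sketch; the classes here are hypothetical:

    def index_file_of(rl):
        """Prefer the modern attribute name, falling back to the old one."""
        indexfile = getattr(rl, '_indexfile', None)
        if indexfile is None:
            # compatibility with objects exposing the older attribute
            indexfile = getattr(rl, 'indexfile')
        return indexfile

    class OldRevlog:
        indexfile = '00changelog.i'

    class NewRevlog:
        _indexfile = '00changelog.i'

    assert index_file_of(OldRevlog()) == index_file_of(NewRevlog())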
@@ -3015,10 +3052,17 b' def _temprevlog(ui, orig, truncaterev):' | |||
|
3015 | 3052 | if util.safehasattr(orig, k): |
|
3016 | 3053 | revlogkwargs[k] = getattr(orig, k) |
|
3017 | 3054 | |
|
3018 | origindexpath = orig.opener.join(orig.indexfile) | |
|
3019 | origdatapath = orig.opener.join(orig.datafile) | |
|
3020 | indexname = 'revlog.i' | |
|
3021 | dataname = 'revlog.d' | |
|
3055 | indexfile = getattr(orig, '_indexfile', None) | |
|
3056 | if indexfile is None: | |
|
3057 | # compatibility with <= hg-5.8 | |
|
3058 | indexfile = getattr(orig, 'indexfile') | |
|
3059 | origindexpath = orig.opener.join(indexfile) | |
|
3060 | ||
|
3061 | datafile = getattr(orig, '_datafile', getattr(orig, 'datafile')) | |
|
3062 | origdatapath = orig.opener.join(datafile) | |
|
3063 | radix = b'revlog' | |
|
3064 | indexname = b'revlog.i' | |
|
3065 | dataname = b'revlog.d' | |
|
3022 | 3066 | |
|
3023 | 3067 | tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-') |
|
3024 | 3068 | try: |
@@ -3043,9 +3087,12 b' def _temprevlog(ui, orig, truncaterev):' | |||
|
3043 | 3087 | vfs = vfsmod.vfs(tmpdir) |
|
3044 | 3088 | vfs.options = getattr(orig.opener, 'options', None) |
|
3045 | 3089 | |
|
3046 | dest = revlog.revlog( | |
|
3047 | vfs, indexfile=indexname, datafile=dataname, **revlogkwargs | |
|
3048 | ) | |
|
3090 | try: | |
|
3091 | dest = revlog(vfs, radix=radix, **revlogkwargs) | |
|
3092 | except TypeError: | |
|
3093 | dest = revlog( | |
|
3094 | vfs, indexfile=indexname, datafile=dataname, **revlogkwargs | |
|
3095 | ) | |
|
3049 | 3096 | if dest._inline: |
|
3050 | 3097 | raise error.Abort('not supporting inline revlog (yet)') |
|
3051 | 3098 | # make sure internals are initialized |
@@ -3111,9 +3158,14 b' def perfrevlogchunks(ui, repo, file_=Non' | |||
|
3111 | 3158 | |
|
3112 | 3159 | def rlfh(rl): |
|
3113 | 3160 | if rl._inline: |
|
3114 | return getsvfs(repo)(rl.indexfile) | |
|
3161 | indexfile = getattr(rl, '_indexfile', None) | |
|
3162 | if indexfile is None: | |
|
3163 | # compatibility with <= hg-5.8 | |
|
3164 | indexfile = getattr(rl, 'indexfile') | |
|
3165 | return getsvfs(repo)(indexfile) | |
|
3115 | 3166 | else: |
|
3116 | return getsvfs(repo)(rl.datafile) | |
|
3167 | datafile = getattr(rl, 'datafile', getattr(rl, 'datafile')) | |
|
3168 | return getsvfs(repo)(datafile) | |
|
3117 | 3169 | |
|
3118 | 3170 | def doread(): |
|
3119 | 3171 | rl.clearcaches() |
@@ -15,6 +15,10 b' from mercurial import (' | |||
|
15 | 15 | ) |
|
16 | 16 | from mercurial.utils import procutil |
|
17 | 17 | |
|
18 | from mercurial.revlogutils import ( | |
|
19 | constants as revlog_constants, | |
|
20 | ) | |
|
21 | ||
|
18 | 22 | for fp in (sys.stdin, sys.stdout, sys.stderr): |
|
19 | 23 | procutil.setbinary(fp) |
|
20 | 24 | |
@@ -28,7 +32,12 b' while True:' | |||
|
28 | 32 | break |
|
29 | 33 | if l.startswith("file:"): |
|
30 | 34 | f = encoding.strtolocal(l[6:-1]) |
|
31 | r = revlog.revlog(opener, f) | |
|
35 | assert f.endswith(b'.i') | |
|
36 | r = revlog.revlog( | |
|
37 | opener, | |
|
38 | target=(revlog_constants.KIND_OTHER, b'undump-revlog'), | |
|
39 | radix=f[:-2], | |
|
40 | ) | |
|
32 | 41 | procutil.stdout.write(b'%s\n' % f) |
|
33 | 42 | elif l.startswith("node:"): |
|
34 | 43 | n = bin(l[6:-1]) |
@@ -38,7 +38,6 b' import collections' | |||
|
38 | 38 | from mercurial.i18n import _ |
|
39 | 39 | from mercurial.node import ( |
|
40 | 40 | hex, |
|
41 | nullid, | |
|
42 | 41 | short, |
|
43 | 42 | ) |
|
44 | 43 | from mercurial import ( |
@@ -109,7 +108,7 b' class emptyfilecontext(object):' | |||
|
109 | 108 | return b'' |
|
110 | 109 | |
|
111 | 110 | def node(self): |
|
112 | return nullid | |
|
111 | return self._repo.nullid | |
|
113 | 112 | |
|
114 | 113 | |
|
115 | 114 | def uniq(lst): |
@@ -927,7 +926,7 b' class fixupstate(object):' | |||
|
927 | 926 | the commit is a clone from ctx, with a (optionally) different p1, and |
|
928 | 927 | different file contents replaced by memworkingcopy. |
|
929 | 928 | """ |
|
930 | parents = p1 and (p1, nullid) | |
|
929 | parents = p1 and (p1, self.repo.nullid) | |
|
931 | 930 | extra = ctx.extra() |
|
932 | 931 | if self._useobsolete and self.ui.configbool(b'absorb', b'add-noise'): |
|
933 | 932 | extra[b'absorb_source'] = ctx.hex() |
@@ -16,7 +16,6 b' from mercurial.i18n import _' | |||
|
16 | 16 | from mercurial import ( |
|
17 | 17 | cmdutil, |
|
18 | 18 | commands, |
|
19 | pycompat, | |
|
20 | 19 | registrar, |
|
21 | 20 | ) |
|
22 | 21 | |
@@ -66,11 +65,10 b' def amend(ui, repo, *pats, **opts):' | |||
|
66 | 65 | |
|
67 | 66 | See :hg:`help commit` for more details. |
|
68 | 67 | """ |
|
69 | opts = pycompat.byteskwargs(opts) | |
|
70 | cmdutil.checknotesize(ui, opts) | |
|
68 | cmdutil.check_note_size(opts) | |
|
71 | 69 | |
|
72 | 70 | with repo.wlock(), repo.lock(): |
|
73 | if not opts.get(b'logfile'): | |
|
74 | opts[b'message'] = opts.get(b'message') or repo[b'.'].description() | |
|
75 | opts[b'amend'] = True | |
|
76 | return commands._docommit(ui, repo, *pats, **pycompat.strkwargs(opts)) | |
|
71 | if not opts.get('logfile'): | |
|
72 | opts['message'] = opts.get('message') or repo[b'.'].description() | |
|
73 | opts['amend'] = True | |
|
74 | return commands._docommit(ui, repo, *pats, **opts) |
@@ -5,8 +5,9 b'' | |||
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | # This module is for handling 'bzr', that was formerly known as Bazaar-NG; | |
|
9 | # it cannot access 'bar' repositories, but they were never used very much | |
|
8 | # This module is for handling Breezy imports or `brz`, but it's also compatible | |
|
9 | # with Bazaar or `bzr`, that was formerly known as Bazaar-NG; | |
|
10 | # it cannot access `bar` repositories, but they were never used very much. | |
|
10 | 11 | from __future__ import absolute_import |
|
11 | 12 | |
|
12 | 13 | import os |
@@ -16,34 +17,36 b' from mercurial import (' | |||
|
16 | 17 | demandimport, |
|
17 | 18 | error, |
|
18 | 19 | pycompat, |
|
20 | util, | |
|
19 | 21 | ) |
|
20 | 22 | from . import common |
|
21 | 23 | |
|
24 | ||
|
22 | 25 | # these do not work with demandimport, blacklist |
|
23 | 26 | demandimport.IGNORES.update( |
|
24 | 27 | [ |
|
25 | b'bzrlib.transactions', | |
|
26 | b'bzrlib.urlutils', | |
|
28 | b'breezy.transactions', | |
|
29 | b'breezy.urlutils', | |
|
27 | 30 | b'ElementPath', |
|
28 | 31 | ] |
|
29 | 32 | ) |
|
30 | 33 | |
|
31 | 34 | try: |
|
32 | 35 | # bazaar imports |
|
33 | import bzrlib.bzrdir | |
|
34 | import bzrlib.errors | |
|
35 | import bzrlib.revision | |
|
36 | import bzrlib.revisionspec | |
|
36 | import breezy.bzr.bzrdir | |
|
37 | import breezy.errors | |
|
38 | import breezy.revision | |
|
39 | import breezy.revisionspec | |
|
37 | 40 | |
|
38 | bzrdir = bzrlib.bzrdir | |
|
39 | errors = bzrlib.errors | |
|
40 | revision = bzrlib.revision | |
|
41 | revisionspec = bzrlib.revisionspec | |
|
41 | bzrdir = breezy.bzr.bzrdir | |
|
42 | errors = breezy.errors | |
|
43 | revision = breezy.revision | |
|
44 | revisionspec = breezy.revisionspec | |
|
42 | 45 | revisionspec.RevisionSpec |
|
43 | 46 | except ImportError: |
|
44 | 47 | pass |
|
45 | 48 | |
|
46 | supportedkinds = (b'file', b'symlink') | |
|
49 | supportedkinds = ('file', 'symlink') | |
|
47 | 50 | |
|
48 | 51 | |
|
49 | 52 | class bzr_source(common.converter_source): |
@@ -58,15 +61,16 b' class bzr_source(common.converter_source' | |||
|
58 | 61 | ) |
|
59 | 62 | |
|
60 | 63 | try: |
|
61 | # access bzrlib stuff | |
|
64 | # access breezy stuff | |
|
62 | 65 | bzrdir |
|
63 | 66 | except NameError: |
|
64 | 67 | raise common.NoRepo(_(b'Bazaar modules could not be loaded')) |
|
65 | 68 | |
|
66 | path = os.path.abspath(path) | |
|
69 | path = util.abspath(path) | |
|
67 | 70 | self._checkrepotype(path) |
|
68 | 71 | try: |
|
69 | self.sourcerepo = bzrdir.BzrDir.open(path).open_repository() | |
|
72 | bzr_dir = bzrdir.BzrDir.open(path.decode()) | |
|
73 | self.sourcerepo = bzr_dir.open_repository() | |
|
70 | 74 | except errors.NoRepositoryPresent: |
|
71 | 75 | raise common.NoRepo( |
|
72 | 76 | _(b'%s does not look like a Bazaar repository') % path |
@@ -78,7 +82,7 b' class bzr_source(common.converter_source' | |||
|
78 | 82 | # Lightweight checkouts detection is informational but probably |
|
79 | 83 | # fragile at API level. It should not terminate the conversion. |
|
80 | 84 | try: |
|
81 | dir = bzrdir.BzrDir.open_containing(path)[0] | |
|
85 | dir = bzrdir.BzrDir.open_containing(path.decode())[0] | |
|
82 | 86 | try: |
|
83 | 87 | tree = dir.open_workingtree(recommend_upgrade=False) |
|
84 | 88 | branch = tree.branch |
@@ -87,8 +91,8 b' class bzr_source(common.converter_source' | |||
|
87 | 91 | branch = dir.open_branch() |
|
88 | 92 | if ( |
|
89 | 93 | tree is not None |
|
90 | and tree.bzrdir.root_transport.base | |
|
91 | != branch.bzrdir.root_transport.base | |
|
94 | and tree.controldir.root_transport.base | |
|
95 | != branch.controldir.root_transport.base | |
|
92 | 96 | ): |
|
93 | 97 | self.ui.warn( |
|
94 | 98 | _( |
@@ -127,7 +131,8 b' class bzr_source(common.converter_source' | |||
|
127 | 131 | revid = None |
|
128 | 132 | for branch in self._bzrbranches(): |
|
129 | 133 | try: |
|
130 | r = revisionspec.RevisionSpec.from_string(self.revs[0]) | |
|
134 | revspec = self.revs[0].decode() | |
|
135 | r = revisionspec.RevisionSpec.from_string(revspec) | |
|
131 | 136 | info = r.in_history(branch) |
|
132 | 137 | except errors.BzrError: |
|
133 | 138 | pass |
@@ -142,24 +147,26 b' class bzr_source(common.converter_source' | |||
|
142 | 147 | return heads |
|
143 | 148 | |
|
144 | 149 | def getfile(self, name, rev): |
|
150 | name = name.decode() | |
|
145 | 151 | revtree = self.sourcerepo.revision_tree(rev) |
|
146 | fileid = revtree.path2id(name.decode(self.encoding or b'utf-8')) | |
|
147 | kind = None | |
|
148 | if fileid is not None: | |
|
149 | kind = revtree.kind(fileid) | |
|
152 | ||
|
153 | try: | |
|
154 | kind = revtree.kind(name) | |
|
155 | except breezy.errors.NoSuchFile: | |
|
156 | return None, None | |
|
150 | 157 | if kind not in supportedkinds: |
|
151 | 158 | # the file is not available anymore - was deleted |
|
152 | 159 | return None, None |
|
153 | mode = self._modecache[(name, rev)] | |
|
154 | if kind == b'symlink': | |
|
155 | target = revtree.get_symlink_target(fileid) | |
|
160 | mode = self._modecache[(name.encode(), rev)] | |
|
161 | if kind == 'symlink': | |
|
162 | target = revtree.get_symlink_target(name) | |
|
156 | 163 | if target is None: |
|
157 | 164 | raise error.Abort( |
|
158 | 165 | _(b'%s.%s symlink has no target') % (name, rev) |
|
159 | 166 | ) |
|
160 | return target, mode | |
|
167 | return target.encode(), mode | |
|
161 | 168 | else: |
|
162 | sio = revtree.get_file(fileid) | |
|
169 | sio = revtree.get_file(name) | |
|
163 | 170 | return sio.read(), mode |
|
164 | 171 | |
|
165 | 172 | def getchanges(self, version, full): |
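Note: breezy's API is str-based while Mercurial's converter pipeline traffics in bytes, so getfile() now decodes names at the boundary on the way in and re-encodes results on the way out. The boundary pattern in isolation, with hypothetical names:

    def query_str_api(name_bytes, lookup):
        """Decode at the boundary, call a str-based API, re-encode the result."""
        name = name_bytes.decode('utf-8')  # bytes -> str for the foreign API
        result = lookup(name)              # str in, str out
        return result.encode('utf-8')      # str -> bytes for the caller

    table = {'a/b': 'target'}
    assert query_str_api(b'a/b', table.__getitem__) == b'target'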
@@ -184,15 +191,15 b' class bzr_source(common.converter_source' | |||
|
184 | 191 | parents = self._filterghosts(rev.parent_ids) |
|
185 | 192 | self._parentids[version] = parents |
|
186 | 193 | |
|
187 | branch = self.recode(rev.properties.get(b'branch-nick', u'default')) | |
|
188 | if branch == b'trunk': | |
|
189 | branch = b'default' | |
|
194 | branch = rev.properties.get('branch-nick', 'default') | |
|
195 | if branch == 'trunk': | |
|
196 | branch = 'default' | |
|
190 | 197 | return common.commit( |
|
191 | 198 | parents=parents, |
|
192 | 199 | date=b'%d %d' % (rev.timestamp, -rev.timezone), |
|
193 | 200 | author=self.recode(rev.committer), |
|
194 | 201 | desc=self.recode(rev.message), |
|
195 | branch=branch, | |
|
202 | branch=branch.encode('utf8'), | |
|
196 | 203 | rev=version, |
|
197 | 204 | saverev=self._saverev, |
|
198 | 205 | ) |
@@ -234,35 +241,32 b' class bzr_source(common.converter_source' | |||
|
234 | 241 | |
|
235 | 242 | # Process the entries by reverse lexicographic name order to |
|
236 | 243 | # handle nested renames correctly, most specific first. |
|
244 | ||
|
245 | def key(c): | |
|
246 | return c.path[0] or c.path[1] or "" | |
|
247 | ||
|
237 | 248 | curchanges = sorted( |
|
238 | 249 | current.iter_changes(origin), |
|
239 | key=lambda c: c[1][0] or c[1][1], | |
|
250 | key=key, | |
|
240 | 251 | reverse=True, |
|
241 | 252 | ) |
|
242 | for ( | |
|
243 | fileid, | |
|
244 | paths, | |
|
245 | changed_content, | |
|
246 | versioned, | |
|
247 | parent, | |
|
248 | name, | |
|
249 | kind, | |
|
250 | executable, | |
|
251 | ) in curchanges: | |
|
252 | ||
|
253 | for change in curchanges: | |
|
254 | paths = change.path | |
|
255 | kind = change.kind | |
|
256 | executable = change.executable | |
|
253 | 257 | if paths[0] == u'' or paths[1] == u'': |
|
254 | 258 | # ignore changes to tree root |
|
255 | 259 | continue |
|
256 | 260 | |
|
257 | 261 | # bazaar tracks directories, mercurial does not, so |
|
258 | 262 | # we have to rename the directory contents |
|
259 | if kind[1] == b'directory': | |
|
260 | if kind[0] not in (None, b'directory'): | |
|
263 | if kind[1] == 'directory': | |
|
264 | if kind[0] not in (None, 'directory'): | |
|
261 | 265 | # Replacing 'something' with a directory, record it |
|
262 | 266 | # so it can be removed. |
|
263 | 267 | changes.append((self.recode(paths[0]), revid)) |
|
264 | 268 | |
|
265 | if kind[0] == b'directory' and None not in paths: | |
|
269 | if kind[0] == 'directory' and None not in paths: | |
|
266 | 270 | renaming = paths[0] != paths[1] |
|
267 | 271 | # neither an add nor an delete - a move |
|
268 | 272 | # rename all directory contents manually |
@@ -270,9 +274,9 b' class bzr_source(common.converter_source' | |||
|
270 | 274 | # get all child-entries of the directory |
|
271 | 275 | for name, entry in inventory.iter_entries(subdir): |
|
272 | 276 | # hg does not track directory renames |
|
273 | if entry.kind == b'directory': | |
|
277 | if entry.kind == 'directory': | |
|
274 | 278 | continue |
|
275 | frompath = self.recode(paths[0] + b'/' + name) | |
|
279 | frompath = self.recode(paths[0] + '/' + name) | |
|
276 | 280 | if frompath in seen: |
|
277 | 281 | # Already handled by a more specific change entry |
|
278 | 282 | # This is important when you have: |
@@ -283,14 +287,14 b' class bzr_source(common.converter_source' | |||
|
283 | 287 | seen.add(frompath) |
|
284 | 288 | if not renaming: |
|
285 | 289 | continue |
|
286 | topath = self.recode(paths[1] + b'/' + name) | |
|
290 | topath = self.recode(paths[1] + '/' + name) | |
|
287 | 291 | # register the files as changed |
|
288 | 292 | changes.append((frompath, revid)) |
|
289 | 293 | changes.append((topath, revid)) |
|
290 | 294 | # add to mode cache |
|
291 | 295 | mode = ( |
|
292 | 296 | (entry.executable and b'x') |
|
293 | or (entry.kind == b'symlink' and b's') | |
|
297 | or (entry.kind == 'symlink' and b's') | |
|
294 | 298 | or b'' |
|
295 | 299 | ) |
|
296 | 300 | self._modecache[(topath, revid)] = mode |
@@ -320,7 +324,7 b' class bzr_source(common.converter_source' | |||
|
320 | 324 | |
|
321 | 325 | # populate the mode cache |
|
322 | 326 | kind, executable = [e[1] for e in (kind, executable)] |
|
323 | mode = (executable and b'x') or (kind == b'symlink' and b'l') or b'' | |
|
327 | mode = (executable and b'x') or (kind == 'symlink' and b'l') or b'' | |
|
324 | 328 | self._modecache[(topath, revid)] = mode |
|
325 | 329 | changes.append((topath, revid)) |
|
326 | 330 |
@@ -9,11 +9,12 b' from __future__ import absolute_import' | |||
|
9 | 9 | import os |
|
10 | 10 | |
|
11 | 11 | from mercurial.i18n import _ |
|
12 | from mercurial.node import nullhex | |
|
12 | from mercurial.node import sha1nodeconstants | |
|
13 | 13 | from mercurial import ( |
|
14 | 14 | config, |
|
15 | 15 | error, |
|
16 | 16 | pycompat, |
|
17 | util, | |
|
17 | 18 | ) |
|
18 | 19 | |
|
19 | 20 | from . import common |
@@ -74,7 +75,7 b' class convert_git(common.converter_sourc' | |||
|
74 | 75 | |
|
75 | 76 | # Pass an absolute path to git to prevent from ever being interpreted |
|
76 | 77 | # as a URL |
|
77 | path = os.path.abspath(path) | |
|
78 | path = util.abspath(path) | |
|
78 | 79 | |
|
79 | 80 | if os.path.isdir(path + b"/.git"): |
|
80 | 81 | path += b"/.git" |
@@ -192,7 +193,7 b' class convert_git(common.converter_sourc' | |||
|
192 | 193 | return heads |
|
193 | 194 | |
|
194 | 195 | def catfile(self, rev, ftype): |
|
195 | if rev == nullhex: | |
|
196 | if rev == sha1nodeconstants.nullhex: | |
|
196 | 197 | raise IOError |
|
197 | 198 | self.catfilepipe[0].write(rev + b'\n') |
|
198 | 199 | self.catfilepipe[0].flush() |
@@ -214,7 +215,7 b' class convert_git(common.converter_sourc' | |||
|
214 | 215 | return data |
|
215 | 216 | |
|
216 | 217 | def getfile(self, name, rev): |
|
217 | if rev == nullhex: | |
|
218 | if rev == sha1nodeconstants.nullhex: | |
|
218 | 219 | return None, None |
|
219 | 220 | if name == b'.hgsub': |
|
220 | 221 | data = b'\n'.join([m.hgsub() for m in self.submoditer()]) |
@@ -228,7 +229,7 b' class convert_git(common.converter_sourc' | |||
|
228 | 229 | return data, mode |
|
229 | 230 | |
|
230 | 231 | def submoditer(self): |
|
231 | null = nullhex | |
|
232 | null = sha1nodeconstants.nullhex | |
|
232 | 233 | for m in sorted(self.submodules, key=lambda p: p.path): |
|
233 | 234 | if m.node != null: |
|
234 | 235 | yield m |
@@ -317,7 +318,7 b' class convert_git(common.converter_sourc' | |||
|
317 | 318 | subexists[0] = True |
|
318 | 319 | if entry[4] == b'D' or renamesource: |
|
319 | 320 | subdeleted[0] = True |
|
320 | changes.append((b'.hgsub', nullhex)) | |
|
321 | changes.append((b'.hgsub', sha1nodeconstants.nullhex)) | |
|
321 | 322 | else: |
|
322 | 323 | changes.append((b'.hgsub', b'')) |
|
323 | 324 | elif entry[1] == b'160000' or entry[0] == b':160000': |
@@ -325,7 +326,7 b' class convert_git(common.converter_sourc' | |||
|
325 | 326 | subexists[0] = True |
|
326 | 327 | else: |
|
327 | 328 | if renamesource: |
|
328 | h = nullhex | |
|
329 | h = sha1nodeconstants.nullhex | |
|
329 | 330 | self.modecache[(f, h)] = (p and b"x") or (s and b"l") or b"" |
|
330 | 331 | changes.append((f, h)) |
|
331 | 332 | |
@@ -362,7 +363,7 b' class convert_git(common.converter_sourc' | |||
|
362 | 363 | |
|
363 | 364 | if subexists[0]: |
|
364 | 365 | if subdeleted[0]: |
|
365 | changes.append((b'.hgsubstate', nullhex)) | |
|
366 | changes.append((b'.hgsubstate', sha1nodeconstants.nullhex)) | |
|
366 | 367 | else: |
|
367 | 368 | self.retrievegitmodules(version) |
|
368 | 369 | changes.append((b'.hgsubstate', b'')) |
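
Every hunk in this file swaps the bare nullhex constant for sha1nodeconstants.nullhex, part of the broader move to hang hash-width constants off one namespace instead of module-level names. A rough standalone illustration of what such a namespace holds (hashlib only; this mirrors the idea, not Mercurial's actual class):

    import hashlib

    class sha1constants_sketch:
        # SHA-1 digests are 20 bytes; the null node is all zero bytes
        nodelen = hashlib.sha1().digest_size  # 20
        nullid = b'\0' * nodelen
        nullhex = nullid.hex().encode('ascii')  # forty ASCII zeros

    assert len(sha1constants_sketch.nullhex) == 40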
@@ -27,8 +27,7 b' from mercurial.pycompat import open' | |||
|
27 | 27 | from mercurial.node import ( |
|
28 | 28 | bin, |
|
29 | 29 | hex, |
|
30 | nullhex, | |
|
31 | nullid, | |
|
30 | sha1nodeconstants, | |
|
32 | 31 | ) |
|
33 | 32 | from mercurial import ( |
|
34 | 33 | bookmarks, |
@@ -160,7 +159,7 b' class mercurial_sink(common.converter_si' | |||
|
160 | 159 | continue |
|
161 | 160 | revid = revmap.get(source.lookuprev(s[0])) |
|
162 | 161 | if not revid: |
|
163 | if s[0] == nullhex: | |
|
162 | if s[0] == sha1nodeconstants.nullhex: | |
|
164 | 163 | revid = s[0] |
|
165 | 164 | else: |
|
166 | 165 | # missing, but keep for hash stability |
@@ -179,7 +178,7 b' class mercurial_sink(common.converter_si' | |||
|
179 | 178 | |
|
180 | 179 | revid = s[0] |
|
181 | 180 | subpath = s[1] |
|
182 | if revid != nullhex: | |
|
181 | if revid != sha1nodeconstants.nullhex: | |
|
183 | 182 | revmap = self.subrevmaps.get(subpath) |
|
184 | 183 | if revmap is None: |
|
185 | 184 | revmap = mapfile( |
@@ -304,9 +303,9 b' class mercurial_sink(common.converter_si' | |||
|
304 | 303 | parent = parents[0] |
|
305 | 304 | |
|
306 | 305 | if len(parents) < 2: |
|
307 | parents.append(nullid) | |
|
306 | parents.append(self.repo.nullid) | |
|
308 | 307 | if len(parents) < 2: |
|
309 | parents.append(nullid) | |
|
308 | parents.append(self.repo.nullid) | |
|
310 | 309 | p2 = parents.pop(0) |
|
311 | 310 | |
|
312 | 311 | text = commit.desc |
@@ -356,7 +355,7 b' class mercurial_sink(common.converter_si' | |||
|
356 | 355 | p2 = parents.pop(0) |
|
357 | 356 | p1ctx = self.repo[p1] |
|
358 | 357 | p2ctx = None |
|
359 | if p2 != nullid: | |
|
358 | if p2 != self.repo.nullid: | |
|
360 | 359 | p2ctx = self.repo[p2] |
|
361 | 360 | fileset = set(files) |
|
362 | 361 | if full: |
@@ -421,7 +420,7 b' class mercurial_sink(common.converter_si' | |||
|
421 | 420 | |
|
422 | 421 | def puttags(self, tags): |
|
423 | 422 | tagparent = self.repo.branchtip(self.tagsbranch, ignoremissing=True) |
|
424 | tagparent = tagparent or nullid | |
|
423 | tagparent = tagparent or self.repo.nullid | |
|
425 | 424 | |
|
426 | 425 | oldlines = set() |
|
427 | 426 | for branch, heads in pycompat.iteritems(self.repo.branchmap()): |
@@ -164,7 +164,7 b' def geturl(path):' | |||
|
164 | 164 | # svn.client.url_from_path() fails with local repositories |
|
165 | 165 | pass |
|
166 | 166 | if os.path.isdir(path): |
|
167 | path = os.path.normpath(os.path.abspath(path)) |

167 | path = os.path.normpath(util.abspath(path)) | |
|
168 | 168 | if pycompat.iswindows: |
|
169 | 169 | path = b'/' + util.normpath(path) |
|
170 | 170 | # Module URL is later compared with the repository URL returned |
@@ -431,7 +431,7 b' def issvnurl(ui, url):' | |||
|
431 | 431 | path = unicodepath.encode(fsencoding) |
|
432 | 432 | except ValueError: |
|
433 | 433 | proto = b'file' |
|
434 | path = os.path.abspath(url) |

434 | path = util.abspath(url) | |
|
435 | 435 | try: |
|
436 | 436 | path.decode(fsencoding) |
|
437 | 437 | except UnicodeDecodeError: |
@@ -442,7 +442,7 b' def reposetup(ui, repo):' | |||
|
442 | 442 | continue |
|
443 | 443 | # all normal files need to be looked at again since |
|
444 | 444 | # the new .hgeol file specify a different filter |
|
445 | self.dirstate.normallookup(f) |

445 | self.dirstate.set_possibly_dirty(f) | |
|
446 | 446 | # Write the cache to update mtime and cache .hgeol |
|
447 | 447 | with self.vfs(b"eol.cache", b"w") as f: |
|
448 | 448 | f.write(hgeoldata) |
@@ -757,7 +757,7 b' def writeworkingdir(repo, ctx, filedata,' | |||
|
757 | 757 | fctx = ctx[path] |
|
758 | 758 | fctx.write(data, fctx.flags()) |
|
759 | 759 | if repo.dirstate[path] == b'n': |
|
760 | repo.dirstate.normallookup(path) |

760 | repo.dirstate.set_possibly_dirty(path) | |
|
761 | 761 | |
|
762 | 762 | oldparentnodes = repo.dirstate.parents() |
|
763 | 763 | newparentnodes = [replacements.get(n, n) for n in oldparentnodes] |
@@ -284,7 +284,7 b' class gitbmstore(object):' | |||
|
284 | 284 | |
|
285 | 285 | def init(orig, ui, dest=b'.', **opts): |
|
286 | 286 | if opts.get('git', False): |
|
287 | path = os.path.abspath(dest) |

287 | path = util.abspath(dest) | |
|
288 | 288 | # TODO: walk up looking for the git repo |
|
289 | 289 | _setupdothg(ui, path) |
|
290 | 290 | return 0 |
@@ -4,7 +4,7 b' import contextlib' | |||
|
4 | 4 | import errno |
|
5 | 5 | import os |
|
6 | 6 | |
|
7 | from mercurial.node import nullid |

7 | from mercurial.node import sha1nodeconstants | |
|
8 | 8 | from mercurial import ( |
|
9 | 9 | error, |
|
10 | 10 | extensions, |
@@ -81,14 +81,16 b' class gitdirstate(object):' | |||
|
81 | 81 | except pygit2.GitError: |
|
82 | 82 | # Typically happens when peeling HEAD fails, as in an |
|
83 | 83 | # empty repository. |
|
84 | return nullid | |
|
84 | return sha1nodeconstants.nullid | |
|
85 | 85 | |
|
86 | 86 | def p2(self): |
|
87 | 87 | # TODO: MERGE_HEAD? something like that, right? |
|
88 | return nullid | |
|
88 | return sha1nodeconstants.nullid | |
|
89 | 89 | |
|
90 | def setparents(self, p1, p2=nullid): |

91 | assert p2 == nullid, b'TODO merging support' | |
|
90 | def setparents(self, p1, p2=None): | |
|
91 | if p2 is None: | |
|
92 | p2 = sha1nodeconstants.nullid | |
|
93 | assert p2 == sha1nodeconstants.nullid, b'TODO merging support' | |
|
92 | 94 | self.git.head.set_target(gitutil.togitnode(p1)) |
|
93 | 95 | |
|
94 | 96 | @util.propertycache |
@@ -102,14 +104,14 b' class gitdirstate(object):' | |||
|
102 | 104 | |
|
103 | 105 | def parents(self): |
|
104 | 106 | # TODO how on earth do we find p2 if a merge is in flight? |
|
105 | return self.p1(), nullid | |
|
107 | return self.p1(), sha1nodeconstants.nullid | |
|
106 | 108 | |
|
107 | 109 | def __iter__(self): |
|
108 | 110 | return (pycompat.fsencode(f.path) for f in self.git.index) |
|
109 | 111 | |
|
110 | 112 | def items(self): |
|
111 | 113 | for ie in self.git.index: |
|
112 | yield ie.path, None # value should be a dirstatetuple |

114 | yield ie.path, None # value should be a DirstateItem | |
|
113 | 115 | |
|
114 | 116 | # py2,3 compat forward |
|
115 | 117 | iteritems = items |
@@ -5,11 +5,8 b' from mercurial.i18n import _' | |||
|
5 | 5 | from mercurial.node import ( |
|
6 | 6 | bin, |
|
7 | 7 | hex, |
|
8 | nullhex, | |
|
9 | nullid, | |
|
10 | 8 | nullrev, |
|
11 | 9 | sha1nodeconstants, |
|
12 | wdirhex, | |
|
13 | 10 | ) |
|
14 | 11 | from mercurial import ( |
|
15 | 12 | ancestor, |
@@ -47,7 +44,7 b' class baselog(object): # revlog.revlog)' | |||
|
47 | 44 | ) |
|
48 | 45 | |
|
49 | 46 | def rev(self, n): |
|
50 | if n == nullid: | |
|
47 | if n == sha1nodeconstants.nullid: | |
|
51 | 48 | return -1 |
|
52 | 49 | t = self._db.execute( |
|
53 | 50 | 'SELECT rev FROM changelog WHERE node = ?', (gitutil.togitnode(n),) |
@@ -58,7 +55,7 b' class baselog(object): # revlog.revlog)' | |||
|
58 | 55 | |
|
59 | 56 | def node(self, r): |
|
60 | 57 | if r == nullrev: |
|
61 | return nullid | |
|
58 | return sha1nodeconstants.nullid | |
|
62 | 59 | t = self._db.execute( |
|
63 | 60 | 'SELECT node FROM changelog WHERE rev = ?', (r,) |
|
64 | 61 | ).fetchone() |
@@ -135,7 +132,7 b' class changelog(baselog):' | |||
|
135 | 132 | bin(v[0]): v[1] |
|
136 | 133 | for v in self._db.execute('SELECT node, rev FROM changelog') |
|
137 | 134 | } |
|
138 | r[nullid] = nullrev | |
|
135 | r[sha1nodeconstants.nullid] = nullrev | |
|
139 | 136 | return r |
|
140 | 137 | |
|
141 | 138 | def tip(self): |
@@ -144,7 +141,7 b' class changelog(baselog):' | |||
|
144 | 141 | ).fetchone() |
|
145 | 142 | if t: |
|
146 | 143 | return bin(t[0]) |
|
147 | return nullid | |
|
144 | return sha1nodeconstants.nullid | |
|
148 | 145 | |
|
149 | 146 | def revs(self, start=0, stop=None): |
|
150 | 147 | if stop is None: |
@@ -167,7 +164,7 b' class changelog(baselog):' | |||
|
167 | 164 | return -1 |
|
168 | 165 | |
|
169 | 166 | def _partialmatch(self, id): |
|
170 | if wdirhex.startswith(id): | |
|
167 | if sha1nodeconstants.wdirhex.startswith(id): | |
|
171 | 168 | raise error.WdirUnsupported |
|
172 | 169 | candidates = [ |
|
173 | 170 | bin(x[0]) |
@@ -176,8 +173,8 b' class changelog(baselog):' | |||
|
176 | 173 | (pycompat.sysstr(id + b'%'),), |
|
177 | 174 | ) |
|
178 | 175 | ] |
|
179 | if nullhex.startswith(id): | |
|
180 | candidates.append(nullid) | |
|
176 | if sha1nodeconstants.nullhex.startswith(id): | |
|
177 | candidates.append(sha1nodeconstants.nullid) | |
|
181 | 178 | if len(candidates) > 1: |
|
182 | 179 | raise error.AmbiguousPrefixLookupError( |
|
183 | 180 | id, b'00changelog.i', _(b'ambiguous identifier') |
@@ -223,8 +220,10 b' class changelog(baselog):' | |||
|
223 | 220 | n = nodeorrev |
|
224 | 221 | extra = {b'branch': b'default'} |
|
225 | 222 | # handle looking up nullid |
|
226 | if n == nullid: | |
|
227 | return hgchangelog._changelogrevision(extra=extra, manifest=nullid) |

223 | if n == sha1nodeconstants.nullid: | |
|
224 | return hgchangelog._changelogrevision( | |
|
225 | extra=extra, manifest=sha1nodeconstants.nullid | |
|
226 | ) | |
|
228 | 227 | hn = gitutil.togitnode(n) |
|
229 | 228 | # We've got a real commit! |
|
230 | 229 | files = [ |
@@ -301,7 +300,7 b' class changelog(baselog):' | |||
|
301 | 300 | not supplied, uses all of the revlog's heads. If common is not |
|
302 | 301 | supplied, uses nullid.""" |
|
303 | 302 | if common is None: |
|
304 | common = [nullid] | |
|
303 | common = [sha1nodeconstants.nullid] | |
|
305 | 304 | if heads is None: |
|
306 | 305 | heads = self.heads() |
|
307 | 306 | |
@@ -400,9 +399,9 b' class changelog(baselog):' | |||
|
400 | 399 | ): |
|
401 | 400 | parents = [] |
|
402 | 401 | hp1, hp2 = gitutil.togitnode(p1), gitutil.togitnode(p2) |
|
403 | if p1 != nullid: | |
|
402 | if p1 != sha1nodeconstants.nullid: | |
|
404 | 403 | parents.append(hp1) |
|
405 | if p2 and p2 != nullid: | |
|
404 | if p2 and p2 != sha1nodeconstants.nullid: | |
|
406 | 405 | parents.append(hp2) |
|
407 | 406 | assert date is not None |
|
408 | 407 | timestamp, tz = date |
@@ -435,7 +434,7 b' class manifestlog(baselog):' | |||
|
435 | 434 | return self.get(b'', node) |
|
436 | 435 | |
|
437 | 436 | def get(self, relpath, node): |
|
438 | if node == nullid: | |
|
437 | if node == sha1nodeconstants.nullid: | |
|
439 | 438 | # TODO: this should almost certainly be a memgittreemanifestctx |
|
440 | 439 | return manifest.memtreemanifestctx(self, relpath) |
|
441 | 440 | commit = self.gitrepo[gitutil.togitnode(node)] |
@@ -454,9 +453,10 b' class filelog(baselog):' | |||
|
454 | 453 | super(filelog, self).__init__(gr, db) |
|
455 | 454 | assert isinstance(path, bytes) |
|
456 | 455 | self.path = path |
|
456 | self.nullid = sha1nodeconstants.nullid | |
|
457 | 457 | |
|
458 | 458 | def read(self, node): |
|
459 | if node == nullid: | |
|
459 | if node == sha1nodeconstants.nullid: | |
|
460 | 460 | return b'' |
|
461 | 461 | return self.gitrepo[gitutil.togitnode(node)].data |
|
462 | 462 |
@@ -1,7 +1,7 b'' | |||
|
1 | 1 | """utilities to assist in working with pygit2""" |
|
2 | 2 | from __future__ import absolute_import |
|
3 | 3 | |
|
4 | from mercurial.node import bin, hex, nullid |

4 | from mercurial.node import bin, hex, sha1nodeconstants | |
|
5 | 5 | |
|
6 | 6 | from mercurial import pycompat |
|
7 | 7 | |
@@ -50,4 +50,4 b' def fromgitnode(n):' | |||
|
50 | 50 | return bin(n) |
|
51 | 51 | |
|
52 | 52 | |
|
53 | nullgit = togitnode(nullid) | |
|
53 | nullgit = togitnode(sha1nodeconstants.nullid) |
@@ -5,9 +5,7 b' import os' | |||
|
5 | 5 | import sqlite3 |
|
6 | 6 | |
|
7 | 7 | from mercurial.i18n import _ |
|
8 | from mercurial.node import ( |

9 | nullid, | |
|
10 | ) | |
|
8 | from mercurial.node import sha1nodeconstants | |
|
11 | 9 | |
|
12 | 10 | from mercurial import ( |
|
13 | 11 | encoding, |
@@ -317,7 +315,9 b' def _index_repo(' | |||
|
317 | 315 | ) |
|
318 | 316 | new_files = (p.delta.new_file for p in patchgen) |
|
319 | 317 | files = { |
|
320 | nf.path: nf.id.hex for nf in new_files if nf.id.raw != nullid |

318 | nf.path: nf.id.hex | |
|
319 | for nf in new_files | |
|
320 | if nf.id.raw != sha1nodeconstants.nullid | |
|
321 | 321 | } |
|
322 | 322 | for p, n in files.items(): |
|
323 | 323 | # We intentionally set NULLs for any file parentage |
@@ -14,7 +14,6 b' from mercurial.i18n import _' | |||
|
14 | 14 | from mercurial.node import ( |
|
15 | 15 | bin, |
|
16 | 16 | hex, |
|
17 | nullid, | |
|
18 | 17 | short, |
|
19 | 18 | ) |
|
20 | 19 | from mercurial import ( |
@@ -314,7 +313,9 b' def _dosign(ui, repo, *revs, **opts):' | |||
|
314 | 313 | if revs: |
|
315 | 314 | nodes = [repo.lookup(n) for n in revs] |
|
316 | 315 | else: |
|
317 | nodes = [node for node in repo.dirstate.parents() if node != nullid] | |
|
316 | nodes = [ | |
|
317 | node for node in repo.dirstate.parents() if node != repo.nullid | |
|
318 | ] | |
|
318 | 319 | if len(nodes) > 1: |
|
319 | 320 | raise error.Abort( |
|
320 | 321 | _(b'uncommitted merge - please provide a specific revision') |
@@ -40,7 +40,6 b' import os' | |||
|
40 | 40 | |
|
41 | 41 | from mercurial.i18n import _ |
|
42 | 42 | from mercurial.node import ( |
|
43 | nullid, | |
|
44 | 43 | nullrev, |
|
45 | 44 | short, |
|
46 | 45 | ) |
@@ -95,7 +94,7 b' def difftree(ui, repo, node1=None, node2' | |||
|
95 | 94 | mmap2 = repo[node2].manifest() |
|
96 | 95 | m = scmutil.match(repo[node1], files) |
|
97 | 96 | st = repo.status(node1, node2, m) |
|
98 | empty = short(nullid) | |
|
97 | empty = short(repo.nullid) | |
|
99 | 98 | |
|
100 | 99 | for f in st.modified: |
|
101 | 100 | # TODO get file permissions |
@@ -317,9 +316,9 b' def revtree(ui, args, repo, full=b"tree"' | |||
|
317 | 316 | parentstr = b"" |
|
318 | 317 | if parents: |
|
319 | 318 | pp = repo.changelog.parents(n) |
|
320 | if pp[0] != nullid: | |
|
319 | if pp[0] != repo.nullid: | |
|
321 | 320 | parentstr += b" " + short(pp[0]) |
|
322 | if pp[1] != nullid: | |
|
321 | if pp[1] != repo.nullid: | |
|
323 | 322 | parentstr += b" " + short(pp[1]) |
|
324 | 323 | if not full: |
|
325 | 324 | ui.write(b"%s%s\n" % (short(n), parentstr)) |
@@ -575,9 +575,8 b' class histeditaction(object):' | |||
|
575 | 575 | parentctx, but does not commit them.""" |
|
576 | 576 | repo = self.repo |
|
577 | 577 | rulectx = repo[self.node] |
|
578 | repo.ui.pushbuffer(error=True, labeled=True) | |
|
579 | hg.update(repo, self.state.parentctxnode, quietempty=True) | |
|
580 | repo.ui.popbuffer() | |
|
578 | with repo.ui.silent(): | |
|
579 | hg.update(repo, self.state.parentctxnode, quietempty=True) | |
|
581 | 580 | stats = applychanges(repo.ui, repo, rulectx, {}) |
|
582 | 581 | repo.dirstate.setbranch(rulectx.branch()) |
|
583 | 582 | if stats.unresolvedcount: |
@@ -654,10 +653,9 b' def applychanges(ui, repo, ctx, opts):' | |||
|
654 | 653 | if ctx.p1().node() == repo.dirstate.p1(): |
|
655 | 654 | # edits are "in place" we do not need to make any merge, |
|
656 | 655 | # just applies changes on parent for editing |
|
657 | ui.pushbuffer() |

658 | cmdutil.revert(ui, repo, ctx, all=True) | |
|
659 | stats = mergemod.updateresult(0, 0, 0, 0) | |
|
660 | ui.popbuffer() | |
|
656 | with ui.silent(): | |
|
657 | cmdutil.revert(ui, repo, ctx, all=True) | |
|
658 | stats = mergemod.updateresult(0, 0, 0, 0) | |
|
661 | 659 | else: |
|
662 | 660 | try: |
|
663 | 661 | # ui.forcemerge is an internal variable, do not document |
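
Both histedit hunks above replace a paired pushbuffer()/popbuffer() with a single with ui.silent(): block, so the capture buffer is popped even when the wrapped call raises. A minimal sketch of that exception-safety property, using a toy ui class rather than Mercurial's (assumed shape only):

    import contextlib

    class ToyUI:
        def __init__(self):
            self.buffers = []

        def pushbuffer(self):
            self.buffers.append([])

        def popbuffer(self):
            return self.buffers.pop()

        @contextlib.contextmanager
        def silent(self):
            # push on entry; the finally guarantees the matching pop
            self.pushbuffer()
            try:
                yield
            finally:
                self.popbuffer()

    ui = ToyUI()
    try:
        with ui.silent():
            raise RuntimeError('update failed')
    except RuntimeError:
        pass
    assert ui.buffers == []  # popped despite the exception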
@@ -22,7 +22,6 b' from mercurial.i18n import _' | |||
|
22 | 22 | from mercurial.node import ( |
|
23 | 23 | bin, |
|
24 | 24 | hex, |
|
25 | nullid, | |
|
26 | 25 | ) |
|
27 | 26 | |
|
28 | 27 | from mercurial import ( |
@@ -117,8 +116,8 b' def recorddirstateparents(dirstate, old,' | |||
|
117 | 116 | new = list(new) |
|
118 | 117 | if util.safehasattr(dirstate, 'journalstorage'): |
|
119 | 118 | # only record two hashes if there was a merge |
|
120 | oldhashes = old[:1] if old[1] == nullid else old | |
|
121 | newhashes = new[:1] if new[1] == nullid else new | |
|
119 | oldhashes = old[:1] if old[1] == dirstate._nodeconstants.nullid else old | |
|
120 | newhashes = new[:1] if new[1] == dirstate._nodeconstants.nullid else new | |
|
122 | 121 | dirstate.journalstorage.record( |
|
123 | 122 | wdirparenttype, b'.', oldhashes, newhashes |
|
124 | 123 | ) |
@@ -131,7 +130,7 b' def recordbookmarks(orig, store, fp):' | |||
|
131 | 130 | if util.safehasattr(repo, 'journal'): |
|
132 | 131 | oldmarks = bookmarks.bmstore(repo) |
|
133 | 132 | for mark, value in pycompat.iteritems(store): |
|
134 | oldvalue = oldmarks.get(mark, nullid) | |
|
133 | oldvalue = oldmarks.get(mark, repo.nullid) | |
|
135 | 134 | if value != oldvalue: |
|
136 | 135 | repo.journal.record(bookmarktype, mark, oldvalue, value) |
|
137 | 136 | return orig(store, fp) |
@@ -356,9 +356,9 b' class kwtemplater(object):' | |||
|
356 | 356 | fp.write(data) |
|
357 | 357 | fp.close() |
|
358 | 358 | if kwcmd: |
|
359 | self.repo.dirstate.normal(f) |

359 | self.repo.dirstate.set_clean(f) | |
|
360 | 360 | elif self.postcommit: |
|
361 | self.repo.dirstate.normallookup(f) |

361 | self.repo.dirstate.update_file_p1(f, p1_tracked=True) | |
|
362 | 362 | |
|
363 | 363 | def shrink(self, fname, text): |
|
364 | 364 | '''Returns text with all keyword substitutions removed.''' |
@@ -691,7 +691,7 b' def kw_amend(orig, ui, repo, old, extra,' | |||
|
691 | 691 | kwt = getattr(repo, '_keywordkwt', None) |
|
692 | 692 | if kwt is None: |
|
693 | 693 | return orig(ui, repo, old, extra, pats, opts) |
|
694 | with repo.wlock(): | |
|
694 | with repo.wlock(), repo.dirstate.parentchange(): | |
|
695 | 695 | kwt.postcommit = True |
|
696 | 696 | newid = orig(ui, repo, old, extra, pats, opts) |
|
697 | 697 | if newid != old.node(): |
@@ -757,8 +757,9 b' def kw_dorecord(orig, ui, repo, commitfu' | |||
|
757 | 757 | if ctx != recctx: |
|
758 | 758 | modified, added = _preselect(wstatus, recctx.files()) |
|
759 | 759 | kwt.restrict = False |
|
760 | kwt.overwrite(recctx, modified, False, True) | |
|
761 | kwt.overwrite(recctx, added, False, True, True) |

760 | with repo.dirstate.parentchange(): | |
|
761 | kwt.overwrite(recctx, modified, False, True) | |
|
762 | kwt.overwrite(recctx, added, False, True, True) | |
|
762 | 763 | kwt.restrict = True |
|
763 | 764 | return ret |
|
764 | 765 |
@@ -11,7 +11,8 b' from __future__ import absolute_import' | |||
|
11 | 11 | |
|
12 | 12 | from mercurial.i18n import _ |
|
13 | 13 | |
|
14 | from mercurial import node, util |

14 | from mercurial.node import short | |
|
15 | from mercurial import util | |
|
15 | 16 | from mercurial.utils import ( |
|
16 | 17 | urlutil, |
|
17 | 18 | ) |
@@ -137,7 +138,7 b' class basestore(object):' | |||
|
137 | 138 | filestocheck = [] # list of (cset, filename, expectedhash) |
|
138 | 139 | for rev in revs: |
|
139 | 140 | cctx = self.repo[rev] |
|
140 | cset = b"%d:%s" % (cctx.rev(), node.short(cctx.node())) |

141 | cset = b"%d:%s" % (cctx.rev(), short(cctx.node())) | |
|
141 | 142 | |
|
142 | 143 | for standin in cctx: |
|
143 | 144 | filename = lfutil.splitstandin(standin) |
@@ -17,7 +17,6 b' from mercurial.i18n import _' | |||
|
17 | 17 | from mercurial.node import ( |
|
18 | 18 | bin, |
|
19 | 19 | hex, |
|
20 | nullid, | |
|
21 | 20 | ) |
|
22 | 21 | |
|
23 | 22 | from mercurial import ( |
@@ -115,7 +114,7 b' def lfconvert(ui, src, dest, *pats, **op' | |||
|
115 | 114 | rsrc[ctx] |
|
116 | 115 | for ctx in rsrc.changelog.nodesbetween(None, rsrc.heads())[0] |
|
117 | 116 | ) |
|
118 | revmap = {nullid: nullid} | |
|
117 | revmap = {rsrc.nullid: rdst.nullid} | |
|
119 | 118 | if tolfile: |
|
120 | 119 | # Lock destination to prevent modification while it is converted to. |
|
121 | 120 | # Don't need to lock src because we are just reading from its |
@@ -340,7 +339,7 b' def _commitcontext(rdst, parents, ctx, d' | |||
|
340 | 339 | # Generate list of changed files |
|
341 | 340 | def _getchangedfiles(ctx, parents): |
|
342 | 341 | files = set(ctx.files()) |
|
343 | if nullid not in parents: | |
|
342 | if ctx.repo().nullid not in parents: | |
|
344 | 343 | mc = ctx.manifest() |
|
345 | 344 | for pctx in ctx.parents(): |
|
346 | 345 | for fn in pctx.manifest().diff(mc): |
@@ -354,7 +353,7 b' def _convertparents(ctx, revmap):' | |||
|
354 | 353 | for p in ctx.parents(): |
|
355 | 354 | parents.append(revmap[p.node()]) |
|
356 | 355 | while len(parents) < 2: |
|
357 | parents.append(nullid) | |
|
356 | parents.append(ctx.repo().nullid) | |
|
358 | 357 | return parents |
|
359 | 358 | |
|
360 | 359 | |
@@ -520,47 +519,53 b' def updatelfiles(' | |||
|
520 | 519 | filelist = set(filelist) |
|
521 | 520 | lfiles = [f for f in lfiles if f in filelist] |
|
522 | 521 | |
|
523 | update = {} | |
|
524 | dropped = set() | |
|
525 | updated, removed = 0, 0 | |
|
526 | wvfs = repo.wvfs | |
|
527 | wctx = repo[None] |

528 | for lfile in lfiles: | |
|
529 | lfileorig = os.path.relpath( | |
|
530 | scmutil.backuppath(ui, repo, lfile), start=repo.root | |
|
531 | ) | |
|
532 | standin = lfutil.standin(lfile) | |
|
533 | standinorig = os.path.relpath( | |
|
534 | scmutil.backuppath(ui, repo, standin), start=repo.root | |
|
535 | ) | |
|
536 | if wvfs.exists(standin): | |
|
537 | if wvfs.exists(standinorig) and wvfs.exists(lfile): |

538 | shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig)) | |
|
539 | wvfs.unlinkpath(standinorig) | |
|
540 | expecthash = lfutil.readasstandin(wctx[standin]) | |
|
541 | if expecthash != b'': | |
|
542 | if lfile not in wctx: # not switched to normal file | |
|
543 | if repo.dirstate[standin] != b'?': | |
|
544 | wvfs.unlinkpath(lfile, ignoremissing=True) | |
|
545 | else: | |
|
546 | dropped.add(lfile) |

522 | with lfdirstate.parentchange(): | |
|
523 | update = {} | |
|
524 | dropped = set() | |
|
525 | updated, removed = 0, 0 | |
|
526 | wvfs = repo.wvfs | |
|
527 | wctx = repo[None] | |
|
528 | for lfile in lfiles: | |
|
529 | lfileorig = os.path.relpath( | |
|
530 | scmutil.backuppath(ui, repo, lfile), start=repo.root | |
|
531 | ) | |
|
532 | standin = lfutil.standin(lfile) | |
|
533 | standinorig = os.path.relpath( | |
|
534 | scmutil.backuppath(ui, repo, standin), start=repo.root | |
|
535 | ) | |
|
536 | if wvfs.exists(standin): | |
|
537 | if wvfs.exists(standinorig) and wvfs.exists(lfile): | |
|
538 | shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig)) | |
|
539 | wvfs.unlinkpath(standinorig) | |
|
540 | expecthash = lfutil.readasstandin(wctx[standin]) | |
|
541 | if expecthash != b'': | |
|
542 | if lfile not in wctx: # not switched to normal file | |
|
543 | if repo.dirstate[standin] != b'?': | |
|
544 | wvfs.unlinkpath(lfile, ignoremissing=True) | |
|
545 | else: | |
|
546 | dropped.add(lfile) | |
|
547 | 547 | |
|
548 | # use normallookup() to allocate an entry in largefiles | |
|
549 | # dirstate to prevent lfilesrepo.status() from reporting | |
|
550 | # missing files as removed. | |
|
551 | lfdirstate.normallookup(lfile) |

552 | update[lfile] = expecthash |

553 | else: | |
|
554 | # Remove lfiles for which the standin is deleted, unless the | |
|
555 | # lfile is added to the repository again. This happens when a | |
|
556 | # largefile is converted back to a normal file: the standin | |
|
557 | # disappears, but a new (normal) file appears as the lfile. | |
|
558 | if ( |

559 | wvfs.exists(lfile) | |
|
560 | and repo.dirstate.normalize(lfile) not in wctx | |
|
561 | ): | |
|
562 | wvfs.unlinkpath(lfile) | |
|
563 | removed += 1 |

548 | # use normallookup() to allocate an entry in largefiles | |
|
549 | # dirstate to prevent lfilesrepo.status() from reporting | |
|
550 | # missing files as removed. | |
|
551 | lfdirstate.update_file( | |
|
552 | lfile, | |
|
553 | p1_tracked=True, | |
|
554 | wc_tracked=True, | |
|
555 | possibly_dirty=True, | |
|
556 | ) | |
|
557 | update[lfile] = expecthash | |
|
558 | else: | |
|
559 | # Remove lfiles for which the standin is deleted, unless the | |
|
560 | # lfile is added to the repository again. This happens when a | |
|
561 | # largefile is converted back to a normal file: the standin | |
|
562 | # disappears, but a new (normal) file appears as the lfile. | |
|
563 | if ( | |
|
564 | wvfs.exists(lfile) | |
|
565 | and repo.dirstate.normalize(lfile) not in wctx | |
|
566 | ): | |
|
567 | wvfs.unlinkpath(lfile) | |
|
568 | removed += 1 | |
|
564 | 569 | |
|
565 | 570 | # largefile processing might be slow and be interrupted - be prepared |
|
566 | 571 | lfdirstate.write() |
@@ -570,46 +575,48 b' def updatelfiles(' | |||
|
570 | 575 | |
|
571 | 576 | for f in dropped: |
|
572 | 577 | repo.wvfs.unlinkpath(lfutil.standin(f)) |
|
573 | ||
|
574 | 578 | # This needs to happen for dropped files, otherwise they stay in |
|
575 | 579 | # the M state. |
|
576 | lfutil.synclfdirstate(repo, lfdirstate, f, normallookup) | |
|
580 | lfdirstate._drop(f) | |
|
577 | 581 | |
|
578 | 582 | statuswriter(_(b'getting changed largefiles\n')) |
|
579 | 583 | cachelfiles(ui, repo, None, lfiles) |
|
580 | 584 | |
|
581 | for lfile in lfiles: | |
|
582 | update1 = 0 | |
|
583 | ||
|
584 | expecthash = update.get(lfile) | |
|
585 | if expecthash: | |
|
586 | if not lfutil.copyfromcache(repo, expecthash, lfile): | |
|
587 | # failed ... but already removed and set to normallookup | |
|
588 | continue | |
|
589 | # Synchronize largefile dirstate to the last modified | |
|
590 | # time of the file | |
|
591 | lfdirstate.normal(lfile) | |
|
592 | update1 = 1 | |
|
585 | with lfdirstate.parentchange(): | |
|
586 | for lfile in lfiles: | |
|
587 | update1 = 0 | |
|
593 | 588 | |
|
594 | # copy the exec mode of largefile standin from the repository's | |
|
595 | # dirstate to its state in the lfdirstate. | |
|
596 | standin = lfutil.standin(lfile) | |
|
597 | if wvfs.exists(standin): | |
|
598 | # exec is decided by the users permissions using mask 0o100 | |
|
599 | standinexec = wvfs.stat(standin).st_mode & 0o100 | |
|
600 | st = wvfs.stat(lfile) |

601 | mode = st.st_mode |

602 | if standinexec != mode & 0o100: | |
|
603 | # first remove all X bits, then shift all R bits to X | |
|
604 | mode &= ~0o111 | |
|
605 | if standinexec: | |
|
606 | mode |= (mode >> 2) & 0o111 & ~util.umask | |
|
607 | wvfs.chmod(lfile, mode) | |
|
589 | expecthash = update.get(lfile) | |
|
590 | if expecthash: | |
|
591 | if not lfutil.copyfromcache(repo, expecthash, lfile): | |
|
592 | # failed ... but already removed and set to normallookup | |
|
593 | continue | |
|
594 | # Synchronize largefile dirstate to the last modified | |
|
595 | # time of the file | |
|
596 | lfdirstate.update_file( | |
|
597 | lfile, p1_tracked=True, wc_tracked=True | |
|
598 | ) | |
|
608 | 599 | update1 = 1 |
|
609 | 600 | |
|
610 | updated += update1 | |
|
601 | # copy the exec mode of largefile standin from the repository's | |
|
602 | # dirstate to its state in the lfdirstate. | |
|
603 | standin = lfutil.standin(lfile) | |
|
604 | if wvfs.exists(standin): | |
|
605 | # exec is decided by the users permissions using mask 0o100 | |
|
606 | standinexec = wvfs.stat(standin).st_mode & 0o100 | |
|
607 | st = wvfs.stat(lfile) | |
|
608 | mode = st.st_mode | |
|
609 | if standinexec != mode & 0o100: | |
|
610 | # first remove all X bits, then shift all R bits to X | |
|
611 | mode &= ~0o111 | |
|
612 | if standinexec: | |
|
613 | mode |= (mode >> 2) & 0o111 & ~util.umask | |
|
614 | wvfs.chmod(lfile, mode) | |
|
615 | update1 = 1 | |
|
611 | 616 | |
|
612 | lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup) | |
|
617 | updated += update1 | |
|
618 | ||
|
619 | lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup) | |
|
613 | 620 | |
|
614 | 621 | lfdirstate.write() |
|
615 | 622 | if lfiles: |
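
The re-indented block above preserves an exec-bit dance worth spelling out: when the standin's exec bit disagrees with the largefile's, all execute bits are cleared, then the read bits are shifted down into the execute positions and masked by the umask. A standalone rendering of that arithmetic (the umask default here is an assumption for the example):

    def propagate_exec(mode, standin_is_exec, umask=0o022):
        if standin_is_exec == bool(mode & 0o100):
            return mode  # bits already agree
        mode &= ~0o111  # first remove all X bits
        if standin_is_exec:
            # then shift all R bits to X, honoring the umask
            mode |= (mode >> 2) & 0o111 & ~umask
        return mode

    assert propagate_exec(0o100644, True) == 0o100755
    assert propagate_exec(0o100755, False) == 0o100644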
@@ -15,10 +15,7 b' import os' | |||
|
15 | 15 | import stat |
|
16 | 16 | |
|
17 | 17 | from mercurial.i18n import _ |
|
18 | from mercurial.node import ( |

19 | hex, | |
|
20 | nullid, | |
|
21 | ) | |
|
18 | from mercurial.node import hex | |
|
22 | 19 | from mercurial.pycompat import open |
|
23 | 20 | |
|
24 | 21 | from mercurial import ( |
@@ -28,6 +25,7 b' from mercurial import (' | |||
|
28 | 25 | httpconnection, |
|
29 | 26 | match as matchmod, |
|
30 | 27 | pycompat, |
|
28 | requirements, | |
|
31 | 29 | scmutil, |
|
32 | 30 | sparse, |
|
33 | 31 | util, |
@@ -164,7 +162,15 b' class largefilesdirstate(dirstate.dirsta' | |||
|
164 | 162 | def __getitem__(self, key): |
|
165 | 163 | return super(largefilesdirstate, self).__getitem__(unixpath(key)) |
|
166 | 164 | |
|
167 | def normal(self, f): |

165 | def set_tracked(self, f): | |
|
166 | return super(largefilesdirstate, self).set_tracked(unixpath(f)) | |
|
167 | ||
|
168 | def set_untracked(self, f): | |
|
169 | return super(largefilesdirstate, self).set_untracked(unixpath(f)) | |
|
170 | ||
|
171 | def normal(self, f, parentfiledata=None): | |
|
172 | # not sure if we should pass the `parentfiledata` down or throw it | |
|
173 | # away. So throwing it away to stay on the safe side. | |
|
168 | 174 | return super(largefilesdirstate, self).normal(unixpath(f)) |
|
169 | 175 | |
|
170 | 176 | def remove(self, f): |
@@ -200,6 +206,7 b' def openlfdirstate(ui, repo, create=True' | |||
|
200 | 206 | vfs = repo.vfs |
|
201 | 207 | lfstoredir = longname |
|
202 | 208 | opener = vfsmod.vfs(vfs.join(lfstoredir)) |
|
209 | use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements | |
|
203 | 210 | lfdirstate = largefilesdirstate( |
|
204 | 211 | opener, |
|
205 | 212 | ui, |
@@ -207,6 +214,7 b' def openlfdirstate(ui, repo, create=True' | |||
|
207 | 214 | repo.dirstate._validate, |
|
208 | 215 | lambda: sparse.matcher(repo), |
|
209 | 216 | repo.nodeconstants, |
|
217 | use_dirstate_v2, | |
|
210 | 218 | ) |
|
211 | 219 | |
|
212 | 220 | # If the largefiles dirstate does not exist, populate and create |
@@ -221,9 +229,12 b' def openlfdirstate(ui, repo, create=True' | |||
|
221 | 229 | if len(standins) > 0: |
|
222 | 230 | vfs.makedirs(lfstoredir) |
|
223 | 231 | |
|
224 | for standin in standins: | |
|
225 | lfile = splitstandin(standin) |

226 | lfdirstate.normallookup(lfile) | |
|
232 | with lfdirstate.parentchange(): | |
|
233 | for standin in standins: | |
|
234 | lfile = splitstandin(standin) | |
|
235 | lfdirstate.update_file( | |
|
236 | lfile, p1_tracked=True, wc_tracked=True, possibly_dirty=True | |
|
237 | ) | |
|
227 | 238 | return lfdirstate |
|
228 | 239 | |
|
229 | 240 | |
@@ -243,7 +254,7 b' def lfdirstatestatus(lfdirstate, repo):' | |||
|
243 | 254 | modified.append(lfile) |
|
244 | 255 | else: |
|
245 | 256 | clean.append(lfile) |
|
246 | lfdirstate.normal(lfile) |

257 | lfdirstate.set_clean(lfile) | |
|
247 | 258 | return s |
|
248 | 259 | |
|
249 | 260 | |
@@ -544,46 +555,49 b' def getstandinsstate(repo):' | |||
|
544 | 555 | |
|
545 | 556 | def synclfdirstate(repo, lfdirstate, lfile, normallookup): |
|
546 | 557 | lfstandin = standin(lfile) |
|
547 | if lfstandin in repo.dirstate: | |
|
548 | stat = repo.dirstate._map[lfstandin] | |
|
549 | state, mtime = stat[0], stat[3] | |
|
558 | if lfstandin not in repo.dirstate: | |
|
559 | lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=False) | |
|
550 | 560 | else: |
|
551 | state, mtime = b'?', -1 | |
|
552 | if state == b'n': | |
|
553 | if normallookup or mtime < 0 or not repo.wvfs.exists(lfile): | |
|
554 | # state 'n' doesn't ensure 'clean' in this case | |
|
555 | lfdirstate.normallookup(lfile) | |
|
556 | else: | |
|
557 | lfdirstate.normal(lfile) | |
|
558 | elif state == b'm': | |
|
559 | lfdirstate.normallookup(lfile) | |
|
560 | elif state == b'r': | |
|
561 | lfdirstate.remove(lfile) | |
|
562 | elif state == b'a': | |
|
563 | lfdirstate.add(lfile) | |
|
564 | elif state == b'?': | |
|
565 | lfdirstate.drop(lfile) | |
|
561 | stat = repo.dirstate._map[lfstandin] | |
|
562 | state, mtime = stat.state, stat.mtime | |
|
563 | if state == b'n': | |
|
564 | if normallookup or mtime < 0 or not repo.wvfs.exists(lfile): | |
|
565 | # state 'n' doesn't ensure 'clean' in this case | |
|
566 | lfdirstate.update_file( | |
|
567 | lfile, p1_tracked=True, wc_tracked=True, possibly_dirty=True | |
|
568 | ) | |
|
569 | else: | |
|
570 | lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True) | |
|
571 | elif state == b'm': | |
|
572 | lfdirstate.update_file( | |
|
573 | lfile, p1_tracked=True, wc_tracked=True, merged=True | |
|
574 | ) | |
|
575 | elif state == b'r': | |
|
576 | lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=False) | |
|
577 | elif state == b'a': | |
|
578 | lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True) | |
|
566 | 579 | |
|
567 | 580 | |
|
568 | 581 | def markcommitted(orig, ctx, node): |
|
569 | 582 | repo = ctx.repo() |
|
570 | 583 | |
|
571 | orig(node) | |
|
584 | lfdirstate = openlfdirstate(repo.ui, repo) | |
|
585 | with lfdirstate.parentchange(): | |
|
586 | orig(node) | |
|
572 | 587 | |
|
573 | # ATTENTION: "ctx.files()" may differ from "repo[node].files()" | |
|
574 | # because files coming from the 2nd parent are omitted in the latter. | |
|
575 | # | |
|
576 | # The former should be used to get targets of "synclfdirstate", | |
|
577 | # because such files: | |
|
578 | # - are marked as "a" by "patch.patch()" (e.g. via transplant), and | |
|
579 | # - have to be marked as "n" after commit, but | |
|
580 | # - aren't listed in "repo[node].files()" | |
|
588 | # ATTENTION: "ctx.files()" may differ from "repo[node].files()" | |
|
589 | # because files coming from the 2nd parent are omitted in the latter. | |
|
590 | # | |
|
591 | # The former should be used to get targets of "synclfdirstate", | |
|
592 | # because such files: | |
|
593 | # - are marked as "a" by "patch.patch()" (e.g. via transplant), and | |
|
594 | # - have to be marked as "n" after commit, but | |
|
595 | # - aren't listed in "repo[node].files()" | |
|
581 | 596 | |
|
582 | lfdirstate = openlfdirstate(repo.ui, repo) | |
|
583 | for f in ctx.files(): | |
|
584 | lfile = splitstandin(f) | |
|
585 | if lfile is not None: | |
|
586 | synclfdirstate(repo, lfdirstate, lfile, False) | |
|
597 | for f in ctx.files(): | |
|
598 | lfile = splitstandin(f) | |
|
599 | if lfile is not None: | |
|
600 | synclfdirstate(repo, lfdirstate, lfile, False) | |
|
587 | 601 | lfdirstate.write() |
|
588 | 602 | |
|
589 | 603 | # As part of committing, copy all of the largefiles into the cache. |
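
The rewritten synclfdirstate above is essentially a translation table from the legacy one-letter dirstate states to update_file() keyword arguments. Summarized standalone (the helper is illustrative; the keyword names follow the hunk):

    # How the old states map onto the newer update_file() vocabulary.
    LEGACY_STATE_TO_UPDATE_FILE = {
        b'n': dict(p1_tracked=True, wc_tracked=True),               # normal
        b'm': dict(p1_tracked=True, wc_tracked=True, merged=True),  # merged
        b'r': dict(p1_tracked=True, wc_tracked=False),              # removed
        b'a': dict(p1_tracked=False, wc_tracked=True),              # added
        b'?': dict(p1_tracked=False, wc_tracked=False),             # untracked
    }

    def kwargs_for(state, possibly_dirty=False):
        kwargs = dict(LEGACY_STATE_TO_UPDATE_FILE[state])
        if state == b'n' and possibly_dirty:
            kwargs['possibly_dirty'] = True  # 'n' alone doesn't ensure 'clean'
        return kwargs

    assert kwargs_for(b'r') == {'p1_tracked': True, 'wc_tracked': False}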
@@ -613,7 +627,7 b' def getlfilestoupload(repo, missing, add' | |||
|
613 | 627 | ) as progress: |
|
614 | 628 | for i, n in enumerate(missing): |
|
615 | 629 | progress.update(i) |
|
616 | parents = [p for p in repo[n].parents() if p != nullid] | |
|
630 | parents = [p for p in repo[n].parents() if p != repo.nullid] | |
|
617 | 631 | |
|
618 | 632 | with lfstatus(repo, value=False): |
|
619 | 633 | ctx = repo[n] |
@@ -150,10 +150,7 b' def addlargefiles(ui, repo, isaddremove,' | |||
|
150 | 150 | executable=lfutil.getexecutable(repo.wjoin(f)), |
|
151 | 151 | ) |
|
152 | 152 | standins.append(standinname) |
|
153 | if lfdirstate[f] == b'r': |

154 | lfdirstate.normallookup(f) | |
|
155 | else: | |
|
156 | lfdirstate.add(f) | |
|
153 | lfdirstate.set_tracked(f) | |
|
157 | 154 | lfdirstate.write() |
|
158 | 155 | bad += [ |
|
159 | 156 | lfutil.splitstandin(f) |
@@ -230,9 +227,7 b' def removelargefiles(ui, repo, isaddremo' | |||
|
230 | 227 | repo[None].forget(remove) |
|
231 | 228 | |
|
232 | 229 | for f in remove: |
|
233 | lfutil.synclfdirstate( | |
|
234 | repo, lfdirstate, lfutil.splitstandin(f), False | |
|
235 | ) | |
|
230 | lfdirstate.set_untracked(lfutil.splitstandin(f)) | |
|
236 | 231 | |
|
237 | 232 | lfdirstate.write() |
|
238 | 233 | |
@@ -653,12 +648,17 b' def overridecalculateupdates(' | |||
|
653 | 648 | def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata): |
|
654 | 649 | if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions: |
|
655 | 650 | lfdirstate = lfutil.openlfdirstate(repo.ui, repo) |
|
656 | for lfile, args, msg in actions[MERGE_ACTION_LARGEFILE_MARK_REMOVED]: | |
|
657 | # this should be executed before 'orig', to execute 'remove' | |
|
658 | # before all other actions | |
|
659 | repo.dirstate.remove(lfile) | |
|
660 | # make sure lfile doesn't get synclfdirstate'd as normal | |
|
661 | lfdirstate.add(lfile) | |
|
651 | with lfdirstate.parentchange(): | |
|
652 | for lfile, args, msg in actions[ | |
|
653 | MERGE_ACTION_LARGEFILE_MARK_REMOVED | |
|
654 | ]: | |
|
655 | # this should be executed before 'orig', to execute 'remove' | |
|
656 | # before all other actions | |
|
657 | repo.dirstate.update_file( | |
|
658 | lfile, p1_tracked=True, wc_tracked=False | |
|
659 | ) | |
|
660 | # make sure lfile doesn't get synclfdirstate'd as normal | |
|
661 | lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True) | |
|
662 | 662 | lfdirstate.write() |
|
663 | 663 | |
|
664 | 664 | return orig(repo, actions, branchmerge, getfiledata) |
@@ -859,11 +859,11 b' def overridecopy(orig, ui, repo, pats, o' | |||
|
859 | 859 | # The file is gone, but this deletes any empty parent |
|
860 | 860 | # directories as a side-effect. |
|
861 | 861 | repo.wvfs.unlinkpath(srclfile, ignoremissing=True) |
|
862 | lfdirstate.remove(srclfile) |

862 | lfdirstate.set_untracked(srclfile) | |
|
863 | 863 | else: |
|
864 | 864 | util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile)) |
|
865 | 865 | |
|
866 | lfdirstate.add(destlfile) |

866 | lfdirstate.set_tracked(destlfile) | |
|
867 | 867 | lfdirstate.write() |
|
868 | 868 | except error.Abort as e: |
|
869 | 869 | if e.message != _(b'no files to copy'): |
@@ -1382,10 +1382,7 b' def cmdutilforget(' | |||
|
1382 | 1382 | with repo.wlock(): |
|
1383 | 1383 | lfdirstate = lfutil.openlfdirstate(ui, repo) |
|
1384 | 1384 | for f in forget: |
|
1385 | if lfdirstate[f] == b'a': |

1386 | lfdirstate.drop(f) | |
|
1387 | else: | |
|
1388 | lfdirstate.remove(f) | |
|
1385 | lfdirstate.set_untracked(f) | |
|
1389 | 1386 | lfdirstate.write() |
|
1390 | 1387 | standins = [lfutil.standin(f) for f in forget] |
|
1391 | 1388 | for f in standins: |
@@ -1636,13 +1633,16 b' def overriderollback(orig, ui, repo, **o' | |||
|
1636 | 1633 | repo.wvfs.unlinkpath(standin, ignoremissing=True) |
|
1637 | 1634 | |
|
1638 | 1635 | lfdirstate = lfutil.openlfdirstate(ui, repo) |
|
1639 | orphans = set(lfdirstate) | |
|
1640 | lfiles = lfutil.listlfiles(repo) | |
|
1641 | for file in lfiles: | |
|
1642 | lfutil.synclfdirstate(repo, lfdirstate, file, True) | |
|
1643 | orphans.discard(file) | |
|
1644 | for lfile in orphans: | |
|
1645 | lfdirstate.drop(lfile) | |
|
1636 | with lfdirstate.parentchange(): | |
|
1637 | orphans = set(lfdirstate) | |
|
1638 | lfiles = lfutil.listlfiles(repo) | |
|
1639 | for file in lfiles: | |
|
1640 | lfutil.synclfdirstate(repo, lfdirstate, file, True) | |
|
1641 | orphans.discard(file) | |
|
1642 | for lfile in orphans: | |
|
1643 | lfdirstate.update_file( | |
|
1644 | lfile, p1_tracked=False, wc_tracked=False | |
|
1645 | ) | |
|
1646 | 1646 | lfdirstate.write() |
|
1647 | 1647 | return result |
|
1648 | 1648 | |
@@ -1787,7 +1787,9 b' def mergeupdate(orig, repo, node, branch' | |||
|
1787 | 1787 | # mark all clean largefiles as dirty, just in case the update gets |
|
1788 | 1788 | # interrupted before largefiles and lfdirstate are synchronized |
|
1789 | 1789 | for lfile in oldclean: |
|
1790 | lfdirstate.normallookup(lfile) |

1790 | entry = lfdirstate._map.get(lfile) | |
|
1791 | assert not (entry.merged_removed or entry.from_p2_removed) | |
|
1792 | lfdirstate.set_possibly_dirty(lfile) | |
|
1791 | 1793 | lfdirstate.write() |
|
1792 | 1794 | |
|
1793 | 1795 | oldstandins = lfutil.getstandinsstate(repo) |
@@ -1798,23 +1800,24 b' def mergeupdate(orig, repo, node, branch' | |||
|
1798 | 1800 | raise error.ProgrammingError( |
|
1799 | 1801 | b'largefiles is not compatible with in-memory merge' |
|
1800 | 1802 | ) |
|
1801 | result = orig(repo, node, branchmerge, force, *args, **kwargs) | |
|
1803 | with lfdirstate.parentchange(): | |
|
1804 | result = orig(repo, node, branchmerge, force, *args, **kwargs) | |
|
1802 | 1805 | |
|
1803 | newstandins = lfutil.getstandinsstate(repo) | |
|
1804 | filelist = lfutil.getlfilestoupdate(oldstandins, newstandins) | |
|
1806 | newstandins = lfutil.getstandinsstate(repo) | |
|
1807 | filelist = lfutil.getlfilestoupdate(oldstandins, newstandins) | |
|
1805 | 1808 | |
|
1806 | # to avoid leaving all largefiles as dirty and thus rehash them, mark | |
|
1807 | # all the ones that didn't change as clean | |
|
1808 | for lfile in oldclean.difference(filelist): | |
|
1809 | lfdirstate.normal(lfile) | |
|
1810 | lfdirstate.write() | |
|
1809 | # to avoid leaving all largefiles as dirty and thus rehash them, mark | |
|
1810 | # all the ones that didn't change as clean | |
|
1811 | for lfile in oldclean.difference(filelist): | |
|
1812 | lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True) | |
|
1813 | lfdirstate.write() | |
|
1811 | 1814 | |
|
1812 | if branchmerge or force or partial: | |
|
1813 | filelist.extend(s.deleted + s.removed) | |
|
1815 | if branchmerge or force or partial: | |
|
1816 | filelist.extend(s.deleted + s.removed) | |
|
1814 | 1817 | |
|
1815 | lfcommands.updatelfiles( | |
|
1816 | repo.ui, repo, filelist=filelist, normallookup=partial | |
|
1817 | ) | |
|
1818 | lfcommands.updatelfiles( | |
|
1819 | repo.ui, repo, filelist=filelist, normallookup=partial | |
|
1820 | ) | |
|
1818 | 1821 | |
|
1819 | 1822 | return result |
|
1820 | 1823 |
@@ -222,7 +222,7 b' def reposetup(ui, repo):' | |||
|
222 | 222 | else: |
|
223 | 223 | if listclean: |
|
224 | 224 | clean.append(lfile) |
|
225 | lfdirstate.normal(lfile) |

225 | lfdirstate.set_clean(lfile) | |
|
226 | 226 | else: |
|
227 | 227 | tocheck = unsure + modified + added + clean |
|
228 | 228 | modified, added, clean = [], [], [] |
@@ -10,7 +10,7 b' from __future__ import absolute_import' | |||
|
10 | 10 | import hashlib |
|
11 | 11 | |
|
12 | 12 | from mercurial.i18n import _ |
|
13 | from mercurial.node import bin, hex, nullid, short |

13 | from mercurial.node import bin, hex, short | |
|
14 | 14 | from mercurial.pycompat import ( |
|
15 | 15 | getattr, |
|
16 | 16 | setattr, |
@@ -158,7 +158,7 b' def _islfs(rlog, node=None, rev=None):' | |||
|
158 | 158 | rev = rlog.rev(node) |
|
159 | 159 | else: |
|
160 | 160 | node = rlog.node(rev) |
|
161 | if node == nullid: | |
|
161 | if node == rlog.nullid: | |
|
162 | 162 | return False |
|
163 | 163 | flags = rlog.flags(rev) |
|
164 | 164 | return bool(flags & revlog.REVIDX_EXTSTORED) |
@@ -73,7 +73,6 b' from mercurial.i18n import _' | |||
|
73 | 73 | from mercurial.node import ( |
|
74 | 74 | bin, |
|
75 | 75 | hex, |
|
76 | nullid, | |
|
77 | 76 | nullrev, |
|
78 | 77 | short, |
|
79 | 78 | ) |
@@ -908,13 +907,13 b' class queue(object):' | |||
|
908 | 907 | """ |
|
909 | 908 | if rev is None: |
|
910 | 909 | (p1, p2) = repo.dirstate.parents() |
|
911 | if p2 == nullid: | |
|
910 | if p2 == repo.nullid: | |
|
912 | 911 | return p1 |
|
913 | 912 | if not self.applied: |
|
914 | 913 | return None |
|
915 | 914 | return self.applied[-1].node |
|
916 | 915 | p1, p2 = repo.changelog.parents(rev) |
|
917 | if p2 != nullid and p2 in [x.node for x in self.applied]: | |
|
916 | if p2 != repo.nullid and p2 in [x.node for x in self.applied]: | |
|
918 | 917 | return p2 |
|
919 | 918 | return p1 |
|
920 | 919 | |
@@ -1091,18 +1090,9 b' class queue(object):' | |||
|
1091 | 1090 | |
|
1092 | 1091 | if merge and files: |
|
1093 | 1092 | # Mark as removed/merged and update dirstate parent info |
|
1094 | removed = [] | |
|
1095 | merged = [] | |
|
1096 | for f in files: | |
|
1097 | if os.path.lexists(repo.wjoin(f)): | |
|
1098 | merged.append(f) | |
|
1099 | else: | |
|
1100 | removed.append(f) | |
|
1101 | 1093 | with repo.dirstate.parentchange(): |
|
1102 | for f in removed: |

1103 | repo.dirstate.remove(f) |

1104 | for f in merged: | |
|
1105 | repo.dirstate.merge(f) | |
|
1094 | for f in files: | |
|
1095 | repo.dirstate.update_file_p1(f, p1_tracked=True) | |
|
1106 | 1096 | p1 = repo.dirstate.p1() |
|
1107 | 1097 | repo.setparents(p1, merge) |
|
1108 | 1098 | |
@@ -1591,7 +1581,7 b' class queue(object):' | |||
|
1591 | 1581 | for hs in repo.branchmap().iterheads(): |
|
1592 | 1582 | heads.extend(hs) |
|
1593 | 1583 | if not heads: |
|
1594 | heads = [nullid] | |
|
1584 | heads = [repo.nullid] | |
|
1595 | 1585 | if repo.dirstate.p1() not in heads and not exact: |
|
1596 | 1586 | self.ui.status(_(b"(working directory not at a head)\n")) |
|
1597 | 1587 | |
@@ -1852,12 +1842,16 b' class queue(object):' | |||
|
1852 | 1842 | with repo.dirstate.parentchange(): |
|
1853 | 1843 | for f in a: |
|
1854 | 1844 | repo.wvfs.unlinkpath(f, ignoremissing=True) |
|
1855 | repo.dirstate.drop(f) |

1845 | repo.dirstate.update_file( | |
|
1846 | f, p1_tracked=False, wc_tracked=False | |
|
1847 | ) | |
|
1856 | 1848 | for f in m + r: |
|
1857 | 1849 | fctx = ctx[f] |
|
1858 | 1850 | repo.wwrite(f, fctx.data(), fctx.flags()) |
|
1859 | repo.dirstate.normal(f) |

1860 | repo.setparents(qp, nullid) | |
|
1851 | repo.dirstate.update_file( | |
|
1852 | f, p1_tracked=True, wc_tracked=True | |
|
1853 | ) | |
|
1854 | repo.setparents(qp, repo.nullid) | |
|
1861 | 1855 | for patch in reversed(self.applied[start:end]): |
|
1862 | 1856 | self.ui.status(_(b"popping %s\n") % patch.name) |
|
1863 | 1857 | del self.applied[start:end] |
@@ -2003,67 +1997,73 b' class queue(object):' | |||
|
2003 | 1997 | |
|
2004 | 1998 | bmlist = repo[top].bookmarks() |
|
2005 | 1999 | |
|
2006 | dsguard = None | |
|
2007 | try: | |
|
2008 | dsguard = dirstateguard.dirstateguard(repo, b'mq.refresh') | |
|
2009 | if diffopts.git or diffopts.upgrade: | |
|
2010 | copies = {} | |
|
2011 | for dst in a: |

2012 | src = repo.dirstate.copied(dst) |

2013 | # during qfold, the source file for copies may | |
|
2014 | # be removed. Treat this as a simple add. | |
|
2015 | if src is not None and src in repo.dirstate: | |
|
2016 | copies.setdefault(src, []).append(dst) | |
|
2017 | repo.dirstate.add(dst) |

2018 | # remember the copies between patchparent and qtip | |
|
2019 | for dst in aaa: | |
|
2020 | src = ctx[dst].copysource() | |
|
2021 | if src: | |
|
2022 | copies.setdefault(src, []).extend( | |
|
2023 | copies.get(dst, []) | |
|
2000 | with repo.dirstate.parentchange(): | |
|
2001 | # XXX do we actually need the dirstateguard | |
|
2002 | dsguard = None | |
|
2003 | try: | |
|
2004 | dsguard = dirstateguard.dirstateguard(repo, b'mq.refresh') | |
|
2005 | if diffopts.git or diffopts.upgrade: | |
|
2006 | copies = {} | |
|
2007 | for dst in a: | |
|
2008 | src = repo.dirstate.copied(dst) | |
|
2009 | # during qfold, the source file for copies may | |
|
2010 | # be removed. Treat this as a simple add. | |
|
2011 | if src is not None and src in repo.dirstate: | |
|
2012 | copies.setdefault(src, []).append(dst) | |
|
2013 | repo.dirstate.update_file( | |
|
2014 | dst, p1_tracked=False, wc_tracked=True | |
|
2024 | 2015 | ) |
|
2025 | if dst in a: | |
|
2026 | copies[src].append(dst) | |
|
2027 | # we can't copy a file created by the patch itself | |
|
2028 | if dst in copies: |

2029 | del copies[dst] |

2030 | for src, dsts in pycompat.iteritems(copies): | |
|
2031 | for dst in dsts: |

2032 | repo.dirstate.copy(src, dst) |

2033 | else: | |
|
2034 | for dst in a: | |
|
2035 | repo.dirstate.add(dst) |

2036 | # Drop useless copy information |

2037 | for f in list(repo.dirstate.copies()): |

2038 | repo.dirstate.copy(None, f) |

2039 | for f in r: | |
|
2040 | repo.dirstate.remove(f) |

2041 | # if the patch excludes a modified file, mark that | |
|
2042 | # file with mtime=0 so status can see it. | |
|
2043 | mm = [] | |
|
2044 | for i in pycompat.xrange(len(m) - 1, -1, -1): | |
|
2045 | if not match1(m[i]): | |
|
2046 | mm.append(m[i]) | |
|
2047 | del m[i] |

2048 | for f in m: |

2049 | repo.dirstate.normal(f) |

2050 | for f in mm: | |
|
2051 | repo.dirstate.normallookup(f) | |
|
2052 | for f in forget: |

2053 | repo.dirstate.drop(f) | |
|
2054 | ||
|
2055 | user = ph.user or ctx.user() | |
|
2056 | ||
|
2057 | oldphase = repo[top].phase() | |
|
2058 | ||
|
2059 | # assumes strip can roll itself back if interrupted | |
|
2060 | repo.setparents(*cparents) | |
|
2061 | self.applied.pop() | |
|
2062 | self.applieddirty = True | |
|
2063 | strip(self.ui, repo, [top], update=False, backup=False) | |
|
2064 | dsguard.close() | |
|
2065 | finally: | |
|
2066 | release(dsguard) | |
|
2016 | # remember the copies between patchparent and qtip | |
|
2017 | for dst in aaa: | |
|
2018 | src = ctx[dst].copysource() | |
|
2019 | if src: | |
|
2020 | copies.setdefault(src, []).extend( | |
|
2021 | copies.get(dst, []) | |
|
2022 | ) | |
|
2023 | if dst in a: | |
|
2024 | copies[src].append(dst) | |
|
2025 | # we can't copy a file created by the patch itself | |
|
2026 | if dst in copies: | |
|
2027 | del copies[dst] | |
|
2028 | for src, dsts in pycompat.iteritems(copies): | |
|
2029 | for dst in dsts: | |
|
2030 | repo.dirstate.copy(src, dst) | |
|
2031 | else: | |
|
2032 | for dst in a: | |
|
2033 | repo.dirstate.update_file( | |
|
2034 | dst, p1_tracked=False, wc_tracked=True | |
|
2035 | ) | |
|
2036 | # Drop useless copy information | |
|
2037 | for f in list(repo.dirstate.copies()): | |
|
2038 | repo.dirstate.copy(None, f) | |
|
2039 | for f in r: | |
|
2040 | repo.dirstate.update_file_p1(f, p1_tracked=True) | |
|
2041 | # if the patch excludes a modified file, mark that | |
|
2042 | # file with mtime=0 so status can see it. | |
|
2043 | mm = [] | |
|
2044 | for i in pycompat.xrange(len(m) - 1, -1, -1): | |
|
2045 | if not match1(m[i]): | |
|
2046 | mm.append(m[i]) | |
|
2047 | del m[i] | |
|
2048 | for f in m: | |
|
2049 | repo.dirstate.update_file_p1(f, p1_tracked=True) | |
|
2050 | for f in mm: | |
|
2051 | repo.dirstate.update_file_p1(f, p1_tracked=True) | |
|
2052 | for f in forget: | |
|
2053 | repo.dirstate.update_file_p1(f, p1_tracked=False) | |
|
2054 | ||
|
2055 | user = ph.user or ctx.user() | |
|
2056 | ||
|
2057 | oldphase = repo[top].phase() | |
|
2058 | ||
|
2059 | # assumes strip can roll itself back if interrupted | |
|
2060 | repo.setparents(*cparents) | |
|
2061 | self.applied.pop() | |
|
2062 | self.applieddirty = True | |
|
2063 | strip(self.ui, repo, [top], update=False, backup=False) | |
|
2064 | dsguard.close() | |
|
2065 | finally: | |
|
2066 | release(dsguard) | |
|
2067 | 2067 | |
|
2068 | 2068 | try: |
|
2069 | 2069 | # might be nice to attempt to roll back strip after this |
@@ -3639,8 +3639,8 b' def rename(ui, repo, patch, name=None, *' | |||
|
3639 | 3639 | wctx = r[None] |
|
3640 | 3640 | with r.wlock(): |
|
3641 | 3641 | if r.dirstate[patch] == b'a': |
|
3642 | r.dirstate.drop(patch) |

3643 | r.dirstate.add(name) |

3642 | r.dirstate.set_untracked(patch) | |
|
3643 | r.dirstate.set_tracked(name) | |
|
3644 | 3644 | else: |
|
3645 | 3645 | wctx.copy(patch, name) |
|
3646 | 3646 | wctx.forget([patch]) |
@@ -11,7 +11,6 b' import errno' | |||
|
11 | 11 | import struct |
|
12 | 12 | |
|
13 | 13 | from mercurial.i18n import _ |
|
14 | from mercurial.node import nullid | |
|
15 | 14 | from mercurial import ( |
|
16 | 15 | bundle2, |
|
17 | 16 | changegroup, |
@@ -94,7 +93,7 b' def generateellipsesbundle2(' | |||
|
94 | 93 | raise error.Abort(_(b'depth must be positive, got %d') % depth) |
|
95 | 94 | |
|
96 | 95 | heads = set(heads or repo.heads()) |
|
97 | common = set(common or [nullid]) | |
|
96 | common = set(common or [repo.nullid]) | |
|
98 | 97 | |
|
99 | 98 | visitnodes, relevant_nodes, ellipsisroots = exchange._computeellipsis( |
|
100 | 99 | repo, common, heads, set(), match, depth=depth |
@@ -128,7 +127,7 b' def generate_ellipses_bundle2_for_wideni' | |||
|
128 | 127 | common, |
|
129 | 128 | known, |
|
130 | 129 | ): |
|
131 | common = set(common or [nullid]) | |
|
130 | common = set(common or [repo.nullid]) | |
|
132 | 131 | # Steps: |
|
133 | 132 | # 1. Send kill for "$known & ::common" |
|
134 | 133 | # |
@@ -282,10 +281,10 b' def handlechangegroup_widen(op, inpart):' | |||
|
282 | 281 | try: |
|
283 | 282 | gen = exchange.readbundle(ui, f, chgrpfile, vfs) |
|
284 | 283 | # silence internal shuffling chatter |
|
285 | override = {(b'ui', b'quiet'): True} | |
|
286 | if ui.verbose: | |
|
287 | override = {} | |
|
288 | with ui.configoverride(override): | |
|
284 | maybe_silent = ( | |
|
285 | ui.silent() if not ui.verbose else util.nullcontextmanager() | |
|
286 | ) | |
|
287 | with maybe_silent: | |
|
289 | 288 | if isinstance(gen, bundle2.unbundle20): |
|
290 | 289 | with repo.transaction(b'strip') as tr: |
|
291 | 290 | bundle2.processbundle(repo, gen, lambda: tr) |
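
The narrowbundle2 hunk trades a config override for the pick-a-context-manager idiom: build either a real silencer or a no-op context manager up front, then enter whichever was chosen. The same shape in plain standard-library terms (contextlib.nullcontext needs Python 3.7+; the redirect here is only an analogue of ui.silent()):

    import contextlib
    import io

    verbose = False

    maybe_silent = (
        contextlib.redirect_stdout(io.StringIO())
        if not verbose
        else contextlib.nullcontext()
    )
    with maybe_silent:
        print('internal shuffling chatter')  # swallowed unless verbose
    print('still visible')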
@@ -12,7 +12,6 b' import os' | |||
|
12 | 12 | from mercurial.i18n import _ |
|
13 | 13 | from mercurial.node import ( |
|
14 | 14 | hex, |
|
15 | nullid, | |
|
16 | 15 | short, |
|
17 | 16 | ) |
|
18 | 17 | from mercurial import ( |
@@ -193,7 +192,7 b' def pullbundle2extraprepare(orig, pullop' | |||
|
193 | 192 | kwargs[b'known'] = [ |
|
194 | 193 | hex(ctx.node()) |
|
195 | 194 | for ctx in repo.set(b'::%ln', pullop.common) |
|
196 | if ctx.node() != nullid | |
|
195 | if ctx.node() != repo.nullid | |
|
197 | 196 | ] |
|
198 | 197 | if not kwargs[b'known']: |
|
199 | 198 | # Mercurial serializes an empty list as '' and deserializes it as |
@@ -228,10 +227,17 b' def _narrow(' | |||
|
228 | 227 | unfi = repo.unfiltered() |
|
229 | 228 | outgoing = discovery.findcommonoutgoing(unfi, remote, commoninc=commoninc) |
|
230 | 229 | ui.status(_(b'looking for local changes to affected paths\n')) |
|
230 | progress = ui.makeprogress( | |
|
231 | topic=_(b'changesets'), | |
|
232 | unit=_(b'changesets'), | |
|
233 | total=len(outgoing.missing) + len(outgoing.excluded), | |
|
234 | ) | |
|
231 | 235 | localnodes = [] |
|
232 | for n in itertools.chain(outgoing.missing, outgoing.excluded): | |
|
233 | if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()): | |
|
234 | localnodes.append(n) |

236 | with progress: | |
|
237 | for n in itertools.chain(outgoing.missing, outgoing.excluded): | |
|
238 | progress.increment() | |
|
239 | if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()): | |
|
240 | localnodes.append(n) | |
|
235 | 241 | revstostrip = unfi.revs(b'descendants(%ln)', localnodes) |
|
236 | 242 | hiddenrevs = repoview.filterrevs(repo, b'visible') |
|
237 | 243 | visibletostrip = list( |
@@ -275,6 +281,10 b' def _narrow(' | |||
|
275 | 281 | ) |
|
276 | 282 | hg.clean(repo, urev) |
|
277 | 283 | overrides = {(b'devel', b'strip-obsmarkers'): False} |
|
284 | if backup: | |
|
285 | ui.status(_(b'moving unwanted changesets to backup\n')) | |
|
286 | else: | |
|
287 | ui.status(_(b'deleting unwanted changesets\n')) | |
|
278 | 288 | with ui.configoverride(overrides, b'narrow'): |
|
279 | 289 | repair.strip(ui, unfi, tostrip, topic=b'narrow', backup=backup) |
|
280 | 290 | |
@@ -310,8 +320,10 b' def _narrow(' | |||
|
310 | 320 | util.unlinkpath(repo.svfs.join(f)) |
|
311 | 321 | repo.store.markremoved(f) |
|
312 | 322 | |
|
313 | narrowspec.updateworkingcopy(repo, assumeclean=True) | |
|
314 | narrowspec.copytoworkingcopy(repo) | |
|
323 | ui.status(_(b'deleting unwanted files from working copy\n')) | |
|
324 | with repo.dirstate.parentchange(): | |
|
325 | narrowspec.updateworkingcopy(repo, assumeclean=True) | |
|
326 | narrowspec.copytoworkingcopy(repo) | |
|
315 | 327 | |
|
316 | 328 | repo.destroyed() |
|
317 | 329 | |
@@ -370,7 +382,7 b' def _widen(' | |||
|
370 | 382 | ds = repo.dirstate |
|
371 | 383 | p1, p2 = ds.p1(), ds.p2() |
|
372 | 384 | with ds.parentchange(): |
|
373 | ds.setparents(nullid, nullid) | |
|
385 | ds.setparents(repo.nullid, repo.nullid) | |
|
374 | 386 | if isoldellipses: |
|
375 | 387 | with wrappedextraprepare: |
|
376 | 388 | exchange.pull(repo, remote, heads=common) |
@@ -380,7 +392,7 b' def _widen(' | |||
|
380 | 392 | known = [ |
|
381 | 393 | ctx.node() |
|
382 | 394 | for ctx in repo.set(b'::%ln', common) |
|
383 | if ctx.node() != nullid | |
|
395 | if ctx.node() != repo.nullid | |
|
384 | 396 | ] |
|
385 | 397 | with remote.commandexecutor() as e: |
|
386 | 398 | bundle = e.callcommand( |
@@ -411,7 +423,7 b' def _widen(' | |||
|
411 | 423 | with ds.parentchange(): |
|
412 | 424 | ds.setparents(p1, p2) |
|
413 | 425 | |
|
414 | with repo.transaction(b'widening'): | |
|
426 | with repo.transaction(b'widening'), repo.dirstate.parentchange(): | |
|
415 | 427 | repo.setnewnarrowpats() |
|
416 | 428 | narrowspec.updateworkingcopy(repo) |
|
417 | 429 | narrowspec.copytoworkingcopy(repo) |
@@ -578,7 +590,9 b' def trackedcmd(ui, repo, remotepath=None' | |||
|
578 | 590 | return 0 |
|
579 | 591 | |
|
580 | 592 | if update_working_copy: |
|
581 | with repo.wlock(), repo.lock(), repo.transaction(b'narrow-wc'): | |
|
593 | with repo.wlock(), repo.lock(), repo.transaction( | |
|
594 | b'narrow-wc' | |
|
595 | ), repo.dirstate.parentchange(): | |
|
582 | 596 | narrowspec.updateworkingcopy(repo) |
|
583 | 597 | narrowspec.copytoworkingcopy(repo) |
|
584 | 598 | return 0 |
@@ -38,6 +38,14 b' def wrapdirstate(repo, dirstate):' | |||
|
38 | 38 | return super(narrowdirstate, self).normal(*args, **kwargs) |
|
39 | 39 | |
|
40 | 40 | @_editfunc |
|
41 | def set_tracked(self, *args): | |
|
42 | return super(narrowdirstate, self).set_tracked(*args) | |
|
43 | ||
|
44 | @_editfunc | |
|
45 | def set_untracked(self, *args): | |
|
46 | return super(narrowdirstate, self).set_untracked(*args) | |
|
47 | ||
|
48 | @_editfunc | |
|
41 | 49 | def add(self, *args): |
|
42 | 50 | return super(narrowdirstate, self).add(*args) |
|
43 | 51 |
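
`_editfunc` funnels every dirstate-mutating method through a single guard, so supporting the new `set_tracked`/`set_untracked` API is just two more wrapped methods. A sketch of the wrapper shape; the body here only forwards the call, whereas the real decorator rejects paths outside the narrowspec:

    def _editfunc(fn):
        def _wrapper(self, *args, **kwargs):
            # the real guard raises for files outside the narrow spec
            # before delegating; this sketch only delegates
            return fn(self, *args, **kwargs)

        return _wrapper
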
@@ -69,7 +69,7 b' import operator' | |||
|
69 | 69 | import re |
|
70 | 70 | import time |
|
71 | 71 | |
|
72 | from mercurial.node import bin, nullid, short | |
|
72 | from mercurial.node import bin, short | |
|
73 | 73 | from mercurial.i18n import _ |
|
74 | 74 | from mercurial.pycompat import getattr |
|
75 | 75 | from mercurial.thirdparty import attr |
@@ -586,7 +586,7 b' def getoldnodedrevmap(repo, nodelist):' | |||
|
586 | 586 | tags.tag( |
|
587 | 587 | repo, |
|
588 | 588 | tagname, |
|
589 | nullid, | |
|
589 | repo.nullid, | |
|
590 | 590 | message=None, |
|
591 | 591 | user=None, |
|
592 | 592 | date=None, |
@@ -1606,7 +1606,7 b' def phabsend(ui, repo, *revs, **opts):' | |||
|
1606 | 1606 | tags.tag( |
|
1607 | 1607 | repo, |
|
1608 | 1608 | tagname, |
|
1609 | nullid, | |
|
1609 | repo.nullid, | |
|
1610 | 1610 | message=None, |
|
1611 | 1611 | user=None, |
|
1612 | 1612 | date=None, |
@@ -25,8 +25,15 b'' | |||
|
25 | 25 | '''command to delete untracked files from the working directory (DEPRECATED) |
|
26 | 26 | |
|
27 | 27 | The functionality of this extension has been included in core Mercurial since |
|
28 | version 5.7. Please use :hg:`purge ...` instead. | |
|
28 | version 5.7. Please use :hg:`purge ...` instead. :hg:`purge --confirm` is now | |
|
29 | the default, unless the extension is enabled for backward compatibility. | |
|
29 | 30 | ''' |
|
30 | 31 | |
|
31 | 32 | # This empty extension looks pointless, but core mercurial checks if it's loaded |
|
32 | 33 | # to implement the slightly different behavior documented above. |
|
34 | ||
|
35 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for | |
|
36 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should | |
|
37 | # be specifying the version(s) of Mercurial they are tested with, or | |
|
38 | # leave the attribute unspecified. | |
|
39 | testedwith = b'ships-with-hg-core' |
@@ -190,18 +190,18 b' class rebaseruntime(object):' | |||
|
190 | 190 | self.destmap = {} |
|
191 | 191 | self.skipped = set() |
|
192 | 192 | |
|
193 | self.collapsef = opts.get(b'collapse', False) | |
|
194 | self.collapsemsg = cmdutil.logmessage(ui, opts) | |
|
195 | self.date = opts.get(b'date', None) | |
|
193 | self.collapsef = opts.get('collapse', False) | |
|
194 | self.collapsemsg = cmdutil.logmessage(ui, pycompat.byteskwargs(opts)) | |
|
195 | self.date = opts.get('date', None) | |
|
196 | 196 | |
|
197 | e = opts.get(b'extrafn') # internal, used by e.g. hgsubversion | |
|
197 | e = opts.get('extrafn') # internal, used by e.g. hgsubversion | |
|
198 | 198 | self.extrafns = [_savegraft] |
|
199 | 199 | if e: |
|
200 | 200 | self.extrafns = [e] |
|
201 | 201 | |
|
202 | 202 | self.backupf = ui.configbool(b'rewrite', b'backup-bundle') |
|
203 | self.keepf = opts.get(b'keep', False) | |
|
204 | self.keepbranchesf = opts.get(b'keepbranches', False) | |
|
203 | self.keepf = opts.get('keep', False) | |
|
204 | self.keepbranchesf = opts.get('keepbranches', False) | |
|
205 | 205 | self.skipemptysuccessorf = rewriteutil.skip_empty_successor( |
|
206 | 206 | repo.ui, b'rebase' |
|
207 | 207 | ) |
@@ -446,8 +446,15 b' class rebaseruntime(object):' | |||
|
446 | 446 | rebaseset = set(destmap.keys()) |
|
447 | 447 | rebaseset -= set(self.obsolete_with_successor_in_destination) |
|
448 | 448 | rebaseset -= self.obsolete_with_successor_in_rebase_set |
|
449 | # We have our own divergence-checking in the rebase extension | |
|
450 | overrides = {} | |
|
451 | if obsolete.isenabled(self.repo, obsolete.createmarkersopt): | |
|
452 | overrides = { | |
|
453 | (b'experimental', b'evolution.allowdivergence'): b'true' | |
|
454 | } | |
|
449 | 455 | try: |
|
450 | rewriteutil.precheck(self.repo, rebaseset, action=b'rebase') | |
|
456 | with self.ui.configoverride(overrides): | |
|
457 | rewriteutil.precheck(self.repo, rebaseset, action=b'rebase') | |
|
451 | 458 | except error.Abort as e: |
|
452 | 459 | if e.hint is None: |
|
453 | 460 | e.hint = _(b'use --keep to keep original changesets') |
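
`ui.configoverride` scopes a temporary configuration change to a `with` block, which is what confines the divergence allowance above to the `precheck` call. A sketch, assuming a `ui` instance:

    overrides = {(b'experimental', b'evolution.allowdivergence'): b'true'}
    with ui.configoverride(overrides, b'rebase'):
        # reads inside the block observe the overridden value
        assert ui.config(b'experimental', b'evolution.allowdivergence') == b'true'
    # on exit the previous configuration is restored
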
@@ -623,7 +630,7 b' class rebaseruntime(object):' | |||
|
623 | 630 | repo.ui.debug(b'resuming interrupted rebase\n') |
|
624 | 631 | self.resume = False |
|
625 | 632 | else: |
|
626 | overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')} | |
|
633 | overrides = {(b'ui', b'forcemerge'): opts.get('tool', b'')} | |
|
627 | 634 | with ui.configoverride(overrides, b'rebase'): |
|
628 | 635 | try: |
|
629 | 636 | rebasenode( |
@@ -670,9 +677,7 b' class rebaseruntime(object):' | |||
|
670 | 677 | if not self.collapsef: |
|
671 | 678 | merging = p2 != nullrev |
|
672 | 679 | editform = cmdutil.mergeeditform(merging, b'rebase') |
|
673 | editor = cmdutil.getcommiteditor( | |
|
674 | editform=editform, **pycompat.strkwargs(opts) | |
|
675 | ) | |
|
680 | editor = cmdutil.getcommiteditor(editform=editform, **opts) | |
|
676 | 681 | # We need to set parents again here just in case we're continuing |
|
677 | 682 | # a rebase started with an old hg version (before 9c9cfecd4600), |
|
678 | 683 | # because those old versions would have left us with two dirstate |
@@ -720,7 +725,7 b' class rebaseruntime(object):' | |||
|
720 | 725 | |
|
721 | 726 | def _finishrebase(self): |
|
722 | 727 | repo, ui, opts = self.repo, self.ui, self.opts |
|
723 | fm = ui.formatter(b'rebase', opts) | |
|
728 | fm = ui.formatter(b'rebase', pycompat.byteskwargs(opts)) | |
|
724 | 729 | fm.startitem() |
|
725 | 730 | if self.collapsef: |
|
726 | 731 | p1, p2, _base = defineparents( |
@@ -731,7 +736,7 b' class rebaseruntime(object):' | |||
|
731 | 736 | self.skipped, |
|
732 | 737 | self.obsolete_with_successor_in_destination, |
|
733 | 738 | ) |
|
734 | editopt = opts.get(b'edit') | |
|
739 | editopt = opts.get('edit') | |
|
735 | 740 | editform = b'rebase.collapse' |
|
736 | 741 | if self.collapsemsg: |
|
737 | 742 | commitmsg = self.collapsemsg |
@@ -755,7 +760,7 b' class rebaseruntime(object):' | |||
|
755 | 760 | self.state[oldrev] = newrev |
|
756 | 761 | |
|
757 | 762 | if b'qtip' in repo.tags(): |
|
758 | updatemq(repo, self.state, self.skipped, **pycompat.strkwargs(opts)) | |
|
763 | updatemq(repo, self.state, self.skipped, **opts) | |
|
759 | 764 | |
|
760 | 765 | # restore original working directory |
|
761 | 766 | # (we do this before stripping) |
@@ -1056,18 +1061,17 b' def rebase(ui, repo, **opts):' | |||
|
1056 | 1061 | unresolved conflicts. |
|
1057 | 1062 | |
|
1058 | 1063 | """ |
|
1059 | opts = pycompat.byteskwargs(opts) | |
|
1060 | 1064 | inmemory = ui.configbool(b'rebase', b'experimental.inmemory') |
|
1061 | action = cmdutil.check_at_most_one_arg(opts, b'abort', b'stop', b'continue') | |
|
1065 | action = cmdutil.check_at_most_one_arg(opts, 'abort', 'stop', 'continue') | |
|
1062 | 1066 | if action: |
|
1063 | 1067 | cmdutil.check_incompatible_arguments( |
|
1064 | opts, action, [b'confirm', b'dry_run'] | |
|
1068 | opts, action, ['confirm', 'dry_run'] | |
|
1065 | 1069 | ) |
|
1066 | 1070 | cmdutil.check_incompatible_arguments( |
|
1067 | opts, action, [b'rev', b'source', b'base', b'dest'] | |
|
1071 | opts, action, ['rev', 'source', 'base', 'dest'] | |
|
1068 | 1072 | ) |
|
1069 | cmdutil.check_at_most_one_arg(opts, b'confirm', b'dry_run') | |
|
1070 | cmdutil.check_at_most_one_arg(opts, b'rev', b'source', b'base') | |
|
1073 | cmdutil.check_at_most_one_arg(opts, 'confirm', 'dry_run') | |
|
1074 | cmdutil.check_at_most_one_arg(opts, 'rev', 'source', 'base') | |
|
1071 | 1075 | |
|
1072 | 1076 | if action or repo.currenttransaction() is not None: |
|
1073 | 1077 | # in-memory rebase is not compatible with resuming rebases. |
@@ -1075,19 +1079,19 b' def rebase(ui, repo, **opts):' | |||
|
1075 | 1079 | # fail the entire transaction.) |
|
1076 | 1080 | inmemory = False |
|
1077 | 1081 | |
|
1078 | if opts.get(b'auto_orphans'): | |
|
1079 | disallowed_opts = set(opts) - {b'auto_orphans'} | |
|
1082 | if opts.get('auto_orphans'): | |
|
1083 | disallowed_opts = set(opts) - {'auto_orphans'} | |
|
1080 | 1084 | cmdutil.check_incompatible_arguments( |
|
1081 | opts, b'auto_orphans', disallowed_opts | |
|
1085 | opts, 'auto_orphans', disallowed_opts | |
|
1082 | 1086 | ) |
|
1083 | 1087 | |
|
1084 | userrevs = list(repo.revs(opts.get(b'auto_orphans'))) | |
|
1085 | opts[b'rev'] = [revsetlang.formatspec(b'%ld and orphan()', userrevs)] | |
|
1086 | opts[b'dest'] = b'_destautoorphanrebase(SRC)' | |
|
1088 | userrevs = list(repo.revs(opts.get('auto_orphans'))) | |
|
1089 | opts['rev'] = [revsetlang.formatspec(b'%ld and orphan()', userrevs)] | |
|
1090 | opts['dest'] = b'_destautoorphanrebase(SRC)' | |
|
1087 | 1091 | |
|
1088 | if opts.get(b'dry_run') or opts.get(b'confirm'): | |
|
1092 | if opts.get('dry_run') or opts.get('confirm'): | |
|
1089 | 1093 | return _dryrunrebase(ui, repo, action, opts) |
|
1090 | elif action == b'stop': | |
|
1094 | elif action == 'stop': | |
|
1091 | 1095 | rbsrt = rebaseruntime(repo, ui) |
|
1092 | 1096 | with repo.wlock(), repo.lock(): |
|
1093 | 1097 | rbsrt.restorestatus() |
@@ -1136,7 +1140,7 b' def rebase(ui, repo, **opts):' | |||
|
1136 | 1140 | |
|
1137 | 1141 | def _dryrunrebase(ui, repo, action, opts): |
|
1138 | 1142 | rbsrt = rebaseruntime(repo, ui, inmemory=True, dryrun=True, opts=opts) |
|
1139 | confirm = opts.get(b'confirm') | |
|
1143 | confirm = opts.get('confirm') | |
|
1140 | 1144 | if confirm: |
|
1141 | 1145 | ui.status(_(b'starting in-memory rebase\n')) |
|
1142 | 1146 | else: |
@@ -1193,7 +1197,7 b' def _dryrunrebase(ui, repo, action, opts' | |||
|
1193 | 1197 | isabort=True, |
|
1194 | 1198 | backup=False, |
|
1195 | 1199 | suppwarns=True, |
|
1196 | dryrun=opts.get(b'dry_run'), | |
|
1200 | dryrun=opts.get('dry_run'), | |
|
1197 | 1201 | ) |
|
1198 | 1202 | |
|
1199 | 1203 | |
@@ -1203,9 +1207,9 b' def _dorebase(ui, repo, action, opts, in' | |||
|
1203 | 1207 | |
|
1204 | 1208 | |
|
1205 | 1209 | def _origrebase(ui, repo, action, opts, rbsrt): |
|
1206 | assert action != b'stop' | |
|
1210 | assert action != 'stop' | |
|
1207 | 1211 | with repo.wlock(), repo.lock(): |
|
1208 | if opts.get(b'interactive'): | |
|
1212 | if opts.get('interactive'): | |
|
1209 | 1213 | try: |
|
1210 | 1214 | if extensions.find(b'histedit'): |
|
1211 | 1215 | enablehistedit = b'' |
@@ -1231,29 +1235,27 b' def _origrebase(ui, repo, action, opts, ' | |||
|
1231 | 1235 | raise error.InputError( |
|
1232 | 1236 | _(b'cannot use collapse with continue or abort') |
|
1233 | 1237 | ) |
|
1234 | if action == b'abort' and opts.get(b'tool', False): | |
|
1238 | if action == 'abort' and opts.get('tool', False): | |
|
1235 | 1239 | ui.warn(_(b'tool option will be ignored\n')) |
|
1236 | if action == b'continue': | |
|
1240 | if action == 'continue': | |
|
1237 | 1241 | ms = mergestatemod.mergestate.read(repo) |
|
1238 | 1242 | mergeutil.checkunresolved(ms) |
|
1239 | 1243 | |
|
1240 | retcode = rbsrt._prepareabortorcontinue( | |
|
1241 | isabort=(action == b'abort') | |
|
1242 | ) | |
|
1244 | retcode = rbsrt._prepareabortorcontinue(isabort=(action == 'abort')) | |
|
1243 | 1245 | if retcode is not None: |
|
1244 | 1246 | return retcode |
|
1245 | 1247 | else: |
|
1246 | 1248 | # search default destination in this space |
|
1247 | 1249 | # used in the 'hg pull --rebase' case, see issue 5214. |
|
1248 | destspace = opts.get(b'_destspace') | |
|
1250 | destspace = opts.get('_destspace') | |
|
1249 | 1251 | destmap = _definedestmap( |
|
1250 | 1252 | ui, |
|
1251 | 1253 | repo, |
|
1252 | 1254 | rbsrt.inmemory, |
|
1253 | opts.get(b'dest', None), | |
|
1254 | opts.get(b'source', []), | |
|
1255 | opts.get(b'base', []), | |
|
1256 | opts.get(b'rev', []), | |
|
1255 | opts.get('dest', None), | |
|
1256 | opts.get('source', []), | |
|
1257 | opts.get('base', []), | |
|
1258 | opts.get('rev', []), | |
|
1257 | 1259 | destspace=destspace, |
|
1258 | 1260 | ) |
|
1259 | 1261 | retcode = rbsrt._preparenewrebase(destmap) |
@@ -308,7 +308,7 b' class basestore(object):' | |||
|
308 | 308 | # Content matches the intended path |
|
309 | 309 | return True |
|
310 | 310 | return False |
|
311 | except (ValueError, RuntimeError): | |
|
311 | except (ValueError, shallowutil.BadRemotefilelogHeader): | |
|
312 | 312 | pass |
|
313 | 313 | |
|
314 | 314 | return False |
@@ -2,7 +2,10 b' from __future__ import absolute_import' | |||
|
2 | 2 | |
|
3 | 3 | import threading |
|
4 | 4 | |
|
5 | from mercurial.node import hex, nullid | |
|
5 | from mercurial.node import ( | |
|
6 | hex, | |
|
7 | sha1nodeconstants, | |
|
8 | ) | |
|
6 | 9 | from mercurial.pycompat import getattr |
|
7 | 10 | from mercurial import ( |
|
8 | 11 | mdiff, |
@@ -55,7 +58,7 b' class unioncontentstore(basestore.baseun' | |||
|
55 | 58 | """ |
|
56 | 59 | chain = self.getdeltachain(name, node) |
|
57 | 60 | |
|
58 | if chain[-1][ChainIndicies.BASENODE] != nullid: | |
|
61 | if chain[-1][ChainIndicies.BASENODE] != sha1nodeconstants.nullid: | |
|
59 | 62 | # If we didn't receive a full chain, throw |
|
60 | 63 | raise KeyError((name, hex(node))) |
|
61 | 64 | |
@@ -92,7 +95,7 b' class unioncontentstore(basestore.baseun' | |||
|
92 | 95 | deltabasenode. |
|
93 | 96 | """ |
|
94 | 97 | chain = self._getpartialchain(name, node) |
|
95 | while chain[-1][ChainIndicies.BASENODE] != nullid: | |
|
98 | while chain[-1][ChainIndicies.BASENODE] != sha1nodeconstants.nullid: | |
|
96 | 99 | x, x, deltabasename, deltabasenode, x = chain[-1] |
|
97 | 100 | try: |
|
98 | 101 | morechain = self._getpartialchain(deltabasename, deltabasenode) |
@@ -187,7 +190,12 b' class remotefilelogcontentstore(basestor' | |||
|
187 | 190 | # Since remotefilelog content stores only contain full texts, just |
|
188 | 191 | # return that. |
|
189 | 192 | revision = self.get(name, node) |
|
190 | return revision, name, nullid, self.getmeta(name, node) | |
|
193 | return ( | |
|
194 | revision, | |
|
195 | name, | |
|
196 | sha1nodeconstants.nullid, | |
|
197 | self.getmeta(name, node), | |
|
198 | ) | |
|
191 | 199 | |
|
192 | 200 | def getdeltachain(self, name, node): |
|
193 | 201 | # Since remotefilelog content stores just contain full texts, we return |
@@ -195,7 +203,7 b' class remotefilelogcontentstore(basestor' | |||
|
195 | 203 | # The nullid in the deltabasenode slot indicates that the revision is a |
|
196 | 204 | # fulltext. |
|
197 | 205 | revision = self.get(name, node) |
|
198 | return [(name, node, None, nullid, revision)] | |
|
206 | return [(name, node, None, sha1nodeconstants.nullid, revision)] | |
|
199 | 207 | |
|
200 | 208 | def getmeta(self, name, node): |
|
201 | 209 | self._sanitizemetacache() |
@@ -237,7 +245,12 b' class remotecontentstore(object):' | |||
|
237 | 245 | |
|
238 | 246 | def getdelta(self, name, node): |
|
239 | 247 | revision = self.get(name, node) |
|
240 | return revision, name, nullid, self._shared.getmeta(name, node) | |
|
248 | return ( | |
|
249 | revision, | |
|
250 | name, | |
|
251 | sha1nodeconstants.nullid, | |
|
252 | self._shared.getmeta(name, node), | |
|
253 | ) | |
|
241 | 254 | |
|
242 | 255 | def getdeltachain(self, name, node): |
|
243 | 256 | # Since our remote content stores just contain full texts, we return a |
@@ -245,7 +258,7 b' class remotecontentstore(object):' | |||
|
245 | 258 | # The nullid in the deltabasenode slot indicates that the revision is a |
|
246 | 259 | # fulltext. |
|
247 | 260 | revision = self.get(name, node) |
|
248 | return [(name, node, None, nullid, revision)] | |
|
261 | return [(name, node, None, sha1nodeconstants.nullid, revision)] | |
|
249 | 262 | |
|
250 | 263 | def getmeta(self, name, node): |
|
251 | 264 | self._fileservice.prefetch( |
@@ -268,7 +281,7 b' class manifestrevlogstore(object):' | |||
|
268 | 281 | self._store = repo.store |
|
269 | 282 | self._svfs = repo.svfs |
|
270 | 283 | self._revlogs = dict() |
|
271 | self._cl = revlog.revlog(self._svfs, b'00changelog.i') | |
|
284 | self._cl = revlog.revlog(self._svfs, radix=b'00changelog.i') | |
|
272 | 285 | self._repackstartlinkrev = 0 |
|
273 | 286 | |
|
274 | 287 | def get(self, name, node): |
@@ -276,11 +289,11 b' class manifestrevlogstore(object):' | |||
|
276 | 289 | |
|
277 | 290 | def getdelta(self, name, node): |
|
278 | 291 | revision = self.get(name, node) |
|
279 | return revision, name, nullid, self.getmeta(name, node) | |
|
292 | return revision, name, self._cl.nullid, self.getmeta(name, node) | |
|
280 | 293 | |
|
281 | 294 | def getdeltachain(self, name, node): |
|
282 | 295 | revision = self.get(name, node) |
|
283 | return [(name, node, None, nullid, revision)] | |
|
296 | return [(name, node, None, self._cl.nullid, revision)] | |
|
284 | 297 | |
|
285 | 298 | def getmeta(self, name, node): |
|
286 | 299 | rl = self._revlog(name) |
@@ -304,9 +317,9 b' class manifestrevlogstore(object):' | |||
|
304 | 317 | missing.discard(ancnode) |
|
305 | 318 | |
|
306 | 319 | p1, p2 = rl.parents(ancnode) |
|
307 | if p1 != nullid and p1 not in known: | |
|
320 | if p1 != self._cl.nullid and p1 not in known: | |
|
308 | 321 | missing.add(p1) |
|
309 | if p2 != nullid and p2 not in known: | |
|
322 | if p2 != self._cl.nullid and p2 not in known: | |
|
310 | 323 | missing.add(p2) |
|
311 | 324 | |
|
312 | 325 | linknode = self._cl.node(rl.linkrev(ancrev)) |
@@ -328,10 +341,10 b' class manifestrevlogstore(object):' | |||
|
328 | 341 | def _revlog(self, name): |
|
329 | 342 | rl = self._revlogs.get(name) |
|
330 | 343 | if rl is None: |
|
331 | revlogname = b'00manifesttree.i' | |
|
344 | revlogname = b'00manifesttree' | |
|
332 | 345 | if name != b'': |
|
333 | revlogname = b'meta/%s/00manifest.i' % name | |
|
334 | rl = revlog.revlog(self._svfs, revlogname) | |
|
346 | revlogname = b'meta/%s/00manifest' % name | |
|
347 | rl = revlog.revlog(self._svfs, radix=revlogname) | |
|
335 | 348 | self._revlogs[name] = rl |
|
336 | 349 | return rl |
|
337 | 350 | |
@@ -352,7 +365,7 b' class manifestrevlogstore(object):' | |||
|
352 | 365 | if options and options.get(constants.OPTION_PACKSONLY): |
|
353 | 366 | return |
|
354 | 367 | treename = b'' |
|
355 | rl = revlog.revlog(self._svfs, b'00manifesttree.i') | |
|
368 | rl = revlog.revlog(self._svfs, radix=b'00manifesttree') | |
|
356 | 369 | startlinkrev = self._repackstartlinkrev |
|
357 | 370 | endlinkrev = self._repackendlinkrev |
|
358 | 371 | for rev in pycompat.xrange(len(rl) - 1, -1, -1): |
@@ -369,9 +382,9 b' class manifestrevlogstore(object):' | |||
|
369 | 382 | if path[:5] != b'meta/' or path[-2:] != b'.i': |
|
370 | 383 | continue |
|
371 | 384 | |
|
372 | treename = path[5 : -len(b'/00manifest.i')] | |
|
385 | treename = path[5 : -len(b'/00manifest')] | |
|
373 | 386 | |
|
374 | rl = revlog.revlog(self._svfs, path) | |
|
387 | rl = revlog.revlog(self._svfs, indexfile=path[:-2]) | |
|
375 | 388 | for rev in pycompat.xrange(len(rl) - 1, -1, -1): |
|
376 | 389 | linkrev = rl.linkrev(rev) |
|
377 | 390 | if linkrev < startlinkrev: |
@@ -3,7 +3,10 b' from __future__ import absolute_import' | |||
|
3 | 3 | import struct |
|
4 | 4 | import zlib |
|
5 | 5 | |
|
6 | from mercurial.node import hex, nullid | |
|
6 | from mercurial.node import ( | |
|
7 | hex, | |
|
8 | sha1nodeconstants, | |
|
9 | ) | |
|
7 | 10 | from mercurial.i18n import _ |
|
8 | 11 | from mercurial import ( |
|
9 | 12 | pycompat, |
@@ -458,7 +461,7 b' class mutabledatapack(basepack.mutableba' | |||
|
458 | 461 | rawindex = b'' |
|
459 | 462 | fmt = self.INDEXFORMAT |
|
460 | 463 | for node, deltabase, offset, size in entries: |
|
461 | if deltabase == nullid: | |
|
464 | if deltabase == sha1nodeconstants.nullid: | |
|
462 | 465 | deltabaselocation = FULLTEXTINDEXMARK |
|
463 | 466 | else: |
|
464 | 467 | # Instead of storing the deltabase node in the index, let's |
@@ -12,7 +12,7 b' import zlib' | |||
|
12 | 12 | from mercurial.node import ( |
|
13 | 13 | bin, |
|
14 | 14 | hex, |
|
15 | nullid, | |
|
15 | sha1nodeconstants, | |
|
16 | 16 | short, |
|
17 | 17 | ) |
|
18 | 18 | from mercurial.i18n import _ |
@@ -57,9 +57,9 b' def debugremotefilelog(ui, path, **opts)' | |||
|
57 | 57 | _(b"%s => %s %s %s %s\n") |
|
58 | 58 | % (short(node), short(p1), short(p2), short(linknode), copyfrom) |
|
59 | 59 | ) |
|
60 | if p1 != nullid: | |
|
60 | if p1 != sha1nodeconstants.nullid: | |
|
61 | 61 | queue.append(p1) |
|
62 | if p2 != nullid: | |
|
62 | if p2 != sha1nodeconstants.nullid: | |
|
63 | 63 | queue.append(p2) |
|
64 | 64 | |
|
65 | 65 | |
@@ -152,7 +152,7 b' def debugindex(orig, ui, repo, file_=Non' | |||
|
152 | 152 | try: |
|
153 | 153 | pp = r.parents(node) |
|
154 | 154 | except Exception: |
|
155 | pp = [nullid, nullid] | |
|
155 | pp = [repo.nullid, repo.nullid] | |
|
156 | 156 | ui.write( |
|
157 | 157 | b"% 6d % 9d % 7d % 6d % 7d %s %s %s\n" |
|
158 | 158 | % ( |
@@ -197,7 +197,7 b' def debugindexdot(orig, ui, repo, file_)' | |||
|
197 | 197 | node = r.node(i) |
|
198 | 198 | pp = r.parents(node) |
|
199 | 199 | ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i)) |
|
200 | if pp[1] != nullid: | |
|
200 | if pp[1] != repo.nullid: | |
|
201 | 201 | ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i)) |
|
202 | 202 | ui.write(b"}\n") |
|
203 | 203 | |
@@ -212,7 +212,7 b' def verifyremotefilelog(ui, path, **opts' | |||
|
212 | 212 | filepath = os.path.join(root, file) |
|
213 | 213 | size, firstnode, mapping = parsefileblob(filepath, decompress) |
|
214 | 214 | for p1, p2, linknode, copyfrom in pycompat.itervalues(mapping): |
|
215 | if linknode == nullid: | |
|
215 | if linknode == sha1nodeconstants.nullid: | |
|
216 | 216 | actualpath = os.path.relpath(root, path) |
|
217 | 217 | key = fileserverclient.getcachekey( |
|
218 | 218 | b"reponame", actualpath, file |
@@ -371,7 +371,7 b' def _sanitycheck(ui, nodes, bases):' | |||
|
371 | 371 | current = node |
|
372 | 372 | deltabase = bases[current] |
|
373 | 373 | |
|
374 | while deltabase != nullid: | |
|
374 | while deltabase != sha1nodeconstants.nullid: | |
|
375 | 375 | if deltabase not in nodes: |
|
376 | 376 | ui.warn( |
|
377 | 377 | ( |
@@ -397,7 +397,7 b' def _sanitycheck(ui, nodes, bases):' | |||
|
397 | 397 | deltabase = bases[current] |
|
398 | 398 | # Since ``node`` begins a valid chain, reset/memoize its base to nullid |
|
399 | 399 | # so we don't traverse it again. |
|
400 | bases[node] = nullid | |
|
400 | bases[node] = sha1nodeconstants.nullid | |
|
401 | 401 | return failures |
|
402 | 402 | |
|
403 | 403 |
@@ -14,7 +14,7 b' import time' | |||
|
14 | 14 | import zlib |
|
15 | 15 | |
|
16 | 16 | from mercurial.i18n import _ |
|
17 | from mercurial.node import bin, hex, nullid | |
|
17 | from mercurial.node import bin, hex | |
|
18 | 18 | from mercurial import ( |
|
19 | 19 | error, |
|
20 | 20 | pycompat, |
@@ -272,7 +272,7 b' def _getfiles_optimistic(' | |||
|
272 | 272 | def _getfiles_threaded( |
|
273 | 273 | remote, receivemissing, progresstick, missed, idmap, step |
|
274 | 274 | ): |
|
275 | remote._callstream(b"getfiles") | |
|
275 | remote._callstream(b"x_rfl_getfiles") | |
|
276 | 276 | pipeo = remote._pipeo |
|
277 | 277 | pipei = remote._pipei |
|
278 | 278 | |
@@ -599,9 +599,13 b' class fileserverclient(object):' | |||
|
599 | 599 | |
|
600 | 600 | # partition missing nodes into nullid and not-nullid so we can |
|
601 | 601 | # warn about this filtering potentially shadowing bugs. |
|
602 | nullids = len([None for unused, id in missingids if id == nullid]) | |
|
602 | nullids = len( | |
|
603 | [None for unused, id in missingids if id == self.repo.nullid] | |
|
604 | ) | |
|
603 | 605 | if nullids: |
|
604 | missingids = [(f, id) for f, id in missingids if id != nullid] | |
|
606 | missingids = [ | |
|
607 | (f, id) for f, id in missingids if id != self.repo.nullid | |
|
608 | ] | |
|
605 | 609 | repo.ui.develwarn( |
|
606 | 610 | ( |
|
607 | 611 | b'remotefilelog not fetching %d null revs' |
@@ -2,7 +2,10 b' from __future__ import absolute_import' | |||
|
2 | 2 | |
|
3 | 3 | import struct |
|
4 | 4 | |
|
5 | from mercurial.node import hex, nullid | |
|
5 | from mercurial.node import ( | |
|
6 | hex, | |
|
7 | sha1nodeconstants, | |
|
8 | ) | |
|
6 | 9 | from mercurial import ( |
|
7 | 10 | pycompat, |
|
8 | 11 | util, |
@@ -147,9 +150,9 b' class historypack(basepack.basepack):' | |||
|
147 | 150 | pending.remove(ancnode) |
|
148 | 151 | p1node = entry[ANC_P1NODE] |
|
149 | 152 | p2node = entry[ANC_P2NODE] |
|
150 | if p1node != nullid and p1node not in known: | |
|
153 | if p1node != sha1nodeconstants.nullid and p1node not in known: | |
|
151 | 154 | pending.add(p1node) |
|
152 | if p2node != nullid and p2node not in known: | |
|
155 | if p2node != sha1nodeconstants.nullid and p2node not in known: | |
|
153 | 156 | pending.add(p2node) |
|
154 | 157 | |
|
155 | 158 | yield (ancnode, p1node, p2node, entry[ANC_LINKNODE], copyfrom) |
@@ -457,9 +460,9 b' class mutablehistorypack(basepack.mutabl' | |||
|
457 | 460 | def parentfunc(node): |
|
458 | 461 | x, p1, p2, x, x, x = entrymap[node] |
|
459 | 462 | parents = [] |
|
460 | if p1 != nullid: | |
|
463 | if p1 != sha1nodeconstants.nullid: | |
|
461 | 464 | parents.append(p1) |
|
462 | if p2 != nullid: | |
|
465 | if p2 != sha1nodeconstants.nullid: | |
|
463 | 466 | parents.append(p2) |
|
464 | 467 | return parents |
|
465 | 468 |
@@ -1,6 +1,9 b'' | |||
|
1 | 1 | from __future__ import absolute_import |
|
2 | 2 | |
|
3 | from mercurial.node import hex, nullid | |
|
3 | from mercurial.node import ( | |
|
4 | hex, | |
|
5 | sha1nodeconstants, | |
|
6 | ) | |
|
4 | 7 | from . import ( |
|
5 | 8 | basestore, |
|
6 | 9 | shallowutil, |
@@ -51,9 +54,9 b' class unionmetadatastore(basestore.baseu' | |||
|
51 | 54 | missing.append((name, node)) |
|
52 | 55 | continue |
|
53 | 56 | p1, p2, linknode, copyfrom = value |
|
54 | if p1 != nullid and p1 not in known: | |
|
57 | if p1 != sha1nodeconstants.nullid and p1 not in known: | |
|
55 | 58 | queue.append((copyfrom or curname, p1)) |
|
56 | if p2 != nullid and p2 not in known: | |
|
59 | if p2 != sha1nodeconstants.nullid and p2 not in known: | |
|
57 | 60 | queue.append((curname, p2)) |
|
58 | 61 | return missing |
|
59 | 62 |
@@ -9,7 +9,7 b' from __future__ import absolute_import' | |||
|
9 | 9 | import collections |
|
10 | 10 | import time |
|
11 | 11 | |
|
12 | from mercurial.node import bin, hex, nullid, nullrev | |
|
12 | from mercurial.node import bin, hex, nullrev | |
|
13 | 13 | from mercurial import ( |
|
14 | 14 | ancestor, |
|
15 | 15 | context, |
@@ -35,7 +35,7 b' class remotefilectx(context.filectx):' | |||
|
35 | 35 | ancestormap=None, |
|
36 | 36 | ): |
|
37 | 37 | if fileid == nullrev: |
|
38 | fileid = nullid | |
|
38 | fileid = repo.nullid | |
|
39 | 39 | if fileid and len(fileid) == 40: |
|
40 | 40 | fileid = bin(fileid) |
|
41 | 41 | super(remotefilectx, self).__init__( |
@@ -78,7 +78,7 b' class remotefilectx(context.filectx):' | |||
|
78 | 78 | |
|
79 | 79 | @propertycache |
|
80 | 80 | def _linkrev(self): |
|
81 | if self._filenode == nullid: | |
|
81 | if self._filenode == self._repo.nullid: | |
|
82 | 82 | return nullrev |
|
83 | 83 | |
|
84 | 84 | ancestormap = self.ancestormap() |
@@ -174,7 +174,7 b' class remotefilectx(context.filectx):' | |||
|
174 | 174 | |
|
175 | 175 | p1, p2, linknode, copyfrom = ancestormap[self._filenode] |
|
176 | 176 | results = [] |
|
177 | if p1 != nullid: | |
|
177 | if p1 != repo.nullid: | |
|
178 | 178 | path = copyfrom or self._path |
|
179 | 179 | flog = repo.file(path) |
|
180 | 180 | p1ctx = remotefilectx( |
@@ -183,7 +183,7 b' class remotefilectx(context.filectx):' | |||
|
183 | 183 | p1ctx._descendantrev = self.rev() |
|
184 | 184 | results.append(p1ctx) |
|
185 | 185 | |
|
186 | if p2 != nullid: | |
|
186 | if p2 != repo.nullid: | |
|
187 | 187 | path = self._path |
|
188 | 188 | flog = repo.file(path) |
|
189 | 189 | p2ctx = remotefilectx( |
@@ -504,25 +504,25 b' class remoteworkingfilectx(context.worki' | |||
|
504 | 504 | if renamed: |
|
505 | 505 | p1 = renamed |
|
506 | 506 | else: |
|
507 | p1 = (path, pcl[0]._manifest.get(path, nullid)) | |
|
507 | p1 = (path, pcl[0]._manifest.get(path, self._repo.nullid)) | |
|
508 | 508 | |
|
509 | p2 = (path, nullid) | |
|
509 | p2 = (path, self._repo.nullid) | |
|
510 | 510 | if len(pcl) > 1: |
|
511 | p2 = (path, pcl[1]._manifest.get(path, nullid)) | |
|
511 | p2 = (path, pcl[1]._manifest.get(path, self._repo.nullid)) | |
|
512 | 512 | |
|
513 | 513 | m = {} |
|
514 | if p1[1] != nullid: | |
|
514 | if p1[1] != self._repo.nullid: | |
|
515 | 515 | p1ctx = self._repo.filectx(p1[0], fileid=p1[1]) |
|
516 | 516 | m.update(p1ctx.filelog().ancestormap(p1[1])) |
|
517 | 517 | |
|
518 | if p2[1] != nullid: | |
|
518 | if p2[1] != self._repo.nullid: | |
|
519 | 519 | p2ctx = self._repo.filectx(p2[0], fileid=p2[1]) |
|
520 | 520 | m.update(p2ctx.filelog().ancestormap(p2[1])) |
|
521 | 521 | |
|
522 | 522 | copyfrom = b'' |
|
523 | 523 | if renamed: |
|
524 | 524 | copyfrom = renamed[0] |
|
525 | m[None] = (p1[1], p2[1], nullid, copyfrom) | |
|
525 | m[None] = (p1[1], p2[1], self._repo.nullid, copyfrom) | |
|
526 | 526 | self._ancestormap = m |
|
527 | 527 | |
|
528 | 528 | return self._ancestormap |
@@ -10,12 +10,7 b' from __future__ import absolute_import' | |||
|
10 | 10 | import collections |
|
11 | 11 | import os |
|
12 | 12 | |
|
13 | from mercurial.node import ( | |
|
14 | bin, | |
|
15 | nullid, | |
|
16 | wdirfilenodeids, | |
|
17 | wdirid, | |
|
18 | ) | |
|
13 | from mercurial.node import bin | |
|
19 | 14 | from mercurial.i18n import _ |
|
20 | 15 | from mercurial import ( |
|
21 | 16 | ancestor, |
@@ -100,7 +95,7 b' class remotefilelog(object):' | |||
|
100 | 95 | |
|
101 | 96 | pancestors = {} |
|
102 | 97 | queue = [] |
|
103 | if realp1 != nullid: | |
|
98 | if realp1 != self.repo.nullid: | |
|
104 | 99 | p1flog = self |
|
105 | 100 | if copyfrom: |
|
106 | 101 | p1flog = remotefilelog(self.opener, copyfrom, self.repo) |
@@ -108,7 +103,7 b' class remotefilelog(object):' | |||
|
108 | 103 | pancestors.update(p1flog.ancestormap(realp1)) |
|
109 | 104 | queue.append(realp1) |
|
110 | 105 | visited.add(realp1) |
|
111 | if p2 != nullid: | |
|
106 | if p2 != self.repo.nullid: | |
|
112 | 107 | pancestors.update(self.ancestormap(p2)) |
|
113 | 108 | queue.append(p2) |
|
114 | 109 | visited.add(p2) |
@@ -129,10 +124,10 b' class remotefilelog(object):' | |||
|
129 | 124 | pacopyfrom, |
|
130 | 125 | ) |
|
131 | 126 | |
|
132 | if pa1 != nullid and pa1 not in visited: | |
|
127 | if pa1 != self.repo.nullid and pa1 not in visited: | |
|
133 | 128 | queue.append(pa1) |
|
134 | 129 | visited.add(pa1) |
|
135 | if pa2 != nullid and pa2 not in visited: | |
|
130 | if pa2 != self.repo.nullid and pa2 not in visited: | |
|
136 | 131 | queue.append(pa2) |
|
137 | 132 | visited.add(pa2) |
|
138 | 133 | |
@@ -238,7 +233,7 b' class remotefilelog(object):' | |||
|
238 | 233 | returns True if text is different than what is stored. |
|
239 | 234 | """ |
|
240 | 235 | |
|
241 | if node == nullid: | |
|
236 | if node == self.repo.nullid: | |
|
242 | 237 | return True |
|
243 | 238 | |
|
244 | 239 | nodetext = self.read(node) |
@@ -275,13 +270,13 b' class remotefilelog(object):' | |||
|
275 | 270 | return store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0) |
|
276 | 271 | |
|
277 | 272 | def parents(self, node): |
|
278 | if node == nullid: | |
|
279 | return nullid, nullid | |
|
273 | if node == self.repo.nullid: | |
|
274 | return self.repo.nullid, self.repo.nullid | |
|
280 | 275 | |
|
281 | 276 | ancestormap = self.repo.metadatastore.getancestors(self.filename, node) |
|
282 | 277 | p1, p2, linknode, copyfrom = ancestormap[node] |
|
283 | 278 | if copyfrom: |
|
284 | p1 = nullid | |
|
279 | p1 = self.repo.nullid | |
|
285 | 280 | |
|
286 | 281 | return p1, p2 |
|
287 | 282 | |
@@ -317,8 +312,8 b' class remotefilelog(object):' | |||
|
317 | 312 | if prevnode is None: |
|
318 | 313 | basenode = prevnode = p1 |
|
319 | 314 | if basenode == node: |
|
320 | basenode = nullid | |
|
321 | if basenode != nullid: | |
|
315 | basenode = self.repo.nullid | |
|
316 | if basenode != self.repo.nullid: | |
|
322 | 317 | revision = None |
|
323 | 318 | delta = self.revdiff(basenode, node) |
|
324 | 319 | else: |
@@ -336,6 +331,8 b' class remotefilelog(object):' | |||
|
336 | 331 | delta=delta, |
|
337 | 332 | # Sidedata is not supported yet |
|
338 | 333 | sidedata=None, |
|
334 | # Protocol flags are not used yet | |
|
335 | protocol_flags=0, | |
|
339 | 336 | ) |
|
340 | 337 | |
|
341 | 338 | def revdiff(self, node1, node2): |
@@ -380,13 +377,16 b' class remotefilelog(object):' | |||
|
380 | 377 | this is generally only used for bundling and communicating with vanilla |
|
381 | 378 | hg clients. |
|
382 | 379 | """ |
|
383 | if node == nullid: | |
|
380 | if node == self.repo.nullid: | |
|
384 | 381 | return b"" |
|
385 | 382 | if len(node) != 20: |
|
386 | 383 | raise error.LookupError( |
|
387 | 384 | node, self.filename, _(b'invalid revision input') |
|
388 | 385 | ) |
|
389 | if node == wdirid or node in wdirfilenodeids: | |
|
386 | if ( | |
|
387 | node == self.repo.nodeconstants.wdirid | |
|
388 | or node in self.repo.nodeconstants.wdirfilenodeids | |
|
389 | ): | |
|
390 | 390 | raise error.WdirUnsupported |
|
391 | 391 | |
|
392 | 392 | store = self.repo.contentstore |
@@ -432,8 +432,8 b' class remotefilelog(object):' | |||
|
432 | 432 | return self.repo.metadatastore.getancestors(self.filename, node) |
|
433 | 433 | |
|
434 | 434 | def ancestor(self, a, b): |
|
435 | if a == nullid or b == nullid: | |
|
436 | return nullid | |
|
435 | if a == self.repo.nullid or b == self.repo.nullid: | |
|
436 | return self.repo.nullid | |
|
437 | 437 | |
|
438 | 438 | revmap, parentfunc = self._buildrevgraph(a, b) |
|
439 | 439 | nodemap = {v: k for (k, v) in pycompat.iteritems(revmap)} |
@@ -442,13 +442,13 b' class remotefilelog(object):' | |||
|
442 | 442 | if ancs: |
|
443 | 443 | # choose a consistent winner when there's a tie |
|
444 | 444 | return min(map(nodemap.__getitem__, ancs)) |
|
445 | return nullid | |
|
445 | return self.repo.nullid | |
|
446 | 446 | |
|
447 | 447 | def commonancestorsheads(self, a, b): |
|
448 | 448 | """calculate all the heads of the common ancestors of nodes a and b""" |
|
449 | 449 | |
|
450 | if a == nullid or b == nullid: | |
|
451 | return nullid | |
|
450 | if a == self.repo.nullid or b == self.repo.nullid: | |
|
451 | return self.repo.nullid | |
|
452 | 452 | |
|
453 | 453 | revmap, parentfunc = self._buildrevgraph(a, b) |
|
454 | 454 | nodemap = {v: k for (k, v) in pycompat.iteritems(revmap)} |
@@ -472,10 +472,10 b' class remotefilelog(object):' | |||
|
472 | 472 | p1, p2, linknode, copyfrom = pdata |
|
473 | 473 | # Don't follow renames (copyfrom). |
|
474 | 474 | # remotefilectx.ancestor does that. |
|
475 | if p1 != nullid and not copyfrom: | |
|
475 | if p1 != self.repo.nullid and not copyfrom: | |
|
476 | 476 | parents.append(p1) |
|
477 | 477 | allparents.add(p1) |
|
478 | if p2 != nullid: | |
|
478 | if p2 != self.repo.nullid: | |
|
479 | 479 | parents.append(p2) |
|
480 | 480 | allparents.add(p2) |
|
481 | 481 |
@@ -13,7 +13,7 b' import time' | |||
|
13 | 13 | import zlib |
|
14 | 14 | |
|
15 | 15 | from mercurial.i18n import _ |
|
16 | from mercurial.node import bin, hex, nullid | |
|
16 | from mercurial.node import bin, hex | |
|
17 | 17 | from mercurial.pycompat import open |
|
18 | 18 | from mercurial import ( |
|
19 | 19 | changegroup, |
@@ -242,7 +242,7 b' def _loadfileblob(repo, cachepath, path,' | |||
|
242 | 242 | filecachepath = os.path.join(cachepath, path, hex(node)) |
|
243 | 243 | if not os.path.exists(filecachepath) or os.path.getsize(filecachepath) == 0: |
|
244 | 244 | filectx = repo.filectx(path, fileid=node) |
|
245 | if filectx.node() == nullid: | |
|
245 | if filectx.node() == repo.nullid: | |
|
246 | 246 | repo.changelog = changelog.changelog(repo.svfs) |
|
247 | 247 | filectx = repo.filectx(path, fileid=node) |
|
248 | 248 | |
@@ -284,7 +284,7 b' def getflogheads(repo, proto, path):' | |||
|
284 | 284 | """A server api for requesting a filelog's heads""" |
|
285 | 285 | flog = repo.file(path) |
|
286 | 286 | heads = flog.heads() |
|
287 | return b'\n'.join((hex(head) for head in heads if head != nullid)) | |
|
287 | return b'\n'.join((hex(head) for head in heads if head != repo.nullid)) | |
|
288 | 288 | |
|
289 | 289 | |
|
290 | 290 | def getfile(repo, proto, file, node): |
@@ -302,7 +302,7 b' def getfile(repo, proto, file, node):' | |||
|
302 | 302 | if not cachepath: |
|
303 | 303 | cachepath = os.path.join(repo.path, b"remotefilelogcache") |
|
304 | 304 | node = bin(node.strip()) |
|
305 | if node == nullid: | |
|
305 | if node == repo.nullid: | |
|
306 | 306 | return b'0\0' |
|
307 | 307 | return b'0\0' + _loadfileblob(repo, cachepath, file, node) |
|
308 | 308 | |
@@ -327,7 +327,7 b' def getfiles(repo, proto):' | |||
|
327 | 327 | break |
|
328 | 328 | |
|
329 | 329 | node = bin(request[:40]) |
|
330 | if node == nullid: | |
|
330 | if node == repo.nullid: | |
|
331 | 331 | yield b'0\n' |
|
332 | 332 | continue |
|
333 | 333 | |
@@ -380,8 +380,8 b' def createfileblob(filectx):' | |||
|
380 | 380 | ancestortext = b"" |
|
381 | 381 | for ancestorctx in ancestors: |
|
382 | 382 | parents = ancestorctx.parents() |
|
383 | p1 = nullid | |
|
384 | p2 = nullid | |
|
383 | p1 = repo.nullid | |
|
384 | p2 = repo.nullid | |
|
385 | 385 | if len(parents) > 0: |
|
386 | 386 | p1 = parents[0].filenode() |
|
387 | 387 | if len(parents) > 1: |
@@ -4,10 +4,7 b' import os' | |||
|
4 | 4 | import time |
|
5 | 5 | |
|
6 | 6 | from mercurial.i18n import _ |
|
7 | from mercurial.node import ( | |
|
8 | nullid, | |
|
9 | short, | |
|
10 | ) | |
|
7 | from mercurial.node import short | |
|
11 | 8 | from mercurial import ( |
|
12 | 9 | encoding, |
|
13 | 10 | error, |
@@ -586,7 +583,7 b' class repacker(object):' | |||
|
586 | 583 | # Create one contiguous chain and reassign deltabases. |
|
587 | 584 | for i, node in enumerate(orphans): |
|
588 | 585 | if i == 0: |
|
589 | deltabases[node] = (nullid, 0) | |
|
586 | deltabases[node] = (self.repo.nullid, 0) | |
|
590 | 587 | else: |
|
591 | 588 | parent = orphans[i - 1] |
|
592 | 589 | deltabases[node] = (parent, deltabases[parent][1] + 1) |
@@ -676,8 +673,8 b' class repacker(object):' | |||
|
676 | 673 | # of immediate child |
|
677 | 674 | deltatuple = deltabases.get(node, None) |
|
678 | 675 | if deltatuple is None: |
|
679 | deltabase, chainlen = nullid, 0 | |
|
680 | deltabases[node] = (nullid, 0) | |
|
676 | deltabase, chainlen = self.repo.nullid, 0 | |
|
677 | deltabases[node] = (self.repo.nullid, 0) | |
|
681 | 678 | nobase.add(node) |
|
682 | 679 | else: |
|
683 | 680 | deltabase, chainlen = deltatuple |
@@ -692,7 +689,7 b' class repacker(object):' | |||
|
692 | 689 | # file was copied from elsewhere. So don't attempt to do any |
|
693 | 690 | # deltas with the other file. |
|
694 | 691 | if copyfrom: |
|
695 | p1 = nullid | |
|
692 | p1 = self.repo.nullid | |
|
696 | 693 | |
|
697 | 694 | if chainlen < maxchainlen: |
|
698 | 695 | # Record this child as the delta base for its parents. |
@@ -700,9 +697,9 b' class repacker(object):' | |||
|
700 | 697 | # many children, and this will only choose the last one. |
|
701 | 698 | # TODO: record all children and try all deltas to find |
|
702 | 699 | # best |
|
703 | if p1 != nullid: | |
|
700 | if p1 != self.repo.nullid: | |
|
704 | 701 | deltabases[p1] = (node, chainlen + 1) |
|
705 | if p2 != nullid: | |
|
702 | if p2 != self.repo.nullid: | |
|
706 | 703 | deltabases[p2] = (node, chainlen + 1) |
|
707 | 704 | |
|
708 | 705 | # experimental config: repack.chainorphansbysize |
@@ -719,7 +716,7 b' class repacker(object):' | |||
|
719 | 716 | # TODO: Optimize the deltachain fetching. Since we're |
|
720 | 717 | # iterating over the different version of the file, we may |
|
721 | 718 | # be fetching the same deltachain over and over again. |
|
722 | if deltabase != nullid: | |
|
719 | if deltabase != self.repo.nullid: | |
|
723 | 720 | deltaentry = self.data.getdelta(filename, node) |
|
724 | 721 | delta, deltabasename, origdeltabase, meta = deltaentry |
|
725 | 722 | size = meta.get(constants.METAKEYSIZE) |
@@ -791,9 +788,9 b' class repacker(object):' | |||
|
791 | 788 | # If copyfrom == filename, it means the copy history |
|
792 | 789 | # went to come other file, then came back to this one, so we |
|
793 | 790 | # should continue processing it. |
|
794 | if p1 != nullid and copyfrom != filename: | |
|
791 | if p1 != self.repo.nullid and copyfrom != filename: | |
|
795 | 792 | dontprocess.add(p1) |
|
796 | if p2 != nullid: | |
|
793 | if p2 != self.repo.nullid: | |
|
797 | 794 | dontprocess.add(p2) |
|
798 | 795 | continue |
|
799 | 796 | |
@@ -814,9 +811,9 b' class repacker(object):' | |||
|
814 | 811 | def parentfunc(node): |
|
815 | 812 | p1, p2, linknode, copyfrom = ancestors[node] |
|
816 | 813 | parents = [] |
|
817 | if p1 != nullid: | |
|
814 | if p1 != self.repo.nullid: | |
|
818 | 815 | parents.append(p1) |
|
819 | if p2 != nullid: | |
|
816 | if p2 != self.repo.nullid: | |
|
820 | 817 | parents.append(p2) |
|
821 | 818 | return parents |
|
822 | 819 |
@@ -7,7 +7,7 b'' | |||
|
7 | 7 | from __future__ import absolute_import |
|
8 | 8 | |
|
9 | 9 | from mercurial.i18n import _ |
|
10 | from mercurial.node import bin, hex, nullid | |
|
10 | from mercurial.node import bin, hex | |
|
11 | 11 | from mercurial import ( |
|
12 | 12 | bundlerepo, |
|
13 | 13 | changegroup, |
@@ -143,7 +143,7 b' class shallowcg1packer(changegroup.cgpac' | |||
|
143 | 143 | |
|
144 | 144 | def nodechunk(self, revlog, node, prevnode, linknode): |
|
145 | 145 | prefix = b'' |
|
146 | if prevnode == nullid: | |
|
146 | if prevnode == revlog.nullid: | |
|
147 | 147 | delta = revlog.rawdata(node) |
|
148 | 148 | prefix = mdiff.trivialdiffheader(len(delta)) |
|
149 | 149 | else: |
@@ -225,7 +225,17 b' def addchangegroupfiles(' | |||
|
225 | 225 | |
|
226 | 226 | chain = None |
|
227 | 227 | while True: |
|
228 | # returns: (node, p1, p2, cs, deltabase, delta, flags) or None | |
|
228 | # returns: None or ( | |
|
229 | # node, | |
|
230 | # p1, | |
|
231 | # p2, | |
|
232 | # cs, | |
|
233 | # deltabase, | |
|
234 | # delta, | |
|
235 | # flags, | |
|
236 | # sidedata, | |
|
237 | # proto_flags | |
|
238 | # ) | |
|
229 | 239 | revisiondata = source.deltachunk(chain) |
|
230 | 240 | if not revisiondata: |
|
231 | 241 | break |
@@ -245,7 +255,7 b' def addchangegroupfiles(' | |||
|
245 | 255 | processed = set() |
|
246 | 256 | |
|
247 | 257 | def available(f, node, depf, depnode): |
|
248 | if depnode != nullid and (depf, depnode) not in processed: | |
|
258 | if depnode != repo.nullid and (depf, depnode) not in processed: | |
|
249 | 259 | if not (depf, depnode) in revisiondatas: |
|
250 | 260 | # It's not in the changegroup, assume it's already |
|
251 | 261 | # in the repo |
@@ -263,11 +273,11 b' def addchangegroupfiles(' | |||
|
263 | 273 | prefetchfiles = [] |
|
264 | 274 | for f, node in queue: |
|
265 | 275 | revisiondata = revisiondatas[(f, node)] |
|
266 | # revisiondata: (node, p1, p2, cs, deltabase, delta, flags) | |
|
276 | # revisiondata: (node, p1, p2, cs, deltabase, delta, flags, sdata, pfl) | |
|
267 | 277 | dependents = [revisiondata[1], revisiondata[2], revisiondata[4]] |
|
268 | 278 | |
|
269 | 279 | for dependent in dependents: |
|
270 | if dependent == nullid or (f, dependent) in revisiondatas: | |
|
280 | if dependent == repo.nullid or (f, dependent) in revisiondatas: | |
|
271 | 281 | continue |
|
272 | 282 | prefetchfiles.append((f, hex(dependent))) |
|
273 | 283 | |
@@ -287,8 +297,18 b' def addchangegroupfiles(' | |||
|
287 | 297 | fl = repo.file(f) |
|
288 | 298 | |
|
289 | 299 | revisiondata = revisiondatas[(f, node)] |
|
290 | # revisiondata: (node, p1, p2, cs, deltabase, delta, flags) | |
|
291 | node, p1, p2, linknode, deltabase, delta, flags, sidedata = revisiondata | |
|
300 | # revisiondata: (node, p1, p2, cs, deltabase, delta, flags, sdata, pfl) | |
|
301 | ( | |
|
302 | node, | |
|
303 | p1, | |
|
304 | p2, | |
|
305 | linknode, | |
|
306 | deltabase, | |
|
307 | delta, | |
|
308 | flags, | |
|
309 | sidedata, | |
|
310 | proto_flags, | |
|
311 | ) = revisiondata | |
|
292 | 312 | |
|
293 | 313 | if not available(f, node, f, deltabase): |
|
294 | 314 | continue |
@@ -306,7 +326,7 b' def addchangegroupfiles(' | |||
|
306 | 326 | continue |
|
307 | 327 | |
|
308 | 328 | for p in [p1, p2]: |
|
309 | if p != nullid: | |
|
329 | if p != repo.nullid: | |
|
310 | 330 | if not available(f, node, f, p): |
|
311 | 331 | continue |
|
312 | 332 |
@@ -9,7 +9,7 b' from __future__ import absolute_import' | |||
|
9 | 9 | import os |
|
10 | 10 | |
|
11 | 11 | from mercurial.i18n import _ |
|
12 | from mercurial.node import hex, nullid, nullrev | |
|
12 | from mercurial.node import hex, nullrev | |
|
13 | 13 | from mercurial import ( |
|
14 | 14 | encoding, |
|
15 | 15 | error, |
@@ -206,8 +206,8 b' def wraprepo(repo):' | |||
|
206 | 206 | m1 = ctx.p1().manifest() |
|
207 | 207 | files = [] |
|
208 | 208 | for f in ctx.modified() + ctx.added(): |
|
209 | fparent1 = m1.get(f, nullid) | |
|
210 | if fparent1 != nullid: | |
|
209 | fparent1 = m1.get(f, self.nullid) | |
|
210 | if fparent1 != self.nullid: | |
|
211 | 211 | files.append((f, hex(fparent1))) |
|
212 | 212 | self.fileservice.prefetch(files) |
|
213 | 213 | return super(shallowrepository, self).commitctx( |
@@ -233,6 +233,10 b' def bin2int(buf):' | |||
|
233 | 233 | return x |
|
234 | 234 | |
|
235 | 235 | |
|
236 | class BadRemotefilelogHeader(error.StorageError): | |
|
237 | """Exception raised when parsing a remotefilelog blob header fails.""" | |
|
238 | ||
|
239 | ||
|
236 | 240 | def parsesizeflags(raw): |
|
237 | 241 | """given a remotefilelog blob, return (headersize, rawtextsize, flags) |
|
238 | 242 | |
@@ -243,26 +247,30 b' def parsesizeflags(raw):' | |||
|
243 | 247 | size = None |
|
244 | 248 | try: |
|
245 | 249 | index = raw.index(b'\0') |
|
246 | header = raw[:index] | |
|
247 | if header.startswith(b'v'): | |
|
248 | # v1 and above, header starts with 'v' | |
|
249 | if header.startswith(b'v1\n'): | |
|
250 | for s in header.split(b'\n'): | |
|
251 | if s.startswith(constants.METAKEYSIZE): | |
|
252 | size = int(s[len(constants.METAKEYSIZE) :]) | |
|
253 | elif s.startswith(constants.METAKEYFLAG): | |
|
254 | flags = int(s[len(constants.METAKEYFLAG) :]) | |
|
255 | else: | |
|
256 | raise RuntimeError( | |
|
257 | b'unsupported remotefilelog header: %s' % header | |
|
258 | ) | |
|
250 | except ValueError: | |
|
251 | raise BadRemotefilelogHeader( | |
|
252 | "unexpected remotefilelog header: illegal format" | |
|
253 | ) | |
|
254 | header = raw[:index] | |
|
255 | if header.startswith(b'v'): | |
|
256 | # v1 and above, header starts with 'v' | |
|
257 | if header.startswith(b'v1\n'): | |
|
258 | for s in header.split(b'\n'): | |
|
259 | if s.startswith(constants.METAKEYSIZE): | |
|
260 | size = int(s[len(constants.METAKEYSIZE) :]) | |
|
261 | elif s.startswith(constants.METAKEYFLAG): | |
|
262 | flags = int(s[len(constants.METAKEYFLAG) :]) | |
|
259 | 263 | else: |
|
260 | # v0, str(int(size)) is the header | |
|
261 | size = int(header) | |
|
262 | except ValueError: | |
|
263 | raise RuntimeError("unexpected remotefilelog header: illegal format") | |
|
264 | raise BadRemotefilelogHeader( | |
|
265 | b'unsupported remotefilelog header: %s' % header | |
|
266 | ) | |
|
267 | else: | |
|
268 | # v0, str(int(size)) is the header | |
|
269 | size = int(header) | |
|
264 | 270 | if size is None: |
|
265 | raise RuntimeError("unexpected remotefilelog header: no size found") | |
|
271 | raise BadRemotefilelogHeader( | |
|
272 | "unexpected remotefilelog header: no size found" | |
|
273 | ) | |
|
266 | 274 | return index + 1, size, flags |
|
267 | 275 | |
|
268 | 276 |
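
The reworked parser accepts two layouts in front of the NUL byte that ends the header: a bare decimal size (v0) or a `v1` block of newline-separated metadata lines. A self-contained sketch of a v1 blob; the single-character keys stand in for `constants.METAKEYSIZE` and `constants.METAKEYFLAG`, whose exact values are an assumption here:

    METAKEYSIZE = b's'  # assumed value of constants.METAKEYSIZE
    METAKEYFLAG = b'f'  # assumed value of constants.METAKEYFLAG

    rawtext = b'hello\n'
    header = b'v1\n%s%d\n%s%d' % (METAKEYSIZE, len(rawtext), METAKEYFLAG, 0)
    blob = header + b'\x00' + rawtext

    index = blob.index(b'\x00')
    assert blob[:index].startswith(b'v1\n')  # recognized as a v1 header
    assert blob[index + 1 :] == rawtext      # headersize is index + 1
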
@@ -256,6 +256,8 b' def _setupdirstate(ui):' | |||
|
256 | 256 | # Prevent adding files that are outside the sparse checkout |
|
257 | 257 | editfuncs = [ |
|
258 | 258 | b'normal', |
|
259 | b'set_tracked', | |
|
260 | b'set_untracked', | |
|
259 | 261 | b'add', |
|
260 | 262 | b'normallookup', |
|
261 | 263 | b'copy', |
@@ -52,7 +52,6 b' import zlib' | |||
|
52 | 52 | |
|
53 | 53 | from mercurial.i18n import _ |
|
54 | 54 | from mercurial.node import ( |
|
55 | nullid, | |
|
56 | 55 | nullrev, |
|
57 | 56 | sha1nodeconstants, |
|
58 | 57 | short, |
@@ -290,6 +289,7 b' class sqliterevisiondelta(object):' | |||
|
290 | 289 | revision = attr.ib() |
|
291 | 290 | delta = attr.ib() |
|
292 | 291 | sidedata = attr.ib() |
|
292 | protocol_flags = attr.ib() | |
|
293 | 293 | linknode = attr.ib(default=None) |
|
294 | 294 | |
|
295 | 295 | |
@@ -366,12 +366,12 b' class sqlitefilestore(object):' | |||
|
366 | 366 | ) |
|
367 | 367 | |
|
368 | 368 | if p1rev == nullrev: |
|
369 | p1node = nullid | |
|
369 | p1node = sha1nodeconstants.nullid | |
|
370 | 370 | else: |
|
371 | 371 | p1node = self._revtonode[p1rev] |
|
372 | 372 | |
|
373 | 373 | if p2rev == nullrev: |
|
374 | p2node = nullid | |
|
374 | p2node = sha1nodeconstants.nullid | |
|
375 | 375 | else: |
|
376 | 376 | p2node = self._revtonode[p2rev] |
|
377 | 377 | |
@@ -400,7 +400,7 b' class sqlitefilestore(object):' | |||
|
400 | 400 | return iter(pycompat.xrange(len(self._revisions))) |
|
401 | 401 | |
|
402 | 402 | def hasnode(self, node): |
|
403 | if node == nullid: | |
|
403 | if node == sha1nodeconstants.nullid: | |
|
404 | 404 | return False |
|
405 | 405 | |
|
406 | 406 | return node in self._nodetorev |
@@ -411,8 +411,8 b' class sqlitefilestore(object):' | |||
|
411 | 411 | ) |
|
412 | 412 | |
|
413 | 413 | def parents(self, node): |
|
414 | if node == nullid: | |
|
415 | return nullid, nullid | |
|
414 | if node == sha1nodeconstants.nullid: | |
|
415 | return sha1nodeconstants.nullid, sha1nodeconstants.nullid | |
|
416 | 416 | |
|
417 | 417 | if node not in self._revisions: |
|
418 | 418 | raise error.LookupError(node, self._path, _(b'no node')) |
@@ -431,7 +431,7 b' class sqlitefilestore(object):' | |||
|
431 | 431 | return entry.p1rev, entry.p2rev |
|
432 | 432 | |
|
433 | 433 | def rev(self, node): |
|
434 | if node == nullid: | |
|
434 | if node == sha1nodeconstants.nullid: | |
|
435 | 435 | return nullrev |
|
436 | 436 | |
|
437 | 437 | if node not in self._nodetorev: |
@@ -441,7 +441,7 b' class sqlitefilestore(object):' | |||
|
441 | 441 | |
|
442 | 442 | def node(self, rev): |
|
443 | 443 | if rev == nullrev: |
|
444 | return nullid | |
|
444 | return sha1nodeconstants.nullid | |
|
445 | 445 | |
|
446 | 446 | if rev not in self._revtonode: |
|
447 | 447 | raise IndexError(rev) |
@@ -485,7 +485,7 b' class sqlitefilestore(object):' | |||
|
485 | 485 | def heads(self, start=None, stop=None): |
|
486 | 486 | if start is None and stop is None: |
|
487 | 487 | if not len(self): |
|
488 | return [nullid] | |
|
488 | return [sha1nodeconstants.nullid] | |
|
489 | 489 | |
|
490 | 490 | startrev = self.rev(start) if start is not None else nullrev |
|
491 | 491 | stoprevs = {self.rev(n) for n in stop or []} |
@@ -529,7 +529,7 b' class sqlitefilestore(object):' | |||
|
529 | 529 | return len(self.revision(node)) |
|
530 | 530 | |
|
531 | 531 | def revision(self, node, raw=False, _verifyhash=True): |
|
532 | if node in (nullid, nullrev): | |
|
532 | if node in (sha1nodeconstants.nullid, nullrev): | |
|
533 | 533 | return b'' |
|
534 | 534 | |
|
535 | 535 | if isinstance(node, int): |
@@ -596,7 +596,7 b' class sqlitefilestore(object):' | |||
|
596 | 596 | b'unhandled value for nodesorder: %s' % nodesorder |
|
597 | 597 | ) |
|
598 | 598 | |
|
599 | nodes = [n for n in nodes if n != nullid] | |
|
599 | nodes = [n for n in nodes if n != sha1nodeconstants.nullid] | |
|
600 | 600 | |
|
601 | 601 | if not nodes: |
|
602 | 602 | return |
@@ -705,12 +705,12 b' class sqlitefilestore(object):' | |||
|
705 | 705 | raise SQLiteStoreError(b'unhandled revision flag') |
|
706 | 706 | |
|
707 | 707 | if maybemissingparents: |
|
708 | if p1 != nullid and not self.hasnode(p1): | |
|
709 | p1 = nullid | |
|
708 | if p1 != sha1nodeconstants.nullid and not self.hasnode(p1): | |
|
709 | p1 = sha1nodeconstants.nullid | |
|
710 | 710 | storeflags |= FLAG_MISSING_P1 |
|
711 | 711 | |
|
712 | if p2 != nullid and not self.hasnode(p2): | |
|
713 | p2 = nullid | |
|
712 | if p2 != sha1nodeconstants.nullid and not self.hasnode(p2): | |
|
713 | p2 = sha1nodeconstants.nullid | |
|
714 | 714 | storeflags |= FLAG_MISSING_P2 |
|
715 | 715 | |
|
716 | 716 | baserev = self.rev(deltabase) |
@@ -736,7 +736,10 b' class sqlitefilestore(object):' | |||
|
736 | 736 | # Possibly reset parents to make them proper. |
|
737 | 737 | entry = self._revisions[node] |
|
738 | 738 | |
|
739 | if entry.flags & FLAG_MISSING_P1 and p1 != nullid: | |
|
739 | if ( | |
|
740 | entry.flags & FLAG_MISSING_P1 | |
|
741 | and p1 != sha1nodeconstants.nullid | |
|
742 | ): | |
|
740 | 743 | entry.p1node = p1 |
|
741 | 744 | entry.p1rev = self._nodetorev[p1] |
|
742 | 745 | entry.flags &= ~FLAG_MISSING_P1 |
@@ -746,7 +749,10 b' class sqlitefilestore(object):' | |||
|
746 | 749 | (self._nodetorev[p1], entry.flags, entry.rid), |
|
747 | 750 | ) |
|
748 | 751 | |
|
749 | if entry.flags & FLAG_MISSING_P2 and p2 != nullid: | |
|
752 | if ( | |
|
753 | entry.flags & FLAG_MISSING_P2 | |
|
754 | and p2 != sha1nodeconstants.nullid | |
|
755 | ): | |
|
750 | 756 | entry.p2node = p2 |
|
751 | 757 | entry.p2rev = self._nodetorev[p2] |
|
752 | 758 | entry.flags &= ~FLAG_MISSING_P2 |
@@ -761,7 +767,7 b' class sqlitefilestore(object):' | |||
|
761 | 767 | empty = False |
|
762 | 768 | continue |
|
763 | 769 | |
|
764 | if deltabase == nullid: | |
|
770 | if deltabase == sha1nodeconstants.nullid: | |
|
765 | 771 | text = mdiff.patch(b'', delta) |
|
766 | 772 | storedelta = None |
|
767 | 773 | else: |
@@ -1012,7 +1018,7 b' class sqlitefilestore(object):' | |||
|
1012 | 1018 | assert revisiondata is not None |
|
1013 | 1019 | deltabase = p1 |
|
1014 | 1020 | |
|
1015 | if deltabase == nullid: | |
|
1021 | if deltabase == sha1nodeconstants.nullid: | |
|
1016 | 1022 | delta = revisiondata |
|
1017 | 1023 | else: |
|
1018 | 1024 | delta = mdiff.textdiff( |
@@ -1021,7 +1027,7 b' class sqlitefilestore(object):' | |||
|
1021 | 1027 | |
|
1022 | 1028 | # File index stores a pointer to its delta and the parent delta. |
|
1023 | 1029 | # The parent delta is stored via a pointer to the fileindex PK. |
|
1024 | if deltabase == nullid: | |
|
1030 | if deltabase == sha1nodeconstants.nullid: | |
|
1025 | 1031 | baseid = None |
|
1026 | 1032 | else: |
|
1027 | 1033 | baseid = self._revisions[deltabase].rid |
@@ -1055,12 +1061,12 b' class sqlitefilestore(object):' | |||
|
1055 | 1061 | |
|
1056 | 1062 | rev = len(self) |
|
1057 | 1063 | |
|
1058 | if p1 == nullid: | |
|
1064 | if p1 == sha1nodeconstants.nullid: | |
|
1059 | 1065 | p1rev = nullrev |
|
1060 | 1066 | else: |
|
1061 | 1067 | p1rev = self._nodetorev[p1] |
|
1062 | 1068 | |
|
1063 | if p2 == nullid: | |
|
1069 | if p2 == sha1nodeconstants.nullid: | |
|
1064 | 1070 | p2rev = nullrev |
|
1065 | 1071 | else: |
|
1066 | 1072 | p2rev = self._nodetorev[p2] |
@@ -22,7 +22,6 b' from mercurial.pycompat import open' | |||
|
22 | 22 | from mercurial.node import ( |
|
23 | 23 | bin, |
|
24 | 24 | hex, |
|
25 | nullid, | |
|
26 | 25 | short, |
|
27 | 26 | ) |
|
28 | 27 | from mercurial import ( |
@@ -134,6 +133,7 b' class transplants(object):' | |||
|
134 | 133 | class transplanter(object): |
|
135 | 134 | def __init__(self, ui, repo, opts): |
|
136 | 135 | self.ui = ui |
|
136 | self.repo = repo | |
|
137 | 137 | self.path = repo.vfs.join(b'transplant') |
|
138 | 138 | self.opener = vfsmod.vfs(self.path) |
|
139 | 139 | self.transplants = transplants( |
@@ -221,7 +221,7 b' class transplanter(object):' | |||
|
221 | 221 | exchange.pull(repo, source.peer(), heads=[node]) |
|
222 | 222 | |
|
223 | 223 | skipmerge = False |
|
224 | if parents[1] != nullid: | |
|
224 | if parents[1] != repo.nullid: | |
|
225 | 225 | if not opts.get(b'parent'): |
|
226 | 226 | self.ui.note( |
|
227 | 227 | _(b'skipping merge changeset %d:%s\n') |
@@ -516,7 +516,7 b' class transplanter(object):' | |||
|
516 | 516 | def parselog(self, fp): |
|
517 | 517 | parents = [] |
|
518 | 518 | message = [] |
|
519 | node = nullid | |
|
519 | node = self.repo.nullid | |
|
520 | 520 | inmsg = False |
|
521 | 521 | user = None |
|
522 | 522 | date = None |
@@ -568,7 +568,7 b' class transplanter(object):' | |||
|
568 | 568 | def matchfn(node): |
|
569 | 569 | if self.applied(repo, node, root): |
|
570 | 570 | return False |
|
571 | if source.changelog.parents(node)[1] != nullid: | |
|
571 | if source.changelog.parents(node)[1] != repo.nullid: | |
|
572 | 572 | return False |
|
573 | 573 | extra = source.changelog.read(node)[5] |
|
574 | 574 | cnode = extra.get(b'transplant_source') |
@@ -804,7 +804,7 b' def _dotransplant(ui, repo, *revs, **opt' | |||
|
804 | 804 | tp = transplanter(ui, repo, opts) |
|
805 | 805 | |
|
806 | 806 | p1 = repo.dirstate.p1() |
|
807 | if len(repo) > 0 and p1 == nullid: | |
|
807 | if len(repo) > 0 and p1 == repo.nullid: | |
|
808 | 808 | raise error.Abort(_(b'no revision checked out')) |
|
809 | 809 | if opts.get(b'continue'): |
|
810 | 810 | if not tp.canresume(): |
@@ -20,7 +20,6 b' added and removed in the working directo' | |||
|
20 | 20 | from __future__ import absolute_import |
|
21 | 21 | |
|
22 | 22 | from mercurial.i18n import _ |
|
23 | from mercurial.node import nullid | |
|
24 | 23 | |
|
25 | 24 | from mercurial import ( |
|
26 | 25 | cmdutil, |
@@ -113,7 +112,7 b' def _commitfiltered(' | |||
|
113 | 112 | |
|
114 | 113 | new = context.memctx( |
|
115 | 114 | repo, |
|
116 | parents=[base.node(), nullid], | |
|
115 | parents=[base.node(), repo.nullid], | |
|
117 | 116 | text=message, |
|
118 | 117 | files=files, |
|
119 | 118 | filectxfn=filectxfn, |
@@ -154,11 +153,10 b' def uncommit(ui, repo, *pats, **opts):' | |||
|
154 | 153 | If no files are specified, the commit will be pruned, unless --keep is |
|
155 | 154 | given. |
|
156 | 155 | """ |
|
156 | cmdutil.check_note_size(opts) | |
|
157 | cmdutil.resolve_commit_options(ui, opts) | |
|
157 | 158 | opts = pycompat.byteskwargs(opts) |
|
158 | 159 | |
|
159 | cmdutil.checknotesize(ui, opts) | |
|
160 | cmdutil.resolvecommitoptions(ui, opts) | |
|
161 | ||
|
162 | 160 | with repo.wlock(), repo.lock(): |
|
163 | 161 | |
|
164 | 162 | st = repo.status() |
@@ -15,7 +15,6 b' from .node import (' | |||
|
15 | 15 | bin, |
|
16 | 16 | hex, |
|
17 | 17 | short, |
|
18 | wdirid, | |
|
19 | 18 | ) |
|
20 | 19 | from .pycompat import getattr |
|
21 | 20 | from . import ( |
@@ -601,11 +600,12 b' def _diverge(ui, b, path, localmarks, re' | |||
|
601 | 600 | # if an @pathalias already exists, we overwrite (update) it |
|
602 | 601 | if path.startswith(b"file:"): |
|
603 | 602 | path = urlutil.url(path).path |
|
604 | for p, u in ui.configitems(b"paths"): |
|
605 | if u.startswith(b"file:"): | |
|
606 | u = urlutil.url(u).path | |
|
607 | if path == u: | |
|
608 | return b'%s@%s' % (b, p) | |
|
603 | for name, p in urlutil.list_paths(ui): | |
|
604 | loc = p.rawloc | |
|
605 | if loc.startswith(b"file:"): | |
|
606 | loc = urlutil.url(loc).path | |
|
607 | if path == loc: | |
|
608 | return b'%s@%s' % (b, name) | |
|
609 | 609 | |
|
610 | 610 | # assign a unique "@number" suffix newly |
|
611 | 611 | for x in range(1, 100): |
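
A hedged sketch of the new lookup style in the hunk above: urlutil.list_paths(ui) yields (name, path) pairs and the raw location string lives on path.rawloc. The helper name find_path_alias is hypothetical.

    from mercurial.utils import urlutil

    def find_path_alias(ui, location):
        # Normalize file: URLs down to plain filesystem paths, as above.
        if location.startswith(b"file:"):
            location = urlutil.url(location).path
        for name, p in urlutil.list_paths(ui):
            loc = p.rawloc
            if loc.startswith(b"file:"):
                loc = urlutil.url(loc).path
            if loc == location:
                return name
        return None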
@@ -642,7 +642,7 b' def binaryencode(repo, bookmarks):' | |||
|
642 | 642 | binarydata = [] |
|
643 | 643 | for book, node in bookmarks: |
|
644 | 644 | if not node: # None or '' |
|
645 | node = wdirid | |
|
645 | node = repo.nodeconstants.wdirid | |
|
646 | 646 | binarydata.append(_binaryentry.pack(node, len(book))) |
|
647 | 647 | binarydata.append(book) |
|
648 | 648 | return b''.join(binarydata) |
@@ -674,7 +674,7 b' def binarydecode(repo, stream):' | |||
|
674 | 674 | if len(bookmark) < length: |
|
675 | 675 | if entry: |
|
676 | 676 | raise error.Abort(_(b'bad bookmark stream')) |
|
677 | if node == wdirid: | |
|
677 | if node == repo.nodeconstants.wdirid: | |
|
678 | 678 | node = None |
|
679 | 679 | books.append((bookmark, node)) |
|
680 | 680 | return books |
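
The two hunks above use the working-directory node as an on-the-wire sentinel for "no node". A self-contained model of that round trip; the record layout (big-endian node plus name length) is an assumption, the sentinel logic is the point:

    import struct

    ENTRY = struct.Struct(b'>20sH')
    WDIRID = b'\xff' * 20  # stand-in for repo.nodeconstants.wdirid

    def encode(book, node):
        if not node:        # None or b'': bookmark deletion
            node = WDIRID   # wdirid can never name a real revision
        return ENTRY.pack(node, len(book)) + book

    def decode(data):
        node, length = ENTRY.unpack(data[:ENTRY.size])
        book = data[ENTRY.size:ENTRY.size + length]
        return book, (None if node == WDIRID else node)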
@@ -12,7 +12,6 b' import struct' | |||
|
12 | 12 | from .node import ( |
|
13 | 13 | bin, |
|
14 | 14 | hex, |
|
15 | nullid, | |
|
16 | 15 | nullrev, |
|
17 | 16 | ) |
|
18 | 17 | from . import ( |
@@ -189,7 +188,7 b' class branchcache(object):' | |||
|
189 | 188 | self, |
|
190 | 189 | repo, |
|
191 | 190 | entries=(), |
|
192 | tipnode=nullid, |
|
191 | tipnode=None, | |
|
193 | 192 | tiprev=nullrev, |
|
194 | 193 | filteredhash=None, |
|
195 | 194 | closednodes=None, |
@@ -200,7 +199,10 b' class branchcache(object):' | |||
|
200 | 199 | has a given node or not. If it's not provided, we assume that every node |
|
201 | 200 | we have exists in changelog""" |
|
202 | 201 | self._repo = repo |
|
203 | self.tipnode = tipnode |
|
202 | if tipnode is None: | |
|
203 | self.tipnode = repo.nullid | |
|
204 | else: | |
|
205 | self.tipnode = tipnode | |
|
204 | 206 | self.tiprev = tiprev |
|
205 | 207 | self.filteredhash = filteredhash |
|
206 | 208 | # closednodes is a set of nodes that close their branch. If the branch |
@@ -536,7 +538,7 b' class branchcache(object):' | |||
|
536 | 538 | |
|
537 | 539 | if not self.validfor(repo): |
|
538 | 540 | # cache key are not valid anymore |
|
539 | self.tipnode = nullid | |
|
541 | self.tipnode = repo.nullid | |
|
540 | 542 | self.tiprev = nullrev |
|
541 | 543 | for heads in self.iterheads(): |
|
542 | 544 | tiprev = max(cl.rev(node) for node in heads) |
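
Why tipnode=None instead of tipnode=nullid: default arguments are evaluated once at definition time, and the null node now hangs off the repository, so it cannot appear in the signature. A minimal sketch of the sentinel pattern (the same trick reappears in bundlerepository.setparents further down):

    class Cache(object):
        def __init__(self, repo, tipnode=None):
            # Resolve the repo-specific default only once a repo is in hand.
            self.tipnode = repo.nullid if tipnode is None else tipnode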
@@ -158,7 +158,6 b' import sys' | |||
|
158 | 158 | from .i18n import _ |
|
159 | 159 | from .node import ( |
|
160 | 160 | hex, |
|
161 | nullid, | |
|
162 | 161 | short, |
|
163 | 162 | ) |
|
164 | 163 | from . import ( |
@@ -181,6 +180,7 b' from .utils import (' | |||
|
181 | 180 | stringutil, |
|
182 | 181 | urlutil, |
|
183 | 182 | ) |
|
183 | from .interfaces import repository | |
|
184 | 184 | |
|
185 | 185 | urlerr = util.urlerr |
|
186 | 186 | urlreq = util.urlreq |
@@ -1730,8 +1730,8 b' def _addpartsfromopts(ui, repo, bundler,' | |||
|
1730 | 1730 | part.addparam( |
|
1731 | 1731 | b'targetphase', b'%d' % phases.secret, mandatory=False |
|
1732 | 1732 | ) |
|
1733 | if b'exp-sidedata-flag' in repo.requirements: | |
|
1734 | part.addparam(b'exp-sidedata', b'1') |
|
1733 | if repository.REPO_FEATURE_SIDE_DATA in repo.features: | |
|
1734 | part.addparam(b'exp-sidedata', b'1') | |
|
1735 | 1735 | |
|
1736 | 1736 | if opts.get(b'streamv2', False): |
|
1737 | 1737 | addpartbundlestream2(bundler, repo, stream=True) |
@@ -2014,13 +2014,6 b' def handlechangegroup(op, inpart):' | |||
|
2014 | 2014 | ) |
|
2015 | 2015 | scmutil.writereporequirements(op.repo) |
|
2016 | 2016 | |
|
2017 | bundlesidedata = bool(b'exp-sidedata' in inpart.params) | |
|
2018 | reposidedata = bool(b'exp-sidedata-flag' in op.repo.requirements) | |
|
2019 | if reposidedata and not bundlesidedata: | |
|
2020 | msg = b"repository is using sidedata but the bundle source do not" | |
|
2021 | hint = b'this is currently unsupported' | |
|
2022 | raise error.Abort(msg, hint=hint) | |
|
2023 | ||
|
2024 | 2017 | extrakwargs = {} |
|
2025 | 2018 | targetphase = inpart.params.get(b'targetphase') |
|
2026 | 2019 | if targetphase is not None: |
@@ -2576,7 +2569,7 b' def widen_bundle(' | |||
|
2576 | 2569 | fullnodes=commonnodes, |
|
2577 | 2570 | ) |
|
2578 | 2571 | cgdata = packer.generate( |
|
2579 | {nullid}, | |
|
2572 | {repo.nullid}, | |
|
2580 | 2573 | list(commonnodes), |
|
2581 | 2574 | False, |
|
2582 | 2575 | b'narrow_widen', |
@@ -2587,9 +2580,9 b' def widen_bundle(' | |||
|
2587 | 2580 | part.addparam(b'version', cgversion) |
|
2588 | 2581 | if scmutil.istreemanifest(repo): |
|
2589 | 2582 | part.addparam(b'treemanifest', b'1') |
|
2590 | if b'exp-sidedata-flag' in repo.requirements: | |
|
2591 | part.addparam(b'exp-sidedata', b'1') |
|
2592 | wanted = format_remote_wanted_sidedata(repo) |
|
2593 | part.addparam(b'exp-wanted-sidedata', wanted) |
|
2583 | if repository.REPO_FEATURE_SIDE_DATA in repo.features: | |
|
2584 | part.addparam(b'exp-sidedata', b'1') | |
|
2585 | wanted = format_remote_wanted_sidedata(repo) | |
|
2586 | part.addparam(b'exp-wanted-sidedata', wanted) | |
|
2594 | 2587 | |
|
2595 | 2588 | return bundler |
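
Both bundle2 hunks switch from sniffing a requirement string to asking the repository's feature set. A hedged sketch of the shared logic; the module and helper are passed in to keep the sketch self-contained:

    def add_sidedata_params(part, repo, repository_mod, format_wanted):
        # Any storage backend that can serve sidedata advertises the same
        # feature constant, regardless of which requirement enables it.
        if repository_mod.REPO_FEATURE_SIDE_DATA in repo.features:
            part.addparam(b'exp-sidedata', b'1')
            part.addparam(b'exp-wanted-sidedata', format_wanted(repo))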
@@ -167,6 +167,8 b' def parsebundlespec(repo, spec, strict=T' | |||
|
167 | 167 | # Generaldelta repos require v2. |
|
168 | 168 | if requirementsmod.GENERALDELTA_REQUIREMENT in repo.requirements: |
|
169 | 169 | version = b'v2' |
|
170 | elif requirementsmod.REVLOGV2_REQUIREMENT in repo.requirements: | |
|
171 | version = b'v2' | |
|
170 | 172 | # Modern compression engines require v2. |
|
171 | 173 | if compression not in _bundlespecv1compengines: |
|
172 | 174 | version = b'v2' |
@@ -19,7 +19,6 b' import shutil' | |||
|
19 | 19 | from .i18n import _ |
|
20 | 20 | from .node import ( |
|
21 | 21 | hex, |
|
22 | nullid, | |
|
23 | 22 | nullrev, |
|
24 | 23 | ) |
|
25 | 24 | |
@@ -40,6 +39,7 b' from . import (' | |||
|
40 | 39 | phases, |
|
41 | 40 | pycompat, |
|
42 | 41 | revlog, |
|
42 | revlogutils, | |
|
43 | 43 | util, |
|
44 | 44 | vfs as vfsmod, |
|
45 | 45 | ) |
@@ -47,9 +47,13 b' from .utils import (' | |||
|
47 | 47 | urlutil, |
|
48 | 48 | ) |
|
49 | 49 | |
|
50 | from .revlogutils import ( | |
|
51 | constants as revlog_constants, | |
|
52 | ) | |
|
53 | ||
|
50 | 54 | |
|
51 | 55 | class bundlerevlog(revlog.revlog): |
|
52 | def __init__(self, opener, indexfile, cgunpacker, linkmapper): |
|
56 | def __init__(self, opener, target, radix, cgunpacker, linkmapper): | |
|
53 | 57 | # How it works: |
|
54 | 58 | # To retrieve a revision, we need to know the offset of the revision in |
|
55 | 59 | # the bundle (an unbundle object). We store this offset in the index |
@@ -58,7 +62,7 b' class bundlerevlog(revlog.revlog):' | |||
|
58 | 62 | # To differentiate a rev in the bundle from a rev in the revlog, we |
|
59 | 63 | # check revision against repotiprev. |
|
60 | 64 | opener = vfsmod.readonlyvfs(opener) |
|
61 | revlog.revlog.__init__(self, opener, indexfile) |
|
65 | revlog.revlog.__init__(self, opener, target=target, radix=radix) | |
|
62 | 66 | self.bundle = cgunpacker |
|
63 | 67 | n = len(self) |
|
64 | 68 | self.repotiprev = n - 1 |
@@ -81,25 +85,25 b' class bundlerevlog(revlog.revlog):' | |||
|
81 | 85 | for p in (p1, p2): |
|
82 | 86 | if not self.index.has_node(p): |
|
83 | 87 | raise error.LookupError( |
|
84 | p, self.indexfile, _(b"unknown parent") |
|
88 | p, self.display_id, _(b"unknown parent") | |
|
85 | 89 | ) |
|
86 | 90 | |
|
87 | 91 | if not self.index.has_node(deltabase): |
|
88 | 92 | raise LookupError( |
|
89 | deltabase, self.indexfile, _(b'unknown delta base') |
|
93 | deltabase, self.display_id, _(b'unknown delta base') | |
|
90 | 94 | ) |
|
91 | 95 | |
|
92 | 96 | baserev = self.rev(deltabase) |
|
93 | # start, size, full unc. size, base (unused), link, p1, p2, node | |
|
94 | e = ( | |
|
95 | revlog.offset_type(start, flags), | |
|
96 | size, |
|
97 | -1, | |
|
98 | baserev, | |
|
99 | linkrev, | |
|
100 | self.rev(p1), | |
|
101 | self.rev(p2), | |
|
102 | node, | |
|
97 | # start, size, full unc. size, base (unused), link, p1, p2, node, sidedata_offset (unused), sidedata_size (unused) | |
|
98 | e = revlogutils.entry( | |
|
99 | flags=flags, | |
|
100 | data_offset=start, | |
|
101 | data_compressed_length=size, | |
|
102 | data_delta_base=baserev, | |
|
103 | link_rev=linkrev, | |
|
104 | parent_rev_1=self.rev(p1), | |
|
105 | parent_rev_2=self.rev(p2), | |
|
106 | node_id=node, | |
|
103 | 107 | ) |
|
104 | 108 | self.index.append(e) |
|
105 | 109 | self.bundlerevs.add(n) |
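
The positional 8-tuple becomes a keyword-built index entry. A usage sketch, assuming the revlogutils.entry() keywords shown in the hunk; optional fields such as sidedata offsets now default instead of being counted by position:

    from mercurial import revlogutils

    e = revlogutils.entry(
        flags=0,
        data_offset=0,
        data_compressed_length=0,
        data_delta_base=-1,   # no delta parent
        link_rev=0,
        parent_rev_1=-1,
        parent_rev_2=-1,
        node_id=b'\x00' * 20,
    )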
@@ -172,7 +176,12 b' class bundlechangelog(bundlerevlog, chan' | |||
|
172 | 176 | changelog.changelog.__init__(self, opener) |
|
173 | 177 | linkmapper = lambda x: x |
|
174 | 178 | bundlerevlog.__init__( |
|
175 | self, opener, self.indexfile, cgunpacker, linkmapper | |
|
179 | self, | |
|
180 | opener, | |
|
181 | (revlog_constants.KIND_CHANGELOG, None), | |
|
182 | self.radix, | |
|
183 | cgunpacker, | |
|
184 | linkmapper, | |
|
176 | 185 | ) |
|
177 | 186 | |
|
178 | 187 | |
@@ -188,7 +197,12 b' class bundlemanifest(bundlerevlog, manif' | |||
|
188 | 197 | ): |
|
189 | 198 | manifest.manifestrevlog.__init__(self, nodeconstants, opener, tree=dir) |
|
190 | 199 | bundlerevlog.__init__( |
|
191 | self, opener, self.indexfile, cgunpacker, linkmapper | |
|
200 | self, | |
|
201 | opener, | |
|
202 | (revlog_constants.KIND_MANIFESTLOG, dir), | |
|
203 | self._revlog.radix, | |
|
204 | cgunpacker, | |
|
205 | linkmapper, | |
|
192 | 206 | ) |
|
193 | 207 | if dirlogstarts is None: |
|
194 | 208 | dirlogstarts = {} |
@@ -215,7 +229,12 b' class bundlefilelog(filelog.filelog):' | |||
|
215 | 229 | def __init__(self, opener, path, cgunpacker, linkmapper): |
|
216 | 230 | filelog.filelog.__init__(self, opener, path) |
|
217 | 231 | self._revlog = bundlerevlog( |
|
218 | opener, self.indexfile, cgunpacker, linkmapper | |
|
232 | opener, | |
|
233 | # XXX should use the unencoded path | |
|
234 | target=(revlog_constants.KIND_FILELOG, path), | |
|
235 | radix=self._revlog.radix, | |
|
236 | cgunpacker=cgunpacker, | |
|
237 | linkmapper=linkmapper, | |
|
219 | 238 | ) |
|
220 | 239 | |
|
221 | 240 | |
@@ -447,7 +466,9 b' class bundlerepository(object):' | |||
|
447 | 466 | return encoding.getcwd() # always outside the repo |
|
448 | 467 | |
|
449 | 468 | # Check if parents exist in localrepo before setting |
|
450 | def setparents(self, p1, p2=nullid): |
|
469 | def setparents(self, p1, p2=None): | |
|
470 | if p2 is None: | |
|
471 | p2 = self.nullid | |
|
451 | 472 | p1rev = self.changelog.rev(p1) |
|
452 | 473 | p2rev = self.changelog.rev(p2) |
|
453 | 474 | msg = _(b"setting parent to node %s that only exists in the bundle\n") |
@@ -223,7 +223,7 b' PyObject *make_file_foldmap(PyObject *se' | |||
|
223 | 223 | PyObject *file_foldmap = NULL; |
|
224 | 224 | enum normcase_spec spec; |
|
225 | 225 | PyObject *k, *v; |
|
226 | dirstateTupleObject *tuple; |
|
226 | dirstateItemObject *tuple; | |
|
227 | 227 | Py_ssize_t pos = 0; |
|
228 | 228 | const char *table; |
|
229 | 229 | |
@@ -263,7 +263,7 b' PyObject *make_file_foldmap(PyObject *se' | |||
|
263 | 263 | goto quit; |
|
264 | 264 | } |
|
265 | 265 | |
|
266 | tuple = (dirstateTupleObject *)v; |
|
266 | tuple = (dirstateItemObject *)v; | |
|
267 | 267 | if (tuple->state != 'r') { |
|
268 | 268 | PyObject *normed; |
|
269 | 269 | if (table != NULL) { |
@@ -177,7 +177,7 b' static int dirs_fromdict(PyObject *dirs,' | |||
|
177 | 177 | "expected a dirstate tuple"); |
|
178 | 178 | return -1; |
|
179 | 179 | } |
|
180 | if (((dirstateTupleObject *)value)->state == skipchar) |
|
180 | if (((dirstateItemObject *)value)->state == skipchar) | |
|
181 | 181 | continue; |
|
182 | 182 | } |
|
183 | 183 |
@@ -28,6 +28,7 b' typedef struct {' | |||
|
28 | 28 | typedef struct { |
|
29 | 29 | PyObject_HEAD |
|
30 | 30 | PyObject *pydata; |
|
31 | Py_ssize_t nodelen; | |
|
31 | 32 | line *lines; |
|
32 | 33 | int numlines; /* number of line entries */ |
|
33 | 34 | int livelines; /* number of non-deleted lines */ |
@@ -49,12 +50,11 b' static Py_ssize_t pathlen(line *l)' | |||
|
49 | 50 | } |
|
50 | 51 | |
|
51 | 52 | /* get the node value of a single line */ |
|
52 | static PyObject *nodeof(line *l, char *flag) | |
|
53 | static PyObject *nodeof(Py_ssize_t nodelen, line *l, char *flag) | |
|
53 | 54 | { |
|
54 | 55 | char *s = l->start; |
|
55 | 56 | Py_ssize_t llen = pathlen(l); |
|
56 | 57 | Py_ssize_t hlen = l->len - llen - 2; |
|
57 | Py_ssize_t hlen_raw; | |
|
58 | 58 | PyObject *hash; |
|
59 | 59 | if (llen + 1 + 40 + 1 > l->len) { /* path '\0' hash '\n' */ |
|
60 | 60 | PyErr_SetString(PyExc_ValueError, "manifest line too short"); |
@@ -73,36 +73,29 b' static PyObject *nodeof(line *l, char *f' | |||
|
73 | 73 | break; |
|
74 | 74 | } |
|
75 | 75 | |
|
76 | switch (hlen) { | |
|
77 | case 40: /* sha1 */ | |
|
78 | hlen_raw = 20; | |
|
79 | break; | |
|
80 | case 64: /* new hash */ | |
|
81 | hlen_raw = 32; | |
|
82 | break; | |
|
83 | default: | |
|
76 | if (hlen != 2 * nodelen) { | |
|
84 | 77 | PyErr_SetString(PyExc_ValueError, "invalid node length in manifest"); |
|
85 | 78 | return NULL; |
|
86 | 79 | } |
|
87 | hash = unhexlify(s + llen + 1, hlen_raw * 2); |
|
80 | hash = unhexlify(s + llen + 1, nodelen * 2); | |
|
88 | 81 | if (!hash) { |
|
89 | 82 | return NULL; |
|
90 | 83 | } |
|
91 | 84 | if (l->hash_suffix != '\0') { |
|
92 | 85 | char newhash[33]; |
|
93 | memcpy(newhash, PyBytes_AsString(hash), hlen_raw); |
|
86 | memcpy(newhash, PyBytes_AsString(hash), nodelen); | |
|
94 | 87 | Py_DECREF(hash); |
|
95 | newhash[hlen_raw] = l->hash_suffix; |
|
96 | hash = PyBytes_FromStringAndSize(newhash, hlen_raw + 1); |
|
88 | newhash[nodelen] = l->hash_suffix; | |
|
89 | hash = PyBytes_FromStringAndSize(newhash, nodelen + 1); | |
|
97 | 90 | } |
|
98 | 91 | return hash; |
|
99 | 92 | } |
|
100 | 93 | |
|
101 | 94 | /* get the node hash and flags of a line as a tuple */ |
|
102 | static PyObject *hashflags(line *l) | |
|
95 | static PyObject *hashflags(Py_ssize_t nodelen, line *l) | |
|
103 | 96 | { |
|
104 | 97 | char flag; |
|
105 | PyObject *hash = nodeof(l, &flag); | |
|
98 | PyObject *hash = nodeof(nodelen, l, &flag); | |
|
106 | 99 | PyObject *flags; |
|
107 | 100 | PyObject *tup; |
|
108 | 101 | |
@@ -190,17 +183,23 b' static void lazymanifest_init_early(lazy' | |||
|
190 | 183 | static int lazymanifest_init(lazymanifest *self, PyObject *args) |
|
191 | 184 | { |
|
192 | 185 | char *data; |
|
193 | Py_ssize_t len; | |
|
186 | Py_ssize_t nodelen, len; | |
|
194 | 187 | int err, ret; |
|
195 | 188 | PyObject *pydata; |
|
196 | 189 | |
|
197 | 190 | lazymanifest_init_early(self); |
|
198 | if (!PyArg_ParseTuple(args, "S", &pydata)) { | |
|
191 | if (!PyArg_ParseTuple(args, "nS", &nodelen, &pydata)) { | |
|
199 | 192 | return -1; |
|
200 | 193 | } |
|
201 | err = PyBytes_AsStringAndSize(pydata, &data, &len); | |
|
194 | if (nodelen != 20 && nodelen != 32) { | |
|
195 | /* See fixed buffer in nodeof */ | |
|
196 | PyErr_Format(PyExc_ValueError, "Unsupported node length"); | |
|
197 | return -1; | |
|
198 | } | |
|
199 | self->nodelen = nodelen; | |
|
200 | self->dirty = false; | |
|
202 | 201 | |
|
203 | self->dirty = false; | |
|
202 | err = PyBytes_AsStringAndSize(pydata, &data, &len); | |
|
204 | 203 | if (err == -1) |
|
205 | 204 | return -1; |
|
206 | 205 | self->pydata = pydata; |
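
A hedged Python model of the line format the nodelen plumbing above guards: each manifest line is "path\0hex(node)[flag]\n", and the hex digest must be exactly 2 * nodelen characters (40 for SHA-1, 64 for a 32-byte hash):

    import binascii

    def parse_line(line, nodelen):
        path, rest = line.rstrip(b'\n').split(b'\0', 1)
        hexlen = 2 * nodelen
        if len(rest) not in (hexlen, hexlen + 1):
            raise ValueError('invalid node length in manifest')
        node = binascii.unhexlify(rest[:hexlen])
        flag = rest[hexlen:]  # b'', b'l' (symlink) or b'x' (executable)
        return path, node, flag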
@@ -291,17 +290,18 b' static line *lmiter_nextline(lmIter *sel' | |||
|
291 | 290 | |
|
292 | 291 | static PyObject *lmiter_iterentriesnext(PyObject *o) |
|
293 | 292 | { |
|
293 | lmIter *self = (lmIter *)o; | |
|
294 | 294 | Py_ssize_t pl; |
|
295 | 295 | line *l; |
|
296 | 296 | char flag; |
|
297 | 297 | PyObject *ret = NULL, *path = NULL, *hash = NULL, *flags = NULL; |
|
298 | l = lmiter_nextline((lmIter *)o); |
|
298 | l = lmiter_nextline(self); | |
|
299 | 299 | if (!l) { |
|
300 | 300 | goto done; |
|
301 | 301 | } |
|
302 | 302 | pl = pathlen(l); |
|
303 | 303 | path = PyBytes_FromStringAndSize(l->start, pl); |
|
304 | hash = nodeof(l, &flag); | |
|
304 | hash = nodeof(self->m->nodelen, l, &flag); | |
|
305 | 305 | if (!path || !hash) { |
|
306 | 306 | goto done; |
|
307 | 307 | } |
@@ -471,7 +471,7 b' static PyObject *lazymanifest_getitem(la' | |||
|
471 | 471 | PyErr_Format(PyExc_KeyError, "No such manifest entry."); |
|
472 | 472 | return NULL; |
|
473 | 473 | } |
|
474 | return hashflags(hit); | |
|
474 | return hashflags(self->nodelen, hit); | |
|
475 | 475 | } |
|
476 | 476 | |
|
477 | 477 | static int lazymanifest_delitem(lazymanifest *self, PyObject *key) |
@@ -568,13 +568,13 b' static int lazymanifest_setitem(' | |||
|
568 | 568 | pyhash = PyTuple_GetItem(value, 0); |
|
569 | 569 | if (!PyBytes_Check(pyhash)) { |
|
570 | 570 | PyErr_Format(PyExc_TypeError, |
|
571 | "node must be a 20 or 32 bytes string"); |
|
571 | "node must be a %zi bytes string", self->nodelen); | |
|
572 | 572 | return -1; |
|
573 | 573 | } |
|
574 | 574 | hlen = PyBytes_Size(pyhash); |
|
575 | if (hlen != 20 && hlen != 32) { |
|
575 | if (hlen != self->nodelen) { | |
|
576 | 576 | PyErr_Format(PyExc_TypeError, |
|
577 | "node must be a 20 or 32 bytes string"); |
|
577 | "node must be a %zi bytes string", self->nodelen); | |
|
578 | 578 | return -1; |
|
579 | 579 | } |
|
580 | 580 | hash = PyBytes_AsString(pyhash); |
@@ -739,6 +739,7 b' static lazymanifest *lazymanifest_copy(l' | |||
|
739 | 739 | goto nomem; |
|
740 | 740 | } |
|
741 | 741 | lazymanifest_init_early(copy); |
|
742 | copy->nodelen = self->nodelen; | |
|
742 | 743 | copy->numlines = self->numlines; |
|
743 | 744 | copy->livelines = self->livelines; |
|
744 | 745 | copy->dirty = false; |
@@ -777,6 +778,7 b' static lazymanifest *lazymanifest_filter' | |||
|
777 | 778 | goto nomem; |
|
778 | 779 | } |
|
779 | 780 | lazymanifest_init_early(copy); |
|
781 | copy->nodelen = self->nodelen; | |
|
780 | 782 | copy->dirty = true; |
|
781 | 783 | copy->lines = malloc(self->maxlines * sizeof(line)); |
|
782 | 784 | if (!copy->lines) { |
@@ -872,7 +874,7 b' static PyObject *lazymanifest_diff(lazym' | |||
|
872 | 874 | if (!key) |
|
873 | 875 | goto nomem; |
|
874 | 876 | if (result < 0) { |
|
875 | PyObject *l = hashflags(left); | |
|
877 | PyObject *l = hashflags(self->nodelen, left); | |
|
876 | 878 | if (!l) { |
|
877 | 879 | goto nomem; |
|
878 | 880 | } |
@@ -885,7 +887,7 b' static PyObject *lazymanifest_diff(lazym' | |||
|
885 | 887 | Py_DECREF(outer); |
|
886 | 888 | sneedle++; |
|
887 | 889 | } else if (result > 0) { |
|
888 | PyObject *r = hashflags(right); | |
|
890 | PyObject *r = hashflags(self->nodelen, right); | |
|
889 | 891 | if (!r) { |
|
890 | 892 | goto nomem; |
|
891 | 893 | } |
@@ -902,12 +904,12 b' static PyObject *lazymanifest_diff(lazym' | |||
|
902 | 904 | if (left->len != right->len |
|
903 | 905 | || memcmp(left->start, right->start, left->len) |
|
904 | 906 | || left->hash_suffix != right->hash_suffix) { |
|
905 | PyObject *l = hashflags(left); | |
|
907 | PyObject *l = hashflags(self->nodelen, left); | |
|
906 | 908 | PyObject *r; |
|
907 | 909 | if (!l) { |
|
908 | 910 | goto nomem; |
|
909 | 911 | } |
|
910 | r = hashflags(right); | |
|
912 | r = hashflags(self->nodelen, right); | |
|
911 | 913 | if (!r) { |
|
912 | 914 | Py_DECREF(l); |
|
913 | 915 | goto nomem; |
@@ -29,6 +29,10 b'' | |||
|
29 | 29 | |
|
30 | 30 | static const char *const versionerrortext = "Python minor version mismatch"; |
|
31 | 31 | |
|
32 | static const int dirstate_v1_from_p2 = -2; | |
|
33 | static const int dirstate_v1_nonnormal = -1; | |
|
34 | static const int ambiguous_time = -1; | |
|
35 | ||
|
32 | 36 | static PyObject *dict_new_presized(PyObject *self, PyObject *args) |
|
33 | 37 | { |
|
34 | 38 | Py_ssize_t expected_size; |
@@ -40,11 +44,11 b' static PyObject *dict_new_presized(PyObj' | |||
|
40 | 44 | return _dict_new_presized(expected_size); |
|
41 | 45 | } |
|
42 | 46 | |
|
43 | static inline dirstateTupleObject *make_dirstate_tuple(char state, int mode, |
|
44 | int size, int mtime) |
|
47 | static inline dirstateItemObject *make_dirstate_item(char state, int mode, | |
|
48 | int size, int mtime) | |
|
45 | 49 | { |
|
46 | dirstateTupleObject *t = |
|
47 | PyObject_New(dirstateTupleObject, &dirstateTupleType); |
|
50 | dirstateItemObject *t = | |
|
51 | PyObject_New(dirstateItemObject, &dirstateItemType); | |
|
48 | 52 | if (!t) { |
|
49 | 53 | return NULL; |
|
50 | 54 | } |
@@ -55,19 +59,19 b' static inline dirstateTupleObject *make_' | |||
|
55 | 59 | return t; |
|
56 | 60 | } |
|
57 | 61 | |
|
58 | static PyObject *dirstate_tuple_new(PyTypeObject *subtype, PyObject *args, |
|
59 | PyObject *kwds) |
|
62 | static PyObject *dirstate_item_new(PyTypeObject *subtype, PyObject *args, | |
|
63 | PyObject *kwds) | |
|
60 | 64 | { |
|
61 | 65 | /* We do all the initialization here and not a tp_init function because |
|
62 | * dirstate_tuple is immutable. */ |
|
63 | dirstateTupleObject *t; |
|
66 | * dirstate_item is immutable. */ | |
|
67 | dirstateItemObject *t; | |
|
64 | 68 | char state; |
|
65 | 69 | int size, mode, mtime; |
|
66 | 70 | if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) { |
|
67 | 71 | return NULL; |
|
68 | 72 | } |
|
69 | 73 | |
|
70 | t = (dirstateTupleObject *)subtype->tp_alloc(subtype, 1); |
|
74 | t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1); | |
|
71 | 75 | if (!t) { |
|
72 | 76 | return NULL; |
|
73 | 77 | } |
@@ -79,19 +83,19 b' static PyObject *dirstate_tuple_new(PyTy' | |||
|
79 | 83 | return (PyObject *)t; |
|
80 | 84 | } |
|
81 | 85 | |
|
82 | static void dirstate_tuple_dealloc(PyObject *o) |
|
86 | static void dirstate_item_dealloc(PyObject *o) | |
|
83 | 87 | { |
|
84 | 88 | PyObject_Del(o); |
|
85 | 89 | } |
|
86 | 90 | |
|
87 | static Py_ssize_t dirstate_tuple_length(PyObject *o) |
|
91 | static Py_ssize_t dirstate_item_length(PyObject *o) | |
|
88 | 92 | { |
|
89 | 93 | return 4; |
|
90 | 94 | } |
|
91 | 95 | |
|
92 | static PyObject *dirstate_tuple_item(PyObject *o, Py_ssize_t i) |
|
96 | static PyObject *dirstate_item_item(PyObject *o, Py_ssize_t i) | |
|
93 | 97 | { |
|
94 | dirstateTupleObject *t = (dirstateTupleObject *)o; |
|
98 | dirstateItemObject *t = (dirstateItemObject *)o; | |
|
95 | 99 | switch (i) { |
|
96 | 100 | case 0: |
|
97 | 101 | return PyBytes_FromStringAndSize(&t->state, 1); |
@@ -107,56 +111,279 b' static PyObject *dirstate_tuple_item(PyO' | |||
|
107 | 111 | } |
|
108 | 112 | } |
|
109 | 113 | |
|
110 | static PySequenceMethods dirstate_tuple_sq = { |
|
111 | dirstate_tuple_length, /* sq_length */ |
|
112 | 0, /* sq_concat */ |
|
113 | 0, /* sq_repeat */ |
|
114 | dirstate_tuple_item, /* sq_item */ |
|
115 | 0, /* sq_ass_item */ |
|
116 | 0, /* sq_contains */ |
|
117 | 0, /* sq_inplace_concat */ |
|
118 | 0 /* sq_inplace_repeat */ |
|
114 | static PySequenceMethods dirstate_item_sq = { | |
|
115 | dirstate_item_length, /* sq_length */ | |
|
116 | 0, /* sq_concat */ | |
|
117 | 0, /* sq_repeat */ | |
|
118 | dirstate_item_item, /* sq_item */ | |
|
119 | 0, /* sq_ass_item */ | |
|
120 | 0, /* sq_contains */ | |
|
121 | 0, /* sq_inplace_concat */ | |
|
122 | 0 /* sq_inplace_repeat */ | |
|
123 | }; | |
|
124 | ||
|
125 | static PyObject *dirstate_item_v1_state(dirstateItemObject *self) | |
|
126 | { | |
|
127 | return PyBytes_FromStringAndSize(&self->state, 1); | |
|
128 | }; | |
|
129 | ||
|
130 | static PyObject *dirstate_item_v1_mode(dirstateItemObject *self) | |
|
131 | { | |
|
132 | return PyInt_FromLong(self->mode); | |
|
133 | }; | |
|
134 | ||
|
135 | static PyObject *dirstate_item_v1_size(dirstateItemObject *self) | |
|
136 | { | |
|
137 | return PyInt_FromLong(self->size); | |
|
138 | }; | |
|
139 | ||
|
140 | static PyObject *dirstate_item_v1_mtime(dirstateItemObject *self) | |
|
141 | { | |
|
142 | return PyInt_FromLong(self->mtime); | |
|
143 | }; | |
|
144 | ||
|
145 | static PyObject *dm_nonnormal(dirstateItemObject *self) | |
|
146 | { | |
|
147 | if (self->state != 'n' || self->mtime == ambiguous_time) { | |
|
148 | Py_RETURN_TRUE; | |
|
149 | } else { | |
|
150 | Py_RETURN_FALSE; | |
|
151 | } | |
|
152 | }; | |
|
153 | static PyObject *dm_otherparent(dirstateItemObject *self) | |
|
154 | { | |
|
155 | if (self->size == dirstate_v1_from_p2) { | |
|
156 | Py_RETURN_TRUE; | |
|
157 | } else { | |
|
158 | Py_RETURN_FALSE; | |
|
159 | } | |
|
160 | }; | |
|
161 | ||
|
162 | static PyObject *dirstate_item_need_delay(dirstateItemObject *self, | |
|
163 | PyObject *value) | |
|
164 | { | |
|
165 | long now; | |
|
166 | if (!pylong_to_long(value, &now)) { | |
|
167 | return NULL; | |
|
168 | } | |
|
169 | if (self->state == 'n' && self->mtime == now) { | |
|
170 | Py_RETURN_TRUE; | |
|
171 | } else { | |
|
172 | Py_RETURN_FALSE; | |
|
173 | } | |
|
174 | }; | |
|
175 | ||
|
176 | /* This will never change since it's bound to V1, unlike `make_dirstate_item` | |
|
177 | */ | |
|
178 | static inline dirstateItemObject * | |
|
179 | dirstate_item_from_v1_data(char state, int mode, int size, int mtime) | |
|
180 | { | |
|
181 | dirstateItemObject *t = | |
|
182 | PyObject_New(dirstateItemObject, &dirstateItemType); | |
|
183 | if (!t) { | |
|
184 | return NULL; | |
|
185 | } | |
|
186 | t->state = state; | |
|
187 | t->mode = mode; | |
|
188 | t->size = size; | |
|
189 | t->mtime = mtime; | |
|
190 | return t; | |
|
191 | } | |
|
192 | ||
|
193 | /* This will never change since it's bound to V1, unlike `dirstate_item_new` */ | |
|
194 | static PyObject *dirstate_item_from_v1_meth(PyTypeObject *subtype, | |
|
195 | PyObject *args) | |
|
196 | { | |
|
197 | /* We do all the initialization here and not a tp_init function because | |
|
198 | * dirstate_item is immutable. */ | |
|
199 | dirstateItemObject *t; | |
|
200 | char state; | |
|
201 | int size, mode, mtime; | |
|
202 | if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) { | |
|
203 | return NULL; | |
|
204 | } | |
|
205 | ||
|
206 | t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1); | |
|
207 | if (!t) { | |
|
208 | return NULL; | |
|
209 | } | |
|
210 | t->state = state; | |
|
211 | t->mode = mode; | |
|
212 | t->size = size; | |
|
213 | t->mtime = mtime; | |
|
214 | ||
|
215 | return (PyObject *)t; | |
|
216 | }; | |
|
217 | ||
|
218 | /* This means the next status call will have to actually check its content | |
|
219 | to make sure it is correct. */ | |
|
220 | static PyObject *dirstate_item_set_possibly_dirty(dirstateItemObject *self) | |
|
221 | { | |
|
222 | self->mtime = ambiguous_time; | |
|
223 | Py_RETURN_NONE; | |
|
224 | } | |
|
225 | ||
|
226 | static PyMethodDef dirstate_item_methods[] = { | |
|
227 | {"v1_state", (PyCFunction)dirstate_item_v1_state, METH_NOARGS, | |
|
228 | "return a \"state\" suitable for v1 serialization"}, | |
|
229 | {"v1_mode", (PyCFunction)dirstate_item_v1_mode, METH_NOARGS, | |
|
230 | "return a \"mode\" suitable for v1 serialization"}, | |
|
231 | {"v1_size", (PyCFunction)dirstate_item_v1_size, METH_NOARGS, | |
|
232 | "return a \"size\" suitable for v1 serialization"}, | |
|
233 | {"v1_mtime", (PyCFunction)dirstate_item_v1_mtime, METH_NOARGS, | |
|
234 | "return a \"mtime\" suitable for v1 serialization"}, | |
|
235 | {"need_delay", (PyCFunction)dirstate_item_need_delay, METH_O, | |
|
236 | "True if the stored mtime would be ambiguous with the current time"}, | |
|
237 | {"from_v1_data", (PyCFunction)dirstate_item_from_v1_meth, METH_O, | |
|
238 | "build a new DirstateItem object from V1 data"}, | |
|
239 | {"set_possibly_dirty", (PyCFunction)dirstate_item_set_possibly_dirty, | |
|
240 | METH_NOARGS, "mark a file as \"possibly dirty\""}, | |
|
241 | {"dm_nonnormal", (PyCFunction)dm_nonnormal, METH_NOARGS, | |
|
242 | "True is the entry is non-normal in the dirstatemap sense"}, | |
|
243 | {"dm_otherparent", (PyCFunction)dm_otherparent, METH_NOARGS, | |
|
244 | "True is the entry is `otherparent` in the dirstatemap sense"}, | |
|
245 | {NULL} /* Sentinel */ | |
|
119 | 246 | }; |
|
120 | 247 | |
|
121 | PyTypeObject dirstateTupleType = { | |
|
122 | PyVarObject_HEAD_INIT(NULL, 0) /* header */ | |
|
123 | "dirstate_tuple", /* tp_name */ | |
|
124 | sizeof(dirstateTupleObject), /* tp_basicsize */ | |
|
125 | 0, /* tp_itemsize */ | |
|
126 | (destructor)dirstate_tuple_dealloc, /* tp_dealloc */ | |
|
127 | 0, /* tp_print */ | |
|
128 | 0, /* tp_getattr */ | |
|
129 | 0, /* tp_setattr */ | |
|
130 | 0, /* tp_compare */ | |
|
131 | 0, /* tp_repr */ | |
|
132 | 0, /* tp_as_number */ | |
|
133 | &dirstate_tuple_sq, /* tp_as_sequence */ | |
|
134 | 0, /* tp_as_mapping */ | |
|
135 | 0, /* tp_hash */ | |
|
136 | 0, /* tp_call */ | |
|
137 | 0, /* tp_str */ | |
|
138 | 0, /* tp_getattro */ | |
|
139 | 0, /* tp_setattro */ | |
|
140 | 0, /* tp_as_buffer */ | |
|
141 | Py_TPFLAGS_DEFAULT, /* tp_flags */ | |
|
142 | "dirstate tuple", /* tp_doc */ | |
|
143 | 0, /* tp_traverse */ | |
|
144 | 0, /* tp_clear */ | |
|
145 | 0, /* tp_richcompare */ | |
|
146 | 0, /* tp_weaklistoffset */ | |
|
147 | 0, /* tp_iter */ | |
|
148 | 0, /* tp_iternext */ | |
|
149 | 0, /* tp_methods */ | |
|
150 | 0, /* tp_members */ | |
|
151 | 0, /* tp_getset */ | |
|
152 | 0, /* tp_base */ | |
|
153 | 0, /* tp_dict */ | |
|
154 | 0, /* tp_descr_get */ | |
|
155 | 0, /* tp_descr_set */ | |
|
156 | 0, /* tp_dictoffset */ | |
|
157 | 0, /* tp_init */ | |
|
158 | 0, /* tp_alloc */ | |
|
159 | dirstate_tuple_new, /* tp_new */ | |
|
248 | static PyObject *dirstate_item_get_mode(dirstateItemObject *self) | |
|
249 | { | |
|
250 | return PyInt_FromLong(self->mode); | |
|
251 | }; | |
|
252 | ||
|
253 | static PyObject *dirstate_item_get_size(dirstateItemObject *self) | |
|
254 | { | |
|
255 | return PyInt_FromLong(self->size); | |
|
256 | }; | |
|
257 | ||
|
258 | static PyObject *dirstate_item_get_mtime(dirstateItemObject *self) | |
|
259 | { | |
|
260 | return PyInt_FromLong(self->mtime); | |
|
261 | }; | |
|
262 | ||
|
263 | static PyObject *dirstate_item_get_state(dirstateItemObject *self) | |
|
264 | { | |
|
265 | return PyBytes_FromStringAndSize(&self->state, 1); | |
|
266 | }; | |
|
267 | ||
|
268 | static PyObject *dirstate_item_get_tracked(dirstateItemObject *self) | |
|
269 | { | |
|
270 | if (self->state == 'a' || self->state == 'm' || self->state == 'n') { | |
|
271 | Py_RETURN_TRUE; | |
|
272 | } else { | |
|
273 | Py_RETURN_FALSE; | |
|
274 | } | |
|
275 | }; | |
|
276 | ||
|
277 | static PyObject *dirstate_item_get_added(dirstateItemObject *self) | |
|
278 | { | |
|
279 | if (self->state == 'a') { | |
|
280 | Py_RETURN_TRUE; | |
|
281 | } else { | |
|
282 | Py_RETURN_FALSE; | |
|
283 | } | |
|
284 | }; | |
|
285 | ||
|
286 | static PyObject *dirstate_item_get_merged(dirstateItemObject *self) | |
|
287 | { | |
|
288 | if (self->state == 'm') { | |
|
289 | Py_RETURN_TRUE; | |
|
290 | } else { | |
|
291 | Py_RETURN_FALSE; | |
|
292 | } | |
|
293 | }; | |
|
294 | ||
|
295 | static PyObject *dirstate_item_get_merged_removed(dirstateItemObject *self) | |
|
296 | { | |
|
297 | if (self->state == 'r' && self->size == dirstate_v1_nonnormal) { | |
|
298 | Py_RETURN_TRUE; | |
|
299 | } else { | |
|
300 | Py_RETURN_FALSE; | |
|
301 | } | |
|
302 | }; | |
|
303 | ||
|
304 | static PyObject *dirstate_item_get_from_p2(dirstateItemObject *self) | |
|
305 | { | |
|
306 | if (self->state == 'n' && self->size == dirstate_v1_from_p2) { | |
|
307 | Py_RETURN_TRUE; | |
|
308 | } else { | |
|
309 | Py_RETURN_FALSE; | |
|
310 | } | |
|
311 | }; | |
|
312 | ||
|
313 | static PyObject *dirstate_item_get_from_p2_removed(dirstateItemObject *self) | |
|
314 | { | |
|
315 | if (self->state == 'r' && self->size == dirstate_v1_from_p2) { | |
|
316 | Py_RETURN_TRUE; | |
|
317 | } else { | |
|
318 | Py_RETURN_FALSE; | |
|
319 | } | |
|
320 | }; | |
|
321 | ||
|
322 | static PyObject *dirstate_item_get_removed(dirstateItemObject *self) | |
|
323 | { | |
|
324 | if (self->state == 'r') { | |
|
325 | Py_RETURN_TRUE; | |
|
326 | } else { | |
|
327 | Py_RETURN_FALSE; | |
|
328 | } | |
|
329 | }; | |
|
330 | ||
|
331 | static PyGetSetDef dirstate_item_getset[] = { | |
|
332 | {"mode", (getter)dirstate_item_get_mode, NULL, "mode", NULL}, | |
|
333 | {"size", (getter)dirstate_item_get_size, NULL, "size", NULL}, | |
|
334 | {"mtime", (getter)dirstate_item_get_mtime, NULL, "mtime", NULL}, | |
|
335 | {"state", (getter)dirstate_item_get_state, NULL, "state", NULL}, | |
|
336 | {"tracked", (getter)dirstate_item_get_tracked, NULL, "tracked", NULL}, | |
|
337 | {"added", (getter)dirstate_item_get_added, NULL, "added", NULL}, | |
|
338 | {"merged_removed", (getter)dirstate_item_get_merged_removed, NULL, | |
|
339 | "merged_removed", NULL}, | |
|
340 | {"merged", (getter)dirstate_item_get_merged, NULL, "merged", NULL}, | |
|
341 | {"from_p2_removed", (getter)dirstate_item_get_from_p2_removed, NULL, | |
|
342 | "from_p2_removed", NULL}, | |
|
343 | {"from_p2", (getter)dirstate_item_get_from_p2, NULL, "from_p2", NULL}, | |
|
344 | {"removed", (getter)dirstate_item_get_removed, NULL, "removed", NULL}, | |
|
345 | {NULL} /* Sentinel */ | |
|
346 | }; | |
|
347 | ||
|
348 | PyTypeObject dirstateItemType = { | |
|
349 | PyVarObject_HEAD_INIT(NULL, 0) /* header */ | |
|
350 | "dirstate_tuple", /* tp_name */ | |
|
351 | sizeof(dirstateItemObject), /* tp_basicsize */ | |
|
352 | 0, /* tp_itemsize */ | |
|
353 | (destructor)dirstate_item_dealloc, /* tp_dealloc */ | |
|
354 | 0, /* tp_print */ | |
|
355 | 0, /* tp_getattr */ | |
|
356 | 0, /* tp_setattr */ | |
|
357 | 0, /* tp_compare */ | |
|
358 | 0, /* tp_repr */ | |
|
359 | 0, /* tp_as_number */ | |
|
360 | &dirstate_item_sq, /* tp_as_sequence */ | |
|
361 | 0, /* tp_as_mapping */ | |
|
362 | 0, /* tp_hash */ | |
|
363 | 0, /* tp_call */ | |
|
364 | 0, /* tp_str */ | |
|
365 | 0, /* tp_getattro */ | |
|
366 | 0, /* tp_setattro */ | |
|
367 | 0, /* tp_as_buffer */ | |
|
368 | Py_TPFLAGS_DEFAULT, /* tp_flags */ | |
|
369 | "dirstate tuple", /* tp_doc */ | |
|
370 | 0, /* tp_traverse */ | |
|
371 | 0, /* tp_clear */ | |
|
372 | 0, /* tp_richcompare */ | |
|
373 | 0, /* tp_weaklistoffset */ | |
|
374 | 0, /* tp_iter */ | |
|
375 | 0, /* tp_iternext */ | |
|
376 | dirstate_item_methods, /* tp_methods */ | |
|
377 | 0, /* tp_members */ | |
|
378 | dirstate_item_getset, /* tp_getset */ | |
|
379 | 0, /* tp_base */ | |
|
380 | 0, /* tp_dict */ | |
|
381 | 0, /* tp_descr_get */ | |
|
382 | 0, /* tp_descr_set */ | |
|
383 | 0, /* tp_dictoffset */ | |
|
384 | 0, /* tp_init */ | |
|
385 | 0, /* tp_alloc */ | |
|
386 | dirstate_item_new, /* tp_new */ | |
|
160 | 387 | }; |
|
161 | 388 | |
|
162 | 389 | static PyObject *parse_dirstate(PyObject *self, PyObject *args) |
@@ -212,8 +439,8 b' static PyObject *parse_dirstate(PyObject' | |||
|
212 | 439 | goto quit; |
|
213 | 440 | } |
|
214 | 441 | |
|
215 | entry = | |
|
216 | (PyObject *)make_dirstate_tuple(state, mode, size, mtime); | |
|
442 | entry = (PyObject *)dirstate_item_from_v1_data(state, mode, | |
|
443 | size, mtime); | |
|
217 | 444 | cpos = memchr(cur, 0, flen); |
|
218 | 445 | if (cpos) { |
|
219 | 446 | fname = PyBytes_FromStringAndSize(cur, cpos - cur); |
@@ -274,13 +501,13 b' static PyObject *nonnormalotherparentent' | |||
|
274 | 501 | |
|
275 | 502 | pos = 0; |
|
276 | 503 | while (PyDict_Next(dmap, &pos, &fname, &v)) { |
|
277 | dirstateTupleObject *t; |
|
504 | dirstateItemObject *t; | |
|
278 | 505 | if (!dirstate_tuple_check(v)) { |
|
279 | 506 | PyErr_SetString(PyExc_TypeError, |
|
280 | 507 | "expected a dirstate tuple"); |
|
281 | 508 | goto bail; |
|
282 | 509 | } |
|
283 | t = (dirstateTupleObject *)v; |
|
510 | t = (dirstateItemObject *)v; | |
|
284 | 511 | |
|
285 | 512 | if (t->state == 'n' && t->size == -2) { |
|
286 | 513 | if (PySet_Add(otherpset, fname) == -1) { |
@@ -375,7 +602,7 b' static PyObject *pack_dirstate(PyObject ' | |||
|
375 | 602 | p += 20; |
|
376 | 603 | |
|
377 | 604 | for (pos = 0; PyDict_Next(map, &pos, &k, &v);) { |
|
378 | dirstateTupleObject *tuple; |
|
605 | dirstateItemObject *tuple; | |
|
379 | 606 | char state; |
|
380 | 607 | int mode, size, mtime; |
|
381 | 608 | Py_ssize_t len, l; |
@@ -387,7 +614,7 b' static PyObject *pack_dirstate(PyObject ' | |||
|
387 | 614 | "expected a dirstate tuple"); |
|
388 | 615 | goto bail; |
|
389 | 616 | } |
|
390 | tuple = (dirstateTupleObject *)v; |
|
617 | tuple = (dirstateItemObject *)v; | |
|
391 | 618 | |
|
392 | 619 | state = tuple->state; |
|
393 | 620 | mode = tuple->mode; |
@@ -397,7 +624,7 b' static PyObject *pack_dirstate(PyObject ' | |||
|
397 | 624 | /* See pure/parsers.py:pack_dirstate for why we do |
|
398 | 625 | * this. */ |
|
399 | 626 | mtime = -1; |
|
400 | mtime_unset = (PyObject *)make_dirstate_tuple( |
|
627 | mtime_unset = (PyObject *)make_dirstate_item( | |
|
401 | 628 | state, mode, size, mtime); |
|
402 | 629 | if (!mtime_unset) { |
|
403 | 630 | goto bail; |
@@ -668,7 +895,7 b' void dirs_module_init(PyObject *mod);' | |||
|
668 | 895 | void manifest_module_init(PyObject *mod); |
|
669 | 896 | void revlog_module_init(PyObject *mod); |
|
670 | 897 | |
|
671 | static const int version = |
|
898 | static const int version = 20; | |
|
672 | 899 | |
|
673 | 900 | static void module_init(PyObject *mod) |
|
674 | 901 | { |
@@ -690,17 +917,16 b' static void module_init(PyObject *mod)' | |||
|
690 | 917 | revlog_module_init(mod); |
|
691 | 918 | |
|
692 | 919 | capsule = PyCapsule_New( |
|
693 | make_dirstate_tuple, |
|
694 | "mercurial.cext.parsers.make_dirstate_tuple_CAPI", NULL); |
|
920 | make_dirstate_item, | |
|
921 | "mercurial.cext.parsers.make_dirstate_item_CAPI", NULL); | |
|
695 | 922 | if (capsule != NULL) |
|
696 | PyModule_AddObject(mod, "make_dirstate_tuple_CAPI", capsule); |
|
923 | PyModule_AddObject(mod, "make_dirstate_item_CAPI", capsule); | |
|
697 | 924 | |
|
698 | if (PyType_Ready(&dirstateTupleType) < 0) { |
|
925 | if (PyType_Ready(&dirstateItemType) < 0) { | |
|
699 | 926 | return; |
|
700 | 927 | } |
|
701 | Py_INCREF(&dirstateTupleType); |
|
702 | PyModule_AddObject(mod, "dirstatetuple", |
|
703 | (PyObject *)&dirstateTupleType); | |
|
928 | Py_INCREF(&dirstateItemType); | |
|
929 | PyModule_AddObject(mod, "DirstateItem", (PyObject *)&dirstateItemType); | |
|
704 | 930 | } |
|
705 | 931 | |
|
706 | 932 | static int check_python_version(void) |
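
A hedged usage sketch for the renamed type registered above: the module now exports DirstateItem with named accessors and v1_* serializers in place of tuple indexing. Constructor arguments follow the "ciii" signature shown earlier (state, mode, size, mtime); requires the built C extension:

    from mercurial.cext import parsers

    item = parsers.DirstateItem(b'n', 0o644, 5, 12345)
    assert item.state == b'n' and item.tracked
    assert item.v1_mtime() == 12345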
@@ -12,7 +12,7 b' from typing import (' | |||
|
12 | 12 | version: int |
|
13 | 13 | versionerrortext: str |
|
14 | 14 | |
|
15 | class dirstatetuple: |
|
15 | class DirstateItem: | |
|
16 | 16 | __doc__: str |
|
17 | 17 | |
|
18 | 18 | def __len__(self) -> int: ... |
@@ -29,7 +29,7 b' class dirs:' | |||
|
29 | 29 | |
|
30 | 30 | # From manifest.c |
|
31 | 31 | class lazymanifest: |
|
32 | def __init__(self, data: bytes): ... | |
|
32 | def __init__(self, nodelen: int, data: bytes): ... | |
|
33 | 33 | def __iter__(self) -> Iterator[bytes]: ... |
|
34 | 34 | |
|
35 | 35 | def __len__(self) -> int: ... |
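
Usage sketch for the widened constructor: the node length is now an explicit first argument (20 for SHA-1, 32 reserved for the future hash), mirroring the "nS" parse in manifest.c above. Requires the built C extension:

    from mercurial.cext import parsers

    lm = parsers.lazymanifest(20, b'')  # empty manifest, SHA-1-sized nodes
    assert len(lm) == 0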
@@ -99,7 +99,12 b' struct indexObjectStruct {' | |||
|
99 | 99 | int ntlookups; /* # lookups */ |
|
100 | 100 | int ntmisses; /* # lookups that miss the cache */ |
|
101 | 101 | int inlined; |
|
102 | long hdrsize; /* size of index headers. Differs in v1 v.s. v2 format */ |
|
102 | long entry_size; /* size of index headers. Differs in v1 v.s. v2 format | |
|
103 | */ | |
|
104 | long rust_ext_compat; /* compatibility with being used in rust | |
|
105 | extensions */ | |
|
106 | char format_version; /* size of index headers. Differs in v1 v.s. v2 | |
|
107 | format */ | |
|
103 | 108 | }; |
|
104 | 109 | |
|
105 | 110 | static Py_ssize_t index_length(const indexObject *self) |
@@ -115,18 +120,21 b' static Py_ssize_t inline_scan(indexObjec' | |||
|
115 | 120 | static int index_find_node(indexObject *self, const char *node); |
|
116 | 121 | |
|
117 | 122 | #if LONG_MAX == 0x7fffffffL |
|
118 | static const char *const v1_tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#"); |
|
119 | static const char *const v2_tuple_format = PY23("Kiiiiiis#Ki", "Kiiiiiiy#Ki"); | |
|
123 | static const char *const tuple_format = PY23("Kiiiiiis#KiBB", "Kiiiiiiy#KiBB"); | |
|
120 | 124 | #else |
|
121 | static const char *const v1_tuple_format = PY23("kiiiiiis#", "kiiiiiiy#"); |
|
122 | static const char *const v2_tuple_format = PY23("kiiiiiis#ki", "kiiiiiiy#ki"); | |
|
125 | static const char *const tuple_format = PY23("kiiiiiis#kiBB", "kiiiiiiy#kiBB"); | |
|
123 | 126 | #endif |
|
124 | 127 | |
|
125 | 128 | /* A RevlogNG v1 index entry is 64 bytes long. */ |
|
126 | static const long v1_hdrsize = 64; |
|
129 | static const long v1_entry_size = 64; | |
|
127 | 130 | |
|
128 | 131 | /* A Revlogv2 index entry is 96 bytes long. */ |
|
129 | static const long v2_hdrsize = 96; |
|
132 | static const long v2_entry_size = 96; | |
|
133 | ||
|
134 | static const long format_v1 = 1; /* Internal only, could be any number */ | |
|
135 | static const long format_v2 = 2; /* Internal only, could be any number */ | |
|
136 | ||
|
137 | static const char comp_mode_inline = 2; | |
|
130 | 138 | |
|
131 | 139 | static void raise_revlog_error(void) |
|
132 | 140 | { |
@@ -164,7 +172,7 b' cleanup:' | |||
|
164 | 172 | static const char *index_deref(indexObject *self, Py_ssize_t pos) |
|
165 | 173 | { |
|
166 | 174 | if (pos >= self->length) |
|
167 | return self->added + (pos - self->length) * self->hdrsize; |
|
175 | return self->added + (pos - self->length) * self->entry_size; | |
|
168 | 176 | |
|
169 | 177 | if (self->inlined && pos > 0) { |
|
170 | 178 | if (self->offsets == NULL) { |
@@ -181,7 +189,7 b' static const char *index_deref(indexObje' | |||
|
181 | 189 | return self->offsets[pos]; |
|
182 | 190 | } |
|
183 | 191 | |
|
184 | return (const char *)(self->buf.buf) + pos * self->hdrsize; |
|
192 | return (const char *)(self->buf.buf) + pos * self->entry_size; | |
|
185 | 193 | } |
|
186 | 194 | |
|
187 | 195 | /* |
@@ -290,6 +298,7 b' static PyObject *index_get(indexObject *' | |||
|
290 | 298 | uint64_t offset_flags, sidedata_offset; |
|
291 | 299 | int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2, |
|
292 | 300 | sidedata_comp_len; |
|
301 | char data_comp_mode, sidedata_comp_mode; | |
|
293 | 302 | const char *c_node_id; |
|
294 | 303 | const char *data; |
|
295 | 304 | Py_ssize_t length = index_length(self); |
@@ -328,19 +337,70 b' static PyObject *index_get(indexObject *' | |||
|
328 | 337 | parent_2 = getbe32(data + 28); |
|
329 | 338 | c_node_id = data + 32; |
|
330 | 339 | |
|
331 | if (self->hdrsize == v1_hdrsize) { |
|
332 | return Py_BuildValue(v1_tuple_format, offset_flags, comp_len, | |
|
333 | uncomp_len, base_rev, link_rev, parent_1, | |
|
334 | parent_2, c_node_id, self->nodelen); | |
|
340 | if (self->format_version == format_v1) { | |
|
341 | sidedata_offset = 0; | |
|
342 | sidedata_comp_len = 0; | |
|
343 | data_comp_mode = comp_mode_inline; | |
|
344 | sidedata_comp_mode = comp_mode_inline; | |
|
335 | 345 | } else { |
|
336 | 346 | sidedata_offset = getbe64(data + 64); |
|
337 | 347 | sidedata_comp_len = getbe32(data + 72); |
|
338 | ||
|
339 | return Py_BuildValue(v2_tuple_format, offset_flags, comp_len, | |
|
340 | uncomp_len, base_rev, link_rev, parent_1, | |
|
341 | parent_2, c_node_id, self->nodelen, | |
|
342 | sidedata_offset, sidedata_comp_len); | |
|
348 | data_comp_mode = data[76] & 3; | |
|
349 | sidedata_comp_mode = ((data[76] >> 2) & 3); | |
|
350 | } | |
|
351 | ||
|
352 | return Py_BuildValue(tuple_format, offset_flags, comp_len, uncomp_len, | |
|
353 | base_rev, link_rev, parent_1, parent_2, c_node_id, | |
|
354 | self->nodelen, sidedata_offset, sidedata_comp_len, | |
|
355 | data_comp_mode, sidedata_comp_mode); | |
|
356 | } | |
|
357 | /* | |
|
358 | * Pack header information in binary | |
|
359 | */ | |
|
360 | static PyObject *index_pack_header(indexObject *self, PyObject *args) | |
|
361 | { | |
|
362 | int header; | |
|
363 | char out[4]; | |
|
364 | if (!PyArg_ParseTuple(args, "I", &header)) { | |
|
365 | return NULL; | |
|
366 | } | |
|
367 | if (self->format_version != format_v1) { | |
|
368 | PyErr_Format(PyExc_RuntimeError, | |
|
369 | "version header should go in the docket, not the " | |
|
370 | "index: %lu", | |
|
371 | header); | |
|
372 | return NULL; | |
|
343 | 373 | } |
|
374 | putbe32(header, out); | |
|
375 | return PyBytes_FromStringAndSize(out, 4); | |
|
376 | } | |
|
377 | /* | |
|
378 | * Return the raw binary string representing a revision | |
|
379 | */ | |
|
380 | static PyObject *index_entry_binary(indexObject *self, PyObject *value) | |
|
381 | { | |
|
382 | long rev; | |
|
383 | const char *data; | |
|
384 | Py_ssize_t length = index_length(self); | |
|
385 | ||
|
386 | if (!pylong_to_long(value, &rev)) { | |
|
387 | return NULL; | |
|
388 | } | |
|
389 | if (rev < 0 || rev >= length) { | |
|
390 | PyErr_Format(PyExc_ValueError, "revlog index out of range: %ld", | |
|
391 | rev); | |
|
392 | return NULL; | |
|
393 | }; | |
|
394 | ||
|
395 | data = index_deref(self, rev); | |
|
396 | if (data == NULL) | |
|
397 | return NULL; | |
|
398 | if (rev == 0 && self->format_version == format_v1) { | |
|
399 | /* the header is eating the start of the first entry */ | |
|
400 | return PyBytes_FromStringAndSize(data + 4, | |
|
401 | self->entry_size - 4); | |
|
402 | } | |
|
403 | return PyBytes_FromStringAndSize(data, self->entry_size); | |
|
344 | 404 | } |
|
345 | 405 | |
|
346 | 406 | /* |
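
entry_binary() above special-cases revision 0 because, in the v1 format, the 4-byte version header is stored on top of the first index entry. A hedged Python model of that overlay:

    V1_ENTRY_SIZE = 64

    def entry_bytes(index_data, rev):
        # Slice one fixed-size record out of the index buffer.
        start = rev * V1_ENTRY_SIZE
        entry = index_data[start:start + V1_ENTRY_SIZE]
        # Revision 0 loses its first 4 bytes to the version header.
        return entry[4:] if rev == 0 else entry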
@@ -393,46 +453,53 b' static PyObject *index_append(indexObjec' | |||
|
393 | 453 | { |
|
394 | 454 | uint64_t offset_flags, sidedata_offset; |
|
395 | 455 | int rev, comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2; |
|
456 | char data_comp_mode, sidedata_comp_mode; | |
|
396 | 457 | Py_ssize_t c_node_id_len, sidedata_comp_len; |
|
397 | 458 | const char *c_node_id; |
|
459 | char comp_field; | |
|
398 | 460 | char *data; |
|
399 | 461 | |
|
400 | if (self->hdrsize == v1_hdrsize) { | |
|
401 | if (!PyArg_ParseTuple(obj, v1_tuple_format, &offset_flags, | |
|
402 | &comp_len, &uncomp_len, &base_rev, | |
|
403 | &link_rev, &parent_1, &parent_2, | |
|
404 | &c_node_id, &c_node_id_len)) { |
|
405 | PyErr_SetString(PyExc_TypeError, "8-tuple required"); |
|
406 | return NULL; |
|
407 | } | |
|
408 | } else { | |
|
409 | if (!PyArg_ParseTuple(obj, v2_tuple_format, &offset_flags, | |
|
410 | &comp_len, &uncomp_len, &base_rev, | |
|
411 | &link_rev, &parent_1, &parent_2, | |
|
412 | &c_node_id, &c_node_id_len, | |
|
413 | &sidedata_offset, &sidedata_comp_len)) { | |
|
414 | PyErr_SetString(PyExc_TypeError, "10-tuple required"); | |
|
415 | return NULL; | |
|
416 | } | |
|
462 | if (!PyArg_ParseTuple(obj, tuple_format, &offset_flags, &comp_len, | |
|
463 | &uncomp_len, &base_rev, &link_rev, &parent_1, | |
|
464 | &parent_2, &c_node_id, &c_node_id_len, | |
|
465 | &sidedata_offset, &sidedata_comp_len, | |
|
466 | &data_comp_mode, &sidedata_comp_mode)) { | |
|
467 | PyErr_SetString(PyExc_TypeError, "11-tuple required"); | |
|
468 | return NULL; | |
|
417 | 469 | } |
|
418 | 470 | |
|
419 | 471 | if (c_node_id_len != self->nodelen) { |
|
420 | 472 | PyErr_SetString(PyExc_TypeError, "invalid node"); |
|
421 | 473 | return NULL; |
|
422 | 474 | } |
|
475 | if (self->format_version == format_v1) { | |
|
476 | ||
|
477 | if (data_comp_mode != comp_mode_inline) { | |
|
478 | PyErr_Format(PyExc_ValueError, | |
|
479 | "invalid data compression mode: %i", | |
|
480 | data_comp_mode); | |
|
481 | return NULL; | |
|
482 | } | |
|
483 | if (sidedata_comp_mode != comp_mode_inline) { | |
|
484 | PyErr_Format(PyExc_ValueError, | |
|
485 | "invalid sidedata compression mode: %i", | |
|
486 | sidedata_comp_mode); | |
|
487 | return NULL; | |
|
488 | } | |
|
489 | } | |
|
423 | 490 | |
|
424 | 491 | if (self->new_length == self->added_length) { |
|
425 | 492 | size_t new_added_length = |
|
426 | 493 | self->added_length ? self->added_length * 2 : 4096; |
|
427 | void *new_added = PyMem_Realloc(self->added, new_added_length * |
|
428 | self->hdrsize); | |
|
494 | void *new_added = PyMem_Realloc( | |
|
495 | self->added, new_added_length * self->entry_size); | |
|
429 | 496 | if (!new_added) |
|
430 | 497 | return PyErr_NoMemory(); |
|
431 | 498 | self->added = new_added; |
|
432 | 499 | self->added_length = new_added_length; |
|
433 | 500 | } |
|
434 | 501 | rev = self->length + self->new_length; |
|
435 | data = self->added + self->hdrsize * self->new_length++; |
|
502 | data = self->added + self->entry_size * self->new_length++; | |
|
436 | 503 | putbe32(offset_flags >> 32, data); |
|
437 | 504 | putbe32(offset_flags & 0xffffffffU, data + 4); |
|
438 | 505 | putbe32(comp_len, data + 8); |
@@ -444,11 +511,14 b' static PyObject *index_append(indexObjec' | |||
|
444 | 511 | memcpy(data + 32, c_node_id, c_node_id_len); |
|
445 | 512 | /* Padding since SHA-1 is only 20 bytes for now */ |
|
446 | 513 | memset(data + 32 + c_node_id_len, 0, 32 - c_node_id_len); |
|
447 | if (self->hdrsize != v1_hdrsize) { | |
|
514 | if (self->format_version == format_v2) { | |
|
448 | 515 | putbe64(sidedata_offset, data + 64); |
|
449 | 516 | putbe32(sidedata_comp_len, data + 72); |
|
517 | comp_field = data_comp_mode & 3; | |
|
518 | comp_field = comp_field | (sidedata_comp_mode & 3) << 2; | |
|
519 | data[76] = comp_field; | |
|
450 | 520 | /* Padding for 96 bytes alignment */ |
|
451 | memset(data + 76, 0, self->hdrsize - 76); |
|
521 | memset(data + 77, 0, self->entry_size - 77); | |
|
452 | 522 | } |
|
453 | 523 | |
|
454 | 524 | if (self->ntinitialized) |
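
Byte 76 of a v2 entry packs two 2-bit fields: the data compression mode in bits 0-1 and the sidedata compression mode in bits 2-3, exactly as built in comp_field above. A runnable model:

    def pack_comp_modes(data_mode, sidedata_mode):
        return (data_mode & 3) | ((sidedata_mode & 3) << 2)

    def unpack_comp_modes(comp_field):
        return comp_field & 3, (comp_field >> 2) & 3

    assert unpack_comp_modes(pack_comp_modes(2, 2)) == (2, 2)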
@@ -463,17 +533,18 b' static PyObject *index_append(indexObjec' | |||
|
463 | 533 | inside the transaction that creates the given revision. */ |
|
464 | 534 | static PyObject *index_replace_sidedata_info(indexObject *self, PyObject *args) |
|
465 | 535 | { |
|
466 | uint64_t sidedata_offset; | |
|
536 | uint64_t offset_flags, sidedata_offset; | |
|
467 | 537 | int rev; |
|
538 | char comp_mode; | |
|
468 | 539 | Py_ssize_t sidedata_comp_len; |
|
469 | 540 | char *data; |
|
470 | 541 | #if LONG_MAX == 0x7fffffffL |
|
471 | const char *const sidedata_format = PY23("nKi", "nKi"); | |
|
542 | const char *const sidedata_format = PY23("nKiKB", "nKiKB"); | |
|
472 | 543 | #else |
|
473 | const char *const sidedata_format = PY23("nki", "nki"); | |
|
544 | const char *const sidedata_format = PY23("nkikB", "nkikB"); | |
|
474 | 545 | #endif |
|
475 | 546 | |
|
476 | if (self->hdrsize == v1_hdrsize || self->inlined) { |
|
547 | if (self->entry_size == v1_entry_size || self->inlined) { | |
|
477 | 548 | /* |
|
478 | 549 | There is a bug in the transaction handling when going from an |
|
479 | 550 | inline revlog to a separate index and data file. Turn it off until |
@@ -485,7 +556,7 b' static PyObject *index_replace_sidedata_' | |||
|
485 | 556 | } |
|
486 | 557 | |
|
487 | 558 | if (!PyArg_ParseTuple(args, sidedata_format, &rev, &sidedata_offset, |
|
488 | &sidedata_comp_len)) | |
|
559 | &sidedata_comp_len, &offset_flags, &comp_mode)) | |
|
489 | 560 | return NULL; |
|
490 | 561 | |
|
491 | 562 | if (rev < 0 || rev >= index_length(self)) { |
@@ -501,9 +572,11 b' static PyObject *index_replace_sidedata_' | |||
|
501 | 572 | |
|
502 | 573 | /* Find the newly added node, offset from the "already on-disk" length |
|
503 | 574 | */ |
|
504 | data = self->added + self->hdrsize * (rev - self->length); |
|
575 | data = self->added + self->entry_size * (rev - self->length); | |
|
576 | putbe64(offset_flags, data); | |
|
505 | 577 | putbe64(sidedata_offset, data + 64); |
|
506 | 578 | putbe32(sidedata_comp_len, data + 72); |
|
579 | data[76] = (data[76] & ~(3 << 2)) | ((comp_mode & 3) << 2); | |
|
507 | 580 | |
|
508 | 581 | Py_RETURN_NONE; |
|
509 | 582 | } |
@@ -2652,17 +2725,17 b' static Py_ssize_t inline_scan(indexObjec' | |||
|
2652 | 2725 | const char *data = (const char *)self->buf.buf; |
|
2653 | 2726 | Py_ssize_t pos = 0; |
|
2654 | 2727 | Py_ssize_t end = self->buf.len; |
|
2655 | long incr = self->hdrsize; |
|
2728 | long incr = self->entry_size; | |
|
2656 | 2729 | Py_ssize_t len = 0; |
|
2657 | 2730 | |
|
2658 | while (pos + self->hdrsize <= end && pos >= 0) { |
|
2731 | while (pos + self->entry_size <= end && pos >= 0) { | |
|
2659 | 2732 | uint32_t comp_len, sidedata_comp_len = 0; |
|
2660 | 2733 | /* 3rd element of header is length of compressed inline data */ |
|
2661 | 2734 | comp_len = getbe32(data + pos + 8); |
|
2662 | if (self->hdrsize == v2_hdrsize) { |
|
2735 | if (self->entry_size == v2_entry_size) { | |
|
2663 | 2736 | sidedata_comp_len = getbe32(data + pos + 72); |
|
2664 | 2737 | } |
|
2665 | incr = self->hdrsize + comp_len + sidedata_comp_len; |
|
2738 | incr = self->entry_size + comp_len + sidedata_comp_len; | |
|
2666 | 2739 | if (offsets) |
|
2667 | 2740 | offsets[len] = data + pos; |
|
2668 | 2741 | len++; |
@@ -2699,6 +2772,7 b' static int index_init(indexObject *self,' | |||
|
2699 | 2772 | self->offsets = NULL; |
|
2700 | 2773 | self->nodelen = 20; |
|
2701 | 2774 | self->nullentry = NULL; |
|
2775 | self->rust_ext_compat = 1; | |
|
2702 | 2776 | |
|
2703 | 2777 | revlogv2 = NULL; |
|
2704 | 2778 | if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|O", kwlist, |
@@ -2715,20 +2789,16 b' static int index_init(indexObject *self,' | |||
|
2715 | 2789 | } |
|
2716 | 2790 | |
|
2717 | 2791 | if (revlogv2 && PyObject_IsTrue(revlogv2)) { |
|
2718 | self->hdrsize = v2_hdrsize; | |
|
2792 | self->format_version = format_v2; | |
|
2793 | self->entry_size = v2_entry_size; | |
|
2719 | 2794 | } else { |
|
2720 | self->hdrsize = v1_hdrsize; | |
|
2795 | self->format_version = format_v1; | |
|
2796 | self->entry_size = v1_entry_size; | |
|
2721 | 2797 | } |
|
2722 | 2798 | |
|
2723 | if (self->hdrsize == v1_hdrsize) { | |
|
2724 | self->nullentry = | |
|
2725 | Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0, 0, -1, | |
|
2726 | -1, -1, -1, nullid, self->nodelen); | |
|
2727 | } else { | |
|
2728 | self->nullentry = | |
|
2729 | Py_BuildValue(PY23("iiiiiiis#ii", "iiiiiiiy#ii"), 0, 0, 0, | |
|
2730 | -1, -1, -1, -1, nullid, self->nodelen, 0, 0); | |
|
2731 | } | |
|
2799 | self->nullentry = Py_BuildValue( | |
|
2800 | PY23("iiiiiiis#iiBB", "iiiiiiiy#iiBB"), 0, 0, 0, -1, -1, -1, -1, | |
|
2801 | nullid, self->nodelen, 0, 0, comp_mode_inline, comp_mode_inline); | |
|
2732 | 2802 | |
|
2733 | 2803 | if (!self->nullentry) |
|
2734 | 2804 | return -1; |
@@ -2751,11 +2821,11 b' static int index_init(indexObject *self,' | |||
|
2751 | 2821 | goto bail; |
|
2752 | 2822 | self->length = len; |
|
2753 | 2823 | } else { |
|
2754 | if (size % self->hdrsize) { |
|
2824 | if (size % self->entry_size) { | |
|
2755 | 2825 | PyErr_SetString(PyExc_ValueError, "corrupt index file"); |
|
2756 | 2826 | goto bail; |
|
2757 | 2827 | } |
|
2758 | self->length = size / self->hdrsize; |
|
2828 | self->length = size / self->entry_size; | |
|
2759 | 2829 | } |
|
2760 | 2830 | |
|
2761 | 2831 | return 0; |
@@ -2860,6 +2930,10 b' static PyMethodDef index_methods[] = {' | |||
|
2860 | 2930 | {"shortest", (PyCFunction)index_shortest, METH_VARARGS, |
|
2861 | 2931 | "find length of shortest hex nodeid of a binary ID"}, |
|
2862 | 2932 | {"stats", (PyCFunction)index_stats, METH_NOARGS, "stats for the index"}, |
|
2933 | {"entry_binary", (PyCFunction)index_entry_binary, METH_O, | |
|
2934 | "return an entry in binary form"}, | |
|
2935 | {"pack_header", (PyCFunction)index_pack_header, METH_VARARGS, | |
|
2936 | "pack the revlog header information into binary"}, | |
|
2863 | 2937 | {NULL} /* Sentinel */ |
|
2864 | 2938 | }; |
|
2865 | 2939 | |
@@ -2869,7 +2943,9 b' static PyGetSetDef index_getset[] = {' | |||
|
2869 | 2943 | }; |
|
2870 | 2944 | |
|
2871 | 2945 | static PyMemberDef index_members[] = { |
|
2872 | {"entry_size", T_LONG, offsetof(indexObject, hdrsize), 0, |
|
2946 | {"entry_size", T_LONG, offsetof(indexObject, entry_size), 0, | |
|
2947 | "size of an index entry"}, | |
|
2948 | {"rust_ext_compat", T_LONG, offsetof(indexObject, rust_ext_compat), 0, | |
|
2873 | 2949 | "size of an index entry"}, |
|
2874 | 2950 | {NULL} /* Sentinel */ |
|
2875 | 2951 | }; |
@@ -28,11 +28,11 b' typedef struct {' | |||
|
28 | 28 | int mode; |
|
29 | 29 | int size; |
|
30 | 30 | int mtime; |
|
31 | } dirstateTupleObject; |
|
31 | } dirstateItemObject; | |
|
32 | 32 | /* clang-format on */ |
|
33 | 33 | |
|
34 | extern PyTypeObject dirstateTupleType; |

35 | #define dirstate_tuple_check(op) (Py_TYPE(op) == &dirstateTupleType) |

34 | extern PyTypeObject dirstateItemType; | |
|
35 | #define dirstate_tuple_check(op) (Py_TYPE(op) == &dirstateItemType) | |
|
36 | 36 | |
|
37 | 37 | #ifndef MIN |
|
38 | 38 | #define MIN(a, b) (((a) < (b)) ? (a) : (b)) |
@@ -7,7 +7,6 b'' | |||
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | import collections | |
|
11 | 10 | import os |
|
12 | 11 | import struct |
|
13 | 12 | import weakref |
@@ -15,7 +14,6 b' import weakref' | |||
|
15 | 14 | from .i18n import _ |
|
16 | 15 | from .node import ( |
|
17 | 16 | hex, |
|
18 | nullid, | |
|
19 | 17 | nullrev, |
|
20 | 18 | short, |
|
21 | 19 | ) |
@@ -34,10 +32,13 b' from . import (' | |||
|
34 | 32 | |
|
35 | 33 | from .interfaces import repository |
|
36 | 34 | from .revlogutils import sidedata as sidedatamod |
|
35 | from .revlogutils import constants as revlog_constants | |
|
36 | from .utils import storageutil | |
|
37 | 37 | |
|
38 | 38 | _CHANGEGROUPV1_DELTA_HEADER = struct.Struct(b"20s20s20s20s") |
|
39 | 39 | _CHANGEGROUPV2_DELTA_HEADER = struct.Struct(b"20s20s20s20s20s") |
|
40 | 40 | _CHANGEGROUPV3_DELTA_HEADER = struct.Struct(b">20s20s20s20s20sH") |
|
41 | _CHANGEGROUPV4_DELTA_HEADER = struct.Struct(b">B20s20s20s20s20sH") | |
|
41 | 42 | |
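The new v4 delta header prepends one byte of protocol flags to the five binary nodeids and the two-byte storage flags. A minimal standalone sketch of what that struct format implies (field order follows the unpacking in cg4unpacker._deltaheader below; the dummy node values are illustrative only):

    import struct

    # Same format string as _CHANGEGROUPV4_DELTA_HEADER:
    #   >    big-endian
    #   B    protocol_flags (e.g. "a sidedata chunk follows")
    #   20s  node, p1node, p2node, basenode, linknode (binary nodeids)
    #   H    storage flags
    header = struct.Struct(b">B20s20s20s20s20sH")
    assert header.size == 1 + 5 * 20 + 2  # 103 bytes per delta header

    packed = header.pack(1, b"n" * 20, b"1" * 20, b"2" * 20, b"b" * 20, b"l" * 20, 0)
    protocol_flags, node, p1, p2, deltabase, linknode, flags = header.unpack(packed)
    assert protocol_flags == 1 and deltabase == b"b" * 20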
|
42 | 43 | LFS_REQUIREMENT = b'lfs' |
|
43 | 44 | |
@@ -194,19 +195,20 b' class cg1unpacker(object):' | |||
|
194 | 195 | else: |
|
195 | 196 | deltabase = prevnode |
|
196 | 197 | flags = 0 |
|
197 | return node, p1, p2, deltabase, cs, flags | |
|
198 | protocol_flags = 0 | |
|
199 | return node, p1, p2, deltabase, cs, flags, protocol_flags | |
|
198 | 200 | |
|
199 | 201 | def deltachunk(self, prevnode): |
|
202 | # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags, sidedata, proto_flags) | |
|
200 | 203 | l = self._chunklength() |
|
201 | 204 | if not l: |
|
202 | 205 | return {} |
|
203 | 206 | headerdata = readexactly(self._stream, self.deltaheadersize) |
|
204 | 207 | header = self.deltaheader.unpack(headerdata) |
|
205 | 208 | delta = readexactly(self._stream, l - self.deltaheadersize) |
|
206 | node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode) |

207 | # cg4 forward-compat | |
|
208 | sidedata = {} | |
|
209 | return (node, p1, p2, cs, deltabase, delta, flags, sidedata) | |
|
209 | header = self._deltaheader(header, prevnode) | |
|
210 | node, p1, p2, deltabase, cs, flags, protocol_flags = header | |
|
211 | return node, p1, p2, cs, deltabase, delta, flags, {}, protocol_flags | |
|
210 | 212 | |
|
211 | 213 | def getchunks(self): |
|
212 | 214 | """returns all the chunks contains in the bundle |
@@ -293,8 +295,16 b' class cg1unpacker(object):' | |||
|
293 | 295 | |
|
294 | 296 | # Only useful if we're adding sidedata categories. If both peers have |
|
295 | 297 | # the same categories, then we simply don't do anything. |
|
296 | if self.version == b'04' and srctype == b'pull': | |
|
297 | sidedata_helpers = get_sidedata_helpers( | |
|
298 | adding_sidedata = ( | |
|
299 | ( | |
|
300 | requirements.REVLOGV2_REQUIREMENT in repo.requirements | |
|
301 | or requirements.CHANGELOGV2_REQUIREMENT in repo.requirements | |
|
302 | ) | |
|
303 | and self.version == b'04' | |
|
304 | and srctype == b'pull' | |
|
305 | ) | |
|
306 | if adding_sidedata: | |
|
307 | sidedata_helpers = sidedatamod.get_sidedata_helpers( | |
|
298 | 308 | repo, |
|
299 | 309 | sidedata_categories or set(), |
|
300 | 310 | pull=True, |
@@ -386,15 +396,16 b' class cg1unpacker(object):' | |||
|
386 | 396 | _(b'manifests'), unit=_(b'chunks'), total=changesets |
|
387 | 397 | ) |
|
388 | 398 | on_manifest_rev = None |
|
389 | if sidedata_helpers and b'manifest' in sidedata_helpers[1]: |

399 | if sidedata_helpers: | |
|
400 | if revlog_constants.KIND_MANIFESTLOG in sidedata_helpers[1]: | |
|
390 | 401 | |
|
391 | def on_manifest_rev(manifest, rev): | |
|
392 | range = touched_manifests.get(manifest) | |
|
393 | if not range: | |
|
394 | touched_manifests[manifest] = (rev, rev) | |
|
395 | else: | |
|
396 | assert rev == range[1] + 1 | |
|
397 | touched_manifests[manifest] = (range[0], rev) | |
|
402 | def on_manifest_rev(manifest, rev): | |
|
403 | range = touched_manifests.get(manifest) | |
|
404 | if not range: | |
|
405 | touched_manifests[manifest] = (rev, rev) | |
|
406 | else: | |
|
407 | assert rev == range[1] + 1 | |
|
408 | touched_manifests[manifest] = (range[0], rev) | |
|
398 | 409 | |
|
399 | 410 | self._unpackmanifests( |
|
400 | 411 | repo, |
@@ -417,15 +428,16 b' class cg1unpacker(object):' | |||
|
417 | 428 | needfiles.setdefault(f, set()).add(n) |
|
418 | 429 | |
|
419 | 430 | on_filelog_rev = None |
|
420 | if sidedata_helpers and b'filelog' in sidedata_helpers[1]: |

431 | if sidedata_helpers: | |
|
432 | if revlog_constants.KIND_FILELOG in sidedata_helpers[1]: | |
|
421 | 433 | |
|
422 | def on_filelog_rev(filelog, rev): | |
|
423 | range = touched_filelogs.get(filelog) | |
|
424 | if not range: | |
|
425 | touched_filelogs[filelog] = (rev, rev) | |
|
426 | else: | |
|
427 | assert rev == range[1] + 1 | |
|
428 | touched_filelogs[filelog] = (range[0], rev) | |
|
434 | def on_filelog_rev(filelog, rev): | |
|
435 | range = touched_filelogs.get(filelog) | |
|
436 | if not range: | |
|
437 | touched_filelogs[filelog] = (rev, rev) | |
|
438 | else: | |
|
439 | assert rev == range[1] + 1 | |
|
440 | touched_filelogs[filelog] = (range[0], rev) | |
|
429 | 441 | |
|
430 | 442 | # process the files |
|
431 | 443 | repo.ui.status(_(b"adding file changes\n")) |
@@ -440,12 +452,14 b' class cg1unpacker(object):' | |||
|
440 | 452 | ) |
|
441 | 453 | |
|
442 | 454 | if sidedata_helpers: |
|
443 | if b'changelog' in sidedata_helpers[1]: |

444 | cl.rewrite_sidedata(sidedata_helpers, clstart, clend - 1) |

455 | if revlog_constants.KIND_CHANGELOG in sidedata_helpers[1]: | |
|
456 | cl.rewrite_sidedata( | |
|
457 | trp, sidedata_helpers, clstart, clend - 1 | |
|
458 | ) | |
|
445 | 459 | for mf, (startrev, endrev) in touched_manifests.items(): |
|
446 | mf.rewrite_sidedata(sidedata_helpers, startrev, endrev) | |
|
460 | mf.rewrite_sidedata(trp, sidedata_helpers, startrev, endrev) | |
|
447 | 461 | for fl, (startrev, endrev) in touched_filelogs.items(): |
|
448 | fl.rewrite_sidedata(sidedata_helpers, startrev, endrev) | |
|
462 | fl.rewrite_sidedata(trp, sidedata_helpers, startrev, endrev) | |
|
449 | 463 | |
|
450 | 464 | # making sure the value exists |
|
451 | 465 | tr.changes.setdefault(b'changegroup-count-changesets', 0) |
@@ -570,8 +584,8 b' class cg1unpacker(object):' | |||
|
570 | 584 | """ |
|
571 | 585 | chain = None |
|
572 | 586 | for chunkdata in iter(lambda: self.deltachunk(chain), {}): |
|
573 | # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags, sidedata) | |
|
574 | yield chunkdata | |
|
587 | # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags, sidedata, proto_flags) | |
|
588 | yield chunkdata[:8] | |
|
575 | 589 | chain = chunkdata[0] |
|
576 | 590 | |
|
577 | 591 | |
@@ -590,7 +604,8 b' class cg2unpacker(cg1unpacker):' | |||
|
590 | 604 | def _deltaheader(self, headertuple, prevnode): |
|
591 | 605 | node, p1, p2, deltabase, cs = headertuple |
|
592 | 606 | flags = 0 |
|
593 | return node, p1, p2, deltabase, cs, flags | |
|
607 | protocol_flags = 0 | |
|
608 | return node, p1, p2, deltabase, cs, flags, protocol_flags | |
|
594 | 609 | |
|
595 | 610 | |
|
596 | 611 | class cg3unpacker(cg2unpacker): |
@@ -608,7 +623,8 b' class cg3unpacker(cg2unpacker):' | |||
|
608 | 623 | |
|
609 | 624 | def _deltaheader(self, headertuple, prevnode): |
|
610 | 625 | node, p1, p2, deltabase, cs, flags = headertuple |
|
611 | return node, p1, p2, deltabase, cs, flags | |
|
626 | protocol_flags = 0 | |
|
627 | return node, p1, p2, deltabase, cs, flags, protocol_flags | |
|
612 | 628 | |
|
613 | 629 | def _unpackmanifests(self, repo, revmap, trp, prog, addrevisioncb=None): |
|
614 | 630 | super(cg3unpacker, self)._unpackmanifests( |
@@ -631,21 +647,48 b' class cg4unpacker(cg3unpacker):' | |||
|
631 | 647 | cg4 streams add support for exchanging sidedata. |
|
632 | 648 | """ |
|
633 | 649 | |
|
650 | deltaheader = _CHANGEGROUPV4_DELTA_HEADER | |
|
651 | deltaheadersize = deltaheader.size | |
|
634 | 652 | version = b'04' |
|
635 | 653 | |
|
654 | def _deltaheader(self, headertuple, prevnode): | |
|
655 | protocol_flags, node, p1, p2, deltabase, cs, flags = headertuple | |
|
656 | return node, p1, p2, deltabase, cs, flags, protocol_flags | |
|
657 | ||
|
636 | 658 | def deltachunk(self, prevnode): |
|
637 | 659 | res = super(cg4unpacker, self).deltachunk(prevnode) |
|
638 | 660 | if not res: |
|
639 | 661 | return res |
|
640 | 662 | |
|
641 | (node, p1, p2, cs, deltabase, delta, flags, _sidedata) = res | |
|
663 | ( | |
|
664 | node, | |
|
665 | p1, | |
|
666 | p2, | |
|
667 | cs, | |
|
668 | deltabase, | |
|
669 | delta, | |
|
670 | flags, | |
|
671 | sidedata, | |
|
672 | protocol_flags, | |
|
673 | ) = res | |
|
674 | assert not sidedata | |
|
642 | 675 | |
|
643 | sidedata_raw = getchunk(self._stream) | |
|
644 | 676 | sidedata = {} |
|
645 | if len(sidedata_raw) > 0: | |
|
677 | if protocol_flags & storageutil.CG_FLAG_SIDEDATA: | |
|
678 | sidedata_raw = getchunk(self._stream) | |
|
646 | 679 | sidedata = sidedatamod.deserialize_sidedata(sidedata_raw) |
|
647 | 680 | |
|
648 | return node, p1, p2, cs, deltabase, delta, flags, sidedata | |
|
681 | return ( | |
|
682 | node, | |
|
683 | p1, | |
|
684 | p2, | |
|
685 | cs, | |
|
686 | deltabase, | |
|
687 | delta, | |
|
688 | flags, | |
|
689 | sidedata, | |
|
690 | protocol_flags, | |
|
691 | ) | |
|
649 | 692 | |
|
650 | 693 | |
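The change above makes cg4 read a sidedata chunk only when the per-delta protocol flags announce one, instead of always reading a possibly empty chunk. A hedged, self-contained sketch of that pattern (the flag value and the chunk framing mirror what the surrounding code suggests, but they are stand-ins, not Mercurial's real constants):

    import io
    import struct

    CG_FLAG_SIDEDATA = 1  # assumed flag value, for illustration only

    def read_chunk(stream):
        # changegroup chunks are length-prefixed; the 4-byte big-endian size
        # counts the prefix itself, and a zero size terminates a group
        size = struct.unpack(">l", stream.read(4))[0]
        return stream.read(size - 4) if size else b""

    def read_optional_sidedata(stream, protocol_flags):
        # only consume another chunk when the sender said one is present
        if protocol_flags & CG_FLAG_SIDEDATA:
            return read_chunk(stream)
        return b""

    stream = io.BytesIO(struct.pack(">l", 4 + 3) + b"abc")
    assert read_optional_sidedata(stream, CG_FLAG_SIDEDATA) == b"abc"
    assert read_optional_sidedata(stream, 0) == b""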
|
651 | 694 | class headerlessfixup(object): |
@@ -673,7 +716,7 b' def _revisiondeltatochunks(repo, delta, ' | |||
|
673 | 716 | |
|
674 | 717 | if delta.delta is not None: |
|
675 | 718 | prefix, data = b'', delta.delta |
|
676 | elif delta.basenode == nullid: | |
|
719 | elif delta.basenode == repo.nullid: | |
|
677 | 720 | data = delta.revision |
|
678 | 721 | prefix = mdiff.trivialdiffheader(len(data)) |
|
679 | 722 | else: |
@@ -688,10 +731,10 b' def _revisiondeltatochunks(repo, delta, ' | |||
|
688 | 731 | yield prefix |
|
689 | 732 | yield data |
|
690 | 733 | |
|
691 | sidedata = delta.sidedata | |
|
692 | if sidedata is not None: | |
|
734 | if delta.protocol_flags & storageutil.CG_FLAG_SIDEDATA: | |
|
693 | 735 | # Need a separate chunk for sidedata to be able to differentiate |
|
694 | 736 | # "raw delta" length and sidedata length |
|
737 | sidedata = delta.sidedata | |
|
695 | 738 | yield chunkheader(len(sidedata)) |
|
696 | 739 | yield sidedata |
|
697 | 740 | |
@@ -787,9 +830,15 b' def _resolvenarrowrevisioninfo(' | |||
|
787 | 830 | return i |
|
788 | 831 | # We failed to resolve a parent for this node, so |
|
789 | 832 | # we crash the changegroup construction. |
|
833 | if util.safehasattr(store, 'target'): | |
|
834 | target = store.display_id | |
|
835 | else: | |
|
836 | # some stores are not actually revlogs | 
|
837 | target = store._revlog.display_id | |
|
838 | ||
|
790 | 839 | raise error.Abort( |
|
791 | 840 | b"unable to resolve parent while packing '%s' %r" |
|
792 | b' for changeset %r' % (store.indexfile, rev, clrev) |

841 | b' for changeset %r' % (target, rev, clrev) | |
|
793 | 842 | ) |
|
794 | 843 | |
|
795 | 844 | return nullrev |
@@ -828,7 +877,8 b' def deltagroup(' | |||
|
828 | 877 | If topic is not None, progress detail will be generated using this |
|
829 | 878 | topic name (e.g. changesets, manifests, etc). |
|
830 | 879 | |
|
831 | See `storageutil.emitrevisions` for the doc on `sidedata_helpers`. | |
|
880 | See `revlogutil.sidedata.get_sidedata_helpers` for the doc on | |
|
881 | `sidedata_helpers`. | |
|
832 | 882 | """ |
|
833 | 883 | if not nodes: |
|
834 | 884 | return |
@@ -1056,7 +1106,9 b' class cgpacker(object):' | |||
|
1056 | 1106 | # TODO a better approach would be for the strip bundle to |
|
1057 | 1107 | # correctly advertise its sidedata categories directly. |
|
1058 | 1108 | remote_sidedata = repo._wanted_sidedata |
|
1059 | sidedata_helpers = get_sidedata_helpers(repo, remote_sidedata) |

1109 | sidedata_helpers = sidedatamod.get_sidedata_helpers( | |
|
1110 | repo, remote_sidedata | |
|
1111 | ) | |
|
1060 | 1112 | |
|
1061 | 1113 | clstate, deltas = self._generatechangelog( |
|
1062 | 1114 | cl, |
@@ -1194,7 +1246,8 b' class cgpacker(object):' | |||
|
1194 | 1246 | if generate is False, the state will be fully populated and no chunk |
|
1195 | 1247 | stream will be yielded |
|
1196 | 1248 | |
|
1197 | See `storageutil.emitrevisions` for the doc on `sidedata_helpers`. | |
|
1249 | See `revlogutil.sidedata.get_sidedata_helpers` for the doc on | |
|
1250 | `sidedata_helpers`. | |
|
1198 | 1251 | """ |
|
1199 | 1252 | clrevorder = {} |
|
1200 | 1253 | manifests = {} |
@@ -1299,7 +1352,8 b' class cgpacker(object):' | |||
|
1299 | 1352 | `source` is unused here, but is used by extensions like remotefilelog to |
|
1300 | 1353 | change what is sent based on pulls vs pushes, etc. 
|
1301 | 1354 | |
|
1302 | See `storageutil.emitrevisions` for the doc on `sidedata_helpers`. | |
|
1355 | See `revlogutil.sidedata.get_sidedata_helpers` for the doc on | |
|
1356 | `sidedata_helpers`. | |
|
1303 | 1357 | """ |
|
1304 | 1358 | repo = self._repo |
|
1305 | 1359 | mfl = repo.manifestlog |
@@ -1633,11 +1687,18 b' def _makecg4packer(' | |||
|
1633 | 1687 | fullnodes=None, |
|
1634 | 1688 | remote_sidedata=None, |
|
1635 | 1689 | ): |
|
1636 | # Same header func as cg3. Sidedata is in a separate chunk from the delta to |

1637 | # differentiate "raw delta" and sidedata. |

1638 | builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack( | |
|
1639 | d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags | |
|
1640 | ) | |
|
1690 | # Sidedata is in a separate chunk from the delta to differentiate | |
|
1691 | # "raw delta" and sidedata. | |
|
1692 | def builddeltaheader(d): | |
|
1693 | return _CHANGEGROUPV4_DELTA_HEADER.pack( | |
|
1694 | d.protocol_flags, | |
|
1695 | d.node, | |
|
1696 | d.p1node, | |
|
1697 | d.p2node, | |
|
1698 | d.basenode, | |
|
1699 | d.linknode, | |
|
1700 | d.flags, | |
|
1701 | ) | |
|
1641 | 1702 | |
|
1642 | 1703 | return cgpacker( |
|
1643 | 1704 | repo, |
@@ -1682,11 +1743,15 b' def allsupportedversions(repo):' | |||
|
1682 | 1743 | # |
|
1683 | 1744 | # (or even to push subset of history) |
|
1684 | 1745 | needv03 = True |
|
1685 | has_revlogv2 = requirements.REVLOGV2_REQUIREMENT in repo.requirements | |
|
1686 | if not has_revlogv2: | |
|
1687 | versions.discard(b'04') | |
|
1688 | 1746 | if not needv03: |
|
1689 | 1747 | versions.discard(b'03') |
|
1748 | want_v4 = ( | |
|
1749 | repo.ui.configbool(b'experimental', b'changegroup4') | |
|
1750 | or requirements.REVLOGV2_REQUIREMENT in repo.requirements | |
|
1751 | or requirements.CHANGELOGV2_REQUIREMENT in repo.requirements | |
|
1752 | ) | |
|
1753 | if not want_v4: | |
|
1754 | versions.discard(b'04') | |
|
1690 | 1755 | return versions |
|
1691 | 1756 | |
|
1692 | 1757 | |
@@ -1913,25 +1978,3 b' def _addchangegroupfiles(' | |||
|
1913 | 1978 | ) |
|
1914 | 1979 | |
|
1915 | 1980 | return revisions, files |
|
1916 | ||
|
1917 | ||
|
1918 | def get_sidedata_helpers(repo, remote_sd_categories, pull=False): | |
|
1919 | # Computers for computing sidedata on-the-fly | |
|
1920 | sd_computers = collections.defaultdict(list) | |
|
1921 | # Computers for categories to remove from sidedata | |
|
1922 | sd_removers = collections.defaultdict(list) | |
|
1923 | ||
|
1924 | to_generate = remote_sd_categories - repo._wanted_sidedata | |
|
1925 | to_remove = repo._wanted_sidedata - remote_sd_categories | |
|
1926 | if pull: | |
|
1927 | to_generate, to_remove = to_remove, to_generate | |
|
1928 | ||
|
1929 | for revlog_kind, computers in repo._sidedata_computers.items(): | |
|
1930 | for category, computer in computers.items(): | |
|
1931 | if category in to_generate: | |
|
1932 | sd_computers[revlog_kind].append(computer) | |
|
1933 | if category in to_remove: | |
|
1934 | sd_removers[revlog_kind].append(computer) | |
|
1935 | ||
|
1936 | sidedata_helpers = (repo, sd_computers, sd_removers) | |
|
1937 | return sidedata_helpers |
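As the new call sites earlier in this patch suggest, the helper removed here moves into mercurial.revlogutils.sidedata and is now reached as sidedatamod.get_sidedata_helpers(). For reference, a distilled sketch of how the returned triple is consumed (the KIND_* values are stand-ins, not Mercurial's actual constants):

    # sidedata_helpers is (repo, computers, removers); element [1] maps a
    # revlog kind to the callbacks that generate sidedata on the fly, which
    # is what the `... in sidedata_helpers[1]` tests above are checking.
    KIND_CHANGELOG, KIND_MANIFESTLOG, KIND_FILELOG = range(3)  # stand-ins

    def wants_generated_sidedata(sidedata_helpers, kind):
        if not sidedata_helpers:
            return False
        _repo, computers, _removers = sidedata_helpers
        return kind in computers

    helpers = (None, {KIND_FILELOG: [lambda revlog, rev: {}]}, {})
    assert wants_generated_sidedata(helpers, KIND_FILELOG)
    assert not wants_generated_sidedata(helpers, KIND_CHANGELOG)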
@@ -11,7 +11,6 b' from .i18n import _' | |||
|
11 | 11 | from .node import ( |
|
12 | 12 | bin, |
|
13 | 13 | hex, |
|
14 | nullid, | |
|
15 | 14 | ) |
|
16 | 15 | from .thirdparty import attr |
|
17 | 16 | |
@@ -26,7 +25,10 b' from .utils import (' | |||
|
26 | 25 | dateutil, |
|
27 | 26 | stringutil, |
|
28 | 27 | ) |
|
29 | from .revlogutils import flagutil |

28 | from .revlogutils import ( | |
|
29 | constants as revlog_constants, | |
|
30 | flagutil, | |
|
31 | ) | |
|
30 | 32 | |
|
31 | 33 | _defaultextra = {b'branch': b'default'} |
|
32 | 34 | |
@@ -221,7 +223,7 b' class changelogrevision(object):' | |||
|
221 | 223 | |
|
222 | 224 | def __new__(cls, cl, text, sidedata, cpsd): |
|
223 | 225 | if not text: |
|
224 | return _changelogrevision(extra=_defaultextra, manifest=nullid) | |
|
226 | return _changelogrevision(extra=_defaultextra, manifest=cl.nullid) | |
|
225 | 227 | |
|
226 | 228 | self = super(changelogrevision, cls).__new__(cls) |
|
227 | 229 | # We could return here and implement the following as an __init__. |
@@ -393,27 +395,22 b' class changelog(revlog.revlog):' | |||
|
393 | 395 | ``concurrencychecker`` will be passed to the revlog init function, see |
|
394 | 396 | the documentation there. |
|
395 | 397 | """ |
|
396 | if trypending and opener.exists(b'00changelog.i.a'): | |
|
397 | indexfile = b'00changelog.i.a' | |
|
398 | else: | |
|
399 | indexfile = b'00changelog.i' | |
|
400 | ||
|
401 | datafile = b'00changelog.d' | |
|
402 | 398 | revlog.revlog.__init__( |
|
403 | 399 | self, |
|
404 | 400 | opener, |
|
405 | indexfile, | |
|
406 | datafile=datafile, | |
|
401 | target=(revlog_constants.KIND_CHANGELOG, None), | |
|
402 | radix=b'00changelog', | |
|
407 | 403 | checkambig=True, |
|
408 | 404 | mmaplargeindex=True, |
|
409 | 405 | persistentnodemap=opener.options.get(b'persistent-nodemap', False), |
|
410 | 406 | concurrencychecker=concurrencychecker, |
|
407 | trypending=trypending, | |
|
411 | 408 | ) |
|
412 | 409 | |
|
413 | if self._initempty and (self.version & 0xFFFF == revlog.REVLOGV1): |

410 | if self._initempty and (self._format_version == revlog.REVLOGV1): | |
|
414 | 411 | # changelogs don't benefit from generaldelta. |
|
415 | 412 | |
|
416 | self.version &= ~revlog.FLAG_GENERALDELTA |

413 | self._format_flags &= ~revlog.FLAG_GENERALDELTA | |
|
417 | 414 | self._generaldelta = False |
|
418 | 415 | |
|
419 | 416 | # Delta chains for changelogs tend to be very small because entries |
@@ -428,7 +425,6 b' class changelog(revlog.revlog):' | |||
|
428 | 425 | self._filteredrevs = frozenset() |
|
429 | 426 | self._filteredrevs_hashcache = {} |
|
430 | 427 | self._copiesstorage = opener.options.get(b'copies-storage') |
|
431 | self.revlog_kind = b'changelog' | |
|
432 | 428 | |
|
433 | 429 | @property |
|
434 | 430 | def filteredrevs(self): |
@@ -441,20 +437,25 b' class changelog(revlog.revlog):' | |||
|
441 | 437 | self._filteredrevs = val |
|
442 | 438 | self._filteredrevs_hashcache = {} |
|
443 | 439 | |
|
440 | def _write_docket(self, tr): | |
|
441 | if not self._delayed: | |
|
442 | super(changelog, self)._write_docket(tr) | |
|
443 | ||
|
444 | 444 | def delayupdate(self, tr): |
|
445 | 445 | """delay visibility of index updates to other readers""" |
|
446 | ||
|
447 | if not self._delayed: | |
|
446 | if self._docket is None and not self._delayed: | |
|
448 | 447 | if len(self) == 0: |
|
449 | 448 | self._divert = True |
|
450 | if self._realopener.exists(self.indexfile + b'.a'): | |
|
451 | self._realopener.unlink(self.indexfile + b'.a') | |
|
452 | self.opener = _divertopener(self._realopener, self.indexfile) | |
|
449 | if self._realopener.exists(self._indexfile + b'.a'): | |
|
450 | self._realopener.unlink(self._indexfile + b'.a') | |
|
451 | self.opener = _divertopener(self._realopener, self._indexfile) | |
|
453 | 452 | else: |
|
454 | 453 | self._delaybuf = [] |
|
455 | 454 | self.opener = _delayopener( |
|
456 | self._realopener, self.indexfile, self._delaybuf | |
|
455 | self._realopener, self._indexfile, self._delaybuf | |
|
457 | 456 | ) |
|
457 | self._segmentfile.opener = self.opener | |
|
458 | self._segmentfile_sidedata.opener = self.opener | |
|
458 | 459 | self._delayed = True |
|
459 | 460 | tr.addpending(b'cl-%i' % id(self), self._writepending) |
|
460 | 461 | tr.addfinalize(b'cl-%i' % id(self), self._finalize) |
@@ -463,15 +464,19 b' class changelog(revlog.revlog):' | |||
|
463 | 464 | """finalize index updates""" |
|
464 | 465 | self._delayed = False |
|
465 | 466 | self.opener = self._realopener |
|
467 | self._segmentfile.opener = self.opener | |
|
468 | self._segmentfile_sidedata.opener = self.opener | |
|
466 | 469 | # move redirected index data back into place |
|
467 | if self._divert: |

470 | if self._docket is not None: | |
|
471 | self._write_docket(tr) | |
|
472 | elif self._divert: | |
|
468 | 473 | assert not self._delaybuf |
|
469 | tmpname = self.indexfile + b".a" | |
|
474 | tmpname = self._indexfile + b".a" | |
|
470 | 475 | nfile = self.opener.open(tmpname) |
|
471 | 476 | nfile.close() |
|
472 | self.opener.rename(tmpname, self.indexfile, checkambig=True) | |
|
477 | self.opener.rename(tmpname, self._indexfile, checkambig=True) | |
|
473 | 478 | elif self._delaybuf: |
|
474 | fp = self.opener(self.indexfile, b'a', checkambig=True) | |
|
479 | fp = self.opener(self._indexfile, b'a', checkambig=True) | |
|
475 | 480 | fp.write(b"".join(self._delaybuf)) |
|
476 | 481 | fp.close() |
|
477 | 482 | self._delaybuf = None |
@@ -482,10 +487,12 b' class changelog(revlog.revlog):' | |||
|
482 | 487 | def _writepending(self, tr): |
|
483 | 488 | """create a file containing the unfinalized state for |
|
484 | 489 | pretxnchangegroup""" |
|
490 | if self._docket: | |
|
491 | return self._docket.write(tr, pending=True) | |
|
485 | 492 | if self._delaybuf: |
|
486 | 493 | # make a temporary copy of the index |
|
487 | fp1 = self._realopener(self.indexfile) | |
|
488 | pendingfilename = self.indexfile + b".a" | |
|
494 | fp1 = self._realopener(self._indexfile) | |
|
495 | pendingfilename = self._indexfile + b".a" | |
|
489 | 496 | # register as a temp file to ensure cleanup on failure |
|
490 | 497 | tr.registertmp(pendingfilename) |
|
491 | 498 | # write existing data |
@@ -497,16 +504,18 b' class changelog(revlog.revlog):' | |||
|
497 | 504 | # switch modes so finalize can simply rename |
|
498 | 505 | self._delaybuf = None |
|
499 | 506 | self._divert = True |
|
500 | self.opener = _divertopener(self._realopener, self.indexfile) | |
|
507 | self.opener = _divertopener(self._realopener, self._indexfile) | |
|
508 | self._segmentfile.opener = self.opener | |
|
509 | self._segmentfile_sidedata.opener = self.opener | |
|
501 | 510 | |
|
502 | 511 | if self._divert: |
|
503 | 512 | return True |
|
504 | 513 | |
|
505 | 514 | return False |
|
506 | 515 | |
|
507 | def _enforceinlinesize(self, tr, fp=None): |

516 | def _enforceinlinesize(self, tr): | |
|
508 | 517 | if not self._delayed: |
|
509 | revlog.revlog._enforceinlinesize(self, tr, fp) |

518 | revlog.revlog._enforceinlinesize(self, tr) | |
|
510 | 519 | |
|
511 | 520 | def read(self, nodeorrev): |
|
512 | 521 | """Obtain data from a parsed changelog revision. |
@@ -524,15 +533,16 b' class changelog(revlog.revlog):' | |||
|
524 | 533 | ``changelogrevision`` instead, as it is faster for partial object |
|
525 | 534 | access. |
|
526 | 535 | """ |
|
527 | d, s = self._revisiondata(nodeorrev, True) |

528 | c = changelogrevision( | |
|
529 | self, d, s, self._copiesstorage == b'changeset-sidedata' |

530 | ) | |
|
536 | d = self._revisiondata(nodeorrev) | |
|
537 | sidedata = self.sidedata(nodeorrev) | |
|
538 | copy_sd = self._copiesstorage == b'changeset-sidedata' | |
|
539 | c = changelogrevision(self, d, sidedata, copy_sd) | |
|
531 | 540 | return (c.manifest, c.user, c.date, c.files, c.description, c.extra) |
|
532 | 541 | |
|
533 | 542 | def changelogrevision(self, nodeorrev): |
|
534 | 543 | """Obtain a ``changelogrevision`` for a node or revision.""" |
|
535 | text, sidedata = self._revisiondata(nodeorrev, True) |

544 | text = self._revisiondata(nodeorrev) | |
|
545 | sidedata = self.sidedata(nodeorrev) | |
|
536 | 546 | return changelogrevision( |
|
537 | 547 | self, text, sidedata, self._copiesstorage == b'changeset-sidedata' |
|
538 | 548 | ) |
@@ -320,7 +320,7 b' class channeledsystem(object):' | |||
|
320 | 320 | self.channel = channel |
|
321 | 321 | |
|
322 | 322 | def __call__(self, cmd, environ, cwd=None, type=b'system', cmdtable=None): |
|
323 | args = [type, cmd, os.path.abspath(cwd or b'.')] |

323 | args = [type, cmd, util.abspath(cwd or b'.')] | |
|
324 | 324 | args.extend(b'%s=%s' % (k, v) for k, v in pycompat.iteritems(environ)) |
|
325 | 325 | data = b'\0'.join(args) |
|
326 | 326 | self.out.write(struct.pack(b'>cI', self.channel, len(data))) |
@@ -515,11 +515,9 b' class chgcmdserver(commandserver.server)' | |||
|
515 | 515 | if inst.hint: |
|
516 | 516 | self.ui.error(_(b"(%s)\n") % inst.hint) |
|
517 | 517 | errorraised = True |
|
518 | except error.Abort as inst: |

519 | if isinstance(inst, error.InputError): | |
|
520 | detailed_exit_code = 10 |

521 | elif isinstance(inst, error.ConfigError): | |
|
522 | detailed_exit_code = 30 | |
|
518 | except error.Error as inst: | |
|
519 | if inst.detailed_exit_code is not None: | |
|
520 | detailed_exit_code = inst.detailed_exit_code | |
|
523 | 521 | self.ui.error(inst.format()) |
|
524 | 522 | errorraised = True |
|
525 | 523 |
@@ -15,7 +15,6 b' import re' | |||
|
15 | 15 | from .i18n import _ |
|
16 | 16 | from .node import ( |
|
17 | 17 | hex, |
|
18 | nullid, | |
|
19 | 18 | nullrev, |
|
20 | 19 | short, |
|
21 | 20 | ) |
@@ -62,6 +61,10 b' from .utils import (' | |||
|
62 | 61 | stringutil, |
|
63 | 62 | ) |
|
64 | 63 | |
|
64 | from .revlogutils import ( | |
|
65 | constants as revlog_constants, | |
|
66 | ) | |
|
67 | ||
|
65 | 68 | if pycompat.TYPE_CHECKING: |
|
66 | 69 | from typing import ( |
|
67 | 70 | Any, |
@@ -298,37 +301,37 b' def check_incompatible_arguments(opts, f' | |||
|
298 | 301 | check_at_most_one_arg(opts, first, other) |
|
299 | 302 | |
|
300 | 303 | |
|
301 | def resolvecommitoptions(ui, opts): | |
|
304 | def resolve_commit_options(ui, opts): | |
|
302 | 305 | """modify commit options dict to handle related options |
|
303 | 306 | |
|
304 | 307 | The return value indicates that ``rewrite.update-timestamp`` is the reason |
|
305 | 308 | the ``date`` option is set. |
|
306 | 309 | """ |
|
307 | check_at_most_one_arg(opts, b'date', b'currentdate') |

308 | check_at_most_one_arg(opts, b'user', b'currentuser') |

310 | check_at_most_one_arg(opts, 'date', 'currentdate') | |
|
311 | check_at_most_one_arg(opts, 'user', 'currentuser') | |
|
309 | 312 | |
|
310 | 313 | datemaydiffer = False # date-only change should be ignored? |
|
311 | 314 | |
|
312 | if opts.get(b'currentdate'): |

313 | opts[b'date'] = b'%d %d' % dateutil.makedate() |

315 | if opts.get('currentdate'): | |
|
316 | opts['date'] = b'%d %d' % dateutil.makedate() | |
|
314 | 317 | elif ( |
|
315 | not opts.get(b'date') |

318 | not opts.get('date') | |
|
316 | 319 | and ui.configbool(b'rewrite', b'update-timestamp') |
|
317 | and opts.get(b'currentdate') is None |

320 | and opts.get('currentdate') is None | |
|
318 | 321 | ): |
|
319 | opts[b'date'] = b'%d %d' % dateutil.makedate() |

322 | opts['date'] = b'%d %d' % dateutil.makedate() | |
|
320 | 323 | datemaydiffer = True |
|
321 | 324 | |
|
322 | if opts.get(b'currentuser'): |

323 | opts[b'user'] = ui.username() |

325 | if opts.get('currentuser'): | |
|
326 | opts['user'] = ui.username() | |
|
324 | 327 | |
|
325 | 328 | return datemaydiffer |
|
326 | 329 | |
|
327 | 330 | |
|
328 | def checknotesize(opts): |

331 | def check_note_size(opts): | |
|
329 | 332 | """make sure note is of valid format""" |
|
330 | 333 | |
|
331 | note = opts.get(b'note') |

334 | note = opts.get('note') | |
|
332 | 335 | if not note: |
|
333 | 336 | return |
|
334 | 337 | |
@@ -343,19 +346,18 b' def ishunk(x):' | |||
|
343 | 346 | return isinstance(x, hunkclasses) |
|
344 | 347 | |
|
345 | 348 | |
|
346 | def newandmodified(chunks, originalchunks): | |
|
349 | def isheader(x): | |
|
350 | headerclasses = (crecordmod.uiheader, patch.header) | |
|
351 | return isinstance(x, headerclasses) | |
|
352 | ||
|
353 | ||
|
354 | def newandmodified(chunks): | |
|
347 | 355 | newlyaddedandmodifiedfiles = set() |
|
348 | 356 | alsorestore = set() |
|
349 | 357 | for chunk in chunks: |
|
350 | if ( | |
|
351 | ishunk(chunk) | |
|
352 | and chunk.header.isnewfile() | |
|
353 | and chunk not in originalchunks | |
|
354 | ): | |
|
355 | newlyaddedandmodifiedfiles.add(chunk.header.filename()) | |
|
356 | alsorestore.update( | |
|
357 | set(chunk.header.files()) - {chunk.header.filename()} | |
|
358 | ) | |
|
358 | if isheader(chunk) and chunk.isnewfile(): | |
|
359 | newlyaddedandmodifiedfiles.add(chunk.filename()) | |
|
360 | alsorestore.update(set(chunk.files()) - {chunk.filename()}) | |
|
359 | 361 | return newlyaddedandmodifiedfiles, alsorestore |
|
360 | 362 | |
|
361 | 363 | |
@@ -514,12 +516,12 b' def dorecord(' | |||
|
514 | 516 | diffopts.git = True |
|
515 | 517 | diffopts.showfunc = True |
|
516 | 518 | originaldiff = patch.diff(repo, changes=status, opts=diffopts) |
|
517 | originalchunks = patch.parsepatch(originaldiff) |

519 | original_headers = patch.parsepatch(originaldiff) | |
|
518 | 520 | match = scmutil.match(repo[None], pats) |
|
519 | 521 | |
|
520 | 522 | # 1. filter patch, since we are intending to apply subset of it |
|
521 | 523 | try: |
|
522 | chunks, newopts = filterfn(ui, originalchunks, match) |

524 | chunks, newopts = filterfn(ui, original_headers, match) | |
|
523 | 525 | except error.PatchError as err: |
|
524 | 526 | raise error.InputError(_(b'error parsing patch: %s') % err) |
|
525 | 527 | opts.update(newopts) |
@@ -529,15 +531,11 b' def dorecord(' | |||
|
529 | 531 | # version without the edit in the workdir. We also will need to restore |
|
530 | 532 | # files that were the sources of renames so that the patch application |
|
531 | 533 | # works. |
|
532 | newlyaddedandmodifiedfiles, alsorestore = newandmodified( | |
|
533 | chunks, originalchunks | |
|
534 | ) | |
|
534 | newlyaddedandmodifiedfiles, alsorestore = newandmodified(chunks) | |
|
535 | 535 | contenders = set() |
|
536 | 536 | for h in chunks: |
|
537 | try: |

537 | if isheader(h): | |
|
538 | 538 | contenders.update(set(h.files())) |
|
539 | except AttributeError: | |
|
540 | pass | |
|
541 | 539 | |
|
542 | 540 | changed = status.modified + status.added + status.removed |
|
543 | 541 | newfiles = [f for f in changed if f in contenders] |
@@ -632,7 +630,19 b' def dorecord(' | |||
|
632 | 630 | # without normallookup, restoring timestamp |
|
633 | 631 | # may cause partially committed files |
|
634 | 632 | # to be treated as unmodified |
|
635 | dirstate.normallookup(realname) | |
|
633 | ||
|
634 | # XXX-PENDINGCHANGE: We should clarify the context in | |
|
635 | # which this function is called to make sure it | |
|
636 | # is already called within a `pendingchange`. However, we |

637 | # are taking a shortcut here in order to be able to |

638 | # quickly deprecate the older API. |
|
639 | with dirstate.parentchange(): | |
|
640 | dirstate.update_file( | |
|
641 | realname, | |
|
642 | p1_tracked=True, | |
|
643 | wc_tracked=True, | |
|
644 | possibly_dirty=True, | |
|
645 | ) | |
|
636 | 646 | |
|
637 | 647 | # copystat=True here and above are a hack to trick any |
|
638 | 648 | # editors that have f open that we haven't modified them. |
@@ -998,11 +1008,6 b' def changebranch(ui, repo, revs, label, ' | |||
|
998 | 1008 | _(b"a branch of the same name already exists") |
|
999 | 1009 | ) |
|
1000 | 1010 | |
|
1001 | if repo.revs(b'obsolete() and %ld', revs): | |
|
1002 | raise error.InputError( | |
|
1003 | _(b"cannot change branch of a obsolete changeset") | |
|
1004 | ) | |
|
1005 | ||
|
1006 | 1011 | # make sure only topological heads |
|
1007 | 1012 | if repo.revs(b'heads(%ld) - head()', revs): |
|
1008 | 1013 | raise error.InputError( |
@@ -1097,7 +1102,7 b' def bailifchanged(repo, merge=True, hint' | |||
|
1097 | 1102 | 'hint' is the usual hint given to Abort exception. |
|
1098 | 1103 | """ |
|
1099 | 1104 | |
|
1100 | if merge and repo.dirstate.p2() != nullid: | |
|
1105 | if merge and repo.dirstate.p2() != repo.nullid: | |
|
1101 | 1106 | raise error.StateError(_(b'outstanding uncommitted merge'), hint=hint) |
|
1102 | 1107 | st = repo.status() |
|
1103 | 1108 | if st.modified or st.added or st.removed or st.deleted: |
@@ -1434,8 +1439,12 b' def openstorage(repo, cmd, file_, opts, ' | |||
|
1434 | 1439 | raise error.CommandError(cmd, _(b'invalid arguments')) |
|
1435 | 1440 | if not os.path.isfile(file_): |
|
1436 | 1441 | raise error.InputError(_(b"revlog '%s' not found") % file_) |
|
1442 | ||
|
1443 | target = (revlog_constants.KIND_OTHER, b'free-form:%s' % file_) | |
|
1437 | 1444 | r = revlog.revlog( |
|
1438 | vfsmod.vfs(encoding.getcwd(), audit=False), file_ |

1445 | vfsmod.vfs(encoding.getcwd(), audit=False), | |
|
1446 | target=target, | |
|
1447 | radix=file_[:-2], | |
|
1439 | 1448 | ) |
|
1440 | 1449 | return r |
|
1441 | 1450 | |
@@ -1849,7 +1858,10 b' def copy(ui, repo, pats, opts, rename=Fa' | |||
|
1849 | 1858 | continue |
|
1850 | 1859 | copylist.append((tfn(pat, dest, srcs), srcs)) |
|
1851 | 1860 | if not copylist: |
|
1852 | raise error.InputError(_(b'no files to copy')) | |
|
1861 | hint = None | |
|
1862 | if rename: | |
|
1863 | hint = _(b'maybe you meant to use --after --at-rev=.') | |
|
1864 | raise error.InputError(_(b'no files to copy'), hint=hint) | |
|
1853 | 1865 | |
|
1854 | 1866 | errors = 0 |
|
1855 | 1867 | for targetpath, srcs in copylist: |
@@ -2104,7 +2116,7 b' def _exportsingle(repo, ctx, fm, match, ' | |||
|
2104 | 2116 | if parents: |
|
2105 | 2117 | prev = parents[0] |
|
2106 | 2118 | else: |
|
2107 | prev = nullid | |
|
2119 | prev = repo.nullid | |
|
2108 | 2120 | |
|
2109 | 2121 | fm.context(ctx=ctx) |
|
2110 | 2122 | fm.plain(b'# HG changeset patch\n') |
@@ -2810,7 +2822,8 b' def amend(ui, repo, old, extra, pats, op' | |||
|
2810 | 2822 | extra.update(wctx.extra()) |
|
2811 | 2823 | |
|
2812 | 2824 | # date-only change should be ignored? |
|
2813 | datemaydiffer = resolvecommitoptions(ui, opts) | |
|
2825 | datemaydiffer = resolve_commit_options(ui, opts) | |
|
2826 | opts = pycompat.byteskwargs(opts) | |
|
2814 | 2827 | |
|
2815 | 2828 | date = old.date() |
|
2816 | 2829 | if opts.get(b'date'): |
@@ -2966,29 +2979,32 b' def amend(ui, repo, old, extra, pats, op' | |||
|
2966 | 2979 | newid = repo.commitctx(new) |
|
2967 | 2980 | ms.reset() |
|
2968 | 2981 | |
|
2969 | # Reroute the working copy parent to the new changeset | |
|
2970 | repo.setparents(newid, nullid) | |
|
2971 | ||
|
2972 | # Fixing the dirstate because localrepo.commitctx does not update | |
|
2973 | # it. This is rather convenient because we did not need to update | |
|
2974 | # the dirstate for all the files in the new commit which commitctx | |
|
2975 | # could have done if it updated the dirstate. Now, we can | |
|
2976 | # selectively update the dirstate only for the amended files. | |
|
2977 | dirstate = repo.dirstate | |
|
2978 | ||
|
2979 | # Update the state of the files which were added and modified in the | |
|
2980 | # amend to "normal" in the dirstate. We need to use "normallookup" since | |
|
2981 | # the files may have changed since the command started; using "normal" | |
|
2982 | # would mark them as clean but with uncommitted contents. | |
|
2983 | normalfiles = set(wctx.modified() + wctx.added()) & filestoamend | |
|
2984 | for f in normalfiles: | |
|
2985 | dirstate.normallookup(f) | |
|
2986 | ||
|
2987 | # Update the state of files which were removed in the amend | |
|
2988 | # to "removed" in the dirstate. | |
|
2989 | removedfiles = set(wctx.removed()) & filestoamend | |
|
2990 | for f in removedfiles: | |
|
2991 | dirstate.drop(f) | |
|
2982 | with repo.dirstate.parentchange(): | |
|
2983 | # Reroute the working copy parent to the new changeset | |
|
2984 | repo.setparents(newid, repo.nullid) | |
|
2985 | ||
|
2986 | # Fixing the dirstate because localrepo.commitctx does not update | |
|
2987 | # it. This is rather convenient because we did not need to update | |
|
2988 | # the dirstate for all the files in the new commit which commitctx | |
|
2989 | # could have done if it updated the dirstate. Now, we can | |
|
2990 | # selectively update the dirstate only for the amended files. | |
|
2991 | dirstate = repo.dirstate | |
|
2992 | ||
|
2993 | # Update the state of the files which were added and modified in the | |
|
2994 | # amend to "normal" in the dirstate. We need to use "normallookup" since | |
|
2995 | # the files may have changed since the command started; using "normal" | |
|
2996 | # would mark them as clean but with uncommitted contents. | |
|
2997 | normalfiles = set(wctx.modified() + wctx.added()) & filestoamend | |
|
2998 | for f in normalfiles: | |
|
2999 | dirstate.update_file( | |
|
3000 | f, p1_tracked=True, wc_tracked=True, possibly_dirty=True | |
|
3001 | ) | |
|
3002 | ||
|
3003 | # Update the state of files which were removed in the amend | |
|
3004 | # to "removed" in the dirstate. | |
|
3005 | removedfiles = set(wctx.removed()) & filestoamend | |
|
3006 | for f in removedfiles: | |
|
3007 | dirstate.update_file(f, p1_tracked=False, wc_tracked=False) | |
|
2992 | 3008 | |
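This hunk is one instance of a migration that recurs through the patch: normallookup()/drop() calls become explicit update_file() calls issued inside a parentchange() context. A hedged sketch of the correspondence, using a dummy dirstate so it stands alone (the real signatures live in mercurial.dirstate):

    import contextlib

    class FakeDirstate(object):
        # Call-recording stand-in; not Mercurial's real dirstate.

        def __init__(self):
            self.calls = []

        @contextlib.contextmanager
        def parentchange(self):
            # the new API expects mutations to happen inside this context
            yield

        def update_file(self, f, p1_tracked, wc_tracked, possibly_dirty=False):
            self.calls.append((f, p1_tracked, wc_tracked, possibly_dirty))

    ds = FakeDirstate()
    with ds.parentchange():
        # formerly dirstate.normallookup(f): tracked, contents possibly dirty
        ds.update_file(b'modified', p1_tracked=True, wc_tracked=True,
                       possibly_dirty=True)
        # formerly dirstate.drop(f): gone from both parent and working copy
        ds.update_file(b'removed', p1_tracked=False, wc_tracked=False)
    assert len(ds.calls) == 2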
|
2993 | 3009 | mapping = {old.node(): (newid,)} |
|
2994 | 3010 | obsmetadata = None |
@@ -3322,7 +3338,7 b' def revert(ui, repo, ctx, *pats, **opts)' | |||
|
3322 | 3338 | |
|
3323 | 3339 | # in case of merge, files that are actually added can be reported as |
|
3324 | 3340 | # modified, we need to post process the result |
|
3325 | if p2 != nullid: | |
|
3341 | if p2 != repo.nullid: | |
|
3326 | 3342 | mergeadd = set(dsmodified) |
|
3327 | 3343 | for path in dsmodified: |
|
3328 | 3344 | if path in mf: |
@@ -3548,7 +3564,7 b' def _performrevert(' | |||
|
3548 | 3564 | repo.wvfs.unlinkpath(f, rmdir=rmdir) |
|
3549 | 3565 | except OSError: |
|
3550 | 3566 | pass |
|
3551 | repo.dirstate.remove(f) |

3567 | repo.dirstate.set_untracked(f) | |
|
3552 | 3568 | |
|
3553 | 3569 | def prntstatusmsg(action, f): |
|
3554 | 3570 | exact = names[f] |
@@ -3563,12 +3579,12 b' def _performrevert(' | |||
|
3563 | 3579 | ) |
|
3564 | 3580 | if choice == 0: |
|
3565 | 3581 | prntstatusmsg(b'forget', f) |
|
3566 | repo.dirstate.drop(f) |

3582 | repo.dirstate.set_untracked(f) | |
|
3567 | 3583 | else: |
|
3568 | 3584 | excluded_files.append(f) |
|
3569 | 3585 | else: |
|
3570 | 3586 | prntstatusmsg(b'forget', f) |
|
3571 | repo.dirstate.drop(f) |

3587 | repo.dirstate.set_untracked(f) | |
|
3572 | 3588 | for f in actions[b'remove'][0]: |
|
3573 | 3589 | audit_path(f) |
|
3574 | 3590 | if interactive: |
@@ -3586,17 +3602,17 b' def _performrevert(' | |||
|
3586 | 3602 | for f in actions[b'drop'][0]: |
|
3587 | 3603 | audit_path(f) |
|
3588 | 3604 | prntstatusmsg(b'drop', f) |
|
3589 | repo.dirstate.remove(f) |

3605 | repo.dirstate.set_untracked(f) | |
|
3590 | 3606 | |
|
3591 | 3607 | normal = None |
|
3592 | 3608 | if node == parent: |
|
3593 | 3609 | # We're reverting to our parent. If possible, we'd like status |
|
3594 | 3610 | # to report the file as clean. We have to use normallookup for |
|
3595 | 3611 | # merges to avoid losing information about merged/dirty files. |
|
3596 | if p2 != nullid: | |
|
3597 | normal = repo.dirstate.normallookup |

3612 | if p2 != repo.nullid: | |
|
3613 | normal = repo.dirstate.set_tracked | |
|
3598 | 3614 | else: |
|
3599 | normal = repo.dirstate.normal |

3615 | normal = repo.dirstate.set_clean | |
|
3600 | 3616 | |
|
3601 | 3617 | newlyaddedandmodifiedfiles = set() |
|
3602 | 3618 | if interactive: |
@@ -3624,12 +3640,12 b' def _performrevert(' | |||
|
3624 | 3640 | diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts) |
|
3625 | 3641 | else: |
|
3626 | 3642 | diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts) |
|
3627 | originalchunks = patch.parsepatch(diff) |

3643 | original_headers = patch.parsepatch(diff) | |
|
3628 | 3644 | |
|
3629 | 3645 | try: |
|
3630 | 3646 | |
|
3631 | 3647 | chunks, opts = recordfilter( |
|
3632 | repo.ui, originalchunks, match, operation=operation |

3648 | repo.ui, original_headers, match, operation=operation | |
|
3633 | 3649 | ) |
|
3634 | 3650 | if operation == b'discard': |
|
3635 | 3651 | chunks = patch.reversehunks(chunks) |
@@ -3642,9 +3658,7 b' def _performrevert(' | |||
|
3642 | 3658 | # "remove added file <name> (Yn)?", so we don't need to worry about the |
|
3643 | 3659 | # alsorestore value. Ideally we'd be able to partially revert |
|
3644 | 3660 | # copied/renamed files. |
|
3645 | newlyaddedandmodifiedfiles, unusedalsorestore = newandmodified( | |
|
3646 | chunks, originalchunks | |
|
3647 | ) | |
|
3661 | newlyaddedandmodifiedfiles, unusedalsorestore = newandmodified(chunks) | |
|
3648 | 3662 | if tobackup is None: |
|
3649 | 3663 | tobackup = set() |
|
3650 | 3664 | # Apply changes |
@@ -3687,11 +3701,11 b' def _performrevert(' | |||
|
3687 | 3701 | if f not in newlyaddedandmodifiedfiles: |
|
3688 | 3702 | prntstatusmsg(b'add', f) |
|
3689 | 3703 | checkout(f) |
|
3690 | repo.dirstate.add(f) |

3691 | ||
|
3692 | normal = repo.dirstate.normallookup |

3693 | if node == parent and p2 == nullid: | |
|
3694 | normal = repo.dirstate.normal |

3704 | repo.dirstate.set_tracked(f) | |
|
3705 | ||
|
3706 | normal = repo.dirstate.set_tracked | |
|
3707 | if node == parent and p2 == repo.nullid: | |
|
3708 | normal = repo.dirstate.set_clean | |
|
3695 | 3709 | for f in actions[b'undelete'][0]: |
|
3696 | 3710 | if interactive: |
|
3697 | 3711 | choice = repo.ui.promptchoice( |
@@ -15,10 +15,8 b' import sys' | |||
|
15 | 15 | from .i18n import _ |
|
16 | 16 | from .node import ( |
|
17 | 17 | hex, |
|
18 | nullid, | |
|
19 | 18 | nullrev, |
|
20 | 19 | short, |
|
21 | wdirhex, | |
|
22 | 20 | wdirrev, |
|
23 | 21 | ) |
|
24 | 22 | from .pycompat import open |
@@ -486,7 +484,7 b' def annotate(ui, repo, *pats, **opts):' | |||
|
486 | 484 | return b'%d ' % rev |
|
487 | 485 | |
|
488 | 486 | def formathex(h): |
|
489 | if h == wdirhex: | |
|
487 | if h == repo.nodeconstants.wdirhex: | |
|
490 | 488 | return b'%s+' % shorthex(hex(ctx.p1().node())) |
|
491 | 489 | else: |
|
492 | 490 | return b'%s ' % shorthex(h) |
@@ -809,9 +807,9 b' def _dobackout(ui, repo, node=None, rev=' | |||
|
809 | 807 | ) |
|
810 | 808 | |
|
811 | 809 | p1, p2 = repo.changelog.parents(node) |
|
812 | if p1 == nullid: | |
|
810 | if p1 == repo.nullid: | |
|
813 | 811 | raise error.InputError(_(b'cannot backout a change with no parents')) |
|
814 | if p2 != nullid: | |
|
812 | if p2 != repo.nullid: | |
|
815 | 813 | if not opts.get(b'parent'): |
|
816 | 814 | raise error.InputError(_(b'cannot backout a merge changeset')) |
|
817 | 815 | p = repo.lookup(opts[b'parent']) |
@@ -1085,7 +1083,7 b' def bisect(' | |||
|
1085 | 1083 | ) |
|
1086 | 1084 | else: |
|
1087 | 1085 | node, p2 = repo.dirstate.parents() |
|
1088 | if p2 != nullid: | |
|
1086 | if p2 != repo.nullid: | |
|
1089 | 1087 | raise error.StateError(_(b'current bisect revision is a merge')) |
|
1090 | 1088 | if rev: |
|
1091 | 1089 | if not nodes: |
@@ -2079,9 +2077,8 b' def _docommit(ui, repo, *pats, **opts):' | |||
|
2079 | 2077 | # commit(), 1 if nothing changed or None on success. |
|
2080 | 2078 | return 1 if ret == 0 else ret |
|
2081 | 2079 | |
|
2082 | opts = pycompat.byteskwargs(opts) | |
|
2083 | if opts.get(b'subrepos'): | |
|
2084 | cmdutil.check_incompatible_arguments(opts, b'subrepos', [b'amend']) | |
|
2080 | if opts.get('subrepos'): | |
|
2081 | cmdutil.check_incompatible_arguments(opts, 'subrepos', ['amend']) | |
|
2085 | 2082 | # Let --subrepos on the command line override config setting. |
|
2086 | 2083 | ui.setconfig(b'ui', b'commitsubrepos', True, b'commit') |
|
2087 | 2084 | |
@@ -2092,7 +2089,7 b' def _docommit(ui, repo, *pats, **opts):' | |||
|
2092 | 2089 | tip = repo.changelog.tip() |
|
2093 | 2090 | |
|
2094 | 2091 | extra = {} |
|
2095 | if opts.get(b'close_branch') or opts.get(b'force_close_branch'): |

2092 | if opts.get('close_branch') or opts.get('force_close_branch'): | |
|
2096 | 2093 | extra[b'close'] = b'1' |
|
2097 | 2094 | |
|
2098 | 2095 | if repo[b'.'].closesbranch(): |
@@ -2106,21 +2103,21 b' def _docommit(ui, repo, *pats, **opts):' | |||
|
2106 | 2103 | elif ( |
|
2107 | 2104 | branch == repo[b'.'].branch() |
|
2108 | 2105 | and repo[b'.'].node() not in bheads |
|
2109 | and not opts.get(b'force_close_branch') |

2106 | and not opts.get('force_close_branch') | |
|
2110 | 2107 | ): |
|
2111 | 2108 | hint = _( |
|
2112 | 2109 | b'use --force-close-branch to close branch from a non-head' |
|
2113 | 2110 | b' changeset' |
|
2114 | 2111 | ) |
|
2115 | 2112 | raise error.InputError(_(b'can only close branch heads'), hint=hint) |
|
2116 | elif opts.get(b'amend'): |

2113 | elif opts.get('amend'): | |
|
2117 | 2114 | if ( |
|
2118 | 2115 | repo[b'.'].p1().branch() != branch |
|
2119 | 2116 | and repo[b'.'].p2().branch() != branch |
|
2120 | 2117 | ): |
|
2121 | 2118 | raise error.InputError(_(b'can only close branch heads')) |
|
2122 | 2119 | |
|
2123 | if opts.get(b'amend'): |

2120 | if opts.get('amend'): | |
|
2124 | 2121 | if ui.configbool(b'ui', b'commitsubrepos'): |
|
2125 | 2122 | raise error.InputError( |
|
2126 | 2123 | _(b'cannot amend with ui.commitsubrepos enabled') |
@@ -2139,6 +2136,7 b' def _docommit(ui, repo, *pats, **opts):' | |||
|
2139 | 2136 | cmdutil.checkunfinished(repo) |
|
2140 | 2137 | |
|
2141 | 2138 | node = cmdutil.amend(ui, repo, old, extra, pats, opts) |
|
2139 | opts = pycompat.byteskwargs(opts) | |
|
2142 | 2140 | if node == old.node(): |
|
2143 | 2141 | ui.status(_(b"nothing changed\n")) |
|
2144 | 2142 | return 1 |
@@ -2167,6 +2165,7 b' def _docommit(ui, repo, *pats, **opts):' | |||
|
2167 | 2165 | extra=extra, |
|
2168 | 2166 | ) |
|
2169 | 2167 | |
|
2168 | opts = pycompat.byteskwargs(opts) | |
|
2170 | 2169 | node = cmdutil.commit(ui, repo, commitfunc, pats, opts) |
|
2171 | 2170 | |
|
2172 | 2171 | if not node: |
@@ -2202,8 +2201,24 b' def _docommit(ui, repo, *pats, **opts):' | |||
|
2202 | 2201 | b'config|showconfig|debugconfig', |
|
2203 | 2202 | [ |
|
2204 | 2203 | (b'u', b'untrusted', None, _(b'show untrusted configuration options')), |
|
2204 | # This is experimental because we need | |
|
2205 | # * reasonable behavior around aliases, | |
|
2206 | # * decide if we display [debug] [experimental] and [devel] sections by |

2207 | # default |

2208 | # * some way to display "generic" config entries (the ones matching a |

2209 | # regexp), |

2210 | # * proper display of the different value types |

2211 | # * a better way to handle <DYNAMIC> values (and variable types), |

2212 | # * maybe some type information? |
|
2213 | ( | |
|
2214 | b'', | |
|
2215 | b'exp-all-known', | |
|
2216 | None, | |
|
2217 | _(b'show all known config options (EXPERIMENTAL)'), | 
|
2218 | ), | |
|
2205 | 2219 | (b'e', b'edit', None, _(b'edit user config')), |
|
2206 | 2220 | (b'l', b'local', None, _(b'edit repository config')), |
|
2221 | (b'', b'source', None, _(b'show source of configuration value')), | |
|
2207 | 2222 | ( |
|
2208 | 2223 | b'', |
|
2209 | 2224 | b'shared', |
@@ -2234,7 +2249,7 b' def config(ui, repo, *values, **opts):' | |||
|
2234 | 2249 | --global, edit the system-wide config file. With --local, edit the |
|
2235 | 2250 | repository-level config file. |
|
2236 | 2251 | |
|
2237 | With --debug, the source (filename and line number) is printed |

2252 | With --source, the source (filename and line number) is printed | |
|
2238 | 2253 | for each config item. |
|
2239 | 2254 | |
|
2240 | 2255 | See :hg:`help config` for more information about config files. |
@@ -2337,7 +2352,10 b' def config(ui, repo, *values, **opts):' | |||
|
2337 | 2352 | selentries = set(selentries) |
|
2338 | 2353 | |
|
2339 | 2354 | matched = False |
|
2340 | for section, name, value in ui.walkconfig(untrusted=untrusted): | |
|
2355 | all_known = opts[b'exp_all_known'] | |
|
2356 | show_source = ui.debugflag or opts.get(b'source') | |
|
2357 | entries = ui.walkconfig(untrusted=untrusted, all_known=all_known) | |
|
2358 | for section, name, value in entries: | |
|
2341 | 2359 | source = ui.configsource(section, name, untrusted) |
|
2342 | 2360 | value = pycompat.bytestr(value) |
|
2343 | 2361 | defaultvalue = ui.configdefault(section, name) |
@@ -2348,7 +2366,7 b' def config(ui, repo, *values, **opts):' | |||
|
2348 | 2366 | if values and not (section in selsections or entryname in selentries): |
|
2349 | 2367 | continue |
|
2350 | 2368 | fm.startitem() |
|
2351 | fm.condwrite(ui.debugflag, b'source', b'%s: ', source) |

2369 | fm.condwrite(show_source, b'source', b'%s: ', source) | |
|
2352 | 2370 | if uniquesel: |
|
2353 | 2371 | fm.data(name=entryname) |
|
2354 | 2372 | fm.write(b'value', b'%s\n', value) |
@@ -3071,8 +3089,7 b' def graft(ui, repo, *revs, **opts):' | |||
|
3071 | 3089 | |
|
3072 | 3090 | |
|
3073 | 3091 | def _dograft(ui, repo, *revs, **opts): |
|
3074 | opts = pycompat.byteskwargs(opts) | |
|
3075 | if revs and opts.get(b'rev'): | |
|
3092 | if revs and opts.get('rev'): | |
|
3076 | 3093 | ui.warn( |
|
3077 | 3094 | _( |
|
3078 | 3095 | b'warning: inconsistent use of --rev might give unexpected ' |
@@ -3081,61 +3098,59 b' def _dograft(ui, repo, *revs, **opts):' | |||
|
3081 | 3098 | ) |
|
3082 | 3099 | |
|
3083 | 3100 | revs = list(revs) |
|
3084 | revs.extend(opts.get(b'rev')) |

3101 | revs.extend(opts.get('rev')) | |
|
3085 | 3102 | # a dict of data to be stored in state file |
|
3086 | 3103 | statedata = {} |
|
3087 | 3104 | # list of new nodes created by ongoing graft |
|
3088 | 3105 | statedata[b'newnodes'] = [] |
|
3089 | 3106 | |
|
3090 | cmdutil.resolvecommitoptions(ui, opts) | |
|
3091 | ||
|
3092 | editor = cmdutil.getcommiteditor( | |
|
3093 | editform=b'graft', **pycompat.strkwargs(opts) | |
|
3094 | ) | |
|
3095 | ||
|
3096 | cmdutil.check_at_most_one_arg(opts, b'abort', b'stop', b'continue') | |
|
3107 | cmdutil.resolve_commit_options(ui, opts) | |
|
3108 | ||
|
3109 | editor = cmdutil.getcommiteditor(editform=b'graft', **opts) | |
|
3110 | ||
|
3111 | cmdutil.check_at_most_one_arg(opts, 'abort', 'stop', 'continue') | |
|
3097 | 3112 | |
|
3098 | 3113 | cont = False |
|
3099 | if opts.get(b'no_commit'): |

3114 | if opts.get('no_commit'): | |
|
3100 | 3115 | cmdutil.check_incompatible_arguments( |
|
3101 | 3116 | opts, |
|
3102 | b'no_commit', |

3103 | [b'edit', b'currentuser', b'currentdate', b'log'], |
|
3117 | 'no_commit', | |
|
3118 | ['edit', 'currentuser', 'currentdate', 'log'], | |
|
3104 | 3119 | ) |
|
3105 | 3120 | |
|
3106 | 3121 | graftstate = statemod.cmdstate(repo, b'graftstate') |
|
3107 | 3122 | |
|
3108 | if opts.get(b'stop'): |

3123 | if opts.get('stop'): | |
|
3109 | 3124 | cmdutil.check_incompatible_arguments( |
|
3110 | 3125 | opts, |
|
3111 | b'stop', |

3126 | 'stop', | |
|
3112 | 3127 | [ |
|
3113 | b'edit', |

3114 | b'log', |

3115 | b'user', |

3116 | b'date', |

3117 | b'currentdate', |

3118 | b'currentuser', |

3119 | b'rev', |

3128 | 'edit', | |
|
3129 | 'log', | |
|
3130 | 'user', | |
|
3131 | 'date', | |
|
3132 | 'currentdate', | |
|
3133 | 'currentuser', | |
|
3134 | 'rev', | |
|
3120 | 3135 | ], |
|
3121 | 3136 | ) |
|
3122 | 3137 | return _stopgraft(ui, repo, graftstate) |
|
3123 | elif opts.get(b'abort'): |

3138 | elif opts.get('abort'): | |
|
3124 | 3139 | cmdutil.check_incompatible_arguments( |
|
3125 | 3140 | opts, |
|
3126 | b'abort', |

3141 | 'abort', | |
|
3127 | 3142 | [ |
|
3128 | b'edit', |

3129 | b'log', |

3130 | b'user', |

3131 | b'date', |

3132 | b'currentdate', |

3133 | b'currentuser', |

3134 | b'rev', |

3143 | 'edit', | |
|
3144 | 'log', | |
|
3145 | 'user', | |
|
3146 | 'date', | |
|
3147 | 'currentdate', | |
|
3148 | 'currentuser', | |
|
3149 | 'rev', | |
|
3135 | 3150 | ], |
|
3136 | 3151 | ) |
|
3137 | 3152 | return cmdutil.abortgraft(ui, repo, graftstate) |
|
3138 | elif opts.get(b'continue'): |

3153 | elif opts.get('continue'): | |
|
3139 | 3154 | cont = True |
|
3140 | 3155 | if revs: |
|
3141 | 3156 | raise error.InputError(_(b"can't specify --continue and revisions")) |
@@ -3143,15 +3158,15 b' def _dograft(ui, repo, *revs, **opts):' | |||
|
3143 | 3158 | if graftstate.exists(): |
|
3144 | 3159 | statedata = cmdutil.readgraftstate(repo, graftstate) |
|
3145 | 3160 | if statedata.get(b'date'): |
|
3146 | opts[b'date'] = statedata[b'date'] |

3161 | opts['date'] = statedata[b'date'] | |
|
3147 | 3162 | if statedata.get(b'user'): |
|
3148 | opts[b'user'] = statedata[b'user'] |

3163 | opts['user'] = statedata[b'user'] | |
|
3149 | 3164 | if statedata.get(b'log'): |
|
3150 | opts[b'log'] = True |

3165 | opts['log'] = True | |
|
3151 | 3166 | if statedata.get(b'no_commit'): |
|
3152 | opts[b'no_commit'] = statedata.get(b'no_commit') |

3167 | opts['no_commit'] = statedata.get(b'no_commit') | |
|
3153 | 3168 | if statedata.get(b'base'): |
|
3154 | opts[b'base'] = statedata.get(b'base') |

3169 | opts['base'] = statedata.get(b'base') | |
|
3155 | 3170 | nodes = statedata[b'nodes'] |
|
3156 | 3171 | revs = [repo[node].rev() for node in nodes] |
|
3157 | 3172 | else: |
@@ -3165,8 +3180,8 b' def _dograft(ui, repo, *revs, **opts):' | |||
|
3165 | 3180 | |
|
3166 | 3181 | skipped = set() |
|
3167 | 3182 | basectx = None |
|
3168 | if opts.get(b'base'): |

3169 | basectx = scmutil.revsingle(repo, opts[b'base'], None) |

3183 | if opts.get('base'): | |
|
3184 | basectx = scmutil.revsingle(repo, opts['base'], None) | |
|
3170 | 3185 | if basectx is None: |
|
3171 | 3186 | # check for merges |
|
3172 | 3187 | for rev in repo.revs(b'%ld and merge()', revs): |
@@ -3184,7 +3199,7 b' def _dograft(ui, repo, *revs, **opts):' | |||
|
3184 | 3199 | # way to the graftstate. With --force, any revisions we would have otherwise |
|
3185 | 3200 | # skipped would not have been filtered out, and if they hadn't been applied |
|
3186 | 3201 | # already, they'd have been in the graftstate. |
|
3187 | if not (cont or opts.get(b'force')) and basectx is None: |

3202 | if not (cont or opts.get('force')) and basectx is None: | |
|
3188 | 3203 | # check for ancestors of dest branch |
|
3189 | 3204 | ancestors = repo.revs(b'%ld & (::.)', revs) |
|
3190 | 3205 | for rev in ancestors: |
@@ -3257,10 +3272,10 b' def _dograft(ui, repo, *revs, **opts):' | |||
|
3257 | 3272 | if not revs: |
|
3258 | 3273 | return -1 |
|
3259 | 3274 | |
|
3260 | if opts.get(b'no_commit'): |

3275 | if opts.get('no_commit'): | |
|
3261 | 3276 | statedata[b'no_commit'] = True |
|
3262 | if opts.get(b'base'): |

3263 | statedata[b'base'] = opts[b'base'] |

3277 | if opts.get('base'): | |
|
3278 | statedata[b'base'] = opts['base'] | |
|
3264 | 3279 | for pos, ctx in enumerate(repo.set(b"%ld", revs)): |
|
3265 | 3280 | desc = b'%d:%s "%s"' % ( |
|
3266 | 3281 | ctx.rev(), |
@@ -3271,7 +3286,7 b' def _dograft(ui, repo, *revs, **opts):' | |||
|
3271 | 3286 | if names: |
|
3272 | 3287 | desc += b' (%s)' % b' '.join(names) |
|
3273 | 3288 | ui.status(_(b'grafting %s\n') % desc) |
|
3274 | if opts.get(b'dry_run'): |

3289 | if opts.get('dry_run'): | |
|
3275 | 3290 | continue |
|
3276 | 3291 | |
|
3277 | 3292 | source = ctx.extra().get(b'source') |
@@ -3282,22 +3297,22 b' def _dograft(ui, repo, *revs, **opts):' | |||
|
3282 | 3297 | else: |
|
3283 | 3298 | extra[b'source'] = ctx.hex() |
|
3284 | 3299 | user = ctx.user() |
|
3285 | if opts.get(b'user'): |

3286 | user = opts[b'user'] |

3300 | if opts.get('user'): | |
|
3301 | user = opts['user'] | |
|
3287 | 3302 | statedata[b'user'] = user |
|
3288 | 3303 | date = ctx.date() |
|
3289 | if opts.get(b'date'): |

3290 | date = opts[b'date'] |

3304 | if opts.get('date'): | |
|
3305 | date = opts['date'] | |
|
3291 | 3306 | statedata[b'date'] = date |
|
3292 | 3307 | message = ctx.description() |
|
3293 | if opts.get(b'log'): |

3308 | if opts.get('log'): | |
|
3294 | 3309 | message += b'\n(grafted from %s)' % ctx.hex() |
|
3295 | 3310 | statedata[b'log'] = True |
|
3296 | 3311 | |
|
3297 | 3312 | # we don't merge the first commit when continuing |
|
3298 | 3313 | if not cont: |
|
3299 | 3314 | # perform the graft merge with p1(rev) as 'ancestor' |
|
3300 | overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')} |

3315 | overrides = {(b'ui', b'forcemerge'): opts.get('tool', b'')} | |
|
3301 | 3316 | base = ctx.p1() if basectx is None else basectx |
|
3302 | 3317 | with ui.configoverride(overrides, b'graft'): |
|
3303 | 3318 | stats = mergemod.graft(repo, ctx, base, [b'local', b'graft']) |
@@ -3315,7 +3330,7 b' def _dograft(ui, repo, *revs, **opts):' | |||
|
3315 | 3330 | cont = False |
|
3316 | 3331 | |
|
3317 | 3332 | # commit if --no-commit is false |
|
3318 | if not opts.get(b'no_commit'): |

3333 | if not opts.get('no_commit'): | |
|
3319 | 3334 | node = repo.commit( |
|
3320 | 3335 | text=message, user=user, date=date, extra=extra, editor=editor |
|
3321 | 3336 | ) |
@@ -3330,7 +3345,7 b' def _dograft(ui, repo, *revs, **opts):' | |||
|
3330 | 3345 | nn.append(node) |
|
3331 | 3346 | |
|
3332 | 3347 | # remove state when we complete successfully |
|
3333 | if not opts.get(b'dry_run'): |

3348 | if not opts.get('dry_run'): | |
|
3334 | 3349 | graftstate.delete() |
|
3335 | 3350 | |
|
3336 | 3351 | return 0 |
@@ -4847,7 +4862,7 b' def merge(ui, repo, node=None, **opts):' | |||
|
4847 | 4862 | |
|
4848 | 4863 | opts = pycompat.byteskwargs(opts) |
|
4849 | 4864 | abort = opts.get(b'abort') |
|
4850 | if abort and repo.dirstate.p2() == nullid: | |
|
4865 | if abort and repo.dirstate.p2() == repo.nullid: | |
|
4851 | 4866 | cmdutil.wrongtooltocontinue(repo, _(b'merge')) |
|
4852 | 4867 | cmdutil.check_incompatible_arguments(opts, b'abort', [b'rev', b'preview']) |
|
4853 | 4868 | if abort: |
@@ -5072,7 +5087,7 b' def parents(ui, repo, file_=None, **opts' | |||
|
5072 | 5087 | |
|
5073 | 5088 | displayer = logcmdutil.changesetdisplayer(ui, repo, opts) |
|
5074 | 5089 | for n in p: |
|
5075 | if n != nullid: | |
|
5090 | if n != repo.nullid: | |
|
5076 | 5091 | displayer.show(repo[n]) |
|
5077 | 5092 | displayer.close() |
|
5078 | 5093 | |
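The `merge` and `parents` hunks replace the module-level `nullid` constant with the repository-scoped `repo.nullid`, part of making the null node a per-repository constant. A minimal usage sketch, assuming a `repo` object:

    # the null node now comes from the repo, not a module global
    def merge_in_progress(repo):
        # p2 is the null node unless a merge is in progress
        return repo.dirstate.p2() != repo.nullid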
@@ -5128,15 +5143,9 b' def paths(ui, repo, search=None, **opts)' | |||
|
5128 | 5143 | """ |
|
5129 | 5144 | |
|
5130 | 5145 | opts = pycompat.byteskwargs(opts) |
|
5146 | ||
|
5147 | pathitems = urlutil.list_paths(ui, search) | |
|
5131 | 5148 | ui.pager(b'paths') |
|
5132 | if search: | |
|
5133 | pathitems = [ | |
|
5134 | (name, path) | |
|
5135 | for name, path in pycompat.iteritems(ui.paths) | |
|
5136 | if name == search | |
|
5137 | ] | |
|
5138 | else: | |
|
5139 | pathitems = sorted(pycompat.iteritems(ui.paths)) | |
|
5140 | 5149 | |
|
5141 | 5150 | fm = ui.formatter(b'paths', opts) |
|
5142 | 5151 | if fm.isplain(): |
@@ -5157,6 +5166,11 b' def paths(ui, repo, search=None, **opts)' | |||
|
5157 | 5166 | assert subopt not in (b'name', b'url') |
|
5158 | 5167 | if showsubopts: |
|
5159 | 5168 | fm.plain(b'%s:%s = ' % (name, subopt)) |
|
5169 | if isinstance(value, bool): | |
|
5170 | if value: | |
|
5171 | value = b'yes' | |
|
5172 | else: | |
|
5173 | value = b'no' | |
|
5160 | 5174 | fm.condwrite(showsubopts, subopt, b'%s\n', value) |
|
5161 | 5175 | |
|
5162 | 5176 | fm.end() |
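The added branch converts boolean sub-option values before handing them to `fm.condwrite()`, since the formatter writes bytes. The same conversion as a standalone sketch:

    # formatter output is bytes, so booleans become b'yes'/b'no' first
    def render_subopt_value(value):
        if isinstance(value, bool):
            return b'yes' if value else b'no'
        return value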
@@ -6105,7 +6119,7 b' def resolve(ui, repo, *pats, **opts):' | |||
|
6105 | 6119 | with repo.wlock(): |
|
6106 | 6120 | ms = mergestatemod.mergestate.read(repo) |
|
6107 | 6121 | |
|
6108 | if not (ms.active() or repo.dirstate.p2() != nullid): | |
|
6122 | if not (ms.active() or repo.dirstate.p2() != repo.nullid): | |
|
6109 | 6123 | raise error.StateError( |
|
6110 | 6124 | _(b'resolve command not applicable when not merging') |
|
6111 | 6125 | ) |
@@ -6223,8 +6237,21 b' def resolve(ui, repo, *pats, **opts):' | |||
|
6223 | 6237 | raise |
|
6224 | 6238 | |
|
6225 | 6239 | ms.commit() |
|
6226 | branchmerge = repo.dirstate.p2() != nullid | |
|
6227 | mergestatemod.recordupdates(repo, ms.actions(), branchmerge, None) | |
|
6240 | branchmerge = repo.dirstate.p2() != repo.nullid | |
|
6241 | # resolve is not doing a parent change here, however, `record updates` |
|
6242 | # will call some dirstate API that is intended for parent-change calls. |
|
6243 | # Ideally we would not need this and could implement a lighter version |
|
6244 | # of the recordupdates logic that will not have to deal with the part |
|
6245 | # related to parent changes. However this would require that: |
|
6246 | # - we are sure we passed around enough information at update/merge |
|
6247 | # time to no longer need it at `hg resolve` time |
|
6248 | # - we are sure we store that information well enough to be able to reuse it |
|
6249 | # - we have the necessary logic to reuse it right. |
|
6250 | # |
|
6251 | # All this should eventually happen, but in the meantime, we use this |
|
6252 | # context manager slightly outside its intended context. |
|
6253 | with repo.dirstate.parentchange(): | |
|
6254 | mergestatemod.recordupdates(repo, ms.actions(), branchmerge, None) | |
|
6228 | 6255 | |
|
6229 | 6256 | if not didwork and pats: |
|
6230 | 6257 | hint = None |
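The comment block added above documents a deliberate workaround: `recordupdates()` calls dirstate APIs that expect a parent change to be in progress, so `resolve` opens the `parentchange()` context even though it never actually moves the parents. In sketch form:

    # parentchange() asserts that dirstate mutations belong to a parent
    # change; resolve borrows it so recordupdates() passes that check
    with repo.dirstate.parentchange():
        mergestatemod.recordupdates(repo, ms.actions(), branchmerge, None)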
@@ -6315,7 +6342,7 b' def revert(ui, repo, *pats, **opts):' | |||
|
6315 | 6342 | opts[b"rev"] = cmdutil.finddate(ui, repo, opts[b"date"]) |
|
6316 | 6343 | |
|
6317 | 6344 | parent, p2 = repo.dirstate.parents() |
|
6318 | if not opts.get(b'rev') and p2 != nullid: | |
|
6345 | if not opts.get(b'rev') and p2 != repo.nullid: | |
|
6319 | 6346 | # revert after merge is a trap for new users (issue2915) |
|
6320 | 6347 | raise error.InputError( |
|
6321 | 6348 | _(b'uncommitted merge with no revision specified'), |
@@ -6335,7 +6362,7 b' def revert(ui, repo, *pats, **opts):' | |||
|
6335 | 6362 | or opts.get(b'interactive') |
|
6336 | 6363 | ): |
|
6337 | 6364 | msg = _(b"no files or directories specified") |
|
6338 | if p2 != nullid: | |
|
6365 | if p2 != repo.nullid: | |
|
6339 | 6366 | hint = _( |
|
6340 | 6367 | b"uncommitted merge, use --all to discard all changes," |
|
6341 | 6368 | b" or 'hg update -C .' to abort the merge" |
@@ -7227,9 +7254,8 b' def summary(ui, repo, **opts):' | |||
|
7227 | 7254 | if revs: |
|
7228 | 7255 | revs = [other.lookup(rev) for rev in revs] |
|
7229 | 7256 | ui.debug(b'comparing with %s\n' % urlutil.hidepassword(source)) |
|
7230 | repo.ui.pushbuffer() |
|
7231 | commoninc = discovery.findcommonincoming(repo, other, heads=revs) | |
|
7232 | repo.ui.popbuffer() | |
|
7257 | with repo.ui.silent(): | |
|
7258 | commoninc = discovery.findcommonincoming(repo, other, heads=revs) | |
|
7233 | 7259 | return source, sbranch, other, commoninc, commoninc[1] |
|
7234 | 7260 | |
|
7235 | 7261 | if needsincoming: |
@@ -7273,11 +7299,10 b' def summary(ui, repo, **opts):' | |||
|
7273 | 7299 | common = commoninc |
|
7274 | 7300 | if revs: |
|
7275 | 7301 | revs = [repo.lookup(rev) for rev in revs] |
|
7276 | repo.ui.pushbuffer() |
|
7277 | outgoing = discovery.findcommonoutgoing( | |
|
7278 | repo, dother, onlyheads=revs, commoninc=common | |
|
7279 | ) | |
|
7280 | repo.ui.popbuffer() | |
|
7302 | with repo.ui.silent(): | |
|
7303 | outgoing = discovery.findcommonoutgoing( | |
|
7304 | repo, dother, onlyheads=revs, commoninc=common | |
|
7305 | ) | |
|
7281 | 7306 | return dest, dbranch, dother, outgoing |
|
7282 | 7307 | |
|
7283 | 7308 | if needsoutgoing: |
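Both `summary` hunks swap a manual `pushbuffer()`/`popbuffer()` pair for the `ui.silent()` context manager, which restores the output state even when discovery raises. A sketch of the pattern:

    # output from the enclosed calls is suppressed, exception-safe
    with repo.ui.silent():
        commoninc = discovery.findcommonincoming(repo, other, heads=revs)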
@@ -7396,7 +7421,7 b' def tag(ui, repo, name1, *names, **opts)' | |||
|
7396 | 7421 | for n in names: |
|
7397 | 7422 | if repo.tagtype(n) == b'global': |
|
7398 | 7423 | alltags = tagsmod.findglobaltags(ui, repo) |
|
7399 | if alltags[n][0] == nullid: | |
|
7424 | if alltags[n][0] == repo.nullid: | |
|
7400 | 7425 | raise error.InputError( |
|
7401 | 7426 | _(b"tag '%s' is already removed") % n |
|
7402 | 7427 | ) |
@@ -7423,7 +7448,7 b' def tag(ui, repo, name1, *names, **opts)' | |||
|
7423 | 7448 | ) |
|
7424 | 7449 | if not opts.get(b'local'): |
|
7425 | 7450 | p1, p2 = repo.dirstate.parents() |
|
7426 | if p2 != nullid: | |
|
7451 | if p2 != repo.nullid: | |
|
7427 | 7452 | raise error.StateError(_(b'uncommitted merge')) |
|
7428 | 7453 | bheads = repo.branchheads() |
|
7429 | 7454 | if not opts.get(b'force') and bheads and p1 not in bheads: |
@@ -429,7 +429,7 b' def setuplogging(ui, repo=None, fp=None)' | |||
|
429 | 429 | elif logpath == b'-': |
|
430 | 430 | logger = loggingutil.fileobjectlogger(ui.ferr, tracked) |
|
431 | 431 | else: |
|
432 | logpath = os.path.abspath(util.expandpath(logpath)) |
|
432 | logpath = util.abspath(util.expandpath(logpath)) | |
|
433 | 433 | # developer config: cmdserver.max-log-files |
|
434 | 434 | maxfiles = ui.configint(b'cmdserver', b'max-log-files') |
|
435 | 435 | # developer config: cmdserver.max-log-size |
@@ -10,7 +10,6 b' import errno' | |||
|
10 | 10 | from .i18n import _ |
|
11 | 11 | from .node import ( |
|
12 | 12 | hex, |
|
13 | nullid, | |
|
14 | 13 | nullrev, |
|
15 | 14 | ) |
|
16 | 15 | |
@@ -277,10 +276,10 b' def _filecommit(' | |||
|
277 | 276 | """ |
|
278 | 277 | |
|
279 | 278 | fname = fctx.path() |
|
280 | fparent1 = manifest1.get(fname, nullid) | |
|
281 | fparent2 = manifest2.get(fname, nullid) | |
|
279 | fparent1 = manifest1.get(fname, repo.nullid) | |
|
280 | fparent2 = manifest2.get(fname, repo.nullid) | |
|
282 | 281 | touched = None |
|
283 | if fparent1 == fparent2 == nullid: | |
|
282 | if fparent1 == fparent2 == repo.nullid: | |
|
284 | 283 | touched = 'added' |
|
285 | 284 | |
|
286 | 285 | if isinstance(fctx, context.filectx): |
@@ -291,9 +290,11 b' def _filecommit(' | |||
|
291 | 290 | if node in [fparent1, fparent2]: |
|
292 | 291 | repo.ui.debug(b'reusing %s filelog entry\n' % fname) |
|
293 | 292 | if ( |
|
294 | fparent1 != nullid and manifest1.flags(fname) != fctx.flags() | |
|
293 | fparent1 != repo.nullid | |
|
294 | and manifest1.flags(fname) != fctx.flags() | |
|
295 | 295 | ) or ( |
|
296 | fparent2 != nullid and manifest2.flags(fname) != fctx.flags() | |
|
296 | fparent2 != repo.nullid | |
|
297 | and manifest2.flags(fname) != fctx.flags() | |
|
297 | 298 | ): |
|
298 | 299 | touched = 'modified' |
|
299 | 300 | return node, touched |
@@ -327,7 +328,9 b' def _filecommit(' | |||
|
327 | 328 | newfparent = fparent2 |
|
328 | 329 | |
|
329 | 330 | if manifest2: # branch merge |
|
330 | if fparent2 == nullid or cnode is None: # copied on remote side | |
|
331 | if ( | |
|
332 | fparent2 == repo.nullid or cnode is None | |
|
333 | ): # copied on remote side | |
|
331 | 334 | if cfname in manifest2: |
|
332 | 335 | cnode = manifest2[cfname] |
|
333 | 336 | newfparent = fparent1 |
@@ -346,7 +349,7 b' def _filecommit(' | |||
|
346 | 349 | if includecopymeta: |
|
347 | 350 | meta[b"copy"] = cfname |
|
348 | 351 | meta[b"copyrev"] = hex(cnode) |
|
349 | fparent1, fparent2 = nullid, newfparent | |
|
352 | fparent1, fparent2 = repo.nullid, newfparent | |
|
350 | 353 | else: |
|
351 | 354 | repo.ui.warn( |
|
352 | 355 | _( |
@@ -356,20 +359,20 b' def _filecommit(' | |||
|
356 | 359 | % (fname, cfname) |
|
357 | 360 | ) |
|
358 | 361 | |
|
359 | elif fparent1 == nullid: | |
|
360 | fparent1, fparent2 = fparent2, nullid | |
|
361 | elif fparent2 != nullid: | |
|
362 | elif fparent1 == repo.nullid: | |
|
363 | fparent1, fparent2 = fparent2, repo.nullid | |
|
364 | elif fparent2 != repo.nullid: | |
|
362 | 365 | if ms.active() and ms.extras(fname).get(b'filenode-source') == b'other': |
|
363 | fparent1, fparent2 = fparent2, nullid | |
|
366 | fparent1, fparent2 = fparent2, repo.nullid | |
|
364 | 367 | elif ms.active() and ms.extras(fname).get(b'merged') != b'yes': |
|
365 | fparent1, fparent2 = fparent1, nullid | |
|
368 | fparent1, fparent2 = fparent1, repo.nullid | |
|
366 | 369 | # is one parent an ancestor of the other? |
|
367 | 370 | else: |
|
368 | 371 | fparentancestors = flog.commonancestorsheads(fparent1, fparent2) |
|
369 | 372 | if fparent1 in fparentancestors: |
|
370 | fparent1, fparent2 = fparent2, nullid | |
|
373 | fparent1, fparent2 = fparent2, repo.nullid | |
|
371 | 374 | elif fparent2 in fparentancestors: |
|
372 | fparent2 = nullid | |
|
375 | fparent2 = repo.nullid | |
|
373 | 376 | |
|
374 | 377 | force_new_node = False |
|
375 | 378 | # The file might have been deleted by merge code and user explicitly choose |
@@ -384,9 +387,14 b' def _filecommit(' | |||
|
384 | 387 | force_new_node = True |
|
385 | 388 | # is the file changed? |
|
386 | 389 | text = fctx.data() |
|
387 | if fparent2 != nullid or meta or flog.cmp(fparent1, text) or force_new_node: | |
|
390 | if ( | |
|
391 | fparent2 != repo.nullid | |
|
392 | or meta | |
|
393 | or flog.cmp(fparent1, text) | |
|
394 | or force_new_node | |
|
395 | ): | |
|
388 | 396 | if touched is None: # do not overwrite added |
|
389 | if fparent2 == nullid: | |
|
397 | if fparent2 == repo.nullid: | |
|
390 | 398 | touched = 'modified' |
|
391 | 399 | else: |
|
392 | 400 | touched = 'merged' |
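Beyond the mechanical `repo.nullid` substitution, the `_filecommit` hunks expose the filelog parent-normalization rules. A condensed sketch of those rules (the helper name is mine, not upstream's):

    # a filelog revision keeps at most one meaningful parent when the
    # other is null or an ancestor of its sibling
    def _normalize_file_parents(repo, flog, fparent1, fparent2):
        if fparent1 == repo.nullid:
            fparent1, fparent2 = fparent2, repo.nullid
        elif fparent2 != repo.nullid:
            heads = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in heads:    # p1 is an ancestor of p2
                fparent1, fparent2 = fparent2, repo.nullid
            elif fparent2 in heads:  # p2 is an ancestor of p1
                fparent2 = repo.nullid
        return fparent1, fparent2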
@@ -258,93 +258,3 b' class config(object):' | |||
|
258 | 258 | self.parse( |
|
259 | 259 | path, fp.read(), sections=sections, remap=remap, include=include |
|
260 | 260 | ) |
|
261 | ||
|
262 | ||
|
263 | def parselist(value): | |
|
264 | """parse a configuration value as a list of comma/space separated strings | |
|
265 | ||
|
266 | >>> parselist(b'this,is "a small" ,test') | |
|
267 | ['this', 'is', 'a small', 'test'] | |
|
268 | """ | |
|
269 | ||
|
270 | def _parse_plain(parts, s, offset): | |
|
271 | whitespace = False | |
|
272 | while offset < len(s) and ( | |
|
273 | s[offset : offset + 1].isspace() or s[offset : offset + 1] == b',' | |
|
274 | ): | |
|
275 | whitespace = True | |
|
276 | offset += 1 | |
|
277 | if offset >= len(s): | |
|
278 | return None, parts, offset | |
|
279 | if whitespace: | |
|
280 | parts.append(b'') | |
|
281 | if s[offset : offset + 1] == b'"' and not parts[-1]: | |
|
282 | return _parse_quote, parts, offset + 1 | |
|
283 | elif s[offset : offset + 1] == b'"' and parts[-1][-1:] == b'\\': | |
|
284 | parts[-1] = parts[-1][:-1] + s[offset : offset + 1] | |
|
285 | return _parse_plain, parts, offset + 1 | |
|
286 | parts[-1] += s[offset : offset + 1] | |
|
287 | return _parse_plain, parts, offset + 1 | |
|
288 | ||
|
289 | def _parse_quote(parts, s, offset): | |
|
290 | if offset < len(s) and s[offset : offset + 1] == b'"': # "" | |
|
291 | parts.append(b'') | |
|
292 | offset += 1 | |
|
293 | while offset < len(s) and ( | |
|
294 | s[offset : offset + 1].isspace() | |
|
295 | or s[offset : offset + 1] == b',' | |
|
296 | ): | |
|
297 | offset += 1 | |
|
298 | return _parse_plain, parts, offset | |
|
299 | ||
|
300 | while offset < len(s) and s[offset : offset + 1] != b'"': | |
|
301 | if ( | |
|
302 | s[offset : offset + 1] == b'\\' | |
|
303 | and offset + 1 < len(s) | |
|
304 | and s[offset + 1 : offset + 2] == b'"' | |
|
305 | ): | |
|
306 | offset += 1 | |
|
307 | parts[-1] += b'"' | |
|
308 | else: | |
|
309 | parts[-1] += s[offset : offset + 1] | |
|
310 | offset += 1 | |
|
311 | ||
|
312 | if offset >= len(s): | |
|
313 | real_parts = _configlist(parts[-1]) | |
|
314 | if not real_parts: | |
|
315 | parts[-1] = b'"' | |
|
316 | else: | |
|
317 | real_parts[0] = b'"' + real_parts[0] | |
|
318 | parts = parts[:-1] | |
|
319 | parts.extend(real_parts) | |
|
320 | return None, parts, offset | |
|
321 | ||
|
322 | offset += 1 | |
|
323 | while offset < len(s) and s[offset : offset + 1] in [b' ', b',']: | |
|
324 | offset += 1 | |
|
325 | ||
|
326 | if offset < len(s): | |
|
327 | if offset + 1 == len(s) and s[offset : offset + 1] == b'"': | |
|
328 | parts[-1] += b'"' | |
|
329 | offset += 1 | |
|
330 | else: | |
|
331 | parts.append(b'') | |
|
332 | else: | |
|
333 | return None, parts, offset | |
|
334 | ||
|
335 | return _parse_plain, parts, offset | |
|
336 | ||
|
337 | def _configlist(s): | |
|
338 | s = s.rstrip(b' ,') | |
|
339 | if not s: | |
|
340 | return [] | |
|
341 | parser, parts, offset = _parse_plain, [b''], 0 | |
|
342 | while parser: | |
|
343 | parser, parts, offset = parser(parts, s, offset) | |
|
344 | return parts | |
|
345 | ||
|
346 | if value is not None and isinstance(value, bytes): | |
|
347 | result = _configlist(value.lstrip(b' ,\n')) | |
|
348 | else: | |
|
349 | result = value | |
|
350 | return result or [] |
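The 90 removed lines drop `parselist` from `config.py` wholesale; the function appears to have moved to `mercurial.utils.stringutil` (an assumption, this diff only shows the removal). If so, callers would switch to:

    # assumes parselist now lives in mercurial/utils/stringutil.py
    from mercurial.utils import stringutil

    parts = stringutil.parselist(b'this,is "a small" ,test')
    # expected, per the removed doctest: ['this', 'is', 'a small', 'test']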
@@ -904,6 +904,11 b' coreconfigitem(' | |||
|
904 | 904 | ) |
|
905 | 905 | coreconfigitem( |
|
906 | 906 | b'experimental', |
|
907 | b'changegroup4', | |
|
908 | default=False, | |
|
909 | ) | |
|
910 | coreconfigitem( | |
|
911 | b'experimental', | |
|
907 | 912 | b'cleanup-as-archived', |
|
908 | 913 | default=False, |
|
909 | 914 | ) |
@@ -954,6 +959,11 b' coreconfigitem(' | |||
|
954 | 959 | ) |
|
955 | 960 | coreconfigitem( |
|
956 | 961 | b'experimental', |
|
962 | b'dirstate-tree.in-memory', | |
|
963 | default=False, | |
|
964 | ) | |
|
965 | coreconfigitem( | |
|
966 | b'experimental', | |
|
957 | 967 | b'editortmpinhg', |
|
958 | 968 | default=False, |
|
959 | 969 | ) |
@@ -1138,6 +1148,27 b' coreconfigitem(' | |||
|
1138 | 1148 | b'revisions.prefixhexnode', |
|
1139 | 1149 | default=False, |
|
1140 | 1150 | ) |
|
1151 | # "out of experimental" todo list. | |
|
1152 | # | |
|
1153 | # * include management of a persistent nodemap in the main docket | |
|
1154 | # * enforce a "no-truncate" policy for mmap safety | |
|
1155 | # - for censoring operation | |
|
1156 | # - for stripping operation | |
|
1157 | # - for rollback operation | |
|
1158 | # * proper streaming (race free) of the docket file | |
|
1159 | # * track garbage data to eventually allow rewriting -existing- sidedata. |
|
1160 | # * Exchange-wise, we will also need to do something more efficient than | |
|
1161 | # keeping references to the affected revlogs, especially memory-wise when | |
|
1162 | # rewriting sidedata. | |
|
1163 | # * introduce a proper solution to reduce the number of filelog related files. | |
|
1164 | # * use caching for reading sidedata (similar to what we do for data). | |
|
1165 | # * no longer set offset=0 if sidedata_size=0 (simplify cutoff computation). | |
|
1166 | # * Improvement to consider | |
|
1167 | # - avoid compression header in chunk using the default compression? | |
|
1168 | # - forbid "inline" compression mode entirely? | |
|
1169 | # - split the data offset and flag field (the 2 bytes saved are mostly trouble) |
|
1170 | # - keep track of uncompressed -chunk- size (to preallocate memory better) | |
|
1171 | # - keep track of chain base or size (probably not that useful anymore) | |
|
1141 | 1172 | coreconfigitem( |
|
1142 | 1173 | b'experimental', |
|
1143 | 1174 | b'revlogv2', |
@@ -1272,6 +1303,14 b' coreconfigitem(' | |||
|
1272 | 1303 | experimental=True, |
|
1273 | 1304 | ) |
|
1274 | 1305 | coreconfigitem( |
|
1306 | # Enable this dirstate format *when creating a new repository*. | |
|
1307 | # Which format to use for existing repos is controlled by .hg/requires | |
|
1308 | b'format', | |
|
1309 | b'exp-dirstate-v2', | |
|
1310 | default=False, | |
|
1311 | experimental=True, | |
|
1312 | ) | |
|
1313 | coreconfigitem( | |
|
1275 | 1314 | b'format', |
|
1276 | 1315 | b'dotencode', |
|
1277 | 1316 | default=True, |
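Per the inline comment, the new `format.exp-dirstate-v2` knob only matters when creating a repository; existing repositories follow `.hg/requires`. A hedged read-side sketch using the standard boolean accessor:

    # creation-time knob; has no effect on already-created repos
    use_dirstate_v2 = ui.configbool(b'format', b'exp-dirstate-v2')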
@@ -1310,6 +1349,20 b' coreconfigitem(' | |||
|
1310 | 1349 | default=lambda: [b'zstd', b'zlib'], |
|
1311 | 1350 | alias=[(b'experimental', b'format.compression')], |
|
1312 | 1351 | ) |
|
1352 | # Experimental TODOs: | |
|
1353 | # | |
|
1354 | # * Same as for revlogv2 (but for the reduction of the number of files) |
|
1355 | # * Improvement to investigate | |
|
1356 | # - storing .hgtags fnode | |
|
1357 | # - storing `rank` of changesets | |
|
1358 | # - storing branch related identifier | |
|
1359 | ||
|
1360 | coreconfigitem( | |
|
1361 | b'format', | |
|
1362 | b'exp-use-changelog-v2', | |
|
1363 | default=None, | |
|
1364 | experimental=True, | |
|
1365 | ) | |
|
1313 | 1366 | coreconfigitem( |
|
1314 | 1367 | b'format', |
|
1315 | 1368 | b'usefncache', |
@@ -1342,20 +1395,6 b' coreconfigitem(' | |||
|
1342 | 1395 | b'use-persistent-nodemap', |
|
1343 | 1396 | default=_persistent_nodemap_default, |
|
1344 | 1397 | ) |
|
1345 | # TODO needs to grow a docket file to at least store the last offset of the data | |
|
1346 | # file when rewriting sidedata. | |
|
1347 | # Will also need a way of dealing with garbage data if we allow rewriting | |
|
1348 | # *existing* sidedata. | |
|
1349 | # Exchange-wise, we will also need to do something more efficient than keeping | |
|
1350 | # references to the affected revlogs, especially memory-wise when rewriting | |
|
1351 | # sidedata. | |
|
1352 | # Also... compress the sidedata? (this should be coming very soon) | |
|
1353 | coreconfigitem( | |
|
1354 | b'format', | |
|
1355 | b'exp-revlogv2.2', | |
|
1356 | default=False, | |
|
1357 | experimental=True, | |
|
1358 | ) | |
|
1359 | 1398 | coreconfigitem( |
|
1360 | 1399 | b'format', |
|
1361 | 1400 | b'exp-use-copies-side-data-changeset', |
@@ -1364,12 +1403,6 b' coreconfigitem(' | |||
|
1364 | 1403 | ) |
|
1365 | 1404 | coreconfigitem( |
|
1366 | 1405 | b'format', |
|
1367 | b'exp-use-side-data', | |
|
1368 | default=False, | |
|
1369 | experimental=True, | |
|
1370 | ) | |
|
1371 | coreconfigitem( | |
|
1372 | b'format', | |
|
1373 | 1406 | b'use-share-safe', |
|
1374 | 1407 | default=False, |
|
1375 | 1408 | ) |
@@ -14,14 +14,9 b' import stat' | |||
|
14 | 14 | |
|
15 | 15 | from .i18n import _ |
|
16 | 16 | from .node import ( |
|
17 | addednodeid, | |
|
18 | 17 | hex, |
|
19 | modifiednodeid, | |
|
20 | nullid, | |
|
21 | 18 | nullrev, |
|
22 | 19 | short, |
|
23 | wdirfilenodeids, | |
|
24 | wdirhex, | |
|
25 | 20 | ) |
|
26 | 21 | from .pycompat import ( |
|
27 | 22 | getattr, |
@@ -140,7 +135,7 b' class basectx(object):' | |||
|
140 | 135 | removed.append(fn) |
|
141 | 136 | elif flag1 != flag2: |
|
142 | 137 | modified.append(fn) |
|
143 | elif node2 not in wdirfilenodeids: | |
|
138 | elif node2 not in self._repo.nodeconstants.wdirfilenodeids: | |
|
144 | 139 | # When comparing files between two commits, we save time by |
|
145 | 140 | # not comparing the file contents when the nodeids differ. |
|
146 | 141 | # Note that this means we incorrectly report a reverted change |
@@ -737,7 +732,7 b' class changectx(basectx):' | |||
|
737 | 732 | n2 = c2._parents[0]._node |
|
738 | 733 | cahs = self._repo.changelog.commonancestorsheads(self._node, n2) |
|
739 | 734 | if not cahs: |
|
740 | anc = nullid | |
|
735 | anc = self._repo.nodeconstants.nullid | |
|
741 | 736 | elif len(cahs) == 1: |
|
742 | 737 | anc = cahs[0] |
|
743 | 738 | else: |
@@ -1132,7 +1127,11 b' class basefilectx(object):' | |||
|
1132 | 1127 | _path = self._path |
|
1133 | 1128 | fl = self._filelog |
|
1134 | 1129 | parents = self._filelog.parents(self._filenode) |
|
1135 | pl = [(_path, node, fl) for node in parents if node != nullid] | |
|
1130 | pl = [ | |
|
1131 | (_path, node, fl) | |
|
1132 | for node in parents | |
|
1133 | if node != self._repo.nodeconstants.nullid | |
|
1134 | ] | |
|
1136 | 1135 | |
|
1137 | 1136 | r = fl.renamed(self._filenode) |
|
1138 | 1137 | if r: |
@@ -1393,6 +1392,9 b' class committablectx(basectx):' | |||
|
1393 | 1392 | def __bytes__(self): |
|
1394 | 1393 | return bytes(self._parents[0]) + b"+" |
|
1395 | 1394 | |
|
1395 | def hex(self): | |
|
1396 | return self._repo.nodeconstants.wdirhex |
|
1397 | ||
|
1396 | 1398 | __str__ = encoding.strmethod(__bytes__) |
|
1397 | 1399 | |
|
1398 | 1400 | def __nonzero__(self): |
@@ -1556,12 +1558,12 b' class workingctx(committablectx):' | |||
|
1556 | 1558 | return self._repo.dirstate[key] not in b"?r" |
|
1557 | 1559 | |
|
1558 | 1560 | def hex(self): |
|
1559 | return wdirhex | |
|
1561 | return self._repo.nodeconstants.wdirhex | |
|
1560 | 1562 | |
|
1561 | 1563 | @propertycache |
|
1562 | 1564 | def _parents(self): |
|
1563 | 1565 | p = self._repo.dirstate.parents() |
|
1564 | if p[1] == nullid: | |
|
1566 | if p[1] == self._repo.nodeconstants.nullid: | |
|
1565 | 1567 | p = p[:-1] |
|
1566 | 1568 | # use unfiltered repo to delay/avoid loading obsmarkers |
|
1567 | 1569 | unfi = self._repo.unfiltered() |
@@ -1572,7 +1574,9 b' class workingctx(committablectx):' | |||
|
1572 | 1574 | for n in p |
|
1573 | 1575 | ] |
|
1574 | 1576 | |
|
1575 | def setparents(self, p1node, p2node=nullid): |
|
1577 | def setparents(self, p1node, p2node=None): | |
|
1578 | if p2node is None: | |
|
1579 | p2node = self._repo.nodeconstants.nullid | |
|
1576 | 1580 | dirstate = self._repo.dirstate |
|
1577 | 1581 | with dirstate.parentchange(): |
|
1578 | 1582 | copies = dirstate.setparents(p1node, p2node) |
@@ -1584,7 +1588,7 b' class workingctx(committablectx):' | |||
|
1584 | 1588 | for f in copies: |
|
1585 | 1589 | if f not in pctx and copies[f] in pctx: |
|
1586 | 1590 | dirstate.copy(copies[f], f) |
|
1587 | if p2node == nullid: | |
|
1591 | if p2node == self._repo.nodeconstants.nullid: | |
|
1588 | 1592 | for f, s in sorted(dirstate.copies().items()): |
|
1589 | 1593 | if f not in pctx and s not in pctx: |
|
1590 | 1594 | dirstate.copy(None, f) |
@@ -1697,12 +1701,8 b' class workingctx(committablectx):' | |||
|
1697 | 1701 | % uipath(f) |
|
1698 | 1702 | ) |
|
1699 | 1703 | rejected.append(f) |
|
1700 | elif ds[f] in b'amn': |
|
1704 | elif not ds.set_tracked(f): | |
|
1701 | 1705 | ui.warn(_(b"%s already tracked!\n") % uipath(f)) |
|
1702 | elif ds[f] == b'r': | |
|
1703 | ds.normallookup(f) | |
|
1704 | else: | |
|
1705 | ds.add(f) | |
|
1706 | 1706 | return rejected |
|
1707 | 1707 | |
|
1708 | 1708 | def forget(self, files, prefix=b""): |
@@ -1711,13 +1711,9 b' class workingctx(committablectx):' | |||
|
1711 | 1711 | uipath = lambda f: ds.pathto(pathutil.join(prefix, f)) |
|
1712 | 1712 | rejected = [] |
|
1713 | 1713 | for f in files: |
|
1714 | if f not in ds: |
|
1714 | if not ds.set_untracked(f): | |
|
1715 | 1715 | self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f)) |
|
1716 | 1716 | rejected.append(f) |
|
1717 | elif ds[f] != b'a': | |
|
1718 | ds.remove(f) | |
|
1719 | else: | |
|
1720 | ds.drop(f) | |
|
1721 | 1717 | return rejected |
|
1722 | 1718 | |
|
1723 | 1719 | def copy(self, source, dest): |
@@ -1738,10 +1734,7 b' class workingctx(committablectx):' | |||
|
1738 | 1734 | else: |
|
1739 | 1735 | with self._repo.wlock(): |
|
1740 | 1736 | ds = self._repo.dirstate |
|
1741 | if ds[dest] in b'?': |
|
1742 | ds.add(dest) | |
|
1743 | elif ds[dest] in b'r': | |
|
1744 | ds.normallookup(dest) | |
|
1737 | ds.set_tracked(dest) | |
|
1745 | 1738 | ds.copy(source, dest) |
|
1746 | 1739 | |
|
1747 | 1740 | def match( |
@@ -1836,7 +1829,7 b' class workingctx(committablectx):' | |||
|
1836 | 1829 | def _poststatusfixup(self, status, fixup): |
|
1837 | 1830 | """update dirstate for files that are actually clean""" |
|
1838 | 1831 | poststatus = self._repo.postdsstatus() |
|
1839 | if fixup or poststatus: | |
|
1832 | if fixup or poststatus or self._repo.dirstate._dirty: | |
|
1840 | 1833 | try: |
|
1841 | 1834 | oldid = self._repo.dirstate.identity() |
|
1842 | 1835 | |
@@ -1845,9 +1838,15 b' class workingctx(committablectx):' | |||
|
1845 | 1838 | # wlock can invalidate the dirstate, so cache normal _after_ |
|
1846 | 1839 | # taking the lock |
|
1847 | 1840 | with self._repo.wlock(False): |
|
1848 | if self._repo.dirstate.identity() == oldid: |
|
1841 | dirstate = self._repo.dirstate | |
|
1842 | if dirstate.identity() == oldid: | |
|
1849 | 1843 | if fixup: |
|
1850 | normal = self._repo.dirstate.normal |
|
1844 | if dirstate.pendingparentchange(): | |
|
1845 | normal = lambda f: dirstate.update_file( | |
|
1846 | f, p1_tracked=True, wc_tracked=True | |
|
1847 | ) | |
|
1848 | else: | |
|
1849 | normal = dirstate.set_clean | |
|
1851 | 1850 | for f in fixup: |
|
1852 | 1851 | normal(f) |
|
1853 | 1852 | # write changes out explicitly, because nesting |
@@ -1944,8 +1943,8 b' class workingctx(committablectx):' | |||
|
1944 | 1943 | |
|
1945 | 1944 | ff = self._flagfunc |
|
1946 | 1945 | for i, l in ( |
|
1947 | (addednodeid, status.added), | |
|
1948 | (modifiednodeid, status.modified), | |
|
1946 | (self._repo.nodeconstants.addednodeid, status.added), | |
|
1947 | (self._repo.nodeconstants.modifiednodeid, status.modified), | |
|
1949 | 1948 | ): |
|
1950 | 1949 | for f in l: |
|
1951 | 1950 | man[f] = i |
@@ -2023,19 +2022,23 b' class workingctx(committablectx):' | |||
|
2023 | 2022 | def markcommitted(self, node): |
|
2024 | 2023 | with self._repo.dirstate.parentchange(): |
|
2025 | 2024 | for f in self.modified() + self.added(): |
|
2026 | self._repo.dirstate.normal(f) |
|
2025 | self._repo.dirstate.update_file( | |
|
2026 | f, p1_tracked=True, wc_tracked=True | |
|
2027 | ) | |
|
2027 | 2028 | for f in self.removed(): |
|
2028 | self._repo.dirstate.drop(f) |
|
2029 | self._repo.dirstate.update_file( | |
|
2030 | f, p1_tracked=False, wc_tracked=False | |
|
2031 | ) | |
|
2029 | 2032 | self._repo.dirstate.setparents(node) |
|
2030 | 2033 | self._repo._quick_access_changeid_invalidate() |
|
2031 | 2034 | |
|
2035 | sparse.aftercommit(self._repo, node) | |
|
2036 | ||
|
2032 | 2037 | # write changes out explicitly, because nesting wlock at |
|
2033 | 2038 | # runtime may prevent 'wlock.release()' in 'repo.commit()' |
|
2034 | 2039 | # from immediately doing so for subsequent changing files |
|
2035 | 2040 | self._repo.dirstate.write(self._repo.currenttransaction()) |
|
2036 | 2041 | |
|
2037 | sparse.aftercommit(self._repo, node) | |
|
2038 | ||
|
2039 | 2042 | def mergestate(self, clean=False): |
|
2040 | 2043 | if clean: |
|
2041 | 2044 | return mergestatemod.mergestate.clean(self._repo) |
@@ -2070,13 +2073,18 b' class committablefilectx(basefilectx):' | |||
|
2070 | 2073 | path = self.copysource() |
|
2071 | 2074 | if not path: |
|
2072 | 2075 | return None |
|
2073 | return path, self._changectx._parents[0]._manifest.get(path, nullid) | |
|
2076 | return ( | |
|
2077 | path, | |
|
2078 | self._changectx._parents[0]._manifest.get( | |
|
2079 | path, self._repo.nodeconstants.nullid | |
|
2080 | ), | |
|
2081 | ) | |
|
2074 | 2082 | |
|
2075 | 2083 | def parents(self): |
|
2076 | 2084 | '''return parent filectxs, following copies if necessary''' |
|
2077 | 2085 | |
|
2078 | 2086 | def filenode(ctx, path): |
|
2079 | return ctx._manifest.get(path, nullid) | |
|
2087 | return ctx._manifest.get(path, self._repo.nodeconstants.nullid) | |
|
2080 | 2088 | |
|
2081 | 2089 | path = self._path |
|
2082 | 2090 | fl = self._filelog |
@@ -2094,7 +2102,7 b' class committablefilectx(basefilectx):' | |||
|
2094 | 2102 | return [ |
|
2095 | 2103 | self._parentfilectx(p, fileid=n, filelog=l) |
|
2096 | 2104 | for p, n, l in pl |
|
2097 | if n != nullid | |
|
2105 | if n != self._repo.nodeconstants.nullid | |
|
2098 | 2106 | ] |
|
2099 | 2107 | |
|
2100 | 2108 | def children(self): |
@@ -2222,7 +2230,9 b' class overlayworkingctx(committablectx):' | |||
|
2222 | 2230 | # ``overlayworkingctx`` (e.g. with --collapse). |
|
2223 | 2231 | util.clearcachedproperty(self, b'_manifest') |
|
2224 | 2232 | |
|
2225 | def setparents(self, p1node, p2node=nullid): |
|
2233 | def setparents(self, p1node, p2node=None): | |
|
2234 | if p2node is None: | |
|
2235 | p2node = self._repo.nodeconstants.nullid | |
|
2226 | 2236 | assert p1node == self._wrappedctx.node() |
|
2227 | 2237 | self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]] |
|
2228 | 2238 | |
@@ -2248,10 +2258,10 b' class overlayworkingctx(committablectx):' | |||
|
2248 | 2258 | |
|
2249 | 2259 | flag = self._flagfunc |
|
2250 | 2260 | for path in self.added(): |
|
2251 | man[path] = addednodeid | |
|
2261 | man[path] = self._repo.nodeconstants.addednodeid | |
|
2252 | 2262 | man.setflag(path, flag(path)) |
|
2253 | 2263 | for path in self.modified(): |
|
2254 | man[path] = modifiednodeid | |
|
2264 | man[path] = self._repo.nodeconstants.modifiednodeid | |
|
2255 | 2265 | man.setflag(path, flag(path)) |
|
2256 | 2266 | for path in self.removed(): |
|
2257 | 2267 | del man[path] |
@@ -2827,7 +2837,7 b' class memctx(committablectx):' | |||
|
2827 | 2837 | ) |
|
2828 | 2838 | self._rev = None |
|
2829 | 2839 | self._node = None |
|
2830 | parents = [(p or nullid) for p in parents] | |
|
2840 | parents = [(p or self._repo.nodeconstants.nullid) for p in parents] | |
|
2831 | 2841 | p1, p2 = parents |
|
2832 | 2842 | self._parents = [self._repo[p] for p in (p1, p2)] |
|
2833 | 2843 | files = sorted(set(files)) |
@@ -2866,10 +2876,10 b' class memctx(committablectx):' | |||
|
2866 | 2876 | man = pctx.manifest().copy() |
|
2867 | 2877 | |
|
2868 | 2878 | for f in self._status.modified: |
|
2869 | man[f] = modifiednodeid | |
|
2879 | man[f] = self._repo.nodeconstants.modifiednodeid | |
|
2870 | 2880 | |
|
2871 | 2881 | for f in self._status.added: |
|
2872 | man[f] = addednodeid | |
|
2882 | man[f] = self._repo.nodeconstants.addednodeid | |
|
2873 | 2883 | |
|
2874 | 2884 | for f in self._status.removed: |
|
2875 | 2885 | if f in man: |
@@ -3006,12 +3016,12 b' class metadataonlyctx(committablectx):' | |||
|
3006 | 3016 | # sanity check to ensure that the reused manifest parents are |
|
3007 | 3017 | # manifests of our commit parents |
|
3008 | 3018 | mp1, mp2 = self.manifestctx().parents |
|
3009 | if p1 != nullid and p1.manifestnode() != mp1: | |
|
3019 | if p1 != self._repo.nodeconstants.nullid and p1.manifestnode() != mp1: | |
|
3010 | 3020 | raise RuntimeError( |
|
3011 | 3021 | r"can't reuse the manifest: its p1 " |
|
3012 | 3022 | r"doesn't match the new ctx p1" |
|
3013 | 3023 | ) |
|
3014 | if p2 != nullid and p2.manifestnode() != mp2: | |
|
3024 | if p2 != self._repo.nodeconstants.nullid and p2.manifestnode() != mp2: | |
|
3015 | 3025 | raise RuntimeError( |
|
3016 | 3026 | r"can't reuse the manifest: " |
|
3017 | 3027 | r"its p2 doesn't match the new ctx p2" |
@@ -12,10 +12,7 b' import collections' | |||
|
12 | 12 | import os |
|
13 | 13 | |
|
14 | 14 | from .i18n import _ |
|
15 | from .node import ( |
|
16 | nullid, | |
|
17 | nullrev, | |
|
18 | ) | |
|
15 | from .node import nullrev | |
|
19 | 16 | |
|
20 | 17 | from . import ( |
|
21 | 18 | match as matchmod, |
@@ -321,15 +318,16 b' def _changesetforwardcopies(a, b, match)' | |||
|
321 | 318 | if p in children_count: |
|
322 | 319 | children_count[p] += 1 |
|
323 | 320 | revinfo = _revinfo_getter(repo, match) |
|
324 | return _combine_changeset_copies( | |
|
325 | revs, | |
|
326 | children_count, |
|
327 | b.rev(), | |
|
328 | revinfo, |
|
329 | match, |
|
330 | isancestor, |
|
331 | multi_thread, | |
|
332 | ) | |
|
321 | with repo.changelog.reading(): | |
|
322 | return _combine_changeset_copies( | |
|
323 | revs, | |
|
324 | children_count, | |
|
325 | b.rev(), | |
|
326 | revinfo, | |
|
327 | match, | |
|
328 | isancestor, | |
|
329 | multi_thread, | |
|
330 | ) | |
|
333 | 331 | else: |
|
334 | 332 | # When not using side-data, we will process the edges "from" the parent. |
|
335 | 333 | # so we need a full mapping of the parent -> children relation. |
@@ -579,7 +577,7 b' def _revinfo_getter_extra(repo):' | |||
|
579 | 577 | parents = fctx._filelog.parents(fctx._filenode) |
|
580 | 578 | nb_parents = 0 |
|
581 | 579 | for n in parents: |
|
582 | if n != nullid: | |
|
580 | if n != repo.nullid: | |
|
583 | 581 | nb_parents += 1 |
|
584 | 582 | return nb_parents >= 2 |
|
585 | 583 |
@@ -7,6 +7,7 b'' | |||
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | import binascii | |
|
10 | 11 | import codecs |
|
11 | 12 | import collections |
|
12 | 13 | import contextlib |
@@ -30,7 +31,6 b' from .i18n import _' | |||
|
30 | 31 | from .node import ( |
|
31 | 32 | bin, |
|
32 | 33 | hex, |
|
33 | nullid, | |
|
34 | 34 | nullrev, |
|
35 | 35 | short, |
|
36 | 36 | ) |
@@ -92,6 +92,7 b' from . import (' | |||
|
92 | 92 | wireprotoserver, |
|
93 | 93 | wireprotov2peer, |
|
94 | 94 | ) |
|
95 | from .interfaces import repository | |
|
95 | 96 | from .utils import ( |
|
96 | 97 | cborutil, |
|
97 | 98 | compression, |
@@ -794,7 +795,7 b' def debugdeltachain(ui, repo, file_=None' | |||
|
794 | 795 | index = r.index |
|
795 | 796 | start = r.start |
|
796 | 797 | length = r.length |
|
797 | generaldelta = r.version & revlog.FLAG_GENERALDELTA | |
|
798 | generaldelta = r._generaldelta | |
|
798 | 799 | withsparseread = getattr(r, '_withsparseread', False) |
|
799 | 800 | |
|
800 | 801 | def revinfo(rev): |
@@ -941,6 +942,12 b' def debugdeltachain(ui, repo, file_=None' | |||
|
941 | 942 | ), |
|
942 | 943 | (b'', b'dates', True, _(b'display the saved mtime')), |
|
943 | 944 | (b'', b'datesort', None, _(b'sort by saved mtime')), |
|
945 | ( | |
|
946 | b'', | |
|
947 | b'all', | |
|
948 | False, | |
|
949 | _(b'display dirstate-v2 tree nodes that would not exist in v1'), | |
|
950 | ), | |
|
944 | 951 | ], |
|
945 | 952 | _(b'[OPTION]...'), |
|
946 | 953 | ) |
@@ -953,29 +960,56 b' def debugstate(ui, repo, **opts):' | |||
|
953 | 960 | datesort = opts.get('datesort') |
|
954 | 961 | |
|
955 | 962 | if datesort: |
|
956 | keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename | |
|
963 | keyfunc = lambda x: ( | |
|
964 | x[1].v1_mtime(), | |
|
965 | x[0], | |
|
966 | ) # sort by mtime, then by filename | |
|
957 | 967 | else: |
|
958 | 968 | keyfunc = None # sort by filename |
|
959 | for file_, ent in sorted(pycompat.iteritems(repo.dirstate), key=keyfunc): | |
|
960 | if ent[3] == -1: | |
|
969 | if opts['all']: | |
|
970 | entries = list(repo.dirstate._map.debug_iter()) | |
|
971 | else: | |
|
972 | entries = list(pycompat.iteritems(repo.dirstate)) | |
|
973 | entries.sort(key=keyfunc) | |
|
974 | for file_, ent in entries: | |
|
975 | if ent.v1_mtime() == -1: | |
|
961 | 976 | timestr = b'unset ' |
|
962 | 977 | elif nodates: |
|
963 | 978 | timestr = b'set ' |
|
964 | 979 | else: |
|
965 | 980 | timestr = time.strftime( |
|
966 | "%Y-%m-%d %H:%M:%S ", time.localtime(ent[3]) |
|
981 | "%Y-%m-%d %H:%M:%S ", time.localtime(ent.v1_mtime()) | |
|
967 | 982 | ) |
|
968 | 983 | timestr = encoding.strtolocal(timestr) |
|
969 | if ent[1] & 0o20000: |
|
984 | if ent.mode & 0o20000: | |
|
970 | 985 | mode = b'lnk' |
|
971 | 986 | else: |
|
972 | mode = b'%3o' % (ent[1] & 0o777 & ~util.umask) |
|
973 | ui.write(b"%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_)) | |
|
987 | mode = b'%3o' % (ent.v1_mode() & 0o777 & ~util.umask) | |
|
988 | ui.write( | |
|
989 | b"%c %s %10d %s%s\n" | |
|
990 | % (ent.v1_state(), mode, ent.v1_size(), timestr, file_) | |
|
991 | ) | |
|
974 | 992 | for f in repo.dirstate.copies(): |
|
975 | 993 | ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f)) |
|
976 | 994 | |
|
977 | 995 | |
|
978 | 996 | @command( |
|
997 | b'debugdirstateignorepatternshash', | |
|
998 | [], | |
|
999 | _(b''), | |
|
1000 | ) | |
|
1001 | def debugdirstateignorepatternshash(ui, repo, **opts): | |
|
1002 | """show the hash of ignore patterns stored in dirstate if v2, | |
|
1003 | or nothing for dirstate-v2 | |
|
1004 | """ | |
|
1005 | if repo.dirstate._use_dirstate_v2: | |
|
1006 | docket = repo.dirstate._map.docket | |
|
1007 | hash_len = 20 # 160 bits for SHA-1 | |
|
1008 | hash_bytes = docket.tree_metadata[-hash_len:] | |
|
1009 | ui.write(binascii.hexlify(hash_bytes) + b'\n') | |
|
1010 | ||
|
1011 | ||
|
1012 | @command( | |
|
979 | 1013 | b'debugdiscovery', |
|
980 | 1014 | [ |
|
981 | 1015 | (b'', b'old', None, _(b'use old-style discovery')), |
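`debugstate` stops indexing dirstate entries as tuples and uses the new `DirstateItem` accessors shown above. The mapping, as a sketch:

    # old tuple indexing    ->  DirstateItem accessor
    # ent[0] (state)        ->  ent.v1_state()
    # ent[1] (mode)         ->  ent.v1_mode()
    # ent[2] (size)         ->  ent.v1_size()
    # ent[3] (mtime)        ->  ent.v1_mtime()
    state, mode = ent.v1_state(), ent.v1_mode()
    size, mtime = ent.v1_size(), ent.v1_mtime()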
@@ -1667,7 +1701,7 b' def debugindexdot(ui, repo, file_=None, ' | |||
|
1667 | 1701 | node = r.node(i) |
|
1668 | 1702 | pp = r.parents(node) |
|
1669 | 1703 | ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i)) |
|
1670 | if pp[1] != nullid: | |
|
1704 | if pp[1] != repo.nullid: | |
|
1671 | 1705 | ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i)) |
|
1672 | 1706 | ui.write(b"}\n") |
|
1673 | 1707 | |
@@ -1675,7 +1709,7 b' def debugindexdot(ui, repo, file_=None, ' | |||
|
1675 | 1709 | @command(b'debugindexstats', []) |
|
1676 | 1710 | def debugindexstats(ui, repo): |
|
1677 | 1711 | """show stats related to the changelog index""" |
|
1678 | repo.changelog.shortest(nullid, 1) | |
|
1712 | repo.changelog.shortest(repo.nullid, 1) | |
|
1679 | 1713 | index = repo.changelog.index |
|
1680 | 1714 | if not util.safehasattr(index, b'stats'): |
|
1681 | 1715 | raise error.Abort(_(b'debugindexstats only works with native code')) |
@@ -2425,7 +2459,7 b' def debugobsolete(ui, repo, precursor=No' | |||
|
2425 | 2459 | # arbitrary node identifiers, possibly not present in the |
|
2426 | 2460 | # local repository. |
|
2427 | 2461 | n = bin(s) |
|
2428 | if len(n) != len(nullid): |
|
2462 | if len(n) != repo.nodeconstants.nodelen: | |
|
2429 | 2463 | raise TypeError() |
|
2430 | 2464 | return n |
|
2431 | 2465 | except TypeError: |
@@ -2603,7 +2637,7 b' def debugpathcomplete(ui, repo, *specs, ' | |||
|
2603 | 2637 | files, dirs = set(), set() |
|
2604 | 2638 | adddir, addfile = dirs.add, files.add |
|
2605 | 2639 | for f, st in pycompat.iteritems(dirstate): |
|
2606 | if f.startswith(spec) and st[0] in acceptable: |
|
2640 | if f.startswith(spec) and st.state in acceptable: | |
|
2607 | 2641 | if fixpaths: |
|
2608 | 2642 | f = f.replace(b'/', pycompat.ossep) |
|
2609 | 2643 | if fullpaths: |
@@ -2749,9 +2783,9 b' def debugpickmergetool(ui, repo, *pats, ' | |||
|
2749 | 2783 | changedelete = opts[b'changedelete'] |
|
2750 | 2784 | for path in ctx.walk(m): |
|
2751 | 2785 | fctx = ctx[path] |
|
2752 | try: |
|
2753 | if not ui.debugflag: |
|
2754 | ui.pushbuffer(error=True) | |
|
2786 | with ui.silent( | |
|
2787 | error=True | |
|
2788 | ) if not ui.debugflag else util.nullcontextmanager(): | |
|
2755 | 2789 | tool, toolpath = filemerge._picktool( |
|
2756 | 2790 | repo, |
|
2757 | 2791 | ui, |
@@ -2760,9 +2794,6 b' def debugpickmergetool(ui, repo, *pats, ' | |||
|
2760 | 2794 | b'l' in fctx.flags(), |
|
2761 | 2795 | changedelete, |
|
2762 | 2796 | ) |
|
2763 | finally: | |
|
2764 | if not ui.debugflag: | |
|
2765 | ui.popbuffer() | |
|
2766 | 2797 | ui.write(b'%s = %s\n' % (path, tool)) |
|
2767 | 2798 | |
|
2768 | 2799 | |
@@ -2973,8 +3004,8 b' def debugrevlog(ui, repo, file_=None, **' | |||
|
2973 | 3004 | ) |
|
2974 | 3005 | return 0 |
|
2975 | 3006 | |
|
2976 | v = r.version |
|
2977 | format = v & 0xFFFF | |
|
3007 | format = r._format_version | |
|
3008 | v = r._format_flags | |
|
2978 | 3009 | flags = [] |
|
2979 | 3010 | gdelta = False |
|
2980 | 3011 | if v & revlog.FLAG_INLINE_DATA: |
@@ -3328,7 +3359,7 b' def debugrevlogindex(ui, repo, file_=Non' | |||
|
3328 | 3359 | try: |
|
3329 | 3360 | pp = r.parents(node) |
|
3330 | 3361 | except Exception: |
|
3331 | pp = [nullid, nullid] | |
|
3362 | pp = [repo.nullid, repo.nullid] | |
|
3332 | 3363 | if ui.verbose: |
|
3333 | 3364 | ui.write( |
|
3334 | 3365 | b"% 6d % 9d % 7d % 7d %s %s %s\n" |
@@ -3742,7 +3773,9 b' def debugbackupbundle(ui, repo, *pats, *' | |||
|
3742 | 3773 | for n in chlist: |
|
3743 | 3774 | if limit is not None and count >= limit: |
|
3744 | 3775 | break |
|
3745 | parents = [True for p in other.changelog.parents(n) if p != nullid] | |
|
3776 | parents = [ | |
|
3777 | True for p in other.changelog.parents(n) if p != repo.nullid | |
|
3778 | ] | |
|
3746 | 3779 | if opts.get(b"no_merges") and len(parents) == 2: |
|
3747 | 3780 | continue |
|
3748 | 3781 | count += 1 |
@@ -3787,16 +3820,13 b' def debugbackupbundle(ui, repo, *pats, *' | |||
|
3787 | 3820 | if revs: |
|
3788 | 3821 | revs = [other.lookup(rev) for rev in revs] |
|
3789 | 3822 | |
|
3790 | quiet = ui.quiet |
|
3791 | try: | |
|
3792 | ui.quiet = True | |
|
3793 | other, chlist, cleanupfn = bundlerepo.getremotechanges( | |
|
3794 | ui, repo, other, revs, opts[b"bundle"], opts[b"force"] | |
|
3795 | ) | |
|
3796 | except error.LookupError: | |
|
3797 | continue | |
|
3798 | finally: | |
|
3799 | ui.quiet = quiet | |
|
3823 | with ui.silent(): | |
|
3824 | try: | |
|
3825 | other, chlist, cleanupfn = bundlerepo.getremotechanges( | |
|
3826 | ui, repo, other, revs, opts[b"bundle"], opts[b"force"] | |
|
3827 | ) | |
|
3828 | except error.LookupError: | |
|
3829 | continue | |
|
3800 | 3830 | |
|
3801 | 3831 | try: |
|
3802 | 3832 | if not chlist: |
@@ -4046,7 +4076,7 b" def debuguiprompt(ui, prompt=b''):" | |||
|
4046 | 4076 | def debugupdatecaches(ui, repo, *pats, **opts): |
|
4047 | 4077 | """warm all known caches in the repository""" |
|
4048 | 4078 | with repo.wlock(), repo.lock(): |
|
4049 | repo.updatecaches(full=True) |
|
4079 | repo.updatecaches(caches=repository.CACHES_ALL) | |
|
4050 | 4080 | |
|
4051 | 4081 | |
|
4052 | 4082 | @command( |
@@ -4573,17 +4603,16 b' def debugwireproto(ui, repo, path=None, ' | |||
|
4573 | 4603 | ui.write(_(b'creating http peer for wire protocol version 2\n')) |
|
4574 | 4604 | # We go through makepeer() because we need an API descriptor for |
|
4575 | 4605 | # the peer instance to be useful. |
|
4576 | with ui.configoverride( | |
|
4606 | maybe_silent = ( | |
|
4607 | ui.silent() | |
|
4608 | if opts[b'nologhandshake'] | |
|
4609 | else util.nullcontextmanager() | |
|
4610 | ) | |
|
4611 | with maybe_silent, ui.configoverride( | |
|
4577 | 4612 | {(b'experimental', b'httppeer.advertise-v2'): True} |
|
4578 | 4613 | ): |
|
4579 | if opts[b'nologhandshake']: | |
|
4580 | ui.pushbuffer() | |
|
4581 | ||
|
4582 | 4614 | peer = httppeer.makepeer(ui, path, opener=opener) |
|
4583 | 4615 | |
|
4584 | if opts[b'nologhandshake']: | |
|
4585 | ui.popbuffer() | |
|
4586 | ||
|
4587 | 4616 | if not isinstance(peer, httppeer.httpv2peer): |
|
4588 | 4617 | raise error.Abort( |
|
4589 | 4618 | _( |
@@ -14,12 +14,12 b' import os' | |||
|
14 | 14 | import stat |
|
15 | 15 | |
|
16 | 16 | from .i18n import _ |
|
17 | from .node import nullid | |
|
18 | 17 | from .pycompat import delattr |
|
19 | 18 | |
|
20 | 19 | from hgdemandimport import tracing |
|
21 | 20 | |
|
22 | 21 | from . import ( |
|
22 | dirstatemap, | |
|
23 | 23 | encoding, |
|
24 | 24 | error, |
|
25 | 25 | match as matchmod, |
@@ -28,7 +28,6 b' from . import (' | |||
|
28 | 28 | pycompat, |
|
29 | 29 | scmutil, |
|
30 | 30 | sparse, |
|
31 | txnutil, | |
|
32 | 31 | util, |
|
33 | 32 | ) |
|
34 | 33 | |
@@ -40,11 +39,13 b' from .interfaces import (' | |||
|
40 | 39 | parsers = policy.importmod('parsers') |
|
41 | 40 | rustmod = policy.importrust('dirstate') |
|
42 | 41 | |
|
42 | SUPPORTS_DIRSTATE_V2 = rustmod is not None | |
|
43 | ||
|
43 | 44 | propertycache = util.propertycache |
|
44 | 45 | filecache = scmutil.filecache |
|
45 | _rangemask = 0x7FFFFFFF | |
|
46 | _rangemask = dirstatemap.rangemask | |
|
46 | 47 | |
|
47 | dirstatetuple = parsers.dirstatetuple |
|
48 | DirstateItem = parsers.DirstateItem | |
|
48 | 49 | |
|
49 | 50 | |
|
50 | 51 | class repocache(filecache): |
@@ -71,10 +72,39 b' def _getfsnow(vfs):' | |||
|
71 | 72 | vfs.unlink(tmpname) |
|
72 | 73 | |
|
73 | 74 | |
|
75 | def requires_parents_change(func): | |
|
76 | def wrap(self, *args, **kwargs): | |
|
77 | if not self.pendingparentchange(): | |
|
78 | msg = 'calling `%s` outside of a parentchange context' | |
|
79 | msg %= func.__name__ | |
|
80 | raise error.ProgrammingError(msg) | |
|
81 | return func(self, *args, **kwargs) | |
|
82 | ||
|
83 | return wrap | |
|
84 | ||
|
85 | ||
|
86 | def requires_no_parents_change(func): | |
|
87 | def wrap(self, *args, **kwargs): | |
|
88 | if self.pendingparentchange(): | |
|
89 | msg = 'calling `%s` inside of a parentchange context' | |
|
90 | msg %= func.__name__ | |
|
91 | raise error.ProgrammingError(msg) | |
|
92 | return func(self, *args, **kwargs) | |
|
93 | ||
|
94 | return wrap | |
|
95 | ||
|
96 | ||
|
74 | 97 | @interfaceutil.implementer(intdirstate.idirstate) |
|
75 | 98 | class dirstate(object): |
|
76 | 99 | def __init__( |
|
77 | self, opener, ui, root, validate, sparsematchfn, nodeconstants | |
|
100 | self, | |
|
101 | opener, | |
|
102 | ui, | |
|
103 | root, | |
|
104 | validate, | |
|
105 | sparsematchfn, | |
|
106 | nodeconstants, | |
|
107 | use_dirstate_v2, | |
|
78 | 108 | ): |
|
79 | 109 | """Create a new dirstate object. |
|
80 | 110 | |
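The two decorators added above turn the "is a parent change in progress?" invariant into an explicit `ProgrammingError` instead of silent dirstate corruption. A sketch of a method opting in:

    # raises error.ProgrammingError if called inside parentchange()
    @requires_no_parents_change
    def set_tracked(self, filename):
        ...  # only legal outside a parentchange context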
@@ -82,6 +112,7 b' class dirstate(object):' | |||
|
82 | 112 | dirstate file; root is the root of the directory tracked by |
|
83 | 113 | the dirstate. |
|
84 | 114 | """ |
|
115 | self._use_dirstate_v2 = use_dirstate_v2 | |
|
85 | 116 | self._nodeconstants = nodeconstants |
|
86 | 117 | self._opener = opener |
|
87 | 118 | self._validate = validate |
@@ -100,7 +131,7 b' class dirstate(object):' | |||
|
100 | 131 | self._plchangecallbacks = {} |
|
101 | 132 | self._origpl = None |
|
102 | 133 | self._updatedfiles = set() |
|
103 | self._mapcls = dirstatemap | |
|
134 | self._mapcls = dirstatemap.dirstatemap | |
|
104 | 135 | # Access and cache cwd early, so we don't access it for the first time |
|
105 | 136 | # after a working-copy update caused it to not exist (accessing it then |
|
106 | 137 | # raises an exception). |
@@ -140,7 +171,11 b' class dirstate(object):' | |||
|
140 | 171 | def _map(self): |
|
141 | 172 | """Return the dirstate contents (see documentation for dirstatemap).""" |
|
142 | 173 | self._map = self._mapcls( |
|
143 | self._ui, self._opener, self._root, self._nodeconstants | |
|
174 | self._ui, | |
|
175 | self._opener, | |
|
176 | self._root, | |
|
177 | self._nodeconstants, | |
|
178 | self._use_dirstate_v2, | |
|
144 | 179 | ) |
|
145 | 180 | return self._map |
|
146 | 181 | |
@@ -288,8 +323,15 b' class dirstate(object):' | |||
|
288 | 323 | r marked for removal |
|
289 | 324 | a marked for addition |
|
290 | 325 | ? not tracked |
|
326 | ||
|
327 | XXX The "state" is a bit obscure to be in the "public" API. We should |
|
328 | consider migrating all users of this to go through the dirstate entry |
|
329 | instead. | |
|
291 | 330 | """ |
|
292 | return self._map.get(key, (b"?",))[0] |
|
331 | entry = self._map.get(key) | |
|
332 | if entry is not None: | |
|
333 | return entry.state | |
|
334 | return b'?' | |
|
293 | 335 | |
|
294 | 336 | def __contains__(self, key): |
|
295 | 337 | return key in self._map |
@@ -302,6 +344,9 b' class dirstate(object):' | |||
|
302 | 344 | |
|
303 | 345 | iteritems = items |
|
304 | 346 | |
|
347 | def directories(self): | |
|
348 | return self._map.directories() | |
|
349 | ||
|
305 | 350 | def parents(self): |
|
306 | 351 | return [self._validate(p) for p in self._pl] |
|
307 | 352 | |
@@ -311,18 +356,25 b' class dirstate(object):' | |||
|
311 | 356 | def p2(self): |
|
312 | 357 | return self._validate(self._pl[1]) |
|
313 | 358 | |
|
359 | @property | |
|
360 | def in_merge(self): | |
|
361 | """True if a merge is in progress""" | |
|
362 | return self._pl[1] != self._nodeconstants.nullid | |
|
363 | ||
|
314 | 364 | def branch(self): |
|
315 | 365 | return encoding.tolocal(self._branch) |
|
316 | 366 | |
|
317 | def setparents(self, p1, p2=nullid): |
|
367 | def setparents(self, p1, p2=None): | |
|
318 | 368 | """Set dirstate parents to p1 and p2. |
|
319 | 369 | |
|
320 | When moving from two parents to one, 'merged' entries a |
|
370 | When moving from two parents to one, "merged" entries are |
|
321 | 371 | adjusted to normal and previous copy records discarded and |
|
322 | 372 | returned by the call. |
|
323 | 373 | |
|
324 | 374 | See localrepo.setparents() |
|
325 | 375 | """ |
|
376 | if p2 is None: | |
|
377 | p2 = self._nodeconstants.nullid | |
|
326 | 378 | if self._parentwriters == 0: |
|
327 | 379 | raise ValueError( |
|
328 | 380 | b"cannot set dirstate parent outside of " |
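The new `in_merge` property names the `p2 != nullid` test that callers used to spell out inline, and `setparents` moves its null default from the signature into the body (the repo-scoped null node is not available at function-definition time). Usage sketch:

    # the property replaces open-coded p2 comparisons
    if repo.dirstate.in_merge:
        raise error.Abort(b'outstanding uncommitted merge')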
@@ -335,27 +387,29 b' class dirstate(object):' | |||
|
335 | 387 | self._origpl = self._pl |
|
336 | 388 | self._map.setparents(p1, p2) |
|
337 | 389 | copies = {} |
|
338 | if oldp2 != nullid and p2 == nullid: | |
|
339 | candidatefiles = self._map.nonnormalset.union( | |
|
340 | self._map.otherparentset | |
|
341 | ) |
|
390 | if ( | |
|
391 | oldp2 != self._nodeconstants.nullid | |
|
392 | and p2 == self._nodeconstants.nullid | |
|
393 | ): | |
|
394 | candidatefiles = self._map.non_normal_or_other_parent_paths() | |
|
395 | ||
|
342 | 396 | for f in candidatefiles: |
|
343 | 397 | s = self._map.get(f) |
|
344 | 398 | if s is None: |
|
345 | 399 | continue |
|
346 | 400 | |
|
347 | # Discard 'merged' markers when moving away from a merge state |
|
348 | if s[0] == b'm': |
|
401 | # Discard "merged" markers when moving away from a merge state | |
|
402 | if s.merged: | |
|
349 | 403 | source = self._map.copymap.get(f) |
|
350 | 404 | if source: |
|
351 | 405 | copies[f] = source |
|
352 | self.normallookup(f) | |
|
406 | self._normallookup(f) | |
|
353 | 407 | # Also fix up otherparent markers |
|
354 | elif s[0] == b'n' and s[2] == -2: # other parent |
|
408 | elif s.from_p2: | |
|
355 | 409 | source = self._map.copymap.get(f) |
|
356 | 410 | if source: |
|
357 | 411 | copies[f] = source |
|
358 | self.add(f) | |
|
412 | self._add(f) | |
|
359 | 413 | return copies |
|
360 | 414 | |
|
361 | 415 | def setbranch(self, branch): |
@@ -408,27 +462,246 b' class dirstate(object):' | |||
|
408 | 462 | def copies(self): |
|
409 | 463 | return self._map.copymap |
|
410 | 464 | |
|
411 | def _addpath(self, f, state, mode, size, mtime): | |
|
412 | oldstate = self[f] | |
|
413 | if state == b'a' or oldstate == b'r': | |
|
465 | @requires_no_parents_change | |
|
466 | def set_tracked(self, filename): | |
|
467 | """a "public" method for generic code to mark a file as tracked | |
|
468 | ||
|
469 | This function is to be called outside of the "update/merge" case. For |
|
470 | example by a command like `hg add X`. | |
|
471 | ||
|
472 | return True if the file was previously untracked, False otherwise. |
|
473 | """ | |
|
474 | entry = self._map.get(filename) | |
|
475 | if entry is None: | |
|
476 | self._add(filename) | |
|
477 | return True | |
|
478 | elif not entry.tracked: | |
|
479 | self._normallookup(filename) | |
|
480 | return True | |
|
481 | # XXX This is probably overkill for most cases, but we need this to |
|
482 | # fully replace the `normallookup` call with the `set_tracked` one. |
|
483 | # Consider smoothing this in the future. | |
|
484 | self.set_possibly_dirty(filename) | |
|
485 | return False | |
|
486 | ||
|
487 | @requires_no_parents_change | |
|
488 | def set_untracked(self, filename): | |
|
489 | """a "public" method for generic code to mark a file as untracked | |
|
490 | ||
|
491 | This function is to be called outside of the "update/merge" case. For |
|
492 | example by a command like `hg remove X`. | |
|
493 | ||
|
494 | return True if the file was previously tracked, False otherwise. |
|
495 | """ | |
|
496 | entry = self._map.get(filename) | |
|
497 | if entry is None: | |
|
498 | return False | |
|
499 | elif entry.added: | |
|
500 | self._drop(filename) | |
|
501 | return True | |
|
502 | else: | |
|
503 | self._remove(filename) | |
|
504 | return True | |
|
505 | ||
|
506 | @requires_no_parents_change | |
|
507 | def set_clean(self, filename, parentfiledata=None): | |
|
508 | """record that the current state of the file on disk is known to be clean""" | |
|
509 | self._dirty = True | |
|
510 | self._updatedfiles.add(filename) | |
|
511 | self._normal(filename, parentfiledata=parentfiledata) | |
|
512 | ||
|
513 | @requires_no_parents_change | |
|
514 | def set_possibly_dirty(self, filename): | |
|
515 | """record that the current state of the file on disk is unknown""" | |
|
516 | self._dirty = True | |
|
517 | self._updatedfiles.add(filename) | |
|
518 | self._map.set_possibly_dirty(filename) | |
|
519 | ||
|
520 | @requires_parents_change | |
|
521 | def update_file_p1( | |
|
522 | self, | |
|
523 | filename, | |
|
524 | p1_tracked, | |
|
525 | ): | |
|
526 | """Set a file as tracked in the parent (or not) | |
|
527 | ||
|
528 | This is to be called when adjusting the dirstate to a new parent after a history |
|
529 | rewriting operation. | |
|
530 | ||
|
531 | It should not be called during a merge (p2 != nullid) and only within | |
|
532 | a `with dirstate.parentchange():` context. | |
|
533 | """ | |
|
534 | if self.in_merge: | |
|
535 | msg = b'update_file_reference should not be called when merging' | |
|
536 | raise error.ProgrammingError(msg) | |
|
537 | entry = self._map.get(filename) | |
|
538 | if entry is None: | |
|
539 | wc_tracked = False | |
|
540 | else: | |
|
541 | wc_tracked = entry.tracked | |
|
542 | possibly_dirty = False | |
|
543 | if p1_tracked and wc_tracked: | |
|
544 | # the underlying reference might have changed, we will have to | |
|
545 | # check it. | |
|
546 | possibly_dirty = True | |
|
547 | elif not (p1_tracked or wc_tracked): | |
|
548 | # the file is no longer relevant to anyone | |
|
549 | self._drop(filename) | |
|
550 | elif (not p1_tracked) and wc_tracked: | |
|
551 | if entry is not None and entry.added: | |
|
552 | return # avoid dropping copy information (maybe?) | |
|
553 | elif p1_tracked and not wc_tracked: | |
|
554 | pass | |
|
555 | else: | |
|
556 | assert False, 'unreachable' | |
|
557 | ||
|
558 | # this means we are being called for a file whose data we do not |
|
559 | # really care about (eg: added or removed); however this should be a |
|
560 | # minor overhead compared to the overall update process calling this. |
|
561 | parentfiledata = None | |
|
562 | if wc_tracked: | |
|
563 | parentfiledata = self._get_filedata(filename) | |
|
564 | ||
|
565 | self._updatedfiles.add(filename) | |
|
566 | self._map.reset_state( | |
|
567 | filename, | |
|
568 | wc_tracked, | |
|
569 | p1_tracked, | |
|
570 | possibly_dirty=possibly_dirty, | |
|
571 | parentfiledata=parentfiledata, | |
|
572 | ) | |
|
573 | if ( | |
|
574 | parentfiledata is not None | |
|
575 | and parentfiledata[2] > self._lastnormaltime | |
|
576 | ): | |
|
577 | # Remember the most recent modification timeslot for status(), | |
|
578 | # to make sure we won't miss future size-preserving file content | |
|
579 | # modifications that happen within the same timeslot. | |
|
580 | self._lastnormaltime = parentfiledata[2] | |
|
581 | ||
|
582 | @requires_parents_change | |
|
583 | def update_file( | |
|
584 | self, | |
|
585 | filename, | |
|
586 | wc_tracked, | |
|
587 | p1_tracked, | |
|
588 | p2_tracked=False, | |
|
589 | merged=False, | |
|
590 | clean_p1=False, | |
|
591 | clean_p2=False, | |
|
592 | possibly_dirty=False, | |
|
593 | parentfiledata=None, | |
|
594 | ): | |
|
595 | """update the information about a file in the dirstate | |
|
596 | ||
|
597 | This is to be called when the dirstate's parent changes to keep track |
|
598 | of the file's situation in regard to the working copy and its parent. |
|
599 | ||
|
600 | This function must be called within a `dirstate.parentchange` context. | |
|
601 | ||
|
602 | note: the API is at an early stage and we might need to adjust it |
|
603 | depending on what information ends up being relevant and useful to |
|
604 | other processing. | |
|
605 | """ | |
|
606 | if merged and (clean_p1 or clean_p2): | |
|
607 | msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`' | |
|
608 | raise error.ProgrammingError(msg) | |
|
609 | ||
|
610 | # note: I do not think we need to double check name clash here since we | |
|
611 | # are in a update/merge case that should already have taken care of | |
|
612 | # this. The test agrees | |
|
613 | ||
|
614 | self._dirty = True | |
|
615 | self._updatedfiles.add(filename) | |
|
616 | ||
|
617 | need_parent_file_data = ( | |
|
618 | not (possibly_dirty or clean_p2 or merged) | |
|
619 | and wc_tracked | |
|
620 | and p1_tracked | |
|
621 | ) | |
|
622 | ||
|
623 | # this means we are calling this for a file whose data we do not really | |
|
624 | # care about (e.g. added or removed); however, this should be a minor | |
|
625 | # overhead compared to the overall update process calling this. | |
|
626 | if need_parent_file_data: | |
|
627 | if parentfiledata is None: | |
|
628 | parentfiledata = self._get_filedata(filename) | |
|
629 | mtime = parentfiledata[2] | |
|
630 | ||
|
631 | if mtime > self._lastnormaltime: | |
|
632 | # Remember the most recent modification timeslot for | |
|
633 | # status(), to make sure we won't miss future | |
|
634 | # size-preserving file content modifications that happen | |
|
635 | # within the same timeslot. | |
|
636 | self._lastnormaltime = mtime | |
|
637 | ||
|
638 | self._map.reset_state( | |
|
639 | filename, | |
|
640 | wc_tracked, | |
|
641 | p1_tracked, | |
|
642 | p2_tracked=p2_tracked, | |
|
643 | merged=merged, | |
|
644 | clean_p1=clean_p1, | |
|
645 | clean_p2=clean_p2, | |
|
646 | possibly_dirty=possibly_dirty, | |
|
647 | parentfiledata=parentfiledata, | |
|
648 | ) | |
|
649 | if ( | |
|
650 | parentfiledata is not None | |
|
651 | and parentfiledata[2] > self._lastnormaltime | |
|
652 | ): | |
|
653 | # Remember the most recent modification timeslot for status(), | |
|
654 | # to make sure we won't miss future size-preserving file content | |
|
655 | # modifications that happen within the same timeslot. | |
|
656 | self._lastnormaltime = parentfiledata[2] | |
|
657 | ||
|
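A hedged sketch of a merge-time call, inferred only from the signature and the check above (the flag names are real, the scenario is illustrative): `merged` describes a file merged from both parents and cannot be combined with `clean_p1`/`clean_p2`; `parentfiledata` may be omitted and is then read from disk when needed.

    # hypothetical caller inside an update/merge
    with repo.dirstate.parentchange():
        repo.dirstate.update_file(
            f,
            wc_tracked=True,
            p1_tracked=True,
            p2_tracked=True,
            merged=True,  # incompatible with clean_p1/clean_p2
        )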
658 | def _addpath( | |
|
659 | self, | |
|
660 | f, | |
|
661 | mode=0, | |
|
662 | size=None, | |
|
663 | mtime=None, | |
|
664 | added=False, | |
|
665 | merged=False, | |
|
666 | from_p2=False, | |
|
667 | possibly_dirty=False, | |
|
668 | ): | |
|
669 | entry = self._map.get(f) | |
|
670 | if added or entry is not None and entry.removed: | |
|
414 | 671 | scmutil.checkfilename(f) |
|
415 | 672 | if self._map.hastrackeddir(f): |
|
416 | raise error.Abort( | |
|
417 | _(b'directory %r already in dirstate') % pycompat.bytestr(f) | |
|
418 | ) | |
|
673 | msg = _(b'directory %r already in dirstate') | |
|
674 | msg %= pycompat.bytestr(f) | |
|
675 | raise error.Abort(msg) | |
|
419 | 676 | # shadows |
|
420 | 677 | for d in pathutil.finddirs(f): |
|
421 | 678 | if self._map.hastrackeddir(d): |
|
422 | 679 | break |
|
423 | 680 | entry = self._map.get(d) |
|
424 | if entry is not None and entry[0] != b'r': | |
|
425 | raise error.Abort( | |
|
426 | _(b'file %r in dirstate clashes with %r') | |
|
427 | % (pycompat.bytestr(d), pycompat.bytestr(f)) | |
|
428 | ) | |
|
681 | if entry is not None and not entry.removed: | |
|
682 | msg = _(b'file %r in dirstate clashes with %r') | |
|
683 | msg %= (pycompat.bytestr(d), pycompat.bytestr(f)) | |
|
684 | raise error.Abort(msg) | |
|
429 | 685 | self._dirty = True |
|
430 | 686 | self._updatedfiles.add(f) |
|
431 | self._map.addfile(f, oldstate, state, mode, size, mtime) | |
|
687 | self._map.addfile( | |
|
688 | f, | |
|
689 | mode=mode, | |
|
690 | size=size, | |
|
691 | mtime=mtime, | |
|
692 | added=added, | |
|
693 | merged=merged, | |
|
694 | from_p2=from_p2, | |
|
695 | possibly_dirty=possibly_dirty, | |
|
696 | ) | |
|
697 | ||
|
698 | def _get_filedata(self, filename): | |
|
699 | """return the (mode, size, mtime) of the file, read from the filesystem""" | |
|
700 | s = os.lstat(self._join(filename)) | |
|
701 | mode = s.st_mode | |
|
702 | size = s.st_size | |
|
703 | mtime = s[stat.ST_MTIME] | |
|
704 | return (mode, size, mtime) | |
|
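`_get_filedata` is a thin lstat wrapper; a self-contained equivalent using only the standard library (with a hypothetical absolute `path`):

    import os
    import stat

    def get_filedata(path):
        # same (mode, size, mtime) triple as _get_filedata above
        s = os.lstat(path)
        return (s.st_mode, s.st_size, s[stat.ST_MTIME])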
432 | 705 | |
|
433 | 706 | def normal(self, f, parentfiledata=None): |
|
434 | 707 | """Mark a file normal and clean. |
@@ -440,14 +713,28 b' class dirstate(object):' | |||
|
440 | 713 | determined the file was clean, to limit the risk of the |
|
441 | 714 | file having been changed by an external process between the |
|
442 | 715 | moment where the file was determined to be clean and now.""" |
|
716 | if self.pendingparentchange(): | |
|
717 | util.nouideprecwarn( | |
|
718 | b"do not use `normal` inside of update/merge context." | |
|
719 | b" Use `update_file` or `update_file_p1`", | |
|
720 | b'6.0', | |
|
721 | stacklevel=2, | |
|
722 | ) | |
|
723 | else: | |
|
724 | util.nouideprecwarn( | |
|
725 | b"do not use `normal` outside of update/merge context." | |
|
726 | b" Use `set_tracked`", | |
|
727 | b'6.0', | |
|
728 | stacklevel=2, | |
|
729 | ) | |
|
730 | self._normal(f, parentfiledata=parentfiledata) | |
|
731 | ||
|
732 | def _normal(self, f, parentfiledata=None): | |
|
443 | 733 | if parentfiledata: |
|
444 | 734 | (mode, size, mtime) = parentfiledata |
|
445 | 735 | else: |
|
446 | s = os.lstat(self._join(f)) | |
|
447 | mode = s.st_mode | |
|
448 | size = s.st_size | |
|
449 | mtime = s[stat.ST_MTIME] | |
|
450 | self._addpath(f, b'n', mode, size & _rangemask, mtime & _rangemask) | |
|
736 | (mode, size, mtime) = self._get_filedata(f) | |
|
737 | self._addpath(f, mode=mode, size=size, mtime=mtime) | |
|
451 | 738 | self._map.copymap.pop(f, None) |
|
452 | 739 | if f in self._map.nonnormalset: |
|
453 | 740 | self._map.nonnormalset.remove(f) |
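Read together, the two warnings spell out the migration path for callers of `normal()`. A hedged sketch, using the replacement APIs the messages name:

    # hypothetical caller migrating off dirstate.normal()
    if repo.dirstate.pendingparentchange():
        # inside an update/merge: record the file's situation explicitly
        repo.dirstate.update_file(f, wc_tracked=True, p1_tracked=True)
    else:
        # outside an update/merge: only flip the tracking status
        repo.dirstate.set_tracked(f)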
@@ -459,77 +746,171 b' class dirstate(object):' | |||
|
459 | 746 | |
|
460 | 747 | def normallookup(self, f): |
|
461 | 748 | '''Mark a file normal, but possibly dirty.''' |
|
462 | if self._pl[1] != nullid: | |
|
749 | if self.pendingparentchange(): | |
|
750 | util.nouideprecwarn( | |
|
751 | b"do not use `normallookup` inside of update/merge context." | |
|
752 | b" Use `update_file` or `update_file_p1`", | |
|
753 | b'6.0', | |
|
754 | stacklevel=2, | |
|
755 | ) | |
|
756 | else: | |
|
757 | util.nouideprecwarn( | |
|
758 | b"do not use `normallookup` outside of update/merge context." | |
|
759 | b" Use `set_possibly_dirty` or `set_tracked`", | |
|
760 | b'6.0', | |
|
761 | stacklevel=2, | |
|
762 | ) | |
|
763 | self._normallookup(f) | |
|
764 | ||
|
765 | def _normallookup(self, f): | |
|
766 | '''Mark a file normal, but possibly dirty.''' | |
|
767 | if self.in_merge: | |
|
463 | 768 | # if there is a merge going on and the file was either |
|
464 | # in state 'm' (-1) or coming from other parent (-2) before | |
|
769 | # "merged" or coming from other parent (-2) before | |
|
465 | 770 | # being removed, restore that state. |
|
466 | 771 | entry = self._map.get(f) |
|
467 | 772 | if entry is not None: |
|
468 | if entry[0] == b'r' and entry[2] in (-1, -2): | |
|
773 | # XXX this should probably be dealt with at a lower level | |
|
774 | # (see `merged_removed` and `from_p2_removed`) | |
|
775 | if entry.merged_removed or entry.from_p2_removed: | |
|
469 | 776 | source = self._map.copymap.get(f) |
|
470 | if entry[2] == -1: | |
|
471 | self.merge(f) | |
|
472 | elif entry[2] == -2: | |
|
473 | self.otherparent(f) | |
|
474 | if source: | |
|
777 | if entry.merged_removed: | |
|
778 | self._merge(f) | |
|
779 | elif entry.from_p2_removed: | |
|
780 | self._otherparent(f) | |
|
781 | if source is not None: | |
|
475 | 782 | self.copy(source, f) |
|
476 | 783 | return |
|
477 | if entry[0] == b'm' or entry[0] == b'n' and entry[2] == -2: | |
|
784 | elif entry.merged or entry.from_p2: | |
|
478 | 785 | return |
|
479 | self._addpath(f, b'n', 0, -1, -1) | |
|
786 | self._addpath(f, possibly_dirty=True) | |
|
480 | 787 | self._map.copymap.pop(f, None) |
|
481 | 788 | |
|
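The hunk above is a compact summary of the tuple-to-DirstateItem translation this patch performs throughout the file. A sketch of the mapping, with the old tuple encodings in the comments:

    # DirstateItem property names are from this patch; the tuple forms
    # are the old encoding they replace
    def classify(entry):
        if entry.merged_removed:   # was entry[0] == b'r' and entry[2] == -1
            return 'removed after a merge'
        if entry.from_p2_removed:  # was entry[0] == b'r' and entry[2] == -2
            return 'removed, was from p2'
        if entry.merged:           # was entry[0] == b'm'
            return 'merged'
        if entry.from_p2:          # was entry[0] == b'n' and entry[2] == -2
            return 'from the other parent'
        return 'other'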
482 | 789 | def otherparent(self, f): |
|
483 | 790 | '''Mark as coming from the other parent, always dirty.''' |
|
484 | if self._pl[1] == nullid: | |
|
485 | raise error.Abort( | |
|
486 | _(b"setting %r to other parent only allowed in merges") % f | |
|
791 | if self.pendingparentchange(): | |
|
792 | util.nouideprecwarn( | |
|
793 | b"do not use `otherparent` inside of update/merge context." | |
|
794 | b" Use `update_file` or `update_file_p1`", | |
|
795 | b'6.0', | |
|
796 | stacklevel=2, | |
|
487 | 797 | ) |
|
488 | if f in self and self[f] == b'n': | |
|
798 | else: | |
|
799 | util.nouideprecwarn( | |
|
800 | b"do not use `otherparent` outside of update/merge context." | |
|
801 | b" It should have been set by the update/merge code", | |
|
802 | b'6.0', | |
|
803 | stacklevel=2, | |
|
804 | ) | |
|
805 | self._otherparent(f) | |
|
806 | ||
|
807 | def _otherparent(self, f): | |
|
808 | if not self.in_merge: | |
|
809 | msg = _(b"setting %r to other parent only allowed in merges") % f | |
|
810 | raise error.Abort(msg) | |
|
811 | entry = self._map.get(f) | |
|
812 | if entry is not None and entry.tracked: | |
|
489 | 813 | # merge-like |
|
490 | self._addpath(f, b'm', 0, -2, -1) | |
|
814 | self._addpath(f, merged=True) | |
|
491 | 815 | else: |
|
492 | 816 | # add-like |
|
493 | self._addpath(f, b'n', 0, -2, -1) | |
|
817 | self._addpath(f, from_p2=True) | |
|
494 | 818 | self._map.copymap.pop(f, None) |
|
495 | 819 | |
|
496 | 820 | def add(self, f): |
|
497 | 821 | '''Mark a file added.''' |
|
498 | self._addpath(f, b'a', 0, -1, -1) | |
|
499 | self._map.copymap.pop(f, None) | |
|
822 | if self.pendingparentchange(): | |
|
823 | util.nouideprecwarn( | |
|
824 | b"do not use `add` inside of update/merge context." | |
|
825 | b" Use `update_file`", | |
|
826 | b'6.0', | |
|
827 | stacklevel=2, | |
|
828 | ) | |
|
829 | else: | |
|
830 | util.nouideprecwarn( | |
|
831 | b"do not use `add` outside of update/merge context." | |
|
832 | b" Use `set_tracked`", | |
|
833 | b'6.0', | |
|
834 | stacklevel=2, | |
|
835 | ) | |
|
836 | self._add(f) | |
|
837 | ||
|
838 | def _add(self, filename): | |
|
839 | """internal function to mark a file as added""" | |
|
840 | self._addpath(filename, added=True) | |
|
841 | self._map.copymap.pop(filename, None) | |
|
500 | 842 | |
|
501 | 843 | def remove(self, f): |
|
502 | '''Mark a file removed.''' | |
|
844 | '''Mark a file removed''' | |
|
845 | if self.pendingparentchange(): | |
|
846 | util.nouideprecwarn( | |
|
847 | b"do not use `remove` inside of update/merge context." | |
|
848 | b" Use `update_file` or `update_file_p1`", | |
|
849 | b'6.0', | |
|
850 | stacklevel=2, | |
|
851 | ) | |
|
852 | else: | |
|
853 | util.nouideprecwarn( | |
|
854 | b"do not use `remove` outside of update/merge context." | |
|
855 | b" Use `set_untracked`", | |
|
856 | b'6.0', | |
|
857 | stacklevel=2, | |
|
858 | ) | |
|
859 | self._remove(f) | |
|
860 | ||
|
861 | def _remove(self, filename): | |
|
862 | """internal function to mark a file removed""" | |
|
503 | 863 | self._dirty = True |
|
504 | oldstate = self[f] | |
|
505 | size = 0 | |
|
506 | if self._pl[1] != nullid: | |
|
507 | entry = self._map.get(f) | |
|
508 | if entry is not None: | |
|
509 | # backup the previous state | |
|
510 | if entry[0] == b'm': # merge | |
|
511 | size = -1 | |
|
512 | elif entry[0] == b'n' and entry[2] == -2: # other parent | |
|
513 | size = -2 | |
|
514 | self._map.otherparentset.add(f) | |
|
515 | self._updatedfiles.add(f) | |
|
516 | self._map.removefile(f, oldstate, size) | |
|
517 | if size == 0: | |
|
518 | self._map.copymap.pop(f, None) | |
|
864 | self._updatedfiles.add(filename) | |
|
865 | self._map.removefile(filename, in_merge=self.in_merge) | |
|
519 | 866 | |
|
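The removed body of `remove()` shows what `in_merge=self.in_merge` now stands in for: the old code encoded the pre-removal state in a sentinel `size` that `removefile()` then had to interpret. As a reference table:

    # sentinel sizes stored by the old remove() (see the removed lines)
    OLD_REMOVE_SENTINELS = {
        -1: 'was merged before removal',        # entry[0] == b'm'
        -2: 'came from the other parent (p2)',  # entry[0] == b'n', entry[2] == -2
        0: 'plain removal; copy data dropped',
    }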
520 | 867 | def merge(self, f): |
|
521 | 868 | '''Mark a file merged.''' |
|
522 | if self._pl[1] == nullid: | |
|
523 | return self.normallookup(f) | |
|
524 | return self.otherparent(f) | |
|
869 | if self.pendingparentchange(): | |
|
870 | util.nouideprecwarn( | |
|
871 | b"do not use `merge` inside of update/merge context." | |
|
872 | b" Use `update_file`", | |
|
873 | b'6.0', | |
|
874 | stacklevel=2, | |
|
875 | ) | |
|
876 | else: | |
|
877 | util.nouideprecwarn( | |
|
878 | b"do not use `merge` outside of update/merge context." | |
|
879 | b" It should have been set by the update/merge code", | |
|
880 | b'6.0', | |
|
881 | stacklevel=2, | |
|
882 | ) | |
|
883 | self._merge(f) | |
|
884 | ||
|
885 | def _merge(self, f): | |
|
886 | if not self.in_merge: | |
|
887 | return self._normallookup(f) | |
|
888 | return self._otherparent(f) | |
|
525 | 889 | |
|
526 | 890 | def drop(self, f): |
|
527 | 891 | '''Drop a file from the dirstate''' |
|
528 | oldstate = self[f] | |
|
529 | if self._map.dropfile(f, oldstate): | |
|
892 | if self.pendingparentchange(): | |
|
893 | util.nouideprecwarn( | |
|
894 | b"do not use `drop` inside of update/merge context." | |
|
895 | b" Use `update_file`", | |
|
896 | b'6.0', | |
|
897 | stacklevel=2, | |
|
898 | ) | |
|
899 | else: | |
|
900 | util.nouideprecwarn( | |
|
901 | b"do not use `drop` outside of update/merge context." | |
|
902 | b" Use `set_untracked`", | |
|
903 | b'6.0', | |
|
904 | stacklevel=2, | |
|
905 | ) | |
|
906 | self._drop(f) | |
|
907 | ||
|
908 | def _drop(self, filename): | |
|
909 | """internal function to drop a file from the dirstate""" | |
|
910 | if self._map.dropfile(filename): | |
|
530 | 911 | self._dirty = True |
|
531 | self._updatedfiles.add(f) | |
|
532 | self._map.copymap.pop(f, None) | |
|
912 | self._updatedfiles.add(filename) | |
|
913 | self._map.copymap.pop(filename, None) | |
|
533 | 914 | |
|
534 | 915 | def _discoverpath(self, path, normed, ignoremissing, exists, storemap): |
|
535 | 916 | if exists is None: |
@@ -638,12 +1019,12 b' class dirstate(object):' | |||
|
638 | 1019 | |
|
639 | 1020 | if self._origpl is None: |
|
640 | 1021 | self._origpl = self._pl |
|
641 | self._map.setparents(parent, nullid) | |
|
1022 | self._map.setparents(parent, self._nodeconstants.nullid) | |
|
642 | 1023 | |
|
643 | 1024 | for f in to_lookup: |
|
644 | self.normallookup(f) | |
|
1025 | self._normallookup(f) | |
|
645 | 1026 | for f in to_drop: |
|
646 | self.drop(f) | |
|
1027 | self._drop(f) | |
|
647 | 1028 | |
|
648 | 1029 | self._dirty = True |
|
649 | 1030 | |
@@ -679,13 +1060,13 b' class dirstate(object):' | |||
|
679 | 1060 | tr.addfilegenerator( |
|
680 | 1061 | b'dirstate', |
|
681 | 1062 | (self._filename,), |
|
682 | self._writedirstate, | |
|
1063 | lambda f: self._writedirstate(tr, f), | |
|
683 | 1064 | location=b'plain', |
|
684 | 1065 | ) |
|
685 | 1066 | return |
|
686 | 1067 | |
|
687 | 1068 | st = self._opener(filename, b"w", atomictemp=True, checkambig=True) |
|
688 | self._writedirstate(st) | |
|
1069 | self._writedirstate(tr, st) | |
|
689 | 1070 | |
|
690 | 1071 | def addparentchangecallback(self, category, callback): |
|
691 | 1072 | """add a callback to be called when the wd parents are changed |
@@ -698,7 +1079,7 b' class dirstate(object):' | |||
|
698 | 1079 | """ |
|
699 | 1080 | self._plchangecallbacks[category] = callback |
|
700 | 1081 | |
|
701 | def _writedirstate(self, st): | |
|
1082 | def _writedirstate(self, tr, st): | |
|
702 | 1083 | # notify callbacks about parents change |
|
703 | 1084 | if self._origpl is not None and self._origpl != self._pl: |
|
704 | 1085 | for c, callback in sorted( |
@@ -716,7 +1097,7 b' class dirstate(object):' | |||
|
716 | 1097 | if delaywrite > 0: |
|
717 | 1098 | # do we have any files to delay for? |
|
718 | 1099 | for f, e in pycompat.iteritems(self._map): |
|
719 | if e[0] == b'n' and e[3] == now: | |
|
1100 | if e.need_delay(now): | |
|
720 | 1101 | import time # to avoid useless import |
|
721 | 1102 | |
|
722 | 1103 | # rather than sleep n seconds, sleep until the next |
@@ -728,7 +1109,7 b' class dirstate(object):' | |||
|
728 | 1109 | now = end # trust our estimate that the end is near now |
|
729 | 1110 | break |
|
730 | 1111 | |
|
731 | self._map.write(st, now) | |
|
1112 | self._map.write(tr, st, now) | |
|
732 | 1113 | self._lastnormaltime = 0 |
|
733 | 1114 | self._dirty = False |
|
734 | 1115 | |
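The delaywrite logic above exists because an mtime equal to the write time is ambiguous: the file can still change within the same second without altering its size or mtime. A hedged sketch of the "sleep until the next timeslot" idea from the comment:

    import time

    clock = time.time()
    start = int(clock)       # the current, still-ambiguous timeslot
    end = start + 1          # the first unambiguous timeslot
    time.sleep(end - clock)  # sleep to the slot boundary, not n seconds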
@@ -1120,6 +1501,7 b' class dirstate(object):' | |||
|
1120 | 1501 | warnings, |
|
1121 | 1502 | bad, |
|
1122 | 1503 | traversed, |
|
1504 | dirty, | |
|
1123 | 1505 | ) = rustmod.status( |
|
1124 | 1506 | self._map._rustmap, |
|
1125 | 1507 | matcher, |
@@ -1133,6 +1515,8 b' class dirstate(object):' | |||
|
1133 | 1515 | bool(matcher.traversedir), |
|
1134 | 1516 | ) |
|
1135 | 1517 | |
|
1518 | self._dirty |= dirty | |
|
1519 | ||
|
1136 | 1520 | if matcher.traversedir: |
|
1137 | 1521 | for dir in traversed: |
|
1138 | 1522 | matcher.traversedir(dir) |
@@ -1267,21 +1651,26 b' class dirstate(object):' | |||
|
1267 | 1651 | # general. That is much slower than simply accessing and storing the |
|
1268 | 1652 | # tuple members one by one. |
|
1269 | 1653 | t = dget(fn) |
|
1270 | state = t[0] | |
|
1271 | mode = t[1] | |
|
1272 | size = t[2] | |
|
1273 | time = t[3] | |
|
1654 | mode = t.mode | |
|
1655 | size = t.size | |
|
1656 | time = t.mtime | |
|
1274 | 1657 | |
|
1275 | if not st and state in b"nma": | |
|
1658 | if not st and t.tracked: | |
|
1276 | 1659 | dadd(fn) |
|
1277 | elif state == b'n': | |
|
1660 | elif t.merged: | |
|
1661 | madd(fn) | |
|
1662 | elif t.added: | |
|
1663 | aadd(fn) | |
|
1664 | elif t.removed: | |
|
1665 | radd(fn) | |
|
1666 | elif t.tracked: | |
|
1278 | 1667 | if ( |
|
1279 | 1668 | size >= 0 |
|
1280 | 1669 | and ( |
|
1281 | 1670 | (size != st.st_size and size != st.st_size & _rangemask) |
|
1282 | 1671 | or ((mode ^ st.st_mode) & 0o100 and checkexec) |
|
1283 | 1672 | ) |
|
1284 | or size == -2 # other parent | |
|
1673 | or t.from_p2 | |
|
1285 | 1674 | or fn in copymap |
|
1286 | 1675 | ): |
|
1287 | 1676 | if stat.S_ISLNK(st.st_mode) and size != st.st_size: |
@@ -1303,12 +1692,6 b' class dirstate(object):' | |||
|
1303 | 1692 | ladd(fn) |
|
1304 | 1693 | elif listclean: |
|
1305 | 1694 | cadd(fn) |
|
1306 | elif state == b'm': | |
|
1307 | madd(fn) | |
|
1308 | elif state == b'a': | |
|
1309 | aadd(fn) | |
|
1310 | elif state == b'r': | |
|
1311 | radd(fn) | |
|
1312 | 1695 | status = scmutil.status( |
|
1313 | 1696 | modified, added, removed, deleted, unknown, ignored, clean |
|
1314 | 1697 | ) |
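For readers of this hunk, a hedged paraphrase of how the loop classifies a tracked file (`st` is the on-disk stat; `mode`, `size` and `time` come from the dirstate entry; `size_mode_mismatch` is a stand-in for the full condition above, not a real variable):

    # sketch of the decision order, not the verbatim code
    if size_mode_mismatch or t.from_p2 or fn in copymap:
        madd(fn)    # modified; copies and p2 files are always reported
    elif time != st[stat.ST_MTIME]:
        ladd(fn)    # unsure: same size but new mtime, re-read the content
    elif listclean:
        cadd(fn)    # clean, only collected when the caller asked for it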
@@ -1351,7 +1734,8 b' class dirstate(object):' | |||
|
1351 | 1734 | # output file will be used to create backup of dirstate at this point. |
|
1352 | 1735 | if self._dirty or not self._opener.exists(filename): |
|
1353 | 1736 | self._writedirstate( |
|
1354 | self._opener(filename, b"w", atomictemp=True, checkambig=True) | |
|
1737 | tr, | |
|
1738 | self._opener(filename, b"w", atomictemp=True, checkambig=True), | |
|
1355 | 1739 | ) |
|
1356 | 1740 | |
|
1357 | 1741 | if tr: |
@@ -1361,7 +1745,7 b' class dirstate(object):' | |||
|
1361 | 1745 | tr.addfilegenerator( |
|
1362 | 1746 | b'dirstate', |
|
1363 | 1747 | (self._filename,), |
|
1364 | self._writedirstate, | |
|
1748 | lambda f: self._writedirstate(tr, f), | |
|
1365 | 1749 | location=b'plain', |
|
1366 | 1750 | ) |
|
1367 | 1751 | |
@@ -1394,546 +1778,3 b' class dirstate(object):' | |||
|
1394 | 1778 | def clearbackup(self, tr, backupname): |
|
1395 | 1779 | '''Clear backup file''' |
|
1396 | 1780 | self._opener.unlink(backupname) |
|
1397 | ||
|
1398 | ||
|
1399 | class dirstatemap(object): | |
|
1400 | """Map encapsulating the dirstate's contents. | |
|
1401 | ||
|
1402 | The dirstate contains the following state: | |
|
1403 | ||
|
1404 | - `identity` is the identity of the dirstate file, which can be used to | |
|
1405 | detect when changes have occurred to the dirstate file. | |
|
1406 | ||
|
1407 | - `parents` is a pair containing the parents of the working copy. The | |
|
1408 | parents are updated by calling `setparents`. | |
|
1409 | ||
|
1410 | - the state map maps filenames to tuples of (state, mode, size, mtime), | |
|
1411 | where state is a single character representing 'normal', 'added', | |
|
1412 | 'removed', or 'merged'. It is read by treating the dirstate as a | |
|
1413 | dict. File state is updated by calling the `addfile`, `removefile` and | |
|
1414 | `dropfile` methods. | |
|
1415 | ||
|
1416 | - `copymap` maps destination filenames to their source filename. | |
|
1417 | ||
|
1418 | The dirstate also provides the following views onto the state: | |
|
1419 | ||
|
1420 | - `nonnormalset` is a set of the filenames that have state other | |
|
1421 | than 'normal', or are normal but have an mtime of -1 ('normallookup'). | |
|
1422 | ||
|
1423 | - `otherparentset` is a set of the filenames that are marked as coming | |
|
1424 | from the second parent when the dirstate is currently being merged. | |
|
1425 | ||
|
1426 | - `filefoldmap` is a dict mapping normalized filenames to the denormalized | |
|
1427 | form that they appear as in the dirstate. | |
|
1428 | ||
|
1429 | - `dirfoldmap` is a dict mapping normalized directory names to the | |
|
1430 | denormalized form that they appear as in the dirstate. | |
|
1431 | """ | |
|
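The docstring's tuple encoding in concrete terms (a hedged example; the file name and values are illustrative):

    # reading one entry from the pure-Python map described above
    state, mode, size, mtime = dmap[b'some/file']
    # e.g. (b'n', 0o100644, 1024, 1630000000); a normal entry whose
    # mtime is -1 is the "normallookup" case described above
    is_nonnormal = state != b'n' or mtime == -1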
1432 | ||
|
1433 | def __init__(self, ui, opener, root, nodeconstants): | |
|
1434 | self._ui = ui | |
|
1435 | self._opener = opener | |
|
1436 | self._root = root | |
|
1437 | self._filename = b'dirstate' | |
|
1438 | self._nodelen = 20 | |
|
1439 | self._nodeconstants = nodeconstants | |
|
1440 | ||
|
1441 | self._parents = None | |
|
1442 | self._dirtyparents = False | |
|
1443 | ||
|
1444 | # for consistent view between _pl() and _read() invocations | |
|
1445 | self._pendingmode = None | |
|
1446 | ||
|
1447 | @propertycache | |
|
1448 | def _map(self): | |
|
1449 | self._map = {} | |
|
1450 | self.read() | |
|
1451 | return self._map | |
|
1452 | ||
|
1453 | @propertycache | |
|
1454 | def copymap(self): | |
|
1455 | self.copymap = {} | |
|
1456 | self._map | |
|
1457 | return self.copymap | |
|
1458 | ||
|
1459 | def clear(self): | |
|
1460 | self._map.clear() | |
|
1461 | self.copymap.clear() | |
|
1462 | self.setparents(nullid, nullid) | |
|
1463 | util.clearcachedproperty(self, b"_dirs") | |
|
1464 | util.clearcachedproperty(self, b"_alldirs") | |
|
1465 | util.clearcachedproperty(self, b"filefoldmap") | |
|
1466 | util.clearcachedproperty(self, b"dirfoldmap") | |
|
1467 | util.clearcachedproperty(self, b"nonnormalset") | |
|
1468 | util.clearcachedproperty(self, b"otherparentset") | |
|
1469 | ||
|
1470 | def items(self): | |
|
1471 | return pycompat.iteritems(self._map) | |
|
1472 | ||
|
1473 | # forward for python2,3 compat | |
|
1474 | iteritems = items | |
|
1475 | ||
|
1476 | def __len__(self): | |
|
1477 | return len(self._map) | |
|
1478 | ||
|
1479 | def __iter__(self): | |
|
1480 | return iter(self._map) | |
|
1481 | ||
|
1482 | def get(self, key, default=None): | |
|
1483 | return self._map.get(key, default) | |
|
1484 | ||
|
1485 | def __contains__(self, key): | |
|
1486 | return key in self._map | |
|
1487 | ||
|
1488 | def __getitem__(self, key): | |
|
1489 | return self._map[key] | |
|
1490 | ||
|
1491 | def keys(self): | |
|
1492 | return self._map.keys() | |
|
1493 | ||
|
1494 | def preload(self): | |
|
1495 | """Loads the underlying data, if it's not already loaded""" | |
|
1496 | self._map | |
|
1497 | ||
|
1498 | def addfile(self, f, oldstate, state, mode, size, mtime): | |
|
1499 | """Add a tracked file to the dirstate.""" | |
|
1500 | if oldstate in b"?r" and "_dirs" in self.__dict__: | |
|
1501 | self._dirs.addpath(f) | |
|
1502 | if oldstate == b"?" and "_alldirs" in self.__dict__: | |
|
1503 | self._alldirs.addpath(f) | |
|
1504 | self._map[f] = dirstatetuple(state, mode, size, mtime) | |
|
1505 | if state != b'n' or mtime == -1: | |
|
1506 | self.nonnormalset.add(f) | |
|
1507 | if size == -2: | |
|
1508 | self.otherparentset.add(f) | |
|
1509 | ||
|
1510 | def removefile(self, f, oldstate, size): | |
|
1511 | """ | |
|
1512 | Mark a file as removed in the dirstate. | |
|
1513 | ||
|
1514 | The `size` parameter is used to store sentinel values that indicate | |
|
1515 | the file's previous state. In the future, we should refactor this | |
|
1516 | to be more explicit about what that state is. | |
|
1517 | """ | |
|
1518 | if oldstate not in b"?r" and "_dirs" in self.__dict__: | |
|
1519 | self._dirs.delpath(f) | |
|
1520 | if oldstate == b"?" and "_alldirs" in self.__dict__: | |
|
1521 | self._alldirs.addpath(f) | |
|
1522 | if "filefoldmap" in self.__dict__: | |
|
1523 | normed = util.normcase(f) | |
|
1524 | self.filefoldmap.pop(normed, None) | |
|
1525 | self._map[f] = dirstatetuple(b'r', 0, size, 0) | |
|
1526 | self.nonnormalset.add(f) | |
|
1527 | ||
|
1528 | def dropfile(self, f, oldstate): | |
|
1529 | """ | |
|
1530 | Remove a file from the dirstate. Returns True if the file was | |
|
1531 | previously recorded. | |
|
1532 | """ | |
|
1533 | exists = self._map.pop(f, None) is not None | |
|
1534 | if exists: | |
|
1535 | if oldstate != b"r" and "_dirs" in self.__dict__: | |
|
1536 | self._dirs.delpath(f) | |
|
1537 | if "_alldirs" in self.__dict__: | |
|
1538 | self._alldirs.delpath(f) | |
|
1539 | if "filefoldmap" in self.__dict__: | |
|
1540 | normed = util.normcase(f) | |
|
1541 | self.filefoldmap.pop(normed, None) | |
|
1542 | self.nonnormalset.discard(f) | |
|
1543 | return exists | |
|
1544 | ||
|
1545 | def clearambiguoustimes(self, files, now): | |
|
1546 | for f in files: | |
|
1547 | e = self.get(f) | |
|
1548 | if e is not None and e[0] == b'n' and e[3] == now: | |
|
1549 | self._map[f] = dirstatetuple(e[0], e[1], e[2], -1) | |
|
1550 | self.nonnormalset.add(f) | |
|
1551 | ||
|
1552 | def nonnormalentries(self): | |
|
1553 | '''Compute the nonnormal dirstate entries from the dmap''' | |
|
1554 | try: | |
|
1555 | return parsers.nonnormalotherparententries(self._map) | |
|
1556 | except AttributeError: | |
|
1557 | nonnorm = set() | |
|
1558 | otherparent = set() | |
|
1559 | for fname, e in pycompat.iteritems(self._map): | |
|
1560 | if e[0] != b'n' or e[3] == -1: | |
|
1561 | nonnorm.add(fname) | |
|
1562 | if e[0] == b'n' and e[2] == -2: | |
|
1563 | otherparent.add(fname) | |
|
1564 | return nonnorm, otherparent | |
|
1565 | ||
|
1566 | @propertycache | |
|
1567 | def filefoldmap(self): | |
|
1568 | """Returns a dictionary mapping normalized case paths to their | |
|
1569 | non-normalized versions. | |
|
1570 | """ | |
|
1571 | try: | |
|
1572 | makefilefoldmap = parsers.make_file_foldmap | |
|
1573 | except AttributeError: | |
|
1574 | pass | |
|
1575 | else: | |
|
1576 | return makefilefoldmap( | |
|
1577 | self._map, util.normcasespec, util.normcasefallback | |
|
1578 | ) | |
|
1579 | ||
|
1580 | f = {} | |
|
1581 | normcase = util.normcase | |
|
1582 | for name, s in pycompat.iteritems(self._map): | |
|
1583 | if s[0] != b'r': | |
|
1584 | f[normcase(name)] = name | |
|
1585 | f[b'.'] = b'.' # prevents useless util.fspath() invocation | |
|
1586 | return f | |
|
1587 | ||
|
1588 | def hastrackeddir(self, d): | |
|
1589 | """ | |
|
1590 | Returns True if the dirstate contains a tracked (not removed) file | |
|
1591 | in this directory. | |
|
1592 | """ | |
|
1593 | return d in self._dirs | |
|
1594 | ||
|
1595 | def hasdir(self, d): | |
|
1596 | """ | |
|
1597 | Returns True if the dirstate contains a file (tracked or removed) | |
|
1598 | in this directory. | |
|
1599 | """ | |
|
1600 | return d in self._alldirs | |
|
1601 | ||
|
1602 | @propertycache | |
|
1603 | def _dirs(self): | |
|
1604 | return pathutil.dirs(self._map, b'r') | |
|
1605 | ||
|
1606 | @propertycache | |
|
1607 | def _alldirs(self): | |
|
1608 | return pathutil.dirs(self._map) | |
|
1609 | ||
|
1610 | def _opendirstatefile(self): | |
|
1611 | fp, mode = txnutil.trypending(self._root, self._opener, self._filename) | |
|
1612 | if self._pendingmode is not None and self._pendingmode != mode: | |
|
1613 | fp.close() | |
|
1614 | raise error.Abort( | |
|
1615 | _(b'working directory state may be changed parallelly') | |
|
1616 | ) | |
|
1617 | self._pendingmode = mode | |
|
1618 | return fp | |
|
1619 | ||
|
1620 | def parents(self): | |
|
1621 | if not self._parents: | |
|
1622 | try: | |
|
1623 | fp = self._opendirstatefile() | |
|
1624 | st = fp.read(2 * self._nodelen) | |
|
1625 | fp.close() | |
|
1626 | except IOError as err: | |
|
1627 | if err.errno != errno.ENOENT: | |
|
1628 | raise | |
|
1629 | # File doesn't exist, so the current state is empty | |
|
1630 | st = b'' | |
|
1631 | ||
|
1632 | l = len(st) | |
|
1633 | if l == self._nodelen * 2: | |
|
1634 | self._parents = ( | |
|
1635 | st[: self._nodelen], | |
|
1636 | st[self._nodelen : 2 * self._nodelen], | |
|
1637 | ) | |
|
1638 | elif l == 0: | |
|
1639 | self._parents = (nullid, nullid) | |
|
1640 | else: | |
|
1641 | raise error.Abort( | |
|
1642 | _(b'working directory state appears damaged!') | |
|
1643 | ) | |
|
1644 | ||
|
1645 | return self._parents | |
|
1646 | ||
|
1647 | def setparents(self, p1, p2): | |
|
1648 | self._parents = (p1, p2) | |
|
1649 | self._dirtyparents = True | |
|
1650 | ||
|
1651 | def read(self): | |
|
1652 | # ignore HG_PENDING because identity is used only for writing | |
|
1653 | self.identity = util.filestat.frompath( | |
|
1654 | self._opener.join(self._filename) | |
|
1655 | ) | |
|
1656 | ||
|
1657 | try: | |
|
1658 | fp = self._opendirstatefile() | |
|
1659 | try: | |
|
1660 | st = fp.read() | |
|
1661 | finally: | |
|
1662 | fp.close() | |
|
1663 | except IOError as err: | |
|
1664 | if err.errno != errno.ENOENT: | |
|
1665 | raise | |
|
1666 | return | |
|
1667 | if not st: | |
|
1668 | return | |
|
1669 | ||
|
1670 | if util.safehasattr(parsers, b'dict_new_presized'): | |
|
1671 | # Make an estimate of the number of files in the dirstate based on | |
|
1672 | # its size. This trades wasting some memory for avoiding costly | |
|
1673 | # resizes. Each entry has a prefix of 17 bytes followed by one or | |
|
1674 | # two path names. Studies on various large-scale real-world repositories | |
|
1675 | # found 54 bytes a reasonable upper limit for the average path names. | |
|
1676 | # Copy entries are ignored for the sake of this estimate. | |
|
1677 | self._map = parsers.dict_new_presized(len(st) // 71) | |
|
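The `// 71` above is simply the two byte counts from the comment added together:

    # 17-byte fixed prefix + ~54 bytes of average path name(s) per entry
    BYTES_PER_ENTRY = 17 + 54            # = 71
    estimated_entries = len(st) // BYTES_PER_ENTRY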
1678 | ||
|
1679 | # Python's garbage collector triggers a GC each time a certain number | |
|
1680 | # of container objects (the number being defined by | |
|
1681 | # gc.get_threshold()) are allocated. parse_dirstate creates a tuple | |
|
1682 | # for each file in the dirstate. The C version then immediately marks | |
|
1683 | # them as not to be tracked by the collector. However, this has no | |
|
1684 | # effect on when GCs are triggered, only on what objects the GC looks | |
|
1685 | # into. This means that O(number of files) GCs are unavoidable. | |
|
1686 | # Depending on when in the process's lifetime the dirstate is parsed, | |
|
1687 | # this can get very expensive. As a workaround, disable GC while | |
|
1688 | # parsing the dirstate. | |
|
1689 | # | |
|
1690 | # (we cannot decorate the function directly since it is in a C module) | |
|
1691 | parse_dirstate = util.nogc(parsers.parse_dirstate) | |
|
1692 | p = parse_dirstate(self._map, self.copymap, st) | |
|
1693 | if not self._dirtyparents: | |
|
1694 | self.setparents(*p) | |
|
1695 | ||
|
1696 | # Avoid excess attribute lookups by fast pathing certain checks | |
|
1697 | self.__contains__ = self._map.__contains__ | |
|
1698 | self.__getitem__ = self._map.__getitem__ | |
|
1699 | self.get = self._map.get | |
|
1700 | ||
|
1701 | def write(self, st, now): | |
|
1702 | st.write( | |
|
1703 | parsers.pack_dirstate(self._map, self.copymap, self.parents(), now) | |
|
1704 | ) | |
|
1705 | st.close() | |
|
1706 | self._dirtyparents = False | |
|
1707 | self.nonnormalset, self.otherparentset = self.nonnormalentries() | |
|
1708 | ||
|
1709 | @propertycache | |
|
1710 | def nonnormalset(self): | |
|
1711 | nonnorm, otherparents = self.nonnormalentries() | |
|
1712 | self.otherparentset = otherparents | |
|
1713 | return nonnorm | |
|
1714 | ||
|
1715 | @propertycache | |
|
1716 | def otherparentset(self): | |
|
1717 | nonnorm, otherparents = self.nonnormalentries() | |
|
1718 | self.nonnormalset = nonnorm | |
|
1719 | return otherparents | |
|
1720 | ||
|
1721 | @propertycache | |
|
1722 | def identity(self): | |
|
1723 | self._map | |
|
1724 | return self.identity | |
|
1725 | ||
|
1726 | @propertycache | |
|
1727 | def dirfoldmap(self): | |
|
1728 | f = {} | |
|
1729 | normcase = util.normcase | |
|
1730 | for name in self._dirs: | |
|
1731 | f[normcase(name)] = name | |
|
1732 | return f | |
|
1733 | ||
|
1734 | ||
|
1735 | if rustmod is not None: | |
|
1736 | ||
|
1737 | class dirstatemap(object): | |
|
1738 | def __init__(self, ui, opener, root, nodeconstants): | |
|
1739 | self._nodeconstants = nodeconstants | |
|
1740 | self._ui = ui | |
|
1741 | self._opener = opener | |
|
1742 | self._root = root | |
|
1743 | self._filename = b'dirstate' | |
|
1744 | self._parents = None | |
|
1745 | self._dirtyparents = False | |
|
1746 | ||
|
1747 | # for consistent view between _pl() and _read() invocations | |
|
1748 | self._pendingmode = None | |
|
1749 | ||
|
1750 | def addfile(self, *args, **kwargs): | |
|
1751 | return self._rustmap.addfile(*args, **kwargs) | |
|
1752 | ||
|
1753 | def removefile(self, *args, **kwargs): | |
|
1754 | return self._rustmap.removefile(*args, **kwargs) | |
|
1755 | ||
|
1756 | def dropfile(self, *args, **kwargs): | |
|
1757 | return self._rustmap.dropfile(*args, **kwargs) | |
|
1758 | ||
|
1759 | def clearambiguoustimes(self, *args, **kwargs): | |
|
1760 | return self._rustmap.clearambiguoustimes(*args, **kwargs) | |
|
1761 | ||
|
1762 | def nonnormalentries(self): | |
|
1763 | return self._rustmap.nonnormalentries() | |
|
1764 | ||
|
1765 | def get(self, *args, **kwargs): | |
|
1766 | return self._rustmap.get(*args, **kwargs) | |
|
1767 | ||
|
1768 | @propertycache | |
|
1769 | def _rustmap(self): | |
|
1770 | """ | |
|
1771 | Fills the Dirstatemap when called. | |
|
1772 | Use `self._inner_rustmap` if reading the dirstate is not necessary. | |
|
1773 | """ | |
|
1774 | self._rustmap = self._inner_rustmap | |
|
1775 | self.read() | |
|
1776 | return self._rustmap | |
|
1777 | ||
|
1778 | @propertycache | |
|
1779 | def _inner_rustmap(self): | |
|
1780 | """ | |
|
1781 | Does not fill the Dirstatemap when called. This allows for | |
|
1782 | optimizations where only setting/getting the parents is needed. | |
|
1783 | """ | |
|
1784 | self._inner_rustmap = rustmod.DirstateMap(self._root) | |
|
1785 | return self._inner_rustmap | |
|
1786 | ||
|
1787 | @property | |
|
1788 | def copymap(self): | |
|
1789 | return self._rustmap.copymap() | |
|
1790 | ||
|
1791 | def preload(self): | |
|
1792 | self._rustmap | |
|
1793 | ||
|
1794 | def clear(self): | |
|
1795 | self._rustmap.clear() | |
|
1796 | self._inner_rustmap.clear() | |
|
1797 | self.setparents(nullid, nullid) | |
|
1798 | util.clearcachedproperty(self, b"_dirs") | |
|
1799 | util.clearcachedproperty(self, b"_alldirs") | |
|
1800 | util.clearcachedproperty(self, b"dirfoldmap") | |
|
1801 | ||
|
1802 | def items(self): | |
|
1803 | return self._rustmap.items() | |
|
1804 | ||
|
1805 | def keys(self): | |
|
1806 | return iter(self._rustmap) | |
|
1807 | ||
|
1808 | def __contains__(self, key): | |
|
1809 | return key in self._rustmap | |
|
1810 | ||
|
1811 | def __getitem__(self, item): | |
|
1812 | return self._rustmap[item] | |
|
1813 | ||
|
1814 | def __len__(self): | |
|
1815 | return len(self._rustmap) | |
|
1816 | ||
|
1817 | def __iter__(self): | |
|
1818 | return iter(self._rustmap) | |
|
1819 | ||
|
1820 | # forward for python2,3 compat | |
|
1821 | iteritems = items | |
|
1822 | ||
|
1823 | def _opendirstatefile(self): | |
|
1824 | fp, mode = txnutil.trypending( | |
|
1825 | self._root, self._opener, self._filename | |
|
1826 | ) | |
|
1827 | if self._pendingmode is not None and self._pendingmode != mode: | |
|
1828 | fp.close() | |
|
1829 | raise error.Abort( | |
|
1830 | _(b'working directory state may be changed parallelly') | |
|
1831 | ) | |
|
1832 | self._pendingmode = mode | |
|
1833 | return fp | |
|
1834 | ||
|
1835 | def setparents(self, p1, p2): | |
|
1836 | self._rustmap.setparents(p1, p2) | |
|
1837 | self._parents = (p1, p2) | |
|
1838 | self._dirtyparents = True | |
|
1839 | ||
|
1840 | def parents(self): | |
|
1841 | if not self._parents: | |
|
1842 | try: | |
|
1843 | fp = self._opendirstatefile() | |
|
1844 | st = fp.read(40) | |
|
1845 | fp.close() | |
|
1846 | except IOError as err: | |
|
1847 | if err.errno != errno.ENOENT: | |
|
1848 | raise | |
|
1849 | # File doesn't exist, so the current state is empty | |
|
1850 | st = b'' | |
|
1851 | ||
|
1852 | try: | |
|
1853 | self._parents = self._inner_rustmap.parents(st) | |
|
1854 | except ValueError: | |
|
1855 | raise error.Abort( | |
|
1856 | _(b'working directory state appears damaged!') | |
|
1857 | ) | |
|
1858 | ||
|
1859 | return self._parents | |
|
1860 | ||
|
1861 | def read(self): | |
|
1862 | # ignore HG_PENDING because identity is used only for writing | |
|
1863 | self.identity = util.filestat.frompath( | |
|
1864 | self._opener.join(self._filename) | |
|
1865 | ) | |
|
1866 | ||
|
1867 | try: | |
|
1868 | fp = self._opendirstatefile() | |
|
1869 | try: | |
|
1870 | st = fp.read() | |
|
1871 | finally: | |
|
1872 | fp.close() | |
|
1873 | except IOError as err: | |
|
1874 | if err.errno != errno.ENOENT: | |
|
1875 | raise | |
|
1876 | return | |
|
1877 | if not st: | |
|
1878 | return | |
|
1879 | ||
|
1880 | parse_dirstate = util.nogc(self._rustmap.read) | |
|
1881 | parents = parse_dirstate(st) | |
|
1882 | if parents and not self._dirtyparents: | |
|
1883 | self.setparents(*parents) | |
|
1884 | ||
|
1885 | self.__contains__ = self._rustmap.__contains__ | |
|
1886 | self.__getitem__ = self._rustmap.__getitem__ | |
|
1887 | self.get = self._rustmap.get | |
|
1888 | ||
|
1889 | def write(self, st, now): | |
|
1890 | parents = self.parents() | |
|
1891 | st.write(self._rustmap.write(parents[0], parents[1], now)) | |
|
1892 | st.close() | |
|
1893 | self._dirtyparents = False | |
|
1894 | ||
|
1895 | @propertycache | |
|
1896 | def filefoldmap(self): | |
|
1897 | """Returns a dictionary mapping normalized case paths to their | |
|
1898 | non-normalized versions. | |
|
1899 | """ | |
|
1900 | return self._rustmap.filefoldmapasdict() | |
|
1901 | ||
|
1902 | def hastrackeddir(self, d): | |
|
1903 | self._dirs # Trigger Python's propertycache | |
|
1904 | return self._rustmap.hastrackeddir(d) | |
|
1905 | ||
|
1906 | def hasdir(self, d): | |
|
1907 | self._dirs # Trigger Python's propertycache | |
|
1908 | return self._rustmap.hasdir(d) | |
|
1909 | ||
|
1910 | @propertycache | |
|
1911 | def _dirs(self): | |
|
1912 | return self._rustmap.getdirs() | |
|
1913 | ||
|
1914 | @propertycache | |
|
1915 | def _alldirs(self): | |
|
1916 | return self._rustmap.getalldirs() | |
|
1917 | ||
|
1918 | @propertycache | |
|
1919 | def identity(self): | |
|
1920 | self._rustmap | |
|
1921 | return self.identity | |
|
1922 | ||
|
1923 | @property | |
|
1924 | def nonnormalset(self): | |
|
1925 | nonnorm = self._rustmap.non_normal_entries() | |
|
1926 | return nonnorm | |
|
1927 | ||
|
1928 | @propertycache | |
|
1929 | def otherparentset(self): | |
|
1930 | otherparents = self._rustmap.other_parent_entries() | |
|
1931 | return otherparents | |
|
1932 | ||
|
1933 | @propertycache | |
|
1934 | def dirfoldmap(self): | |
|
1935 | f = {} | |
|
1936 | normcase = util.normcase | |
|
1937 | for name in self._dirs: | |
|
1938 | f[normcase(name)] = name | |
|
1939 | return f |
@@ -1,1399 +1,45 b'' | |||
|
1 | # dirstate.py - working directory tracking for mercurial | |
|
2 | # | |
|
3 | # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com> | |
|
1 | # dirstatemap.py | |
|
4 | 2 | # |
|
5 | 3 | # This software may be used and distributed according to the terms of the |
|
6 | 4 | # GNU General Public License version 2 or any later version. |
|
7 | 5 | |
|
8 | 6 | from __future__ import absolute_import |
|
9 | 7 | |
|
10 | import collections | |
|
11 | import contextlib | |
|
12 | 8 | import errno |
|
13 | import os | |
|
14 | import stat | |
|
15 | 9 | |
|
16 | 10 | from .i18n import _ |
|
17 | from .node import nullid | |
|
18 | from .pycompat import delattr | |
|
19 | ||
|
20 | from hgdemandimport import tracing | |
|
21 | 11 | |
|
22 | 12 | from . import ( |
|
23 | encoding, | |
|
24 | 13 | error, |
|
25 | match as matchmod, | |
|
26 | 14 | pathutil, |
|
27 | 15 | policy, |
|
28 | 16 | pycompat, |
|
29 | scmutil, | |
|
30 | sparse, | |
|
31 | 17 | txnutil, |
|
32 | 18 | util, |
|
33 | 19 | ) |
|
34 | 20 | |
|
35 | from .interfaces import ( | |
|
36 | dirstate as intdirstate, | |
|
37 | util as interfaceutil, | |
|
21 | from .dirstateutils import ( | |
|
22 | docket as docketmod, | |
|
38 | 23 | ) |
|
39 | 24 | |
|
40 | 25 | parsers = policy.importmod('parsers') |
|
41 | 26 | rustmod = policy.importrust('dirstate') |
|
42 | 27 | |
|
43 | 28 | propertycache = util.propertycache |
|
44 | filecache = scmutil.filecache | |
|
45 | _rangemask = 0x7FFFFFFF | |
|
46 | 29 | |
|
47 | dirstatetuple = parsers.dirstatetuple | |
|
48 | ||
|
49 | ||
|
50 | class repocache(filecache): | |
|
51 | """filecache for files in .hg/""" | |
|
52 | ||
|
53 | def join(self, obj, fname): | |
|
54 | return obj._opener.join(fname) | |
|
55 | ||
|
56 | ||
|
57 | class rootcache(filecache): | |
|
58 | """filecache for files in the repository root""" | |
|
59 | ||
|
60 | def join(self, obj, fname): | |
|
61 | return obj._join(fname) | |
|
62 | ||
|
63 | ||
|
64 | def _getfsnow(vfs): | |
|
65 | '''Get "now" timestamp on filesystem''' | |
|
66 | tmpfd, tmpname = vfs.mkstemp() | |
|
67 | try: | |
|
68 | return os.fstat(tmpfd)[stat.ST_MTIME] | |
|
69 | finally: | |
|
70 | os.close(tmpfd) | |
|
71 | vfs.unlink(tmpname) | |
|
30 | DirstateItem = parsers.DirstateItem | |
|
72 | 31 | |
|
73 | 32 | |
|
74 | @interfaceutil.implementer(intdirstate.idirstate) | |
|
75 | class dirstate(object): | |
|
76 | def __init__( | |
|
77 | self, opener, ui, root, validate, sparsematchfn, nodeconstants | |
|
78 | ): | |
|
79 | """Create a new dirstate object. | |
|
80 | ||
|
81 | opener is an open()-like callable that can be used to open the | |
|
82 | dirstate file; root is the root of the directory tracked by | |
|
83 | the dirstate. | |
|
84 | """ | |
|
85 | self._nodeconstants = nodeconstants | |
|
86 | self._opener = opener | |
|
87 | self._validate = validate | |
|
88 | self._root = root | |
|
89 | self._sparsematchfn = sparsematchfn | |
|
90 | # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is | |
|
91 | # UNC path pointing to root share (issue4557) | |
|
92 | self._rootdir = pathutil.normasprefix(root) | |
|
93 | self._dirty = False | |
|
94 | self._lastnormaltime = 0 | |
|
95 | self._ui = ui | |
|
96 | self._filecache = {} | |
|
97 | self._parentwriters = 0 | |
|
98 | self._filename = b'dirstate' | |
|
99 | self._pendingfilename = b'%s.pending' % self._filename | |
|
100 | self._plchangecallbacks = {} | |
|
101 | self._origpl = None | |
|
102 | self._updatedfiles = set() | |
|
103 | self._mapcls = dirstatemap | |
|
104 | # Access and cache cwd early, so we don't access it for the first time | |
|
105 | # after a working-copy update caused it to not exist (accessing it then | |
|
106 | # raises an exception). | |
|
107 | self._cwd | |
|
108 | ||
|
109 | def prefetch_parents(self): | |
|
110 | """make sure the parents are loaded | |
|
111 | ||
|
112 | Used to avoid a race condition. | |
|
113 | """ | |
|
114 | self._pl | |
|
115 | ||
|
116 | @contextlib.contextmanager | |
|
117 | def parentchange(self): | |
|
118 | """Context manager for handling dirstate parents. | |
|
119 | ||
|
120 | If an exception occurs in the scope of the context manager, | |
|
121 | the incoherent dirstate won't be written when wlock is | |
|
122 | released. | |
|
123 | """ | |
|
124 | self._parentwriters += 1 | |
|
125 | yield | |
|
126 | # Typically we want the "undo" step of a context manager in a | |
|
127 | # finally block so it happens even when an exception | |
|
128 | # occurs. In this case, however, we only want to decrement | |
|
129 | # parentwriters if the code in the with statement exits | |
|
130 | # normally, so we don't have a try/finally here on purpose. | |
|
131 | self._parentwriters -= 1 | |
|
132 | ||
|
133 | def pendingparentchange(self): | |
|
134 | """Returns true if the dirstate is in the middle of a set of changes | |
|
135 | that modify the dirstate parent. | |
|
136 | """ | |
|
137 | return self._parentwriters > 0 | |
|
138 | ||
|
139 | @propertycache | |
|
140 | def _map(self): | |
|
141 | """Return the dirstate contents (see documentation for dirstatemap).""" | |
|
142 | self._map = self._mapcls( | |
|
143 | self._ui, self._opener, self._root, self._nodeconstants | |
|
144 | ) | |
|
145 | return self._map | |
|
146 | ||
|
147 | @property | |
|
148 | def _sparsematcher(self): | |
|
149 | """The matcher for the sparse checkout. | |
|
150 | ||
|
151 | The working directory may not include every file from a manifest. The | |
|
152 | matcher obtained by this property will match a path if it is to be | |
|
153 | included in the working directory. | |
|
154 | """ | |
|
155 | # TODO there is potential to cache this property. For now, the matcher | |
|
156 | # is resolved on every access. (But the called function does use a | |
|
157 | # cache to keep the lookup fast.) | |
|
158 | return self._sparsematchfn() | |
|
159 | ||
|
160 | @repocache(b'branch') | |
|
161 | def _branch(self): | |
|
162 | try: | |
|
163 | return self._opener.read(b"branch").strip() or b"default" | |
|
164 | except IOError as inst: | |
|
165 | if inst.errno != errno.ENOENT: | |
|
166 | raise | |
|
167 | return b"default" | |
|
168 | ||
|
169 | @property | |
|
170 | def _pl(self): | |
|
171 | return self._map.parents() | |
|
172 | ||
|
173 | def hasdir(self, d): | |
|
174 | return self._map.hastrackeddir(d) | |
|
175 | ||
|
176 | @rootcache(b'.hgignore') | |
|
177 | def _ignore(self): | |
|
178 | files = self._ignorefiles() | |
|
179 | if not files: | |
|
180 | return matchmod.never() | |
|
181 | ||
|
182 | pats = [b'include:%s' % f for f in files] | |
|
183 | return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn) | |
|
184 | ||
|
185 | @propertycache | |
|
186 | def _slash(self): | |
|
187 | return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/' | |
|
188 | ||
|
189 | @propertycache | |
|
190 | def _checklink(self): | |
|
191 | return util.checklink(self._root) | |
|
192 | ||
|
193 | @propertycache | |
|
194 | def _checkexec(self): | |
|
195 | return bool(util.checkexec(self._root)) | |
|
196 | ||
|
197 | @propertycache | |
|
198 | def _checkcase(self): | |
|
199 | return not util.fscasesensitive(self._join(b'.hg')) | |
|
200 | ||
|
201 | def _join(self, f): | |
|
202 | # much faster than os.path.join() | |
|
203 | # it's safe because f is always a relative path | |
|
204 | return self._rootdir + f | |
|
205 | ||
|
206 | def flagfunc(self, buildfallback): | |
|
207 | if self._checklink and self._checkexec: | |
|
208 | ||
|
209 | def f(x): | |
|
210 | try: | |
|
211 | st = os.lstat(self._join(x)) | |
|
212 | if util.statislink(st): | |
|
213 | return b'l' | |
|
214 | if util.statisexec(st): | |
|
215 | return b'x' | |
|
216 | except OSError: | |
|
217 | pass | |
|
218 | return b'' | |
|
219 | ||
|
220 | return f | |
|
221 | ||
|
222 | fallback = buildfallback() | |
|
223 | if self._checklink: | |
|
224 | ||
|
225 | def f(x): | |
|
226 | if os.path.islink(self._join(x)): | |
|
227 | return b'l' | |
|
228 | if b'x' in fallback(x): | |
|
229 | return b'x' | |
|
230 | return b'' | |
|
231 | ||
|
232 | return f | |
|
233 | if self._checkexec: | |
|
234 | ||
|
235 | def f(x): | |
|
236 | if b'l' in fallback(x): | |
|
237 | return b'l' | |
|
238 | if util.isexec(self._join(x)): | |
|
239 | return b'x' | |
|
240 | return b'' | |
|
241 | ||
|
242 | return f | |
|
243 | else: | |
|
244 | return fallback | |
|
245 | ||
|
246 | @propertycache | |
|
247 | def _cwd(self): | |
|
248 | # internal config: ui.forcecwd | |
|
249 | forcecwd = self._ui.config(b'ui', b'forcecwd') | |
|
250 | if forcecwd: | |
|
251 | return forcecwd | |
|
252 | return encoding.getcwd() | |
|
253 | ||
|
254 | def getcwd(self): | |
|
255 | """Return the path from which a canonical path is calculated. | |
|
256 | ||
|
257 | This path should be used to resolve file patterns or to convert | |
|
258 | canonical paths back to file paths for display. It shouldn't be | |
|
259 | used to get real file paths. Use vfs functions instead. | |
|
260 | """ | |
|
261 | cwd = self._cwd | |
|
262 | if cwd == self._root: | |
|
263 | return b'' | |
|
264 | # self._root ends with a path separator if self._root is '/' or 'C:\' | |
|
265 | rootsep = self._root | |
|
266 | if not util.endswithsep(rootsep): | |
|
267 | rootsep += pycompat.ossep | |
|
268 | if cwd.startswith(rootsep): | |
|
269 | return cwd[len(rootsep) :] | |
|
270 | else: | |
|
271 | # we're outside the repo. return an absolute path. | |
|
272 | return cwd | |
|
273 | ||
|
274 | def pathto(self, f, cwd=None): | |
|
275 | if cwd is None: | |
|
276 | cwd = self.getcwd() | |
|
277 | path = util.pathto(self._root, cwd, f) | |
|
278 | if self._slash: | |
|
279 | return util.pconvert(path) | |
|
280 | return path | |
|
281 | ||
|
282 | def __getitem__(self, key): | |
|
283 | """Return the current state of key (a filename) in the dirstate. | |
|
284 | ||
|
285 | States are: | |
|
286 | n normal | |
|
287 | m needs merging | |
|
288 | r marked for removal | |
|
289 | a marked for addition | |
|
290 | ? not tracked | |
|
291 | """ | |
|
292 | return self._map.get(key, (b"?",))[0] | |
|
293 | ||
|
294 | def __contains__(self, key): | |
|
295 | return key in self._map | |
|
296 | ||
|
297 | def __iter__(self): | |
|
298 | return iter(sorted(self._map)) | |
|
299 | ||
|
300 | def items(self): | |
|
301 | return pycompat.iteritems(self._map) | |
|
302 | ||
|
303 | iteritems = items | |
|
304 | ||
|
305 | def parents(self): | |
|
306 | return [self._validate(p) for p in self._pl] | |
|
307 | ||
|
308 | def p1(self): | |
|
309 | return self._validate(self._pl[0]) | |
|
310 | ||
|
311 | def p2(self): | |
|
312 | return self._validate(self._pl[1]) | |
|
313 | ||
|
314 | def branch(self): | |
|
315 | return encoding.tolocal(self._branch) | |
|
316 | ||
|
317 | def setparents(self, p1, p2=nullid): | |
|
318 | """Set dirstate parents to p1 and p2. | |
|
319 | ||
|
320 | When moving from two parents to one, 'm' merged entries are | |
|
321 | adjusted to normal and previous copy records discarded and | |
|
322 | returned by the call. | |
|
323 | ||
|
324 | See localrepo.setparents() | |
|
325 | """ | |
|
326 | if self._parentwriters == 0: | |
|
327 | raise ValueError( | |
|
328 | b"cannot set dirstate parent outside of " | |
|
329 | b"dirstate.parentchange context manager" | |
|
330 | ) | |
|
331 | ||
|
332 | self._dirty = True | |
|
333 | oldp2 = self._pl[1] | |
|
334 | if self._origpl is None: | |
|
335 | self._origpl = self._pl | |
|
336 | self._map.setparents(p1, p2) | |
|
337 | copies = {} | |
|
338 | if oldp2 != nullid and p2 == nullid: | |
|
339 | candidatefiles = self._map.nonnormalset.union( | |
|
340 | self._map.otherparentset | |
|
341 | ) | |
|
342 | for f in candidatefiles: | |
|
343 | s = self._map.get(f) | |
|
344 | if s is None: | |
|
345 | continue | |
|
346 | ||
|
347 | # Discard 'm' markers when moving away from a merge state | |
|
348 | if s[0] == b'm': | |
|
349 | source = self._map.copymap.get(f) | |
|
350 | if source: | |
|
351 | copies[f] = source | |
|
352 | self.normallookup(f) | |
|
353 | # Also fix up otherparent markers | |
|
354 | elif s[0] == b'n' and s[2] == -2: | |
|
355 | source = self._map.copymap.get(f) | |
|
356 | if source: | |
|
357 | copies[f] = source | |
|
358 | self.add(f) | |
|
359 | return copies | |
|
360 | ||
|
361 | def setbranch(self, branch): | |
|
362 | self.__class__._branch.set(self, encoding.fromlocal(branch)) | |
|
363 | f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True) | |
|
364 | try: | |
|
365 | f.write(self._branch + b'\n') | |
|
366 | f.close() | |
|
367 | ||
|
368 | # make sure filecache has the correct stat info for _branch after | |
|
369 | # replacing the underlying file | |
|
370 | ce = self._filecache[b'_branch'] | |
|
371 | if ce: | |
|
372 | ce.refresh() | |
|
373 | except: # re-raises | |
|
374 | f.discard() | |
|
375 | raise | |
|
376 | ||
|
377 | def invalidate(self): | |
|
378 | """Causes the next access to reread the dirstate. | |
|
379 | ||
|
380 | This is different from localrepo.invalidatedirstate() because it always | |
|
381 | rereads the dirstate. Use localrepo.invalidatedirstate() if you want to | |
|
382 | check whether the dirstate has changed before rereading it.""" | |
|
383 | ||
|
384 | for a in ("_map", "_branch", "_ignore"): | |
|
385 | if a in self.__dict__: | |
|
386 | delattr(self, a) | |
|
387 | self._lastnormaltime = 0 | |
|
388 | self._dirty = False | |
|
389 | self._updatedfiles.clear() | |
|
390 | self._parentwriters = 0 | |
|
391 | self._origpl = None | |
|
392 | ||
|
393 | def copy(self, source, dest): | |
|
394 | """Mark dest as a copy of source. Unmark dest if source is None.""" | |
|
395 | if source == dest: | |
|
396 | return | |
|
397 | self._dirty = True | |
|
398 | if source is not None: | |
|
399 | self._map.copymap[dest] = source | |
|
400 | self._updatedfiles.add(source) | |
|
401 | self._updatedfiles.add(dest) | |
|
402 | elif self._map.copymap.pop(dest, None): | |
|
403 | self._updatedfiles.add(dest) | |
|
404 | ||
|
405 | def copied(self, file): | |
|
406 | return self._map.copymap.get(file, None) | |
|
407 | ||
|
408 | def copies(self): | |
|
409 | return self._map.copymap | |
|
410 | ||
|
411 | def _addpath(self, f, state, mode, size, mtime): | |
|
412 | oldstate = self[f] | |
|
413 | if state == b'a' or oldstate == b'r': | |
|
414 | scmutil.checkfilename(f) | |
|
415 | if self._map.hastrackeddir(f): | |
|
416 | raise error.Abort( | |
|
417 | _(b'directory %r already in dirstate') % pycompat.bytestr(f) | |
|
418 | ) | |
|
419 | # shadows | |
|
420 | for d in pathutil.finddirs(f): | |
|
421 | if self._map.hastrackeddir(d): | |
|
422 | break | |
|
423 | entry = self._map.get(d) | |
|
424 | if entry is not None and entry[0] != b'r': | |
|
425 | raise error.Abort( | |
|
426 | _(b'file %r in dirstate clashes with %r') | |
|
427 | % (pycompat.bytestr(d), pycompat.bytestr(f)) | |
|
428 | ) | |
|
429 | self._dirty = True | |
|
430 | self._updatedfiles.add(f) | |
|
431 | self._map.addfile(f, oldstate, state, mode, size, mtime) | |
|
432 | ||
|
433 | def normal(self, f, parentfiledata=None): | |
|
434 | """Mark a file normal and clean. | |
|
435 | ||
|
436 | parentfiledata: (mode, size, mtime) of the clean file | |
|
437 | ||
|
438 | parentfiledata should be computed from memory (for mode, | |
|
439 | size), as or close as possible from the point where we | |
|
440 | determined the file was clean, to limit the risk of the | |
|
441 | file having been changed by an external process between the | |
|
442 | moment where the file was determined to be clean and now.""" | |
|
443 | if parentfiledata: | |
|
444 | (mode, size, mtime) = parentfiledata | |
|
445 | else: | |
|
446 | s = os.lstat(self._join(f)) | |
|
447 | mode = s.st_mode | |
|
448 | size = s.st_size | |
|
449 | mtime = s[stat.ST_MTIME] | |
|
450 | self._addpath(f, b'n', mode, size & _rangemask, mtime & _rangemask) | |
|
451 | self._map.copymap.pop(f, None) | |
|
452 | if f in self._map.nonnormalset: | |
|
453 | self._map.nonnormalset.remove(f) | |
|
454 | if mtime > self._lastnormaltime: | |
|
455 | # Remember the most recent modification timeslot for status(), | |
|
456 | # to make sure we won't miss future size-preserving file content | |
|
457 | # modifications that happen within the same timeslot. | |
|
458 | self._lastnormaltime = mtime | |
|
459 | ||
|
460 | def normallookup(self, f): | |
|
461 | '''Mark a file normal, but possibly dirty.''' | |
|
462 | if self._pl[1] != nullid: | |
|
463 | # if there is a merge going on and the file was either | |
|
464 | # in state 'm' (-1) or coming from other parent (-2) before | |
|
465 | # being removed, restore that state. | |
|
466 | entry = self._map.get(f) | |
|
467 | if entry is not None: | |
|
468 | if entry[0] == b'r' and entry[2] in (-1, -2): | |
|
469 | source = self._map.copymap.get(f) | |
|
470 | if entry[2] == -1: | |
|
471 | self.merge(f) | |
|
472 | elif entry[2] == -2: | |
|
473 | self.otherparent(f) | |
|
474 | if source: | |
|
475 | self.copy(source, f) | |
|
476 | return | |
|
477 | if entry[0] == b'm' or entry[0] == b'n' and entry[2] == -2: | |
|
478 | return | |
|
479 | self._addpath(f, b'n', 0, -1, -1) | |
|
480 | self._map.copymap.pop(f, None) | |
|
481 | ||
|
482 | def otherparent(self, f): | |
|
483 | '''Mark as coming from the other parent, always dirty.''' | |
|
484 | if self._pl[1] == nullid: | |
|
485 | raise error.Abort( | |
|
486 | _(b"setting %r to other parent only allowed in merges") % f | |
|
487 | ) | |
|
488 | if f in self and self[f] == b'n': | |
|
489 | # merge-like | |
|
490 | self._addpath(f, b'm', 0, -2, -1) | |
|
491 | else: | |
|
492 | # add-like | |
|
493 | self._addpath(f, b'n', 0, -2, -1) | |
|
494 | self._map.copymap.pop(f, None) | |
|
495 | ||
|
496 | def add(self, f): | |
|
497 | '''Mark a file added.''' | |
|
498 | self._addpath(f, b'a', 0, -1, -1) | |
|
499 | self._map.copymap.pop(f, None) | |
|
500 | ||
|
501 | def remove(self, f): | |
|
502 | '''Mark a file removed.''' | |
|
503 | self._dirty = True | |
|
504 | oldstate = self[f] | |
|
505 | size = 0 | |
|
506 | if self._pl[1] != nullid: | |
|
507 | entry = self._map.get(f) | |
|
508 | if entry is not None: | |
|
509 | # backup the previous state | |
|
510 | if entry[0] == b'm': # merge | |
|
511 | size = -1 | |
|
512 | elif entry[0] == b'n' and entry[2] == -2: # other parent | |
|
513 | size = -2 | |
|
514 | self._map.otherparentset.add(f) | |
|
515 | self._updatedfiles.add(f) | |
|
516 | self._map.removefile(f, oldstate, size) | |
|
517 | if size == 0: | |
|
518 | self._map.copymap.pop(f, None) | |
|
519 | ||
|
520 | def merge(self, f): | |
|
521 | '''Mark a file merged.''' | |
|
522 | if self._pl[1] == nullid: | |
|
523 | return self.normallookup(f) | |
|
524 | return self.otherparent(f) | |
|
525 | ||
|
526 | def drop(self, f): | |
|
527 | '''Drop a file from the dirstate''' | |
|
528 | oldstate = self[f] | |
|
529 | if self._map.dropfile(f, oldstate): | |
|
530 | self._dirty = True | |
|
531 | self._updatedfiles.add(f) | |
|
532 | self._map.copymap.pop(f, None) | |
|
533 | ||
|
534 | def _discoverpath(self, path, normed, ignoremissing, exists, storemap): | |
|
535 | if exists is None: | |
|
536 | exists = os.path.lexists(os.path.join(self._root, path)) | |
|
537 | if not exists: | |
|
538 | # Maybe a path component exists | |
|
539 | if not ignoremissing and b'/' in path: | |
|
540 | d, f = path.rsplit(b'/', 1) | |
|
541 | d = self._normalize(d, False, ignoremissing, None) | |
|
542 | folded = d + b"/" + f | |
|
543 | else: | |
|
544 | # No path components, preserve original case | |
|
545 | folded = path | |
|
546 | else: | |
|
547 | # recursively normalize leading directory components | |
|
548 | # against dirstate | |
|
549 | if b'/' in normed: | |
|
550 | d, f = normed.rsplit(b'/', 1) | |
|
551 | d = self._normalize(d, False, ignoremissing, True) | |
|
552 | r = self._root + b"/" + d | |
|
553 | folded = d + b"/" + util.fspath(f, r) | |
|
554 | else: | |
|
555 | folded = util.fspath(normed, self._root) | |
|
556 | storemap[normed] = folded | |
|
557 | ||
|
558 | return folded | |
|
559 | ||
|
560 | def _normalizefile(self, path, isknown, ignoremissing=False, exists=None): | |
|
561 | normed = util.normcase(path) | |
|
562 | folded = self._map.filefoldmap.get(normed, None) | |
|
563 | if folded is None: | |
|
564 | if isknown: | |
|
565 | folded = path | |
|
566 | else: | |
|
567 | folded = self._discoverpath( | |
|
568 | path, normed, ignoremissing, exists, self._map.filefoldmap | |
|
569 | ) | |
|
570 | return folded | |
|
571 | ||
|
572 | def _normalize(self, path, isknown, ignoremissing=False, exists=None): | |
|
573 | normed = util.normcase(path) | |
|
574 | folded = self._map.filefoldmap.get(normed, None) | |
|
575 | if folded is None: | |
|
576 | folded = self._map.dirfoldmap.get(normed, None) | |
|
577 | if folded is None: | |
|
578 | if isknown: | |
|
579 | folded = path | |
|
580 | else: | |
|
581 | # store discovered result in dirfoldmap so that future | |
|
582 | # normalizefile calls don't start matching directories | |
|
583 | folded = self._discoverpath( | |
|
584 | path, normed, ignoremissing, exists, self._map.dirfoldmap | |
|
585 | ) | |
|
586 | return folded | |
|
587 | ||
|
588 | def normalize(self, path, isknown=False, ignoremissing=False): | |
|
589 | """ | |
|
590 | normalize the case of a pathname when on a casefolding filesystem | |
|
591 | ||
|
592 | isknown specifies whether the filename came from walking the | |
|
593 | disk, to avoid extra filesystem access. | |
|
594 | ||
|
595 | If ignoremissing is True, missing paths are returned | |
|
596 | unchanged. Otherwise, we try harder to normalize possibly | |
|
597 | existing path components. | |
|
598 | ||
|
599 | The normalized case is determined based on the following precedence: | |
|
600 | ||
|
601 | - version of name already stored in the dirstate | |
|
602 | - version of name stored on disk | |
|
603 | - version provided via command arguments | |
|
604 | """ | |
|
605 | ||
|
606 | if self._checkcase: | |
|
607 | return self._normalize(path, isknown, ignoremissing) | |
|
608 | return path | |
|
609 | ||
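
The three-step precedence above can be modeled with a simplified fallback chain. A sketch under stated assumptions: a plain dict as the foldmap and str.lower() standing in for util.normcase; the real code also keeps a separate directory foldmap and normalizes missing paths component by component:

    import os

    def normalize(path, foldmap, root):
        normed = path.lower()             # stand-in for util.normcase
        if normed in foldmap:             # 1. name stored in the dirstate
            return foldmap[normed]
        for name in os.listdir(root):     # 2. name as stored on disk
            if name.lower() == normed:
                return name
        return path                       # 3. name given on the command line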
|
610 | def clear(self): | |
|
611 | self._map.clear() | |
|
612 | self._lastnormaltime = 0 | |
|
613 | self._updatedfiles.clear() | |
|
614 | self._dirty = True | |
|
615 | ||
|
616 | def rebuild(self, parent, allfiles, changedfiles=None): | |
|
617 | if changedfiles is None: | |
|
618 | # Rebuild entire dirstate | |
|
619 | to_lookup = allfiles | |
|
620 | to_drop = [] | |
|
621 | lastnormaltime = self._lastnormaltime | |
|
622 | self.clear() | |
|
623 | self._lastnormaltime = lastnormaltime | |
|
624 | elif len(changedfiles) < 10: | |
|
625 | # Avoid turning allfiles into a set, which can be expensive if it's | |
|
626 | # large. | |
|
627 | to_lookup = [] | |
|
628 | to_drop = [] | |
|
629 | for f in changedfiles: | |
|
630 | if f in allfiles: | |
|
631 | to_lookup.append(f) | |
|
632 | else: | |
|
633 | to_drop.append(f) | |
|
634 | else: | |
|
635 | changedfilesset = set(changedfiles) | |
|
636 | to_lookup = changedfilesset & set(allfiles) | |
|
637 | to_drop = changedfilesset - to_lookup | |
|
638 | ||
|
639 | if self._origpl is None: | |
|
640 | self._origpl = self._pl | |
|
641 | self._map.setparents(parent, nullid) | |
|
642 | ||
|
643 | for f in to_lookup: | |
|
644 | self.normallookup(f) | |
|
645 | for f in to_drop: | |
|
646 | self.drop(f) | |
|
647 | ||
|
648 | self._dirty = True | |
|
649 | ||
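
The size cut-off in rebuild() above trades one membership probe per changed file against materializing set(allfiles) once. A standalone sketch of the same partition, assuming allfiles supports fast membership tests (as a manifest does):

    def partition_changed(allfiles, changedfiles):
        if len(changedfiles) < 10:
            # probe allfiles directly instead of copying it into a set
            to_lookup = [f for f in changedfiles if f in allfiles]
            to_drop = [f for f in changedfiles if f not in allfiles]
        else:
            changed = set(changedfiles)
            to_lookup = changed & set(allfiles)
            to_drop = changed - to_lookup
        return to_lookup, to_drop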
|
650 | def identity(self): | |
|
651 | """Return identity of dirstate itself to detect changing in storage | |
|
652 | ||
|
653 | If the identity of the previous dirstate is equal to this one, | |

654 | writing changes out based on the former dirstate keeps consistency. | |
|
655 | """ | |
|
656 | return self._map.identity | |
|
657 | ||
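
The identity returned above is whatever snapshot the map took of the dirstate file when it was read. A simplified stand-in for that idea, using a stat-derived tuple that changes whenever the file is rewritten (util.filestat tracks more than this sketch does):

    import os

    def dirstate_identity(path):
        st = os.stat(path)
        # inode, size and mtime together change when another process
        # rewrites the dirstate between our read and our write
        return (st.st_ino, st.st_size, st.st_mtime_ns)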
|
658 | def write(self, tr): | |
|
659 | if not self._dirty: | |
|
660 | return | |
|
661 | ||
|
662 | filename = self._filename | |
|
663 | if tr: | |
|
664 | # 'dirstate.write()' is not only for writing in-memory | |
|
665 | # changes out, but also for dropping ambiguous timestamps. | |

666 | # delayed writing re-raises the "ambiguous timestamp issue". | |

667 | # See also the wiki page below for details: | |
|
668 | # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan | |
|
669 | ||
|
670 | # emulate dropping timestamp in 'parsers.pack_dirstate' | |
|
671 | now = _getfsnow(self._opener) | |
|
672 | self._map.clearambiguoustimes(self._updatedfiles, now) | |
|
673 | ||
|
674 | # emulate that all 'dirstate.normal' results are written out | |
|
675 | self._lastnormaltime = 0 | |
|
676 | self._updatedfiles.clear() | |
|
677 | ||
|
678 | # delay writing in-memory changes out | |
|
679 | tr.addfilegenerator( | |
|
680 | b'dirstate', | |
|
681 | (self._filename,), | |
|
682 | self._writedirstate, | |
|
683 | location=b'plain', | |
|
684 | ) | |
|
685 | return | |
|
686 | ||
|
687 | st = self._opener(filename, b"w", atomictemp=True, checkambig=True) | |
|
688 | self._writedirstate(st) | |
|
689 | ||
|
690 | def addparentchangecallback(self, category, callback): | |
|
691 | """add a callback to be called when the wd parents are changed | |
|
692 | ||
|
693 | Callback will be called with the following arguments: | |
|
694 | dirstate, (oldp1, oldp2), (newp1, newp2) | |
|
695 | ||
|
696 | Category is a unique identifier to allow overwriting an old callback | |
|
697 | with a newer callback. | |
|
698 | """ | |
|
699 | self._plchangecallbacks[category] = callback | |
|
700 | ||
|
701 | def _writedirstate(self, st): | |
|
702 | # notify callbacks about parents change | |
|
703 | if self._origpl is not None and self._origpl != self._pl: | |
|
704 | for c, callback in sorted( | |
|
705 | pycompat.iteritems(self._plchangecallbacks) | |
|
706 | ): | |
|
707 | callback(self, self._origpl, self._pl) | |
|
708 | self._origpl = None | |
|
709 | # use the modification time of the newly created temporary file as the | |
|
710 | # filesystem's notion of 'now' | |
|
711 | now = util.fstat(st)[stat.ST_MTIME] & _rangemask | |
|
712 | ||
|
713 | # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping | |

714 | # the timestamp of each entry in the dirstate, because of 'now > mtime' | |
|
715 | delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite') | |
|
716 | if delaywrite > 0: | |
|
717 | # do we have any files to delay for? | |
|
718 | for f, e in pycompat.iteritems(self._map): | |
|
719 | if e[0] == b'n' and e[3] == now: | |
|
720 | import time # to avoid useless import | |
|
721 | ||
|
722 | # rather than sleep n seconds, sleep until the next | |
|
723 | # multiple of n seconds | |
|
724 | clock = time.time() | |
|
725 | start = int(clock) - (int(clock) % delaywrite) | |
|
726 | end = start + delaywrite | |
|
727 | time.sleep(end - clock) | |
|
728 | now = end # trust our estimate that the end is near now | |
|
729 | break | |
|
730 | ||
|
731 | self._map.write(st, now) | |
|
732 | self._lastnormaltime = 0 | |
|
733 | self._dirty = False | |
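
The delaywrite loop above sleeps to a timeslot boundary rather than for a fixed duration, so the written mtimes land in a fresh timeslot. The arithmetic, restated as a standalone sketch (delaywrite is the debug.dirstate.delaywrite value, in seconds):

    import time

    def sleep_to_next_slot(delaywrite):
        clock = time.time()
        # round the current time down to a multiple of delaywrite...
        start = int(clock) - (int(clock) % delaywrite)
        end = start + delaywrite
        # ...and sleep until that next multiple is reached
        time.sleep(end - clock)
        return end  # callers may trust this as their new notion of 'now'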
|
33 | # a special value used internally for `size` if the file comes from the other parent | |
|
34 | FROM_P2 = -2 | |
|
734 | 35 | |
|
735 | def _dirignore(self, f): | |
|
736 | if self._ignore(f): | |
|
737 | return True | |
|
738 | for p in pathutil.finddirs(f): | |
|
739 | if self._ignore(p): | |
|
740 | return True | |
|
741 | return False | |
|
742 | ||
|
743 | def _ignorefiles(self): | |
|
744 | files = [] | |
|
745 | if os.path.exists(self._join(b'.hgignore')): | |
|
746 | files.append(self._join(b'.hgignore')) | |
|
747 | for name, path in self._ui.configitems(b"ui"): | |
|
748 | if name == b'ignore' or name.startswith(b'ignore.'): | |
|
749 | # we need to use os.path.join here rather than self._join | |
|
750 | # because path is arbitrary and user-specified | |
|
751 | files.append(os.path.join(self._rootdir, util.expandpath(path))) | |
|
752 | return files | |
|
753 | ||
|
754 | def _ignorefileandline(self, f): | |
|
755 | files = collections.deque(self._ignorefiles()) | |
|
756 | visited = set() | |
|
757 | while files: | |
|
758 | i = files.popleft() | |
|
759 | patterns = matchmod.readpatternfile( | |
|
760 | i, self._ui.warn, sourceinfo=True | |
|
761 | ) | |
|
762 | for pattern, lineno, line in patterns: | |
|
763 | kind, p = matchmod._patsplit(pattern, b'glob') | |
|
764 | if kind == b"subinclude": | |
|
765 | if p not in visited: | |
|
766 | files.append(p) | |
|
767 | continue | |
|
768 | m = matchmod.match( | |
|
769 | self._root, b'', [], [pattern], warn=self._ui.warn | |
|
770 | ) | |
|
771 | if m(f): | |
|
772 | return (i, lineno, line) | |
|
773 | visited.add(i) | |
|
774 | return (None, -1, b"") | |
|
775 | ||
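
_ignorefileandline() above is a breadth-first traversal over ignore files connected by subinclude patterns. A schematic version with the Mercurial-specific parsing abstracted behind two hypothetical callables (read_patterns yields (kind, payload, lineno, line) tuples; file_matches tests one pattern against the file of interest):

    import collections

    def find_matching_pattern(start_files, read_patterns, file_matches):
        files = collections.deque(start_files)
        visited = set()
        while files:
            ignorefile = files.popleft()
            for kind, payload, lineno, line in read_patterns(ignorefile):
                if kind == 'subinclude':
                    # follow each referenced ignore file at most once
                    if payload not in visited:
                        files.append(payload)
                    continue
                if file_matches(payload):
                    return (ignorefile, lineno, line)
            visited.add(ignorefile)
        return (None, -1, '')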
|
776 | def _walkexplicit(self, match, subrepos): | |
|
777 | """Get stat data about the files explicitly specified by match. | |
|
778 | ||
|
779 | Return a triple (results, dirsfound, dirsnotfound). | |
|
780 | - results is a mapping from filename to stat result. It also contains | |
|
781 | listings mapping subrepos and .hg to None. | |
|
782 | - dirsfound is a list of files found to be directories. | |
|
783 | - dirsnotfound is a list of files that the dirstate thinks are | |
|
784 | directories and that were not found.""" | |
|
785 | ||
|
786 | def badtype(mode): | |
|
787 | kind = _(b'unknown') | |
|
788 | if stat.S_ISCHR(mode): | |
|
789 | kind = _(b'character device') | |
|
790 | elif stat.S_ISBLK(mode): | |
|
791 | kind = _(b'block device') | |
|
792 | elif stat.S_ISFIFO(mode): | |
|
793 | kind = _(b'fifo') | |
|
794 | elif stat.S_ISSOCK(mode): | |
|
795 | kind = _(b'socket') | |
|
796 | elif stat.S_ISDIR(mode): | |
|
797 | kind = _(b'directory') | |
|
798 | return _(b'unsupported file type (type is %s)') % kind | |
|
799 | ||
|
800 | badfn = match.bad | |
|
801 | dmap = self._map | |
|
802 | lstat = os.lstat | |
|
803 | getkind = stat.S_IFMT | |
|
804 | dirkind = stat.S_IFDIR | |
|
805 | regkind = stat.S_IFREG | |
|
806 | lnkkind = stat.S_IFLNK | |
|
807 | join = self._join | |
|
808 | dirsfound = [] | |
|
809 | foundadd = dirsfound.append | |
|
810 | dirsnotfound = [] | |
|
811 | notfoundadd = dirsnotfound.append | |
|
812 | ||
|
813 | if not match.isexact() and self._checkcase: | |
|
814 | normalize = self._normalize | |
|
815 | else: | |
|
816 | normalize = None | |
|
817 | ||
|
818 | files = sorted(match.files()) | |
|
819 | subrepos.sort() | |
|
820 | i, j = 0, 0 | |
|
821 | while i < len(files) and j < len(subrepos): | |
|
822 | subpath = subrepos[j] + b"/" | |
|
823 | if files[i] < subpath: | |
|
824 | i += 1 | |
|
825 | continue | |
|
826 | while i < len(files) and files[i].startswith(subpath): | |
|
827 | del files[i] | |
|
828 | j += 1 | |
|
829 | ||
|
830 | if not files or b'' in files: | |
|
831 | files = [b''] | |
|
832 | # constructing the foldmap is expensive, so don't do it for the | |
|
833 | # common case where files is [''] | |
|
834 | normalize = None | |
|
835 | results = dict.fromkeys(subrepos) | |
|
836 | results[b'.hg'] = None | |
|
837 | ||
|
838 | for ff in files: | |
|
839 | if normalize: | |
|
840 | nf = normalize(ff, False, True) | |
|
841 | else: | |
|
842 | nf = ff | |
|
843 | if nf in results: | |
|
844 | continue | |
|
845 | ||
|
846 | try: | |
|
847 | st = lstat(join(nf)) | |
|
848 | kind = getkind(st.st_mode) | |
|
849 | if kind == dirkind: | |
|
850 | if nf in dmap: | |
|
851 | # file replaced by dir on disk but still in dirstate | |
|
852 | results[nf] = None | |
|
853 | foundadd((nf, ff)) | |
|
854 | elif kind == regkind or kind == lnkkind: | |
|
855 | results[nf] = st | |
|
856 | else: | |
|
857 | badfn(ff, badtype(kind)) | |
|
858 | if nf in dmap: | |
|
859 | results[nf] = None | |
|
860 | except OSError as inst: # nf not found on disk - it is dirstate only | |
|
861 | if nf in dmap: # does it exactly match a missing file? | |
|
862 | results[nf] = None | |
|
863 | else: # does it match a missing directory? | |
|
864 | if self._map.hasdir(nf): | |
|
865 | notfoundadd(nf) | |
|
866 | else: | |
|
867 | badfn(ff, encoding.strtolocal(inst.strerror)) | |
|
868 | ||
|
869 | # match.files() may contain explicitly-specified paths that shouldn't | |
|
870 | # be taken; drop them from the list of files found. dirsfound/notfound | |
|
871 | # aren't filtered here because they will be tested later. | |
|
872 | if match.anypats(): | |
|
873 | for f in list(results): | |
|
874 | if f == b'.hg' or f in subrepos: | |
|
875 | # keep sentinel to disable further out-of-repo walks | |
|
876 | continue | |
|
877 | if not match(f): | |
|
878 | del results[f] | |
|
879 | ||
|
880 | # Case insensitive filesystems cannot rely on lstat() failing to detect | |
|
881 | # a case-only rename. Prune the stat object for any file that does not | |
|
882 | # match the case in the filesystem, if there are multiple files that | |
|
883 | # normalize to the same path. | |
|
884 | if match.isexact() and self._checkcase: | |
|
885 | normed = {} | |
|
886 | ||
|
887 | for f, st in pycompat.iteritems(results): | |
|
888 | if st is None: | |
|
889 | continue | |
|
890 | ||
|
891 | nc = util.normcase(f) | |
|
892 | paths = normed.get(nc) | |
|
893 | ||
|
894 | if paths is None: | |
|
895 | paths = set() | |
|
896 | normed[nc] = paths | |
|
897 | ||
|
898 | paths.add(f) | |
|
899 | ||
|
900 | for norm, paths in pycompat.iteritems(normed): | |
|
901 | if len(paths) > 1: | |
|
902 | for path in paths: | |
|
903 | folded = self._discoverpath( | |
|
904 | path, norm, True, None, self._map.dirfoldmap | |
|
905 | ) | |
|
906 | if path != folded: | |
|
907 | results[path] = None | |
|
908 | ||
|
909 | return results, dirsfound, dirsnotfound | |
|
910 | ||
|
911 | def walk(self, match, subrepos, unknown, ignored, full=True): | |
|
912 | """ | |
|
913 | Walk recursively through the directory tree, finding all files | |
|
914 | matched by match. | |
|
915 | ||
|
916 | If full is False, maybe skip some known-clean files. | |
|
917 | ||
|
918 | Return a dict mapping filename to stat-like object (either | |
|
919 | mercurial.osutil.stat instance or return value of os.stat()). | |
|
920 | ||
|
921 | """ | |
|
922 | # full is a flag that extensions that hook into walk can use -- this | |
|
923 | # implementation doesn't use it at all. This satisfies the contract | |
|
924 | # because we only guarantee a "maybe". | |
|
925 | ||
|
926 | if ignored: | |
|
927 | ignore = util.never | |
|
928 | dirignore = util.never | |
|
929 | elif unknown: | |
|
930 | ignore = self._ignore | |
|
931 | dirignore = self._dirignore | |
|
932 | else: | |
|
933 | # if not unknown and not ignored, drop dir recursion and step 2 | |
|
934 | ignore = util.always | |
|
935 | dirignore = util.always | |
|
936 | ||
|
937 | matchfn = match.matchfn | |
|
938 | matchalways = match.always() | |
|
939 | matchtdir = match.traversedir | |
|
940 | dmap = self._map | |
|
941 | listdir = util.listdir | |
|
942 | lstat = os.lstat | |
|
943 | dirkind = stat.S_IFDIR | |
|
944 | regkind = stat.S_IFREG | |
|
945 | lnkkind = stat.S_IFLNK | |
|
946 | join = self._join | |
|
947 | ||
|
948 | exact = skipstep3 = False | |
|
949 | if match.isexact(): # match.exact | |
|
950 | exact = True | |
|
951 | dirignore = util.always # skip step 2 | |
|
952 | elif match.prefix(): # match.match, no patterns | |
|
953 | skipstep3 = True | |
|
954 | ||
|
955 | if not exact and self._checkcase: | |
|
956 | normalize = self._normalize | |
|
957 | normalizefile = self._normalizefile | |
|
958 | skipstep3 = False | |
|
959 | else: | |
|
960 | normalize = self._normalize | |
|
961 | normalizefile = None | |
|
962 | ||
|
963 | # step 1: find all explicit files | |
|
964 | results, work, dirsnotfound = self._walkexplicit(match, subrepos) | |
|
965 | if matchtdir: | |
|
966 | for d in work: | |
|
967 | matchtdir(d[0]) | |
|
968 | for d in dirsnotfound: | |
|
969 | matchtdir(d) | |
|
970 | ||
|
971 | skipstep3 = skipstep3 and not (work or dirsnotfound) | |
|
972 | work = [d for d in work if not dirignore(d[0])] | |
|
973 | ||
|
974 | # step 2: visit subdirectories | |
|
975 | def traverse(work, alreadynormed): | |
|
976 | wadd = work.append | |
|
977 | while work: | |
|
978 | tracing.counter('dirstate.walk work', len(work)) | |
|
979 | nd = work.pop() | |
|
980 | visitentries = match.visitchildrenset(nd) | |
|
981 | if not visitentries: | |
|
982 | continue | |
|
983 | if visitentries == b'this' or visitentries == b'all': | |
|
984 | visitentries = None | |
|
985 | skip = None | |
|
986 | if nd != b'': | |
|
987 | skip = b'.hg' | |
|
988 | try: | |
|
989 | with tracing.log('dirstate.walk.traverse listdir %s', nd): | |
|
990 | entries = listdir(join(nd), stat=True, skip=skip) | |
|
991 | except OSError as inst: | |
|
992 | if inst.errno in (errno.EACCES, errno.ENOENT): | |
|
993 | match.bad( | |
|
994 | self.pathto(nd), encoding.strtolocal(inst.strerror) | |
|
995 | ) | |
|
996 | continue | |
|
997 | raise | |
|
998 | for f, kind, st in entries: | |
|
999 | # Some matchers may return files in the visitentries set, | |
|
1000 | # instead of 'this', if the matcher explicitly mentions them | |
|
1001 | # and is not an exactmatcher. This is acceptable; we do not | |
|
1002 | # make any hard assumptions about file-or-directory below | |
|
1003 | # based on the presence of `f` in visitentries. If | |
|
1004 | # visitchildrenset returned a set, we can always skip the | |
|
1005 | # entries *not* in the set it provided regardless of whether | |
|
1006 | # they're actually a file or a directory. | |
|
1007 | if visitentries and f not in visitentries: | |
|
1008 | continue | |
|
1009 | if normalizefile: | |
|
1010 | # even though f might be a directory, we're only | |
|
1011 | # interested in comparing it to files currently in the | |
|
1012 | # dmap -- therefore normalizefile is enough | |
|
1013 | nf = normalizefile( | |
|
1014 | nd and (nd + b"/" + f) or f, True, True | |
|
1015 | ) | |
|
1016 | else: | |
|
1017 | nf = nd and (nd + b"/" + f) or f | |
|
1018 | if nf not in results: | |
|
1019 | if kind == dirkind: | |
|
1020 | if not ignore(nf): | |
|
1021 | if matchtdir: | |
|
1022 | matchtdir(nf) | |
|
1023 | wadd(nf) | |
|
1024 | if nf in dmap and (matchalways or matchfn(nf)): | |
|
1025 | results[nf] = None | |
|
1026 | elif kind == regkind or kind == lnkkind: | |
|
1027 | if nf in dmap: | |
|
1028 | if matchalways or matchfn(nf): | |
|
1029 | results[nf] = st | |
|
1030 | elif (matchalways or matchfn(nf)) and not ignore( | |
|
1031 | nf | |
|
1032 | ): | |
|
1033 | # unknown file -- normalize if necessary | |
|
1034 | if not alreadynormed: | |
|
1035 | nf = normalize(nf, False, True) | |
|
1036 | results[nf] = st | |
|
1037 | elif nf in dmap and (matchalways or matchfn(nf)): | |
|
1038 | results[nf] = None | |
|
1039 | ||
|
1040 | for nd, d in work: | |
|
1041 | # alreadynormed means that processwork doesn't have to do any | |
|
1042 | # expensive directory normalization | |
|
1043 | alreadynormed = not normalize or nd == d | |
|
1044 | traverse([d], alreadynormed) | |
|
1045 | ||
|
1046 | for s in subrepos: | |
|
1047 | del results[s] | |
|
1048 | del results[b'.hg'] | |
|
1049 | ||
|
1050 | # step 3: visit remaining files from dmap | |
|
1051 | if not skipstep3 and not exact: | |
|
1052 | # If a dmap file is not in results yet, it was either | |
|
1053 | # a) not matching matchfn b) ignored, c) missing, or d) under a | |
|
1054 | # symlink directory. | |
|
1055 | if not results and matchalways: | |
|
1056 | visit = [f for f in dmap] | |
|
1057 | else: | |
|
1058 | visit = [f for f in dmap if f not in results and matchfn(f)] | |
|
1059 | visit.sort() | |
|
36 | # a special value used internally for `size` if the file is modified/merged/added | |
|
37 | NONNORMAL = -1 | |
|
1060 | 38 | |
|
1061 | if unknown: | |
|
1062 | # unknown == True means we walked all dirs under the roots | |
|
1063 | # that weren't ignored, and everything that matched was stat'ed | |
|
1064 | # and is already in results. | |
|
1065 | # The rest must thus be ignored or under a symlink. | |
|
1066 | audit_path = pathutil.pathauditor(self._root, cached=True) | |
|
1067 | ||
|
1068 | for nf in iter(visit): | |
|
1069 | # If a stat for the same file was already added with a | |
|
1070 | # different case, don't add one for this, since that would | |
|
1071 | # make it appear as if the file exists under both names | |
|
1072 | # on disk. | |
|
1073 | if ( | |
|
1074 | normalizefile | |
|
1075 | and normalizefile(nf, True, True) in results | |
|
1076 | ): | |
|
1077 | results[nf] = None | |
|
1078 | # Report ignored items in the dmap as long as they are not | |
|
1079 | # under a symlink directory. | |
|
1080 | elif audit_path.check(nf): | |
|
1081 | try: | |
|
1082 | results[nf] = lstat(join(nf)) | |
|
1083 | # file was just ignored, no links, and exists | |
|
1084 | except OSError: | |
|
1085 | # file doesn't exist | |
|
1086 | results[nf] = None | |
|
1087 | else: | |
|
1088 | # It's either missing or under a symlink directory | |
|
1089 | # which we in this case report as missing | |
|
1090 | results[nf] = None | |
|
1091 | else: | |
|
1092 | # We may not have walked the full directory tree above, | |
|
1093 | # so stat and check everything we missed. | |
|
1094 | iv = iter(visit) | |
|
1095 | for st in util.statfiles([join(i) for i in visit]): | |
|
1096 | results[next(iv)] = st | |
|
1097 | return results | |
|
1098 | ||
|
1099 | def _rust_status(self, matcher, list_clean, list_ignored, list_unknown): | |
|
1100 | # Force Rayon (Rust parallelism library) to respect the number of | |
|
1101 | # workers. This is a temporary workaround until Rust code knows | |
|
1102 | # how to read the config file. | |
|
1103 | numcpus = self._ui.configint(b"worker", b"numcpus") | |
|
1104 | if numcpus is not None: | |
|
1105 | encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus) | |
|
1106 | ||
|
1107 | workers_enabled = self._ui.configbool(b"worker", b"enabled", True) | |
|
1108 | if not workers_enabled: | |
|
1109 | encoding.environ[b"RAYON_NUM_THREADS"] = b"1" | |
|
1110 | ||
|
1111 | ( | |
|
1112 | lookup, | |
|
1113 | modified, | |
|
1114 | added, | |
|
1115 | removed, | |
|
1116 | deleted, | |
|
1117 | clean, | |
|
1118 | ignored, | |
|
1119 | unknown, | |
|
1120 | warnings, | |
|
1121 | bad, | |
|
1122 | traversed, | |
|
1123 | ) = rustmod.status( | |
|
1124 | self._map._rustmap, | |
|
1125 | matcher, | |
|
1126 | self._rootdir, | |
|
1127 | self._ignorefiles(), | |
|
1128 | self._checkexec, | |
|
1129 | self._lastnormaltime, | |
|
1130 | bool(list_clean), | |
|
1131 | bool(list_ignored), | |
|
1132 | bool(list_unknown), | |
|
1133 | bool(matcher.traversedir), | |
|
1134 | ) | |
|
1135 | ||
|
1136 | if matcher.traversedir: | |
|
1137 | for dir in traversed: | |
|
1138 | matcher.traversedir(dir) | |
|
1139 | ||
|
1140 | if self._ui.warn: | |
|
1141 | for item in warnings: | |
|
1142 | if isinstance(item, tuple): | |
|
1143 | file_path, syntax = item | |
|
1144 | msg = _(b"%s: ignoring invalid syntax '%s'\n") % ( | |
|
1145 | file_path, | |
|
1146 | syntax, | |
|
1147 | ) | |
|
1148 | self._ui.warn(msg) | |
|
1149 | else: | |
|
1150 | msg = _(b"skipping unreadable pattern file '%s': %s\n") | |
|
1151 | self._ui.warn( | |
|
1152 | msg | |
|
1153 | % ( | |
|
1154 | pathutil.canonpath( | |
|
1155 | self._rootdir, self._rootdir, item | |
|
1156 | ), | |
|
1157 | b"No such file or directory", | |
|
1158 | ) | |
|
1159 | ) | |
|
1160 | ||
|
1161 | for (fn, message) in bad: | |
|
1162 | matcher.bad(fn, encoding.strtolocal(message)) | |
|
1163 | ||
|
1164 | status = scmutil.status( | |
|
1165 | modified=modified, | |
|
1166 | added=added, | |
|
1167 | removed=removed, | |
|
1168 | deleted=deleted, | |
|
1169 | unknown=unknown, | |
|
1170 | ignored=ignored, | |
|
1171 | clean=clean, | |
|
1172 | ) | |
|
1173 | return (lookup, status) | |
|
1174 | ||
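
The worker-count workaround above only works because Rayon consults RAYON_NUM_THREADS once at startup. The same dance with the standard os.environ in place of Mercurial's encoding.environ:

    import os

    def cap_rayon_threads(numcpus, workers_enabled=True):
        if numcpus is not None:
            # respect worker.numcpus, but keep any explicit user setting
            os.environ.setdefault('RAYON_NUM_THREADS', '%d' % numcpus)
        if not workers_enabled:
            # worker.enabled=False forces the Rust status single-threaded
            os.environ['RAYON_NUM_THREADS'] = '1'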
|
1175 | def status(self, match, subrepos, ignored, clean, unknown): | |
|
1176 | """Determine the status of the working copy relative to the | |
|
1177 | dirstate and return a pair of (unsure, status), where status is of type | |
|
1178 | scmutil.status and: | |
|
1179 | ||
|
1180 | unsure: | |
|
1181 | files that might have been modified since the dirstate was | |
|
1182 | written, but need to be read to be sure (size is the same | |
|
1183 | but mtime differs) | |
|
1184 | status.modified: | |
|
1185 | files that have definitely been modified since the dirstate | |
|
1186 | was written (different size or mode) | |
|
1187 | status.clean: | |
|
1188 | files that have definitely not been modified since the | |
|
1189 | dirstate was written | |
|
1190 | """ | |
|
1191 | listignored, listclean, listunknown = ignored, clean, unknown | |
|
1192 | lookup, modified, added, unknown, ignored = [], [], [], [], [] | |
|
1193 | removed, deleted, clean = [], [], [] | |
|
1194 | ||
|
1195 | dmap = self._map | |
|
1196 | dmap.preload() | |
|
1197 | ||
|
1198 | use_rust = True | |
|
1199 | ||
|
1200 | allowed_matchers = ( | |
|
1201 | matchmod.alwaysmatcher, | |
|
1202 | matchmod.exactmatcher, | |
|
1203 | matchmod.includematcher, | |
|
1204 | ) | |
|
1205 | ||
|
1206 | if rustmod is None: | |
|
1207 | use_rust = False | |
|
1208 | elif self._checkcase: | |
|
1209 | # Case-insensitive filesystems are not handled yet | |
|
1210 | use_rust = False | |
|
1211 | elif subrepos: | |
|
1212 | use_rust = False | |
|
1213 | elif sparse.enabled: | |
|
1214 | use_rust = False | |
|
1215 | elif not isinstance(match, allowed_matchers): | |
|
1216 | # Some matchers have yet to be implemented | |
|
1217 | use_rust = False | |
|
1218 | ||
|
1219 | if use_rust: | |
|
1220 | try: | |
|
1221 | return self._rust_status( | |
|
1222 | match, listclean, listignored, listunknown | |
|
1223 | ) | |
|
1224 | except rustmod.FallbackError: | |
|
1225 | pass | |
|
39 | # a special value used internally for `time` if the time is ambiguous | |
|
40 | AMBIGUOUS_TIME = -1 | |
|
1226 | 41 | |
|
1227 | def noop(f): | |
|
1228 | pass | |
|
1229 | ||
|
1230 | dcontains = dmap.__contains__ | |
|
1231 | dget = dmap.__getitem__ | |
|
1232 | ladd = lookup.append # aka "unsure" | |
|
1233 | madd = modified.append | |
|
1234 | aadd = added.append | |
|
1235 | uadd = unknown.append if listunknown else noop | |
|
1236 | iadd = ignored.append if listignored else noop | |
|
1237 | radd = removed.append | |
|
1238 | dadd = deleted.append | |
|
1239 | cadd = clean.append if listclean else noop | |
|
1240 | mexact = match.exact | |
|
1241 | dirignore = self._dirignore | |
|
1242 | checkexec = self._checkexec | |
|
1243 | copymap = self._map.copymap | |
|
1244 | lastnormaltime = self._lastnormaltime | |
|
1245 | ||
|
1246 | # We need to do full walks when either | |
|
1247 | # - we're listing all clean files, or | |
|
1248 | # - match.traversedir does something, because match.traversedir should | |
|
1249 | # be called for every dir in the working dir | |
|
1250 | full = listclean or match.traversedir is not None | |
|
1251 | for fn, st in pycompat.iteritems( | |
|
1252 | self.walk(match, subrepos, listunknown, listignored, full=full) | |
|
1253 | ): | |
|
1254 | if not dcontains(fn): | |
|
1255 | if (listignored or mexact(fn)) and dirignore(fn): | |
|
1256 | if listignored: | |
|
1257 | iadd(fn) | |
|
1258 | else: | |
|
1259 | uadd(fn) | |
|
1260 | continue | |
|
1261 | ||
|
1262 | # This is equivalent to 'state, mode, size, time = dmap[fn]' but not | |
|
1263 | # written like that for performance reasons. dmap[fn] is not a | |
|
1264 | # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE | |
|
1265 | # opcode has fast paths when the value to be unpacked is a tuple or | |
|
1266 | # a list, but falls back to creating a full-fledged iterator in | |
|
1267 | # general. That is much slower than simply accessing and storing the | |
|
1268 | # tuple members one by one. | |
|
1269 | t = dget(fn) | |
|
1270 | state = t[0] | |
|
1271 | mode = t[1] | |
|
1272 | size = t[2] | |
|
1273 | time = t[3] | |
|
1274 | ||
|
1275 | if not st and state in b"nma": | |
|
1276 | dadd(fn) | |
|
1277 | elif state == b'n': | |
|
1278 | if ( | |
|
1279 | size >= 0 | |
|
1280 | and ( | |
|
1281 | (size != st.st_size and size != st.st_size & _rangemask) | |
|
1282 | or ((mode ^ st.st_mode) & 0o100 and checkexec) | |
|
1283 | ) | |
|
1284 | or size == -2 # other parent | |
|
1285 | or fn in copymap | |
|
1286 | ): | |
|
1287 | if stat.S_ISLNK(st.st_mode) and size != st.st_size: | |
|
1288 | # issue6456: Size returned may be longer due to | |
|
1289 | # encryption on EXT-4 fscrypt, undecided. | |
|
1290 | ladd(fn) | |
|
1291 | else: | |
|
1292 | madd(fn) | |
|
1293 | elif ( | |
|
1294 | time != st[stat.ST_MTIME] | |
|
1295 | and time != st[stat.ST_MTIME] & _rangemask | |
|
1296 | ): | |
|
1297 | ladd(fn) | |
|
1298 | elif st[stat.ST_MTIME] == lastnormaltime: | |
|
1299 | # fn may have just been marked as normal and it may have | |
|
1300 | # changed in the same second without changing its size. | |
|
1301 | # This can happen if we quickly do multiple commits. | |
|
1302 | # Force lookup, so we don't miss such a racy file change. | |
|
1303 | ladd(fn) | |
|
1304 | elif listclean: | |
|
1305 | cadd(fn) | |
|
1306 | elif state == b'm': | |
|
1307 | madd(fn) | |
|
1308 | elif state == b'a': | |
|
1309 | aadd(fn) | |
|
1310 | elif state == b'r': | |
|
1311 | radd(fn) | |
|
1312 | status = scmutil.status( | |
|
1313 | modified, added, removed, deleted, unknown, ignored, clean | |
|
1314 | ) | |
|
1315 | return (lookup, status) | |
|
1316 | ||
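
The 'n'-state branch above is the heart of status(): a size or exec-bit change proves modification, while a differing or racy mtime only forces a content re-read ("lookup"). A condensed sketch of that decision, with entry as a (state, mode, size, mtime) tuple and the copy/other-parent cases left out:

    import stat

    def classify_normal(entry, st, lastnormaltime, checkexec=True):
        rangemask = 0x7FFFFFFF
        _state, mode, size, mtime = entry
        if size >= 0 and (
            (size != st.st_size and size != st.st_size & rangemask)
            or ((mode ^ st.st_mode) & 0o100 and checkexec)
        ):
            return 'modified'      # size or executable bit changed
        if mtime not in (st[stat.ST_MTIME], st[stat.ST_MTIME] & rangemask):
            return 'lookup'        # same size, different time: read content
        if st[stat.ST_MTIME] == lastnormaltime:
            return 'lookup'        # written within the racy timeslot
        return 'clean'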
|
1317 | def matches(self, match): | |
|
1318 | """ | |
|
1319 | return files in the dirstate (in whatever state) filtered by match | |
|
1320 | """ | |
|
1321 | dmap = self._map | |
|
1322 | if rustmod is not None: | |
|
1323 | dmap = self._map._rustmap | |
|
1324 | ||
|
1325 | if match.always(): | |
|
1326 | return dmap.keys() | |
|
1327 | files = match.files() | |
|
1328 | if match.isexact(): | |
|
1329 | # fast path -- filter the other way around, since typically files is | |
|
1330 | # much smaller than dmap | |
|
1331 | return [f for f in files if f in dmap] | |
|
1332 | if match.prefix() and all(fn in dmap for fn in files): | |
|
1333 | # fast path -- all the values are known to be files, so just return | |
|
1334 | # that | |
|
1335 | return list(files) | |
|
1336 | return [f for f in dmap if match(f)] | |
|
1337 | ||
|
1338 | def _actualfilename(self, tr): | |
|
1339 | if tr: | |
|
1340 | return self._pendingfilename | |
|
1341 | else: | |
|
1342 | return self._filename | |
|
1343 | ||
|
1344 | def savebackup(self, tr, backupname): | |
|
1345 | '''Save current dirstate into backup file''' | |
|
1346 | filename = self._actualfilename(tr) | |
|
1347 | assert backupname != filename | |
|
1348 | ||
|
1349 | # use '_writedirstate' instead of 'write' to write changes certainly, | |
|
1350 | # because the latter omits writing out if transaction is running. | |
|
1351 | # output file will be used to create backup of dirstate at this point. | |
|
1352 | if self._dirty or not self._opener.exists(filename): | |
|
1353 | self._writedirstate( | |
|
1354 | self._opener(filename, b"w", atomictemp=True, checkambig=True) | |
|
1355 | ) | |
|
1356 | ||
|
1357 | if tr: | |
|
1358 | # ensure that subsequent tr.writepending returns True for | |
|
1359 | # changes written out above, even if dirstate is never | |
|
1360 | # changed after this | |
|
1361 | tr.addfilegenerator( | |
|
1362 | b'dirstate', | |
|
1363 | (self._filename,), | |
|
1364 | self._writedirstate, | |
|
1365 | location=b'plain', | |
|
1366 | ) | |
|
1367 | ||
|
1368 | # ensure that pending file written above is unlinked at | |
|
1369 | # failure, even if tr.writepending isn't invoked until the | |
|
1370 | # end of this transaction | |
|
1371 | tr.registertmp(filename, location=b'plain') | |
|
1372 | ||
|
1373 | self._opener.tryunlink(backupname) | |
|
1374 | # hardlink backup is okay because _writedirstate is always called | |
|
1375 | # with an "atomictemp=True" file. | |
|
1376 | util.copyfile( | |
|
1377 | self._opener.join(filename), | |
|
1378 | self._opener.join(backupname), | |
|
1379 | hardlink=True, | |
|
1380 | ) | |
|
1381 | ||
|
1382 | def restorebackup(self, tr, backupname): | |
|
1383 | '''Restore dirstate by backup file''' | |
|
1384 | # this "invalidate()" prevents "wlock.release()" from writing | |
|
1385 | # changes of dirstate out after restoring from backup file | |
|
1386 | self.invalidate() | |
|
1387 | filename = self._actualfilename(tr) | |
|
1388 | o = self._opener | |
|
1389 | if util.samefile(o.join(backupname), o.join(filename)): | |
|
1390 | o.unlink(backupname) | |
|
1391 | else: | |
|
1392 | o.rename(backupname, filename, checkambig=True) | |
|
1393 | ||
|
1394 | def clearbackup(self, tr, backupname): | |
|
1395 | '''Clear backup file''' | |
|
1396 | self._opener.unlink(backupname) | |
|
42 | rangemask = 0x7FFFFFFF | |
|
1397 | 43 | |
|
1398 | 44 | |
|
1399 | 45 | class dirstatemap(object): |
@@ -1430,13 +76,16 b' class dirstatemap(object):' | |||
|
1430 | 76 | denormalized form that they appear as in the dirstate. |
|
1431 | 77 | """ |
|
1432 | 78 | |
|
1433 | def __init__(self, ui, opener, root, nodeconstants): | |
|
79 | def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2): | |
|
1434 | 80 | self._ui = ui |
|
1435 | 81 | self._opener = opener |
|
1436 | 82 | self._root = root |
|
1437 | 83 | self._filename = b'dirstate' |
|
1438 | 84 | self._nodelen = 20 |
|
1439 | 85 | self._nodeconstants = nodeconstants |
|
86 | assert ( | |
|
87 | not use_dirstate_v2 | |
|
88 | ), "should have detected unsupported requirement" | |
|
1440 | 89 | |
|
1441 | 90 | self._parents = None |
|
1442 | 91 | self._dirtyparents = False |
@@ -1459,7 +108,7 b' class dirstatemap(object):' | |||
|
1459 | 108 | def clear(self): |
|
1460 | 109 | self._map.clear() |
|
1461 | 110 | self.copymap.clear() |
|
1462 | self.setparents(nullid, nullid) | |
|
111 | self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid) | |
|
1463 | 112 | util.clearcachedproperty(self, b"_dirs") |
|
1464 | 113 | util.clearcachedproperty(self, b"_alldirs") |
|
1465 | 114 | util.clearcachedproperty(self, b"filefoldmap") |
@@ -1473,6 +122,8 b' class dirstatemap(object):' | |||
|
1473 | 122 | # forward for python2,3 compat |
|
1474 | 123 | iteritems = items |
|
1475 | 124 | |
|
125 | debug_iter = items | |
|
126 | ||
|
1476 | 127 | def __len__(self): |
|
1477 | 128 | return len(self._map) |
|
1478 | 129 | |
@@ -1495,19 +146,161 b' class dirstatemap(object):' | |||
|
1495 | 146 | """Loads the underlying data, if it's not already loaded""" |
|
1496 | 147 | self._map |
|
1497 | 148 | |
|
1498 | def addfile(self, f, oldstate, state, mode, size, mtime): | |
|
149 | def _dirs_incr(self, filename, old_entry=None): | |
|
150 | """incremente the dirstate counter if applicable""" | |
|
151 | if ( | |
|
152 | old_entry is None or old_entry.removed | |
|
153 | ) and "_dirs" in self.__dict__: | |
|
154 | self._dirs.addpath(filename) | |
|
155 | if old_entry is None and "_alldirs" in self.__dict__: | |
|
156 | self._alldirs.addpath(filename) | |
|
157 | ||
|
158 | def _dirs_decr(self, filename, old_entry=None, remove_variant=False): | |
|
159 | """decremente the dirstate counter if applicable""" | |
|
160 | if old_entry is not None: | |
|
161 | if "_dirs" in self.__dict__ and not old_entry.removed: | |
|
162 | self._dirs.delpath(filename) | |
|
163 | if "_alldirs" in self.__dict__ and not remove_variant: | |
|
164 | self._alldirs.delpath(filename) | |
|
165 | elif remove_variant and "_alldirs" in self.__dict__: | |
|
166 | self._alldirs.addpath(filename) | |
|
167 | if "filefoldmap" in self.__dict__: | |
|
168 | normed = util.normcase(filename) | |
|
169 | self.filefoldmap.pop(normed, None) | |
|
170 | ||
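
_dirs_incr/_dirs_decr above maintain reference counts so that directory-membership queries stay cheap. A toy ancestor-directory refcounter in the spirit of pathutil.dirs (the real counter has optimized C and Rust implementations):

    import collections

    class Dirs(object):
        """Each tracked file adds one reference to every ancestor dir."""

        def __init__(self):
            self._refs = collections.Counter()

        def addpath(self, f):
            while '/' in f:
                f = f.rsplit('/', 1)[0]
                self._refs[f] += 1

        def delpath(self, f):
            while '/' in f:
                f = f.rsplit('/', 1)[0]
                self._refs[f] -= 1
                if not self._refs[f]:
                    del self._refs[f]

        def __contains__(self, d):
            return d in self._refs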
|
171 | def set_possibly_dirty(self, filename): | |
|
172 | """record that the current state of the file on disk is unknown""" | |
|
173 | self[filename].set_possibly_dirty() | |
|
174 | ||
|
175 | def addfile( | |
|
176 | self, | |
|
177 | f, | |
|
178 | mode=0, | |
|
179 | size=None, | |
|
180 | mtime=None, | |
|
181 | added=False, | |
|
182 | merged=False, | |
|
183 | from_p2=False, | |
|
184 | possibly_dirty=False, | |
|
185 | ): | |
|
1499 | 186 | """Add a tracked file to the dirstate.""" |
|
1500 | if oldstate in b"?r" and "_dirs" in self.__dict__: | |
|
1501 | self._dirs.addpath(f) | |
|
1502 | if oldstate == b"?" and "_alldirs" in self.__dict__: | |
|
1503 | self._alldirs.addpath(f) | |
|
1504 | self._map[f] = dirstatetuple(state, mode, size, mtime) | |
|
1505 | if state != b'n' or mtime == -1: | |
|
187 | if added: | |
|
188 | assert not merged | |
|
189 | assert not possibly_dirty | |
|
190 | assert not from_p2 | |
|
191 | state = b'a' | |
|
192 | size = NONNORMAL | |
|
193 | mtime = AMBIGUOUS_TIME | |
|
194 | elif merged: | |
|
195 | assert not possibly_dirty | |
|
196 | assert not from_p2 | |
|
197 | state = b'm' | |
|
198 | size = FROM_P2 | |
|
199 | mtime = AMBIGUOUS_TIME | |
|
200 | elif from_p2: | |
|
201 | assert not possibly_dirty | |
|
202 | state = b'n' | |
|
203 | size = FROM_P2 | |
|
204 | mtime = AMBIGUOUS_TIME | |
|
205 | elif possibly_dirty: | |
|
206 | state = b'n' | |
|
207 | size = NONNORMAL | |
|
208 | mtime = AMBIGUOUS_TIME | |
|
209 | else: | |
|
210 | assert size != FROM_P2 | |
|
211 | assert size != NONNORMAL | |
|
212 | state = b'n' | |
|
213 | size = size & rangemask | |
|
214 | mtime = mtime & rangemask | |
|
215 | assert state is not None | |
|
216 | assert size is not None | |
|
217 | assert mtime is not None | |
|
218 | old_entry = self.get(f) | |
|
219 | self._dirs_incr(f, old_entry) | |
|
220 | e = self._map[f] = DirstateItem(state, mode, size, mtime) | |
|
221 | if e.dm_nonnormal: | |
|
1506 | 222 | self.nonnormalset.add(f) |
|
1507 | if size == -2: | |
|
223 | if e.dm_otherparent: | |
|
1508 | 224 | self.otherparentset.add(f) |
|
1509 | 225 | |
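
The new keyword flags on addfile() map one-for-one onto the old (state, mode, size, mtime) tuples, using the sentinel constants this change introduces (FROM_P2, NONNORMAL, AMBIGUOUS_TIME, rangemask, all visible earlier in the diff). The mapping, restated as a standalone function:

    NONNORMAL = -1       # sentinel size: modified/merged/added
    FROM_P2 = -2         # sentinel size: content comes from the other parent
    AMBIGUOUS_TIME = -1  # sentinel mtime: force a later lookup
    rangemask = 0x7FFFFFFF

    def entry_tuple(mode=0, size=None, mtime=None, added=False, merged=False,
                    from_p2=False, possibly_dirty=False):
        if added:
            return (b'a', mode, NONNORMAL, AMBIGUOUS_TIME)
        if merged:
            return (b'm', mode, FROM_P2, AMBIGUOUS_TIME)
        if from_p2:
            return (b'n', mode, FROM_P2, AMBIGUOUS_TIME)
        if possibly_dirty:
            return (b'n', mode, NONNORMAL, AMBIGUOUS_TIME)
        return (b'n', mode, size & rangemask, mtime & rangemask)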
|
1510 | def removefile(self, f, oldstate, size): | |
|
226 | def reset_state( | |
|
227 | self, | |
|
228 | filename, | |
|
229 | wc_tracked, | |
|
230 | p1_tracked, | |
|
231 | p2_tracked=False, | |
|
232 | merged=False, | |
|
233 | clean_p1=False, | |
|
234 | clean_p2=False, | |
|
235 | possibly_dirty=False, | |
|
236 | parentfiledata=None, | |
|
237 | ): | |
|
238 | """Set a entry to a given state, diregarding all previous state | |
|
239 | ||
|
240 | This is to be used by the part of the dirstate API dedicated to | |
|
241 | adjusting the dirstate after an update/merge. | |
|
242 | ||
|
243 | note: calling this might result in no entry existing at all if the | |

244 | dirstate map does not see any point in having one for this file | |
|
245 | anymore. | |
|
246 | """ | |
|
247 | if merged and (clean_p1 or clean_p2): | |
|
248 | msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`' | |
|
249 | raise error.ProgrammingError(msg) | |
|
250 | # copy information is now outdated | |

251 | # (maybe new information should be directly passed to this function) | |
|
252 | self.copymap.pop(filename, None) | |
|
253 | ||
|
254 | if not (p1_tracked or p2_tracked or wc_tracked): | |
|
255 | self.dropfile(filename) | |
|
256 | elif merged: | |
|
257 | # XXX might be merged and removed ? | |
|
258 | entry = self.get(filename) | |
|
259 | if entry is not None and entry.tracked: | |
|
260 | # XXX mostly replicate dirstate.other parent. We should get | |
|
261 | # the higher layer to pass us more reliable data where `merged` | |
|
262 | # actually means merged. Dropping the else clause will show | |
|
263 | # failure in `test-graft.t` | |
|
264 | self.addfile(filename, merged=True) | |
|
265 | else: | |
|
266 | self.addfile(filename, from_p2=True) | |
|
267 | elif not (p1_tracked or p2_tracked) and wc_tracked: | |
|
268 | self.addfile(filename, added=True, possibly_dirty=possibly_dirty) | |
|
269 | elif (p1_tracked or p2_tracked) and not wc_tracked: | |
|
270 | # XXX might be merged and removed ? | |
|
271 | old_entry = self._map.get(filename) | |
|
272 | self._dirs_decr(filename, old_entry=old_entry, remove_variant=True) | |
|
273 | self._map[filename] = DirstateItem(b'r', 0, 0, 0) | |
|
274 | self.nonnormalset.add(filename) | |
|
275 | elif clean_p2 and wc_tracked: | |
|
276 | if p1_tracked or self.get(filename) is not None: | |
|
277 | # XXX the `self.get` call is catching some case in | |
|
278 | # `test-merge-remove.t` where the file is tracked in p1, the | |
|
279 | # p1_tracked argument is False. | |
|
280 | # | |
|
281 | # In addition, this seems to be a case where the file is marked | |
|
282 | # as merged without actually being the result of a merge | |
|
283 | # action. So things are not ideal here. | |
|
284 | self.addfile(filename, merged=True) | |
|
285 | else: | |
|
286 | self.addfile(filename, from_p2=True) | |
|
287 | elif not p1_tracked and p2_tracked and wc_tracked: | |
|
288 | self.addfile(filename, from_p2=True, possibly_dirty=possibly_dirty) | |
|
289 | elif possibly_dirty: | |
|
290 | self.addfile(filename, possibly_dirty=possibly_dirty) | |
|
291 | elif wc_tracked: | |
|
292 | # this is a "normal" file | |
|
293 | if parentfiledata is None: | |
|
294 | msg = b'failed to pass parentfiledata for a normal file: %s' | |
|
295 | msg %= filename | |
|
296 | raise error.ProgrammingError(msg) | |
|
297 | mode, size, mtime = parentfiledata | |
|
298 | self.addfile(filename, mode=mode, size=size, mtime=mtime) | |
|
299 | self.nonnormalset.discard(filename) | |
|
300 | else: | |
|
301 | assert False, 'unreachable' | |
|
302 | ||
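
reset_state() above is easiest to read as an ordered decision list over the three tracked flags. A sketch that returns the branch taken, in the same order as the code (the labels are ours):

    def reset_dispatch(wc_tracked, p1_tracked, p2_tracked=False, merged=False,
                       clean_p2=False, possibly_dirty=False):
        if not (p1_tracked or p2_tracked or wc_tracked):
            return 'drop'                  # nobody knows about the file
        if merged:
            return 'merged'                # (or from_p2 if not yet tracked)
        if not (p1_tracked or p2_tracked) and wc_tracked:
            return 'added'
        if (p1_tracked or p2_tracked) and not wc_tracked:
            return 'removed'
        if clean_p2 and wc_tracked:
            return 'merged-or-from_p2'
        if not p1_tracked and p2_tracked and wc_tracked:
            return 'from_p2'
        if possibly_dirty:
            return 'possibly-dirty'
        if wc_tracked:
            return 'normal'                # requires parentfiledata
        raise AssertionError('unreachable')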
|
303 | def removefile(self, f, in_merge=False): | |
|
1511 | 304 | """ |
|
1512 | 305 | Mark a file as removed in the dirstate. |
|
1513 | 306 | |
@@ -1515,38 +308,41 b' class dirstatemap(object):' | |||
|
1515 | 308 | the file's previous state. In the future, we should refactor this |
|
1516 | 309 | to be more explicit about what that state is. |
|
1517 | 310 | """ |
|
1518 | if oldstate not in b"?r" and "_dirs" in self.__dict__: | |
|
1519 | self._dirs.delpath(f) | |
|
1520 | if oldstate == b"?" and "_alldirs" in self.__dict__: | |
|
1521 | self._alldirs.addpath(f) | |
|
1522 | if "filefoldmap" in self.__dict__: | |
|
1523 | normed = util.normcase(f) | |
|
1524 | self.filefoldmap.pop(normed, None) | |
|
1525 | self._map[f] = dirstatetuple(b'r', 0, size, 0) | |
|
311 | entry = self.get(f) | |
|
312 | size = 0 | |
|
313 | if in_merge: | |
|
314 | # XXX we should not be able to have 'm' state and 'FROM_P2' if not | |
|
315 | # during a merge. So I (marmoute) am not sure we need the | |
|
316 | # conditional at all. Double-checking this with an assert | |
|
317 | # would be nice. | |
|
318 | if entry is not None: | |
|
319 | # backup the previous state | |
|
320 | if entry.merged: # merge | |
|
321 | size = NONNORMAL | |
|
322 | elif entry.from_p2: | |
|
323 | size = FROM_P2 | |
|
324 | self.otherparentset.add(f) | |
|
325 | if entry is not None and not (entry.merged or entry.from_p2): | |
|
326 | self.copymap.pop(f, None) | |
|
327 | self._dirs_decr(f, old_entry=entry, remove_variant=True) | |
|
328 | self._map[f] = DirstateItem(b'r', 0, size, 0) | |
|
1526 | 329 | self.nonnormalset.add(f) |
|
1527 | 330 | |
|
1528 | def dropfile(self, f, oldstate): | |

331 | def dropfile(self, f): | |
|
1529 | 332 | """ |
|
1530 | 333 | Remove a file from the dirstate. Returns True if the file was |
|
1531 | 334 | previously recorded. |
|
1532 | 335 | """ |
|
1533 | exists = self._map.pop(f, None) is not None | |

1534 | if exists: | |
|
1535 | if oldstate != b"r" and "_dirs" in self.__dict__: | |
|
1536 | self._dirs.delpath(f) | |
|
1537 | if "_alldirs" in self.__dict__: | |
|
1538 | self._alldirs.delpath(f) | |
|
1539 | if "filefoldmap" in self.__dict__: | |
|
1540 | normed = util.normcase(f) | |
|
1541 | self.filefoldmap.pop(normed, None) | |
|
336 | old_entry = self._map.pop(f, None) | |
|
337 | self._dirs_decr(f, old_entry=old_entry) | |
|
1542 | 338 | self.nonnormalset.discard(f) |
|
1543 | return exists | |

339 | return old_entry is not None | |
|
1544 | 340 | |
|
1545 | 341 | def clearambiguoustimes(self, files, now): |
|
1546 | 342 | for f in files: |
|
1547 | 343 | e = self.get(f) |
|
1548 | if e is not None and e[0] == b'n' and e[3] == now: | |

1549 | self._map[f] = dirstatetuple(e[0], e[1], e[2], -1) | |
|
344 | if e is not None and e.need_delay(now): | |
|
345 | e.set_possibly_dirty() | |
|
1550 | 346 | self.nonnormalset.add(f) |
|
1551 | 347 | |
|
1552 | 348 | def nonnormalentries(self): |
@@ -1557,9 +353,9 b' class dirstatemap(object):' | |||
|
1557 | 353 | nonnorm = set() |
|
1558 | 354 | otherparent = set() |
|
1559 | 355 | for fname, e in pycompat.iteritems(self._map): |
|
1560 | if e[0] != b'n' or e[3] == -1: | |

356 | if e.dm_nonnormal: | |
|
1561 | 357 | nonnorm.add(fname) |
|
1562 | if e[0] == b'n' and e[2] == -2: | |
|
358 | if e.from_p2: | |
|
1563 | 359 | otherparent.add(fname) |
|
1564 | 360 | return nonnorm, otherparent |
|
1565 | 361 | |
@@ -1580,7 +376,7 b' class dirstatemap(object):' | |||
|
1580 | 376 | f = {} |
|
1581 | 377 | normcase = util.normcase |
|
1582 | 378 | for name, s in pycompat.iteritems(self._map): |
|
1583 | if s[0] != b'r': | |

379 | if not s.removed: | |
|
1584 | 380 | f[normcase(name)] = name |
|
1585 | 381 | f[b'.'] = b'.' # prevents useless util.fspath() invocation |
|
1586 | 382 | return f |
@@ -1636,7 +432,10 b' class dirstatemap(object):' | |||
|
1636 | 432 | st[self._nodelen : 2 * self._nodelen], |
|
1637 | 433 | ) |
|
1638 | 434 | elif l == 0: |
|
1639 | self._parents = (nullid, nullid) | |

435 | self._parents = ( | |
|
436 | self._nodeconstants.nullid, | |
|
437 | self._nodeconstants.nullid, | |
|
438 | ) | |
|
1640 | 439 | else: |
|
1641 | 440 | raise error.Abort( |
|
1642 | 441 | _(b'working directory state appears damaged!') |
@@ -1698,7 +497,7 b' class dirstatemap(object):' | |||
|
1698 | 497 | self.__getitem__ = self._map.__getitem__ |
|
1699 | 498 | self.get = self._map.get |
|
1700 | 499 | |
|
1701 | def write(self, st, now): | |
|
500 | def write(self, _tr, st, now): | |
|
1702 | 501 | st.write( |
|
1703 | 502 | parsers.pack_dirstate(self._map, self.copymap, self.parents(), now) |
|
1704 | 503 | ) |
@@ -1718,6 +517,9 b' class dirstatemap(object):' | |||
|
1718 | 517 | self.nonnormalset = nonnorm |
|
1719 | 518 | return otherparents |
|
1720 | 519 | |
|
520 | def non_normal_or_other_parent_paths(self): | |
|
521 | return self.nonnormalset.union(self.otherparentset) | |
|
522 | ||
|
1721 | 523 | @propertycache |
|
1722 | 524 | def identity(self): |
|
1723 | 525 | self._map |
@@ -1735,20 +537,129 b' class dirstatemap(object):' | |||
|
1735 | 537 | if rustmod is not None: |
|
1736 | 538 | |
|
1737 | 539 | class dirstatemap(object): |
|
1738 | def __init__(self, ui, opener, root, nodeconstants): | |
|
540 | def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2): | |
|
541 | self._use_dirstate_v2 = use_dirstate_v2 | |
|
1739 | 542 | self._nodeconstants = nodeconstants |
|
1740 | 543 | self._ui = ui |
|
1741 | 544 | self._opener = opener |
|
1742 | 545 | self._root = root |
|
1743 | 546 | self._filename = b'dirstate' |
|
547 | self._nodelen = 20 # Also update Rust code when changing this! | |
|
1744 | 548 | self._parents = None |
|
1745 | 549 | self._dirtyparents = False |
|
550 | self._docket = None | |
|
1746 | 551 | |
|
1747 | 552 | # for consistent view between _pl() and _read() invocations |
|
1748 | 553 | self._pendingmode = None |
|
1749 | 554 | |
|
1750 | def addfile(self, *args, **kwargs): | |
|
1751 | return self._rustmap.addfile(*args, **kwargs) | |
|
555 | self._use_dirstate_tree = self._ui.configbool( | |
|
556 | b"experimental", | |
|
557 | b"dirstate-tree.in-memory", | |
|
558 | False, | |
|
559 | ) | |
|
560 | ||
|
561 | def addfile( | |
|
562 | self, | |
|
563 | f, | |
|
564 | mode=0, | |
|
565 | size=None, | |
|
566 | mtime=None, | |
|
567 | added=False, | |
|
568 | merged=False, | |
|
569 | from_p2=False, | |
|
570 | possibly_dirty=False, | |
|
571 | ): | |
|
572 | return self._rustmap.addfile( | |
|
573 | f, | |
|
574 | mode, | |
|
575 | size, | |
|
576 | mtime, | |
|
577 | added, | |
|
578 | merged, | |
|
579 | from_p2, | |
|
580 | possibly_dirty, | |
|
581 | ) | |
|
582 | ||
|
583 | def reset_state( | |
|
584 | self, | |
|
585 | filename, | |
|
586 | wc_tracked, | |
|
587 | p1_tracked, | |
|
588 | p2_tracked=False, | |
|
589 | merged=False, | |
|
590 | clean_p1=False, | |
|
591 | clean_p2=False, | |
|
592 | possibly_dirty=False, | |
|
593 | parentfiledata=None, | |
|
594 | ): | |
|
595 | """Set a entry to a given state, disregarding all previous state | |
|
596 | ||
|
597 | This is to be used by the part of the dirstate API dedicated to | |
|
598 | adjusting the dirstate after an update/merge. | |
|
599 | ||
|
600 | note: calling this might result in no entry existing at all if the | |

601 | dirstate map does not see any point in having one for this file | |
|
602 | anymore. | |
|
603 | """ | |
|
604 | if merged and (clean_p1 or clean_p2): | |
|
605 | msg = ( | |
|
606 | b'`merged` argument incompatible with `clean_p1`/`clean_p2`' | |
|
607 | ) | |
|
608 | raise error.ProgrammingError(msg) | |
|
609 | # copy information is now outdated | |

610 | # (maybe new information should be directly passed to this function) | |
|
611 | self.copymap.pop(filename, None) | |
|
612 | ||
|
613 | if not (p1_tracked or p2_tracked or wc_tracked): | |
|
614 | self.dropfile(filename) | |
|
615 | elif merged: | |
|
616 | # XXX might be merged and removed ? | |
|
617 | entry = self.get(filename) | |
|
618 | if entry is not None and entry.tracked: | |
|
619 | # XXX mostly replicate dirstate.other parent. We should get | |
|
620 | # the higher layer to pass us more reliable data where `merged` | |
|
621 | # actually means merged. Dropping the else clause will show | |
|
622 | # failure in `test-graft.t` | |
|
623 | self.addfile(filename, merged=True) | |
|
624 | else: | |
|
625 | self.addfile(filename, from_p2=True) | |
|
626 | elif not (p1_tracked or p2_tracked) and wc_tracked: | |
|
627 | self.addfile( | |
|
628 | filename, added=True, possibly_dirty=possibly_dirty | |
|
629 | ) | |
|
630 | elif (p1_tracked or p2_tracked) and not wc_tracked: | |
|
631 | # XXX might be merged and removed ? | |
|
632 | self[filename] = DirstateItem(b'r', 0, 0, 0) | |
|
633 | self.nonnormalset.add(filename) | |
|
634 | elif clean_p2 and wc_tracked: | |
|
635 | if p1_tracked or self.get(filename) is not None: | |
|
636 | # XXX the `self.get` call is catching some case in | |
|
637 | # `test-merge-remove.t` where the file is tracked in p1, the | |
|
638 | # p1_tracked argument is False. | |
|
639 | # | |
|
640 | # In addition, this seems to be a case where the file is marked | |
|
641 | # as merged without actually being the result of a merge | |
|
642 | # action. So things are not ideal here. | |
|
643 | self.addfile(filename, merged=True) | |
|
644 | else: | |
|
645 | self.addfile(filename, from_p2=True) | |
|
646 | elif not p1_tracked and p2_tracked and wc_tracked: | |
|
647 | self.addfile( | |
|
648 | filename, from_p2=True, possibly_dirty=possibly_dirty | |
|
649 | ) | |
|
650 | elif possibly_dirty: | |
|
651 | self.addfile(filename, possibly_dirty=possibly_dirty) | |
|
652 | elif wc_tracked: | |
|
653 | # this is a "normal" file | |
|
654 | if parentfiledata is None: | |
|
655 | msg = b'failed to pass parentfiledata for a normal file: %s' | |
|
656 | msg %= filename | |
|
657 | raise error.ProgrammingError(msg) | |
|
658 | mode, size, mtime = parentfiledata | |
|
659 | self.addfile(filename, mode=mode, size=size, mtime=mtime) | |
|
660 | self.nonnormalset.discard(filename) | |
|
661 | else: | |
|
662 | assert False, 'unreachable' | |
|
1752 | 663 | |
|
1753 | 664 | def removefile(self, *args, **kwargs): |
|
1754 | 665 | return self._rustmap.removefile(*args, **kwargs) |
@@ -1765,36 +676,24 b' if rustmod is not None:' | |||
|
1765 | 676 | def get(self, *args, **kwargs): |
|
1766 | 677 | return self._rustmap.get(*args, **kwargs) |
|
1767 | 678 | |
|
1768 | @propertycache | |
|
1769 | def _rustmap(self): | |
|
1770 | """ | |
|
1771 | Fills the Dirstatemap when called. | |
|
1772 | Use `self._inner_rustmap` if reading the dirstate is not necessary. | |
|
1773 | """ | |
|
1774 | self._rustmap = self._inner_rustmap | |
|
1775 | self.read() | |
|
1776 | return self._rustmap | |
|
1777 | ||
|
1778 | @propertycache | |
|
1779 | def _inner_rustmap(self): | |
|
1780 | """ | |
|
1781 | Does not fill the Dirstatemap when called. This allows for | |
|
1782 | optimizations where only setting/getting the parents is needed. | |
|
1783 | """ | |
|
1784 | self._inner_rustmap = rustmod.DirstateMap(self._root) | |
|
1785 | return self._inner_rustmap | |
|
1786 | ||
|
1787 | 679 | @property |
|
1788 | 680 | def copymap(self): |
|
1789 | 681 | return self._rustmap.copymap() |
|
1790 | 682 | |
|
683 | def directories(self): | |
|
684 | return self._rustmap.directories() | |
|
685 | ||
|
686 | def debug_iter(self): | |
|
687 | return self._rustmap.debug_iter() | |
|
688 | ||
|
1791 | 689 | def preload(self): |
|
1792 | 690 | self._rustmap |
|
1793 | 691 | |
|
1794 | 692 | def clear(self): |
|
1795 | 693 | self._rustmap.clear() |
|
1796 | self._inner_rustmap.clear() | |

1797 | self.setparents(nullid, nullid) | |

694 | self.setparents( | |
|
695 | self._nodeconstants.nullid, self._nodeconstants.nullid | |
|
696 | ) | |
|
1798 | 697 | util.clearcachedproperty(self, b"_dirs") |
|
1799 | 698 | util.clearcachedproperty(self, b"_alldirs") |
|
1800 | 699 | util.clearcachedproperty(self, b"dirfoldmap") |
@@ -1832,64 +731,145 b' if rustmod is not None:' | |||
|
1832 | 731 | self._pendingmode = mode |
|
1833 | 732 | return fp |
|
1834 | 733 | |
|
734 | def _readdirstatefile(self, size=-1): | |
|
735 | try: | |
|
736 | with self._opendirstatefile() as fp: | |
|
737 | return fp.read(size) | |
|
738 | except IOError as err: | |
|
739 | if err.errno != errno.ENOENT: | |
|
740 | raise | |
|
741 | # File doesn't exist, so the current state is empty | |
|
742 | return b'' | |
|
743 | ||
|
1835 | 744 | def setparents(self, p1, p2): |
|
1836 | self._rustmap.setparents(p1, p2) | |
|
1837 | 745 | self._parents = (p1, p2) |
|
1838 | 746 | self._dirtyparents = True |
|
1839 | 747 | |
|
1840 | 748 | def parents(self): |
|
1841 | 749 | if not self._parents: |
|
1842 | try: | |
|
1843 | fp = self._opendirstatefile() | |

1844 | st = fp.read(40) | |

1845 | fp.close() | |
|
1846 | except IOError as err: | |
|
1847 | if err.errno != errno.ENOENT: | |

1848 | raise | |

1849 | # File doesn't exist, so the current state is empty | |
|
1850 | st = b'' | |

1851 | ||
|
1852 | try: | |

1853 | self._parents = self._inner_rustmap.parents(st) | |
|
1854 | except ValueError: | |
|
1855 | raise error.Abort( | |
|
1856 | _(b'working directory state appears damaged!') | |
|
1857 | ) | |
|
750 | if self._use_dirstate_v2: | |
|
751 | self._parents = self.docket.parents | |
|
752 | else: | |
|
753 | read_len = self._nodelen * 2 | |
|
754 | st = self._readdirstatefile(read_len) | |
|
755 | l = len(st) | |
|
756 | if l == read_len: | |
|
757 | self._parents = ( | |
|
758 | st[: self._nodelen], | |
|
759 | st[self._nodelen : 2 * self._nodelen], | |
|
760 | ) | |
|
761 | elif l == 0: | |
|
762 | self._parents = ( | |
|
763 | self._nodeconstants.nullid, | |
|
764 | self._nodeconstants.nullid, | |
|
765 | ) | |
|
766 | else: | |
|
767 | raise error.Abort( | |
|
768 | _(b'working directory state appears damaged!') | |
|
769 | ) | |
|
1858 | 770 | |
|
1859 | 771 | return self._parents |
|
1860 | 772 | |
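
The v1 branch above is just fixed-width slicing: the first `2 * nodelen` bytes of the dirstate file are the two parent nodes, back to back. A standalone sketch of the same parsing, assuming 20-byte sha1 nodes (this is an illustration, not hg's actual API):

    NODELEN = 20  # sha1-sized nodes, as in dirstate-v1

    def parse_v1_parents(header):
        """Return (p1, p2) from the leading bytes of a v1 dirstate."""
        if len(header) == 0:
            # missing or empty file: both parents are the null node
            return (b'\x00' * NODELEN, b'\x00' * NODELEN)
        if len(header) < 2 * NODELEN:
            raise ValueError('working directory state appears damaged!')
        return header[:NODELEN], header[NODELEN : 2 * NODELEN]

    p1, p2 = parse_v1_parents(b'\x11' * 20 + b'\x22' * 20)
    assert p1 == b'\x11' * 20 and p2 == b'\x22' * 20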
|
1861 | def read(self): | |
|
773 | @property | |
|
774 | def docket(self): | |
|
775 | if not self._docket: | |
|
776 | if not self._use_dirstate_v2: | |
|
777 | raise error.ProgrammingError( | |
|
778 | b'dirstate only has a docket in v2 format' | |
|
779 | ) | |
|
780 | self._docket = docketmod.DirstateDocket.parse( | |
|
781 | self._readdirstatefile(), self._nodeconstants | |
|
782 | ) | |
|
783 | return self._docket | |
|
784 | ||
|
785 | @propertycache | |
|
786 | def _rustmap(self): | |
|
787 | """ | |
|
788 | Fills the Dirstatemap when called. | |
|
789 | """ | |
|
1862 | 790 | # ignore HG_PENDING because identity is used only for writing |
|
1863 | 791 | self.identity = util.filestat.frompath( |
|
1864 | 792 | self._opener.join(self._filename) |
|
1865 | 793 | ) |
|
1866 | 794 | |
|
1867 | try: | |
|
1868 | fp = self._opendirstatefile() | |

1869 | try: | |
|
1870 | st = fp.read() | |

1871 | finally: | |

1872 | fp.close() | |

1873 | except IOError as err: | |
|
1874 | if err.errno != errno.ENOENT: | |
|
1875 | raise | |

1876 | st = b'' | |

1877 | if not st: | |

1878 | return | |
|
795 | if self._use_dirstate_v2: | |
|
796 | if self.docket.uuid: | |
|
797 | # TODO: use mmap when possible | |
|
798 | data = self._opener.read(self.docket.data_filename()) | |
|
799 | else: | |
|
800 | data = b'' | |
|
801 | self._rustmap = rustmod.DirstateMap.new_v2( | |
|
802 | data, self.docket.data_size, self.docket.tree_metadata | |
|
803 | ) | |
|
804 | parents = self.docket.parents | |
|
805 | else: | |
|
806 | self._rustmap, parents = rustmod.DirstateMap.new_v1( | |
|
807 | self._use_dirstate_tree, self._readdirstatefile() | |
|
808 | ) | |
|
1879 | 809 | |
|
1880 | parse_dirstate = util.nogc(self._rustmap.read) | |
|
1881 | parents = parse_dirstate(st) | |
|
1882 | 810 | if parents and not self._dirtyparents: |
|
1883 | 811 | self.setparents(*parents) |
|
1884 | 812 | |
|
1885 | 813 | self.__contains__ = self._rustmap.__contains__ |
|
1886 | 814 | self.__getitem__ = self._rustmap.__getitem__ |
|
1887 | 815 | self.get = self._rustmap.get |
|
816 | return self._rustmap | |
|
1888 | 817 | |
|
1889 | def write(self, st, now): | |
|
1890 | parents = self.parents() | |
|
1891 | st.write(self._rustmap.write(parents[0], parents[1], now)) | |
|
1892 | st.close() | |
|
818 | def write(self, tr, st, now): | |
|
819 | if not self._use_dirstate_v2: | |
|
820 | p1, p2 = self.parents() | |
|
821 | packed = self._rustmap.write_v1(p1, p2, now) | |
|
822 | st.write(packed) | |
|
823 | st.close() | |
|
824 | self._dirtyparents = False | |
|
825 | return | |
|
826 | ||
|
827 | # We can only append to an existing data file if there is one | |
|
828 | can_append = self.docket.uuid is not None | |
|
829 | packed, meta, append = self._rustmap.write_v2(now, can_append) | |
|
830 | if append: | |
|
831 | docket = self.docket | |
|
832 | data_filename = docket.data_filename() | |
|
833 | if tr: | |
|
834 | tr.add(data_filename, docket.data_size) | |
|
835 | with self._opener(data_filename, b'r+b') as fp: | |
|
836 | fp.seek(docket.data_size) | |
|
837 | assert fp.tell() == docket.data_size | |
|
838 | written = fp.write(packed) | |
|
839 | if written is not None: # py2 may return None | |
|
840 | assert written == len(packed), (written, len(packed)) | |
|
841 | docket.data_size += len(packed) | |
|
842 | docket.parents = self.parents() | |
|
843 | docket.tree_metadata = meta | |
|
844 | st.write(docket.serialize()) | |
|
845 | st.close() | |
|
846 | else: | |
|
847 | old_docket = self.docket | |
|
848 | new_docket = docketmod.DirstateDocket.with_new_uuid( | |
|
849 | self.parents(), len(packed), meta | |
|
850 | ) | |
|
851 | data_filename = new_docket.data_filename() | |
|
852 | if tr: | |
|
853 | tr.add(data_filename, 0) | |
|
854 | self._opener.write(data_filename, packed) | |
|
855 | # Write the new docket after the new data file has been | |
|
856 | # written. Because `st` was opened with `atomictemp=True`, | |
|
857 | # the actual `.hg/dirstate` file is only affected on close. | |
|
858 | st.write(new_docket.serialize()) | |
|
859 | st.close() | |
|
860 | # Remove the old data file after the new docket pointing to | |
|
861 | # the new data file was written. | |
|
862 | if old_docket.uuid: | |
|
863 | data_filename = old_docket.data_filename() | |
|
864 | unlink = lambda _tr=None: self._opener.unlink(data_filename) | |
|
865 | if tr: | |
|
866 | category = b"dirstate-v2-clean-" + old_docket.uuid | |
|
867 | tr.addpostclose(category, unlink) | |
|
868 | else: | |
|
869 | unlink() | |
|
870 | self._docket = new_docket | |
|
871 | # Reload from the newly-written file | |
|
872 | util.clearcachedproperty(self, b"_rustmap") | |
|
1893 | 873 | self._dirtyparents = False |
|
1894 | 874 | |
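
The ordering in `write` above is the whole point of the docket design: the (possibly large) data file is never rewritten in place, and the small `.hg/dirstate` docket is replaced atomically only after the data it points to is already on disk. A minimal sketch of the same append-or-rewrite discipline using plain files (names such as `write_docketed` are illustrative, not hg APIs):

    import os
    import tempfile
    import uuid

    def write_docketed(directory, payload):
        """Write payload to a fresh data file, then atomically repoint."""
        # 1. New data goes to a uniquely named file; nothing reads it yet.
        data_name = 'data-%s' % uuid.uuid4().hex
        with open(os.path.join(directory, data_name), 'w') as f:
            f.write(payload)
        # 2. Replace the small pointer ("docket") file atomically, so a
        #    reader sees either the old pointer or the new one, never a
        #    pointer to a half-written data file.
        pointer = os.path.join(directory, 'pointer')
        old = None
        if os.path.exists(pointer):
            with open(pointer) as f:
                old = f.read()
        fd, tmp = tempfile.mkstemp(dir=directory)
        with os.fdopen(fd, 'w') as f:
            f.write(data_name)
        os.replace(tmp, pointer)
        # 3. Only once the new pointer is durable is the old data file
        #    garbage (hg defers the unlink to transaction close).
        if old:
            os.unlink(os.path.join(directory, old))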
|
1895 | 875 | @propertycache |
@@ -1900,22 +880,12 b' if rustmod is not None:' | |||
|
1900 | 880 | return self._rustmap.filefoldmapasdict() |
|
1901 | 881 | |
|
1902 | 882 | def hastrackeddir(self, d): |
|
1903 | self._dirs # Trigger Python's propertycache | |
|
1904 | 883 | return self._rustmap.hastrackeddir(d) |
|
1905 | 884 | |
|
1906 | 885 | def hasdir(self, d): |
|
1907 | self._dirs # Trigger Python's propertycache | |
|
1908 | 886 | return self._rustmap.hasdir(d) |
|
1909 | 887 | |
|
1910 | 888 | @propertycache |
|
1911 | def _dirs(self): | |
|
1912 | return self._rustmap.getdirs() | |
|
1913 | ||
|
1914 | @propertycache | |
|
1915 | def _alldirs(self): | |
|
1916 | return self._rustmap.getalldirs() | |
|
1917 | ||
|
1918 | @propertycache | |
|
1919 | 889 | def identity(self): |
|
1920 | 890 | self._rustmap |
|
1921 | 891 | return self.identity |
@@ -1930,10 +900,23 b' if rustmod is not None:' | |||
|
1930 | 900 | otherparents = self._rustmap.other_parent_entries() |
|
1931 | 901 | return otherparents |
|
1932 | 902 | |
|
903 | def non_normal_or_other_parent_paths(self): | |
|
904 | return self._rustmap.non_normal_or_other_parent_paths() | |
|
905 | ||
|
1933 | 906 | @propertycache |
|
1934 | 907 | def dirfoldmap(self): |
|
1935 | 908 | f = {} |
|
1936 | 909 | normcase = util.normcase |
|
1937 | for name in self._dirs: | |
|
910 | for name in self._rustmap.tracked_dirs(): | |
|
1938 | 911 | f[normcase(name)] = name |
|
1939 | 912 | return f |
|
913 | ||
|
914 | def set_possibly_dirty(self, filename): | |
|
915 | """record that the current state of the file on disk is unknown""" | |
|
916 | entry = self[filename] | |
|
917 | entry.set_possibly_dirty() | |
|
918 | self._rustmap.set_v1(filename, entry) | |
|
919 | ||
|
920 | def __setitem__(self, key, value): | |
|
921 | assert isinstance(value, DirstateItem) | |
|
922 | self._rustmap.set_v1(key, value) |
@@ -12,7 +12,6 b' import functools' | |||
|
12 | 12 | from .i18n import _ |
|
13 | 13 | from .node import ( |
|
14 | 14 | hex, |
|
15 | nullid, | |
|
16 | 15 | short, |
|
17 | 16 | ) |
|
18 | 17 | |
@@ -107,7 +106,7 b' class outgoing(object):' | |||
|
107 | 106 | if missingroots: |
|
108 | 107 | discbases = [] |
|
109 | 108 | for n in missingroots: |
|
110 | discbases.extend([p for p in cl.parents(n) if p != nullid]) | |
|
109 | discbases.extend([p for p in cl.parents(n) if p != repo.nullid]) | |
|
111 | 110 | # TODO remove call to nodesbetween. |
|
112 | 111 | # TODO populate attributes on outgoing instance instead of setting |
|
113 | 112 | # discbases. |
@@ -116,7 +115,7 b' class outgoing(object):' | |||
|
116 | 115 | ancestorsof = heads |
|
117 | 116 | commonheads = [n for n in discbases if n not in included] |
|
118 | 117 | elif not commonheads: |
|
119 | commonheads = [nullid] | |
|
118 | commonheads = [repo.nullid] | |
|
120 | 119 | self.commonheads = commonheads |
|
121 | 120 | self.ancestorsof = ancestorsof |
|
122 | 121 | self._revlog = cl |
@@ -381,7 +380,7 b' def checkheads(pushop):' | |||
|
381 | 380 | # - a local outgoing head descended from update |
|
382 | 381 | # - a remote head that's known locally and not |
|
383 | 382 | # ancestral to an outgoing head |
|
384 | if remoteheads == [nullid]: | |
|
383 | if remoteheads == [repo.nullid]: | |
|
385 | 384 | # remote is empty, nothing to check. |
|
386 | 385 | return |
|
387 | 386 |
@@ -1064,6 +1064,16 b' def _dispatch(req):' | |||
|
1064 | 1064 | if req.earlyoptions[b'profile']: |
|
1065 | 1065 | for ui_ in uis: |
|
1066 | 1066 | ui_.setconfig(b'profiling', b'enabled', b'true', b'--profile') |
|
1067 | elif req.earlyoptions[b'profile'] is False: | |
|
1068 | # Check for it being set already, so that we don't pollute the config | |
|
1069 | # with this when using chg in the very common case that it's not | |
|
1070 | # enabled. | |
|
1071 | if lui.configbool(b'profiling', b'enabled'): | |
|
1072 | # Only do this on lui so that `chg foo` with a user config setting | |
|
1073 | # profiling.enabled=1 still shows profiling information (chg will | |
|
1074 | # specify `--no-profile` when `hg serve` is starting up, we don't | |
|
1075 | # want that to propagate to every later invocation). | |
|
1076 | lui.setconfig(b'profiling', b'enabled', b'false', b'--no-profile') | |
|
1067 | 1077 | |
|
1068 | 1078 | profile = lui.configbool(b'profiling', b'enabled') |
|
1069 | 1079 | with profiling.profile(lui, enabled=profile) as profiler: |
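
The `elif req.earlyoptions[b'profile'] is False:` test matters because the early option is tri-state: `--profile` yields True, `--no-profile` yields False, and an absent flag yields None, which must not override the config file. A toy sketch of the same pattern, with plain dicts standing in for hg's ui/config objects:

    def effective_profiling(cli_flag, config):
        """cli_flag: True (--profile), False (--no-profile), None (unset)."""
        if cli_flag:
            config['profiling.enabled'] = True
        elif cli_flag is False:
            # Only record the override when it changes something, so a
            # long-lived (chg-style) config is not polluted by defaults.
            if config.get('profiling.enabled'):
                config['profiling.enabled'] = False
        return config.get('profiling.enabled', False)

    assert effective_profiling(None, {'profiling.enabled': True}) is True
    assert effective_profiling(False, {'profiling.enabled': True}) is False
    assert effective_profiling(None, {}) is False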
@@ -9,6 +9,7 b' from __future__ import absolute_import, ' | |||
|
9 | 9 | |
|
10 | 10 | import locale |
|
11 | 11 | import os |
|
12 | import re | |
|
12 | 13 | import unicodedata |
|
13 | 14 | |
|
14 | 15 | from .pycompat import getattr |
@@ -284,13 +285,75 b' else:' | |||
|
284 | 285 | |
|
285 | 286 | strmethod = pycompat.identity |
|
286 | 287 | |
|
288 | ||
|
289 | def lower(s): | |
|
290 | # type: (bytes) -> bytes | |
|
291 | """best-effort encoding-aware case-folding of local string s""" | |
|
292 | try: | |
|
293 | return asciilower(s) | |
|
294 | except UnicodeDecodeError: | |
|
295 | pass | |
|
296 | try: | |
|
297 | if isinstance(s, localstr): | |
|
298 | u = s._utf8.decode("utf-8") | |
|
299 | else: | |
|
300 | u = s.decode(_sysstr(encoding), _sysstr(encodingmode)) | |
|
301 | ||
|
302 | lu = u.lower() | |
|
303 | if u == lu: | |
|
304 | return s # preserve localstring | |
|
305 | return lu.encode(_sysstr(encoding)) | |
|
306 | except UnicodeError: | |
|
307 | return s.lower() # we don't know how to fold this except in ASCII | |
|
308 | except LookupError as k: | |
|
309 | raise error.Abort(k, hint=b"please check your locale settings") | |
|
310 | ||
|
311 | ||
|
312 | def upper(s): | |
|
313 | # type: (bytes) -> bytes | |
|
314 | """best-effort encoding-aware case-folding of local string s""" | |
|
315 | try: | |
|
316 | return asciiupper(s) | |
|
317 | except UnicodeDecodeError: | |
|
318 | return upperfallback(s) | |
|
319 | ||
|
320 | ||
|
321 | def upperfallback(s): | |
|
322 | # type: (Any) -> Any | |
|
323 | try: | |
|
324 | if isinstance(s, localstr): | |
|
325 | u = s._utf8.decode("utf-8") | |
|
326 | else: | |
|
327 | u = s.decode(_sysstr(encoding), _sysstr(encodingmode)) | |
|
328 | ||
|
329 | uu = u.upper() | |
|
330 | if u == uu: | |
|
331 | return s # preserve localstring | |
|
332 | return uu.encode(_sysstr(encoding)) | |
|
333 | except UnicodeError: | |
|
334 | return s.upper() # we don't know how to fold this except in ASCII | |
|
335 | except LookupError as k: | |
|
336 | raise error.Abort(k, hint=b"please check your locale settings") | |
|
337 | ||
|
338 | ||
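
For illustration, the cascade in `lower` above is: ASCII fast path, then a decode/fold/encode round trip in the local encoding, then a bytes-level fallback for undecodable input. A compressed re-implementation under the assumption of a UTF-8 local encoding (the real functions also special-case `localstr` and locale lookup errors):

    def besteffort_lower(s, local_encoding='utf-8'):
        try:
            # fast path, mirroring asciilower(): pure-ASCII bytes
            return s.decode('ascii').lower().encode('ascii')
        except UnicodeDecodeError:
            pass
        try:
            u = s.decode(local_encoding)
            lu = u.lower()
            return s if u == lu else lu.encode(local_encoding)
        except UnicodeError:
            return s.lower()  # fold ASCII letters only

    assert besteffort_lower(b'HGRC') == b'hgrc'
    assert besteffort_lower('\xc9'.encode('utf-8')) == '\xe9'.encode('utf-8')
    assert besteffort_lower(b'\xff') == b'\xff'  # undecodable: unchanged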
|
287 | 339 | if not _nativeenviron: |
|
288 | 340 | # now encoding and helper functions are available, recreate the environ |
|
289 | 341 | # dict to be exported to other modules |
|
290 | environ = { | |
|
291 | tolocal(k.encode('utf-8')): tolocal(v.encode('utf-8')) | |
|
292 | for k, v in os.environ.items() # re-exports | |
|
293 | } | |
|
342 | if pycompat.iswindows and pycompat.ispy3: | |
|
343 | ||
|
344 | class WindowsEnviron(dict): | |
|
345 | """`os.environ` normalizes environment variables to uppercase on windows""" | |
|
346 | ||
|
347 | def get(self, key, default=None): | |
|
348 | return super().get(upper(key), default) | |
|
349 | ||
|
350 | environ = WindowsEnviron() | |
|
351 | ||
|
352 | for k, v in os.environ.items(): # re-exports | |
|
353 | environ[tolocal(k.encode('utf-8'))] = tolocal(v.encode('utf-8')) | |
|
354 | ||
|
355 | ||
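
The subclass above exists because native Windows environment lookup is case-insensitive while a plain dict of bytes keys is not; since os.environ on Windows stores keys upper-cased, folding the lookup key is enough. A toy version, with bytes `.upper()` standing in for the encoding-aware `upper` helper:

    class CaseInsensitiveEnviron(dict):
        """Fold lookups to uppercase, like os.environ on Windows."""

        def get(self, key, default=None):
            return super().get(key.upper(), default)

    env = CaseInsensitiveEnviron()
    env[b'PATH'] = b'C:\\hg'      # keys are stored upper-cased
    assert env.get(b'path') == b'C:\\hg'
    assert env.get(b'Path') == b'C:\\hg'

Note that, as in the diff, only `get` is overridden; subscripting with `env[b'path']` would still miss.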
|
356 | DRIVE_RE = re.compile(b'^[a-z]:') | |
|
294 | 357 | |
|
295 | 358 | if pycompat.ispy3: |
|
296 | 359 | # os.getcwd() on Python 3 returns string, but it has os.getcwdb() which |
@@ -303,7 +366,21 b' if pycompat.ispy3:' | |||
|
303 | 366 | # os.path.realpath(), which is used on ``repo.root``. Since those |
|
304 | 367 | # strings are compared in various places as simple strings, also call |
|
305 | 368 | # realpath here. See https://bugs.python.org/issue40368 |
|
306 | getcwd = lambda: strtolocal(os.path.realpath(os.getcwd())) # re-exports | |
|
369 | # | |
|
370 | # However this is not reliable, so let's explicitly make this drive | |
|
371 | # letter upper case. | |
|
372 | # | |
|
373 | # note: we should consider dropping realpath here since it seems to | |
|
374 | # change the semantic of `getcwd`. | |
|
375 | ||
|
376 | def getcwd(): | |
|
377 | cwd = os.getcwd() # re-exports | |
|
378 | cwd = os.path.realpath(cwd) | |
|
379 | cwd = strtolocal(cwd) | |
|
380 | if DRIVE_RE.match(cwd): | |
|
381 | cwd = cwd[0:1].upper() + cwd[1:] | |
|
382 | return cwd | |
|
383 | ||
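
A focused sketch of just the drive-letter fix-up performed by the new `getcwd`, reusing the same regex idea as `DRIVE_RE`:

    import re

    DRIVE_RE = re.compile(b'^[a-z]:')

    def normalize_drive(cwd):
        """Upper-case a lower-case drive letter; leave other paths alone."""
        if DRIVE_RE.match(cwd):
            cwd = cwd[0:1].upper() + cwd[1:]
        return cwd

    assert normalize_drive(b'c:\\Users\\hg') == b'C:\\Users\\hg'
    assert normalize_drive(b'/home/hg') == b'/home/hg'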
|
307 | 384 | else: |
|
308 | 385 | getcwd = os.getcwdb # re-exports |
|
309 | 386 | else: |
@@ -441,56 +518,6 b" def trim(s, width, ellipsis=b'', leftsid" | |||
|
441 | 518 | return ellipsis # not enough room for multi-column characters |
|
442 | 519 | |
|
443 | 520 | |
|
444 | def lower(s): | |
|
445 | # type: (bytes) -> bytes | |
|
446 | """best-effort encoding-aware case-folding of local string s""" | |
|
447 | try: | |
|
448 | return asciilower(s) | |
|
449 | except UnicodeDecodeError: | |
|
450 | pass | |
|
451 | try: | |
|
452 | if isinstance(s, localstr): | |
|
453 | u = s._utf8.decode("utf-8") | |
|
454 | else: | |
|
455 | u = s.decode(_sysstr(encoding), _sysstr(encodingmode)) | |
|
456 | ||
|
457 | lu = u.lower() | |
|
458 | if u == lu: | |
|
459 | return s # preserve localstring | |
|
460 | return lu.encode(_sysstr(encoding)) | |
|
461 | except UnicodeError: | |
|
462 | return s.lower() # we don't know how to fold this except in ASCII | |
|
463 | except LookupError as k: | |
|
464 | raise error.Abort(k, hint=b"please check your locale settings") | |
|
465 | ||
|
466 | ||
|
467 | def upper(s): | |
|
468 | # type: (bytes) -> bytes | |
|
469 | """best-effort encoding-aware case-folding of local string s""" | |
|
470 | try: | |
|
471 | return asciiupper(s) | |
|
472 | except UnicodeDecodeError: | |
|
473 | return upperfallback(s) | |
|
474 | ||
|
475 | ||
|
476 | def upperfallback(s): | |
|
477 | # type: (Any) -> Any | |
|
478 | try: | |
|
479 | if isinstance(s, localstr): | |
|
480 | u = s._utf8.decode("utf-8") | |
|
481 | else: | |
|
482 | u = s.decode(_sysstr(encoding), _sysstr(encodingmode)) | |
|
483 | ||
|
484 | uu = u.upper() | |
|
485 | if u == uu: | |
|
486 | return s # preserve localstring | |
|
487 | return uu.encode(_sysstr(encoding)) | |
|
488 | except UnicodeError: | |
|
489 | return s.upper() # we don't know how to fold this except in ASCII | |
|
490 | except LookupError as k: | |
|
491 | raise error.Abort(k, hint=b"please check your locale settings") | |
|
492 | ||
|
493 | ||
|
494 | 521 | class normcasespecs(object): |
|
495 | 522 | """what a platform's normcase does to ASCII strings |
|
496 | 523 |
@@ -51,13 +51,52 b' class Hint(object):' | |||
|
51 | 51 | super(Hint, self).__init__(*args, **kw) |
|
52 | 52 | |
|
53 | 53 | |
|
54 | class StorageError(Hint, Exception): | |

54 | class Error(Hint, Exception): | |
|
55 | """Base class for Mercurial errors.""" | |
|
56 | ||
|
57 | coarse_exit_code = None | |
|
58 | detailed_exit_code = None | |
|
59 | ||
|
60 | def __init__(self, message, hint=None): | |
|
61 | # type: (bytes, Optional[bytes]) -> None | |
|
62 | self.message = message | |
|
63 | self.hint = hint | |
|
64 | # Pass the message into the Exception constructor to help extensions | |
|
65 | # that look for exc.args[0]. | |
|
66 | Exception.__init__(self, message) | |
|
67 | ||
|
68 | def __bytes__(self): | |
|
69 | return self.message | |
|
70 | ||
|
71 | if pycompat.ispy3: | |
|
72 | ||
|
73 | def __str__(self): | |
|
74 | # the output would be unreadable if the message was translated, | |
|
75 | # but do not replace it with encoding.strfromlocal(), which | |
|
76 | # may raise another exception. | |
|
77 | return pycompat.sysstr(self.__bytes__()) | |
|
78 | ||
|
79 | def format(self): | |
|
80 | # type: () -> bytes | |
|
81 | from .i18n import _ | |
|
82 | ||
|
83 | message = _(b"abort: %s\n") % self.message | |
|
84 | if self.hint: | |
|
85 | message += _(b"(%s)\n") % self.hint | |
|
86 | return message | |
|
87 | ||
|
88 | ||
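
Hanging `coarse_exit_code` / `detailed_exit_code` on the exception classes lets a single top-level handler translate any caught error into a process status without a per-type table. A minimal sketch of that dispatch (the `255` fallback stands in for hg's generic abort status and is an assumption here):

    class Error(Exception):
        coarse_exit_code = None
        detailed_exit_code = None

    class InputError(Error):
        detailed_exit_code = 10

    def run(main, detailed=True):
        try:
            main()
            return 0
        except Error as err:
            code = err.detailed_exit_code if detailed else err.coarse_exit_code
            return code if code is not None else 255  # generic abort

    def bad_command():
        raise InputError('unknown command')

    assert run(bad_command) == 10
    assert run(bad_command, detailed=False) == 255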
|
89 | class Abort(Error): | |
|
90 | """Raised if a command needs to print an error and exit.""" | |
|
91 | ||
|
92 | ||
|
93 | class StorageError(Error): | |
|
55 | 94 | """Raised when an error occurs in a storage layer. |
|
56 | 95 | |
|
57 | 96 | Usually subclassed by a storage-specific exception. |
|
58 | 97 | """ |
|
59 | 98 | |
|
60 | __bytes__ = _tobytes | |
|
99 | detailed_exit_code = 50 | |
|
61 | 100 | |
|
62 | 101 | |
|
63 | 102 | class RevlogError(StorageError): |
@@ -159,10 +198,20 b' class WorkerError(Exception):' | |||
|
159 | 198 | __bytes__ = _tobytes |
|
160 | 199 | |
|
161 | 200 | |
|
162 | class InterventionRequired(Hint, Exception): | |

201 | class InterventionRequired(Abort): | |
|
163 | 202 | """Exception raised when a command requires human intervention.""" |
|
164 | 203 | |
|
165 | __bytes__ = _tobytes | |
|
204 | coarse_exit_code = 1 | |
|
205 | detailed_exit_code = 240 | |
|
206 | ||
|
207 | def format(self): | |
|
208 | # type: () -> bytes | |
|
209 | from .i18n import _ | |
|
210 | ||
|
211 | message = _(b"%s\n") % self.message | |
|
212 | if self.hint: | |
|
213 | message += _(b"(%s)\n") % self.hint | |
|
214 | return message | |
|
166 | 215 | |
|
167 | 216 | |
|
168 | 217 | class ConflictResolutionRequired(InterventionRequired): |
@@ -182,44 +231,14 b' class ConflictResolutionRequired(Interve' | |||
|
182 | 231 | ) |
|
183 | 232 | |
|
184 | 233 | |
|
185 | class Abort(Hint, Exception): | |
|
186 | """Raised if a command needs to print an error and exit.""" | |
|
187 | ||
|
188 | def __init__(self, message, hint=None): | |
|
189 | # type: (bytes, Optional[bytes]) -> None | |
|
190 | self.message = message | |
|
191 | self.hint = hint | |
|
192 | # Pass the message into the Exception constructor to help extensions | |
|
193 | # that look for exc.args[0]. | |
|
194 | Exception.__init__(self, message) | |
|
195 | ||
|
196 | def __bytes__(self): | |
|
197 | return self.message | |
|
198 | ||
|
199 | if pycompat.ispy3: | |
|
200 | ||
|
201 | def __str__(self): | |
|
202 | # the output would be unreadable if the message was translated, | |
|
203 | # but do not replace it with encoding.strfromlocal(), which | |
|
204 | # may raise another exception. | |
|
205 | return pycompat.sysstr(self.__bytes__()) | |
|
206 | ||
|
207 | def format(self): | |
|
208 | # type: () -> bytes | |
|
209 | from .i18n import _ | |
|
210 | ||
|
211 | message = _(b"abort: %s\n") % self.message | |
|
212 | if self.hint: | |
|
213 | message += _(b"(%s)\n") % self.hint | |
|
214 | return message | |
|
215 | ||
|
216 | ||
|
217 | 234 | class InputError(Abort): |
|
218 | 235 | """Indicates that the user made an error in their input. |
|
219 | 236 | |
|
220 | 237 | Examples: Invalid command, invalid flags, invalid revision. |
|
221 | 238 | """ |
|
222 | 239 | |
|
240 | detailed_exit_code = 10 | |
|
241 | ||
|
223 | 242 | |
|
224 | 243 | class StateError(Abort): |
|
225 | 244 | """Indicates that the operation might work if retried in a different state. |
@@ -227,6 +246,8 b' class StateError(Abort):' | |||
|
227 | 246 | Examples: Unresolved merge conflicts, unfinished operations. |
|
228 | 247 | """ |
|
229 | 248 | |
|
249 | detailed_exit_code = 20 | |
|
250 | ||
|
230 | 251 | |
|
231 | 252 | class CanceledError(Abort): |
|
232 | 253 | """Indicates that the user canceled the operation. |
@@ -234,6 +255,8 b' class CanceledError(Abort):' | |||
|
234 | 255 | Examples: Close commit editor with error status, quit chistedit. |
|
235 | 256 | """ |
|
236 | 257 | |
|
258 | detailed_exit_code = 250 | |
|
259 | ||
|
237 | 260 | |
|
238 | 261 | class SecurityError(Abort): |
|
239 | 262 | """Indicates that some aspect of security failed. |
@@ -242,6 +265,8 b' class SecurityError(Abort):' | |||
|
242 | 265 | filesystem, mismatched GPG signature, DoS protection. |
|
243 | 266 | """ |
|
244 | 267 | |
|
268 | detailed_exit_code = 150 | |
|
269 | ||
|
245 | 270 | |
|
246 | 271 | class HookLoadError(Abort): |
|
247 | 272 | """raised when loading a hook fails, aborting an operation |
@@ -254,10 +279,14 b' class HookAbort(Abort):' | |||
|
254 | 279 | |
|
255 | 280 | Exists to allow more specialized catching.""" |
|
256 | 281 | |
|
282 | detailed_exit_code = 40 | |
|
283 | ||
|
257 | 284 | |
|
258 | 285 | class ConfigError(Abort): |
|
259 | 286 | """Exception raised when parsing config files""" |
|
260 | 287 | |
|
288 | detailed_exit_code = 30 | |
|
289 | ||
|
261 | 290 | def __init__(self, message, location=None, hint=None): |
|
262 | 291 | # type: (bytes, Optional[bytes], Optional[bytes]) -> None |
|
263 | 292 | super(ConfigError, self).__init__(message, hint=hint) |
@@ -307,6 +336,8 b' class ResponseExpected(Abort):' | |||
|
307 | 336 | class RemoteError(Abort): |
|
308 | 337 | """Exception raised when interacting with a remote repo fails""" |
|
309 | 338 | |
|
339 | detailed_exit_code = 100 | |
|
340 | ||
|
310 | 341 | |
|
311 | 342 | class OutOfBandError(RemoteError): |
|
312 | 343 | """Exception raised when a remote repo reports failure""" |
@@ -325,6 +356,8 b' class OutOfBandError(RemoteError):' | |||
|
325 | 356 | class ParseError(Abort): |
|
326 | 357 | """Raised when parsing config files and {rev,file}sets (msg[, pos])""" |
|
327 | 358 | |
|
359 | detailed_exit_code = 10 | |
|
360 | ||
|
328 | 361 | def __init__(self, message, location=None, hint=None): |
|
329 | 362 | # type: (bytes, Optional[Union[bytes, int]], Optional[bytes]) -> None |
|
330 | 363 | super(ParseError, self).__init__(message, hint=hint) |
@@ -13,7 +13,6 b' import weakref' | |||
|
13 | 13 | from .i18n import _ |
|
14 | 14 | from .node import ( |
|
15 | 15 | hex, |
|
16 | nullid, | |
|
17 | 16 | nullrev, |
|
18 | 17 | ) |
|
19 | 18 | from . import ( |
@@ -44,6 +43,7 b' from .utils import (' | |||
|
44 | 43 | stringutil, |
|
45 | 44 | urlutil, |
|
46 | 45 | ) |
|
46 | from .interfaces import repository | |
|
47 | 47 | |
|
48 | 48 | urlerr = util.urlerr |
|
49 | 49 | urlreq = util.urlreq |
@@ -164,7 +164,7 b' def _computeoutgoing(repo, heads, common' | |||
|
164 | 164 | hasnode = cl.hasnode |
|
165 | 165 | common = [n for n in common if hasnode(n)] |
|
166 | 166 | else: |
|
167 | common = [nullid] | |
|
167 | common = [repo.nullid] | |
|
168 | 168 | if not heads: |
|
169 | 169 | heads = cl.heads() |
|
170 | 170 | return discovery.outgoing(repo, common, heads) |
@@ -184,6 +184,10 b' def _checkpublish(pushop):' | |||
|
184 | 184 | published = repo.filtered(b'served').revs(b'not public()') |
|
185 | 185 | else: |
|
186 | 186 | published = repo.revs(b'::%ln - public()', pushop.revs) |
|
187 | # we want to use pushop.revs in the revset even if they themselves are | |
|
188 | # secret, but we don't want to have anything that the server won't see | |
|
189 | # in the result of this expression | |
|
190 | published &= repo.filtered(b'served') | |
|
187 | 191 | if published: |
|
188 | 192 | if behavior == b'warn': |
|
189 | 193 | ui.warn( |
@@ -894,7 +898,7 b' def _pushb2ctx(pushop, bundler):' | |||
|
894 | 898 | cgpart.addparam(b'version', version) |
|
895 | 899 | if scmutil.istreemanifest(pushop.repo): |
|
896 | 900 | cgpart.addparam(b'treemanifest', b'1') |
|
897 | if b'exp-sidedata-flag' in pushop.repo.requirements: | |
|
901 | if repository.REPO_FEATURE_SIDE_DATA in pushop.repo.features: | |
|
898 | 902 | cgpart.addparam(b'exp-sidedata', b'1') |
|
899 | 903 | |
|
900 | 904 | def handlereply(op): |
@@ -1839,7 +1843,7 b' def _pullbundle2(pullop):' | |||
|
1839 | 1843 | if ( |
|
1840 | 1844 | pullop.remote.capable(b'clonebundles') |
|
1841 | 1845 | and pullop.heads is None |
|
1842 | and list(pullop.common) == [nullid] | |
|
1846 | and list(pullop.common) == [pullop.repo.nullid] | |
|
1843 | 1847 | ): |
|
1844 | 1848 | kwargs[b'cbattempted'] = pullop.clonebundleattempted |
|
1845 | 1849 | |
@@ -1849,7 +1853,7 b' def _pullbundle2(pullop):' | |||
|
1849 | 1853 | pullop.repo.ui.status(_(b"no changes found\n")) |
|
1850 | 1854 | pullop.cgresult = 0 |
|
1851 | 1855 | else: |
|
1852 | if pullop.heads is None and list(pullop.common) == [nullid]: | |
|
1856 | if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]: | |
|
1853 | 1857 | pullop.repo.ui.status(_(b"requesting all changes\n")) |
|
1854 | 1858 | if obsolete.isenabled(pullop.repo, obsolete.exchangeopt): |
|
1855 | 1859 | remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps) |
@@ -1920,7 +1924,7 b' def _pullchangeset(pullop):' | |||
|
1920 | 1924 | pullop.cgresult = 0 |
|
1921 | 1925 | return |
|
1922 | 1926 | tr = pullop.gettransaction() |
|
1923 | if pullop.heads is None and list(pullop.common) == [nullid]: | |
|
1927 | if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]: | |
|
1924 | 1928 | pullop.repo.ui.status(_(b"requesting all changes\n")) |
|
1925 | 1929 | elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'): |
|
1926 | 1930 | # issue1320, avoid a race if remote changed after discovery |
@@ -2428,7 +2432,7 b' def _getbundlechangegrouppart(' | |||
|
2428 | 2432 | if scmutil.istreemanifest(repo): |
|
2429 | 2433 | part.addparam(b'treemanifest', b'1') |
|
2430 | 2434 | |
|
2431 | if b'exp-sidedata-flag' in repo.requirements: | |
|
2435 | if repository.REPO_FEATURE_SIDE_DATA in repo.features: | |
|
2432 | 2436 | part.addparam(b'exp-sidedata', b'1') |
|
2433 | 2437 | sidedata = bundle2.format_remote_wanted_sidedata(repo) |
|
2434 | 2438 | part.addparam(b'exp-wanted-sidedata', sidedata) |
@@ -11,10 +11,7 b' import collections' | |||
|
11 | 11 | import weakref |
|
12 | 12 | |
|
13 | 13 | from .i18n import _ |
|
14 | from .node import ( | |

15 | nullid, | |
|
16 | short, | |
|
17 | ) | |
|
14 | from .node import short | |
|
18 | 15 | from . import ( |
|
19 | 16 | bookmarks, |
|
20 | 17 | error, |
@@ -304,7 +301,7 b' def _pullchangesetdiscovery(repo, remote' | |||
|
304 | 301 | if set(remoteheads).issubset(common): |
|
305 | 302 | fetch = [] |
|
306 | 303 | |
|
307 | common.discard(nullid) | |
|
304 | common.discard(repo.nullid) | |
|
308 | 305 | |
|
309 | 306 | return common, fetch, remoteheads |
|
310 | 307 | |
@@ -413,7 +410,7 b' def _processchangesetdata(repo, tr, objs' | |||
|
413 | 410 | # Linknode is always itself for changesets. |
|
414 | 411 | cset[b'node'], |
|
415 | 412 | # We always send full revisions. So delta base is not set. |
|
416 | nullid, | |
|
413 | repo.nullid, | |
|
417 | 414 | mdiff.trivialdiffheader(len(data)) + data, |
|
418 | 415 | # Flags not yet supported. |
|
419 | 416 | 0, |
@@ -478,7 +475,7 b' def _fetchmanifests(repo, tr, remote, ma' | |||
|
478 | 475 | basenode = manifest[b'deltabasenode'] |
|
479 | 476 | delta = extrafields[b'delta'] |
|
480 | 477 | elif b'revision' in extrafields: |
|
481 | basenode = nullid | |
|
478 | basenode = repo.nullid | |
|
482 | 479 | revision = extrafields[b'revision'] |
|
483 | 480 | delta = mdiff.trivialdiffheader(len(revision)) + revision |
|
484 | 481 | else: |
@@ -610,7 +607,7 b' def _fetchfiles(repo, tr, remote, fnodes' | |||
|
610 | 607 | basenode = filerevision[b'deltabasenode'] |
|
611 | 608 | delta = extrafields[b'delta'] |
|
612 | 609 | elif b'revision' in extrafields: |
|
613 | basenode = nullid | |
|
610 | basenode = repo.nullid | |
|
614 | 611 | revision = extrafields[b'revision'] |
|
615 | 612 | delta = mdiff.trivialdiffheader(len(revision)) + revision |
|
616 | 613 | else: |
@@ -705,7 +702,7 b' def _fetchfilesfromcsets(' | |||
|
705 | 702 | basenode = filerevision[b'deltabasenode'] |
|
706 | 703 | delta = extrafields[b'delta'] |
|
707 | 704 | elif b'revision' in extrafields: |
|
708 | basenode = nullid | |
|
705 | basenode = repo.nullid | |
|
709 | 706 | revision = extrafields[b'revision'] |
|
710 | 707 | delta = mdiff.trivialdiffheader(len(revision)) + revision |
|
711 | 708 | else: |
@@ -48,7 +48,7 b' int _tmain(int argc, TCHAR *argv[])' | |||
|
48 | 48 | int(__cdecl * Py_Main)(int argc, TCHAR *argv[]); |
|
49 | 49 | |
|
50 | 50 | #if PY_MAJOR_VERSION >= 3 |
|
51 | Py_LegacyWindowsStdioFlag = 1; | |
|
51 | _wputenv(L"PYTHONLEGACYWINDOWSSTDIO=1"); | |
|
52 | 52 | #endif |
|
53 | 53 | |
|
54 | 54 | if (GetModuleFileName(NULL, pyscript, _countof(pyscript)) == 0) { |
@@ -713,7 +713,7 b' def _disabledpaths():' | |||
|
713 | 713 | # it might not be on a filesystem even if it does. |
|
714 | 714 | if util.safehasattr(hgext, '__file__'): |
|
715 | 715 | extpath = os.path.dirname( |
|
716 | os.path.abspath(pycompat.fsencode(hgext.__file__)) | |

716 | util.abspath(pycompat.fsencode(hgext.__file__)) | |
|
717 | 717 | ) |
|
718 | 718 | try: |
|
719 | 719 | files = os.listdir(extpath) |
@@ -8,10 +8,7 b'' | |||
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | from .i18n import _ |
|
11 | from .node import ( | |

12 | nullid, | |
|
13 | nullrev, | |
|
14 | ) | |
|
11 | from .node import nullrev | |
|
15 | 12 | from . import ( |
|
16 | 13 | error, |
|
17 | 14 | revlog, |
@@ -21,18 +18,24 b' from .interfaces import (' | |||
|
21 | 18 | util as interfaceutil, |
|
22 | 19 | ) |
|
23 | 20 | from .utils import storageutil |
|
21 | from .revlogutils import ( | |
|
22 | constants as revlog_constants, | |
|
23 | ) | |
|
24 | 24 | |
|
25 | 25 | |
|
26 | 26 | @interfaceutil.implementer(repository.ifilestorage) |
|
27 | 27 | class filelog(object): |
|
28 | 28 | def __init__(self, opener, path): |
|
29 | 29 | self._revlog = revlog.revlog( |
|
30 | opener, b'/'.join((b'data', path + b'.i')), censorable=True | |
|
30 | opener, | |
|
31 | # XXX should use the unencoded path | |
|
32 | target=(revlog_constants.KIND_FILELOG, path), | |
|
33 | radix=b'/'.join((b'data', path)), | |
|
34 | censorable=True, | |
|
31 | 35 | ) |
|
32 | 36 | # Full name of the user visible file, relative to the repository root. |
|
33 | 37 | # Used by LFS. |
|
34 | 38 | self._revlog.filename = path |
|
35 | self._revlog.revlog_kind = b'filelog' | |
|
36 | 39 | self.nullid = self._revlog.nullid |
|
37 | 40 | |
|
38 | 41 | def __len__(self): |
@@ -42,7 +45,7 b' class filelog(object):' | |||
|
42 | 45 | return self._revlog.__iter__() |
|
43 | 46 | |
|
44 | 47 | def hasnode(self, node): |
|
45 | if node in (nullid, nullrev): | |
|
48 | if node in (self.nullid, nullrev): | |
|
46 | 49 | return False |
|
47 | 50 | |
|
48 | 51 | try: |
@@ -68,7 +71,7 b' class filelog(object):' | |||
|
68 | 71 | |
|
69 | 72 | def lookup(self, node): |
|
70 | 73 | return storageutil.fileidlookup( |
|
71 | self._revlog, node, self._revlog.indexfile | |

74 | self._revlog, node, self._revlog.display_id | |
|
72 | 75 | ) |
|
73 | 76 | |
|
74 | 77 | def linkrev(self, rev): |
@@ -225,18 +228,6 b' class filelog(object):' | |||
|
225 | 228 | storedsize=storedsize, |
|
226 | 229 | ) |
|
227 | 230 | |
|
228 | # TODO these aren't part of the interface and aren't internal methods. | |
|
229 | # Callers should be fixed to not use them. | |
|
230 | ||
|
231 | # Used by bundlefilelog, unionfilelog. | |
|
232 | @property | |
|
233 | def indexfile(self): | |
|
234 | return self._revlog.indexfile | |
|
235 | ||
|
236 | @indexfile.setter | |
|
237 | def indexfile(self, value): | |
|
238 | self._revlog.indexfile = value | |
|
239 | ||
|
240 | 231 | # Used by repo upgrade. |
|
241 | 232 | def clone(self, tr, destrevlog, **kwargs): |
|
242 | 233 | if not isinstance(destrevlog, filelog): |
@@ -15,7 +15,6 b' import shutil' | |||
|
15 | 15 | from .i18n import _ |
|
16 | 16 | from .node import ( |
|
17 | 17 | hex, |
|
18 | nullid, | |
|
19 | 18 | short, |
|
20 | 19 | ) |
|
21 | 20 | from .pycompat import ( |
@@ -111,7 +110,7 b' class absentfilectx(object):' | |||
|
111 | 110 | return None |
|
112 | 111 | |
|
113 | 112 | def filenode(self): |
|
114 | return nullid | |
|
113 | return self._ctx.repo().nullid | |
|
115 | 114 | |
|
116 | 115 | _customcmp = True |
|
117 | 116 |
@@ -540,6 +540,12 b' helptable = sorted(' | |||
|
540 | 540 | TOPIC_CATEGORY_CONCEPTS, |
|
541 | 541 | ), |
|
542 | 542 | ( |
|
543 | [b"evolution"], | |
|
544 | _(b"Safely rewriting history (EXPERIMENTAL)"), | |
|
545 | loaddoc(b'evolution'), | |
|
546 | TOPIC_CATEGORY_CONCEPTS, | |
|
547 | ), | |
|
548 | ( | |
|
543 | 549 | [b'scripting'], |
|
544 | 550 | _(b'Using Mercurial from scripts and automation'), |
|
545 | 551 | loaddoc(b'scripting'), |
@@ -5,7 +5,7 b' Troubleshooting' | |||
|
5 | 5 | =============== |
|
6 | 6 | |
|
7 | 7 | If you're having problems with your configuration, |
|
8 | :hg:`config --debug` can help you understand what is introducing | |

8 | :hg:`config --source` can help you understand what is introducing | |
|
9 | 9 | a setting into your environment. |
|
10 | 10 | |
|
11 | 11 | See :hg:`help config.syntax` and :hg:`help config.files` |
@@ -1718,6 +1718,12 b' the path they point to.' | |||
|
1718 | 1718 | |
|
1719 | 1719 | The following sub-options can be defined: |
|
1720 | 1720 | |
|
1721 | ``multi-urls`` | |
|
1722 | A boolean option. When enabled the value of the `[paths]` entry will be | |
|
1723 | parsed as a list and the alias will resolve to multiple destinations. If some | |
|
1724 | of the list entries use the `path://` syntax, the suboption will be inherited | |
|
1725 | individually. | |
|
1726 | ||
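
For instance, a path alias could fan out to two mirrors (the URLs and list syntax below are illustrative):

    [paths]
    default = https://example.com/mirror-a https://example.com/mirror-b
    default:multi-urls = yes

Commands that resolve the alias would then act on each listed URL; entries written with the `path://` syntax keep the sub-options of the alias they reference.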
|
1721 | 1727 | ``pushurl`` |
|
1722 | 1728 | The URL to use for push operations. If not defined, the location |
|
1723 | 1729 | defined by the path's main entry is used. |
|
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: modified file | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: file was removed | |
The requested commit or file is too big and content was truncated. Show full diff |
|
1 | NO CONTENT: file was removed | |
The requested commit or file is too big and content was truncated. Show full diff |
General Comments 0
You need to be logged in to leave comments.
Login now