merge with stable
Augie Fackler
r40855:cb372d09 merge default
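Each record below appears to follow Mercurial's .hgsigs layout: a 40-character hex changeset hash, a signature scheme version (0), and a base64-encoded GPG signature for that changeset. The leading integer on every record is the line number added by the repository's diff view, not part of the file itself. A minimal parsing sketch under that assumption (the function name is illustrative, not from the source):

import base64

def parse_sig_record(record: str):
    # Split '<40-hex node> <version> <base64 signature>' into its fields.
    node, version, sig_b64 = record.split(" ", 2)
    return node, int(version), base64.b64decode(sig_b64)

# First record from the listing, with the viewer's line-number column dropped:
node, version, sig = parse_sig_record(
    "35fb62a3a673d5322f6274a44ba6456e5e4b3b37 0 "
    "iD8DBQBEYmO2ywK+sNU5EO8RAnaYAKCO7x15xUn5mnhqWNXqk/ehlhRt2QCfRDfY0LrUq2q4oK/KypuJYPHgq1A="
)
print(node, version, len(sig))  # changeset hash, version 0, raw signature length in bytes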
1 35fb62a3a673d5322f6274a44ba6456e5e4b3b37 0 iD8DBQBEYmO2ywK+sNU5EO8RAnaYAKCO7x15xUn5mnhqWNXqk/ehlhRt2QCfRDfY0LrUq2q4oK/KypuJYPHgq1A=
2 2be3001847cb18a23c403439d9e7d0ace30804e9 0 iD8DBQBExUbjywK+sNU5EO8RAhzxAKCtyHAQUzcTSZTqlfJ0by6vhREwWQCghaQFHfkfN0l9/40EowNhuMOKnJk=
3 36a957364b1b89c150f2d0e60a99befe0ee08bd3 0 iD8DBQBFfL2QywK+sNU5EO8RAjYFAKCoGlaWRTeMsjdmxAjUYx6diZxOBwCfY6IpBYsKvPTwB3oktnPt5Rmrlys=
4 27230c29bfec36d5540fbe1c976810aefecfd1d2 0 iD8DBQBFheweywK+sNU5EO8RAt7VAKCrqJQWT2/uo2RWf0ZI4bLp6v82jACgjrMdsaTbxRsypcmEsdPhlG6/8F4=
5 fb4b6d5fe100b0886f8bc3d6731ec0e5ed5c4694 0 iD8DBQBGgHicywK+sNU5EO8RAgNxAJ0VG8ixAaeudx4sZbhngI1syu49HQCeNUJQfWBgA8bkJ2pvsFpNxwYaX3I=
6 23889160905a1b09fffe1c07378e9fc1827606eb 0 iD8DBQBHGTzoywK+sNU5EO8RAr/UAJ0Y8s4jQtzgS+G9vM8z6CWBThZ8fwCcCT5XDj2XwxKkz/0s6UELwjsO3LU=
7 bae2e9c838e90a393bae3973a7850280413e091a 0 iD8DBQBH6DO5ywK+sNU5EO8RAsfrAJ0e4r9c9GF/MJsM7Xjd3NesLRC3+ACffj6+6HXdZf8cswAoFPO+DY00oD0=
8 d5cbbe2c49cee22a9fbeb9ea41daa0ac4e26b846 0 iD8DBQBINdwsywK+sNU5EO8RAjIUAKCPmlFJSpsPAAUKF+iNHAwVnwmzeQCdEXrL27CWclXuUKdbQC8De7LICtE=
9 d2375bbee6d47e62ba8e415c86e83a465dc4dce9 0 iD8DBQBIo1wpywK+sNU5EO8RAmRNAJ94x3OFt6blbqu/yBoypm/AJ44fuACfUaldXcV5z9tht97hSp22DVTEPGc=
10 2a67430f92f15ea5159c26b09ec4839a0c549a26 0 iEYEABECAAYFAkk1hykACgkQywK+sNU5EO85QACeNJNUanjc2tl4wUoPHNuv+lSj0ZMAoIm93wSTc/feyYnO2YCaQ1iyd9Nu
11 3773e510d433969e277b1863c317b674cbee2065 0 iEYEABECAAYFAklNbbAACgkQywK+sNU5EO8o+gCfeb2/lfIJZMvyDA1m+G1CsBAxfFsAoIa6iAMG8SBY7hW1Q85Yf/LXEvaE
12 11a4eb81fb4f4742451591489e2797dc47903277 0 iEYEABECAAYFAklcAnsACgkQywK+sNU5EO+uXwCbBVHNNsLy1g7BlAyQJwadYVyHOXoAoKvtAVO71+bv7EbVoukwTzT+P4Sx
13 11efa41037e280d08cfb07c09ad485df30fb0ea8 0 iEYEABECAAYFAkmvJRQACgkQywK+sNU5EO9XZwCeLMgDgPSMWMm6vgjL4lDs2pEc5+0AnRxfiFbpbBfuEFTqKz9nbzeyoBlx
14 02981000012e3adf40c4849bd7b3d5618f9ce82d 0 iEYEABECAAYFAknEH3wACgkQywK+sNU5EO+uXwCeI+LbLMmhjU1lKSfU3UWJHjjUC7oAoIZLvYDGOL/tNZFUuatc3RnZ2eje
15 196d40e7c885fa6e95f89134809b3ec7bdbca34b 0 iEYEABECAAYFAkpL2X4ACgkQywK+sNU5EO9FOwCfXJycjyKJXsvQqKkHrglwOQhEKS4An36GfKzptfN8b1qNc3+ya/5c2WOM
16 3ef6c14a1e8e83a31226f5881b7fe6095bbfa6f6 0 iEYEABECAAYFAkpopLIACgkQywK+sNU5EO8QSgCfZ0ztsd071rOa2lhmp9Fyue/WoI0AoLTei80/xrhRlB8L/rZEf2KBl8dA
17 31ec469f9b556f11819937cf68ee53f2be927ebf 0 iEYEABECAAYFAksBuxAACgkQywK+sNU5EO+mBwCfagB+A0txzWZ6dRpug3LEoK7Z1QsAoKpbk8vsLjv6/oRDicSk/qBu33+m
18 439d7ea6fe3aa4ab9ec274a68846779153789de9 0 iEYEABECAAYFAksVw0kACgkQywK+sNU5EO/oZwCfdfBEkgp38xq6wN2F4nj+SzofrJIAnjmxt04vaJSeOOeHylHvk6lzuQsw
19 296a0b14a68621f6990c54fdba0083f6f20935bf 0 iEYEABECAAYFAks+jCoACgkQywK+sNU5EO9J8wCeMUGF9E/gS2UBsqIz56WS4HMPRPUAoI5J95mwEIK8Clrl7qFRidNI6APq
20 4aa619c4c2c09907034d9824ebb1dd0e878206eb 0 iEYEABECAAYFAktm9IsACgkQywK+sNU5EO9XGgCgk4HclRQhexEtooPE5GcUCdB6M8EAn2ptOhMVbIoO+JncA+tNACPFXh0O
21 ff2704a8ded37fbebd8b6eb5ec733731d725da8a 0 iEYEABECAAYFAkuRoSQACgkQywK+sNU5EO//3QCeJDc5r2uFyFCtAlpSA27DEE5rrxAAn2FSwTy9fhrB3QAdDQlwkEZcQzDh
22 2b01dab594167bc0dd33331dbaa6dca3dca1b3aa 0 iEYEABECAAYFAku1IwIACgkQywK+sNU5EO9MjgCdHLVwkTZlNHxhcznZKBL1rjN+J7cAoLLWi9LTL6f/TgBaPSKOy1ublbaW
23 39f725929f0c48c5fb3b90c071fc3066012456ca 0 iEYEABECAAYFAkvclvsACgkQywK+sNU5EO9FSwCeL9i5x8ALW/LE5+lCX6MFEAe4MhwAn1ev5o6SX6GrNdDfKweiemfO2VBk
24 fdcf80f26604f233dc4d8f0a5ef9d7470e317e8a 0 iEYEABECAAYFAkvsKTkACgkQywK+sNU5EO9qEACgiSiRGvTG2vXGJ65tUSOIYihTuFAAnRzRIqEVSw8M8/RGeUXRps0IzaCO
25 24fe2629c6fd0c74c90bd066e77387c2b02e8437 0 iEYEABECAAYFAkwFLRsACgkQywK+sNU5EO+pJACgp13tPI+pbwKZV+LeMjcQ4H6tCZYAoJebzhd6a8yYx6qiwpJxA9BXZNXy
26 f786fc4b8764cd2a5526d259cf2f94d8a66924d9 0 iEYEABECAAYFAkwsyxcACgkQywK+sNU5EO+crACfUpNAF57PmClkSri9nJcBjb2goN4AniPCNaKvnki7TnUsi1u2oxltpKKL
27 bf1774d95bde614af3956d92b20e2a0c68c5fec7 0 iEYEABECAAYFAkxVwccACgkQywK+sNU5EO+oFQCeJzwZ+we1fIIyBGCddHceOUAN++cAnjvT6A8ZWW0zV21NXIFF1qQmjxJd
28 c00f03a4982e467fb6b6bd45908767db6df4771d 0 iEYEABECAAYFAkxXDqsACgkQywK+sNU5EO/GJACfT9Rz4hZOxPQEs91JwtmfjevO84gAmwSmtfo5mmWSm8gtTUebCcdTv0Kf
29 ff5cec76b1c5b6be9c3bb923aae8c3c6d079d6b9 0 iD8DBQBMdo+qywK+sNU5EO8RAqQpAJ975BL2CCAiWMz9SXthNQ9xG181IwCgp4O+KViHPkufZVFn2aTKMNvcr1A=
30 93d8bff78c96fe7e33237b257558ee97290048a4 0 iD8DBQBMpfvdywK+sNU5EO8RAsxVAJ0UaL1XB51C76JUBhafc9GBefuMxwCdEWkTOzwvE0SarJBe9i008jhbqW4=
31 333421b9e0f96c7bc788e5667c146a58a9440a55 0 iD8DBQBMz0HOywK+sNU5EO8RAlsEAJ0USh6yOG7OrWkADGunVt9QimBQnwCbBqeMnKgSbwEw8jZwE3Iz1mdrYlo=
32 4438875ec01bd0fc32be92b0872eb6daeed4d44f 0 iD8DBQBM4WYUywK+sNU5EO8RAhCVAJ0dJswachwFAHALmk1x0RJehxzqPQCbBNskP9n/X689jB+btNTZTyKU/fw=
33 6aff4f144ad356311318b0011df0bb21f2c97429 0 iD8DBQBM9uxXywK+sNU5EO8RAv+4AKCDj4qKP16GdPaq1tP6BUwpM/M1OACfRyzLPp/qiiN8xJTWoWYSe/XjJug=
34 e3bf16703e2601de99e563cdb3a5d50b64e6d320 0 iD8DBQBNH8WqywK+sNU5EO8RAiQTAJ9sBO+TeiGro4si77VVaQaA6jcRUgCfSA28dBbjj0oFoQwvPoZjANiZBH8=
35 a6c855c32ea081da3c3b8ff628f1847ff271482f 0 iD8DBQBNSJJ+ywK+sNU5EO8RAoJaAKCweDEF70fu+r1Zn7pYDXdlk5RuSgCeO9gK/eit8Lin/1n3pO7aYguFLok=
36 2b2155623ee2559caf288fd333f30475966c4525 0 iD8DBQBNSJeBywK+sNU5EO8RAm1KAJ4hW9Cm9nHaaGJguchBaPLlAr+O3wCgqgmMok8bdAS06N6PL60PSTM//Gg=
37 2616325766e3504c8ae7c84bd15ee610901fe91d 0 iD8DBQBNbWy9ywK+sNU5EO8RAlWCAJ4mW8HbzjJj9GpK98muX7k+7EvEHwCfaTLbC/DH3QEsZBhEP+M8tzL6RU4=
38 aa1f3be38ab127280761889d2dca906ca465b5f4 0 iD8DBQBNeQq7ywK+sNU5EO8RAlEOAJ4tlEDdetE9lKfjGgjbkcR8PrC3egCfXCfF3qNVvU/2YYjpgvRwevjvDy0=
39 b032bec2c0a651ca0ddecb65714bfe6770f67d70 0 iD8DBQBNlg5kywK+sNU5EO8RAnGEAJ9gmEx6MfaR4XcG2m/93vwtfyzs3gCgltzx8/YdHPwqDwRX/WbpYgi33is=
40 3cb1e95676ad089596bd81d0937cad37d6e3b7fb 0 iD8DBQBNvTy4ywK+sNU5EO8RAmp8AJ9QnxK4jTJ7G722MyeBxf0UXEdGwACgtlM7BKtNQfbEH/fOW5y+45W88VI=
41 733af5d9f6b22387913e1d11350fb8cb7c1487dd 0 iD8DBQBN5q/8ywK+sNU5EO8RArRGAKCNGT94GKIYtSuwZ57z1sQbcw6uLACfffpbMV4NAPMl8womAwg+7ZPKnIU=
42 de9eb6b1da4fc522b1cab16d86ca166204c24f25 0 iD8DBQBODhfhywK+sNU5EO8RAr2+AJ4ugbAj8ae8/K0bYZzx3sascIAg1QCeK3b+zbbVVqd3b7CDpwFnaX8kTd4=
43 4a43e23b8c55b4566b8200bf69fe2158485a2634 0 iD8DBQBONzIMywK+sNU5EO8RAj5SAJ0aPS3+JHnyI6bHB2Fl0LImbDmagwCdGbDLp1S7TFobxXudOH49bX45Iik=
44 d629f1e89021103f1753addcef6b310e4435b184 0 iD8DBQBOWAsBywK+sNU5EO8RAht4AJwJl9oNFopuGkj5m8aKuf7bqPkoAQCeNrEm7UhFsZKYT5iUOjnMV7s2LaM=
45 351a9292e430e35766c552066ed3e87c557b803b 0 iD8DBQBOh3zUywK+sNU5EO8RApFMAKCD3Y/u3avDFndznwqfG5UeTHMlvACfUivPIVQZyDZnhZMq0UhC6zhCEQg=
46 384082750f2c51dc917d85a7145748330fa6ef4d 0 iD8DBQBOmd+OywK+sNU5EO8RAgDgAJ9V/X+G7VLwhTpHrZNiOHabzSyzYQCdE2kKfIevJUYB9QLAWCWP6DPwrwI=
47 41453d55b481ddfcc1dacb445179649e24ca861d 0 iD8DBQBOsFhpywK+sNU5EO8RAqM6AKCyfxUae3/zLuiLdQz+JR78690eMACfQ6JTBQib4AbE+rUDdkeFYg9K/+4=
48 195dbd1cef0c2f9f8bcf4ea303238105f716bda3 0 iD8DBQBO1/fWywK+sNU5EO8RAmoPAKCR5lpv1D6JLURHD8KVLSV4GRVEBgCgnd0Sy78ligNfqAMafmACRDvj7vo=
49 6344043924497cd06d781d9014c66802285072e4 0 iD8DBQBPALgmywK+sNU5EO8RAlfhAJ9nYOdWnhfVDHYtDTJAyJtXBAQS9wCgnefoSQt7QABkbGxM+Q85UYEBuD0=
50 db33555eafeaf9df1e18950e29439eaa706d399b 0 iD8DBQBPGdzxywK+sNU5EO8RAppkAJ9jOXhUVE/97CPgiMA0pMGiIYnesQCfengAszcBiSiKGugiI8Okc9ghU+Y=
51 2aa5b51f310fb3befd26bed99c02267f5c12c734 0 iD8DBQBPKZ9bywK+sNU5EO8RAt1TAJ45r1eJ0YqSkInzrrayg4TVCh0SnQCgm0GA/Ua74jnnDwVQ60lAwROuz1Q=
52 53e2cd303ecf8ca7c7eeebd785c34e5ed6b0f4a4 0 iD8DBQBPT/fvywK+sNU5EO8RAnfYAKCn7d0vwqIb100YfWm1F7nFD5B+FACeM02YHpQLSNsztrBCObtqcnfod7Q=
53 b9bd95e61b49c221c4cca24e6da7c946fc02f992 0 iD8DBQBPeLsIywK+sNU5EO8RAvpNAKCtKe2gitz8dYn52IRF0hFOPCR7AQCfRJL/RWCFweu2T1vH/mUOCf8SXXc=
54 d9e2f09d5488c395ae9ddbb320ceacd24757e055 0 iD8DBQBPju/dywK+sNU5EO8RArBYAJ9xtifdbk+hCOJO8OZa4JfHX8OYZQCeKPMBaBWiT8N/WHoOm1XU0q+iono=
55 00182b3d087909e3c3ae44761efecdde8f319ef3 0 iD8DBQBPoFhIywK+sNU5EO8RAhzhAKCBj1n2jxPTkZNJJ5pSp3soa+XHIgCgsZZpAQxOpXwCp0eCdNGe0+pmxmg=
56 5983de86462c5a9f42a3ad0f5e90ce5b1d221d25 0 iD8DBQBPovNWywK+sNU5EO8RAhgiAJ980T91FdPTRMmVONDhpkMsZwVIMACgg3bKvoWSeuCW28llUhAJtUjrMv0=
57 85a358df5bbbe404ca25730c9c459b34263441dc 0 iD8DBQBPyZsWywK+sNU5EO8RAnpLAJ48qrGDJRT+pteS0mSQ11haqHstPwCdG4ccGbk+0JHb7aNy8/NRGAOqn9w=
58 b013baa3898e117959984fc64c29d8c784d2f28b 0 iD8DBQBP8QOPywK+sNU5EO8RAqimAKCFRSx0lvG6y8vne2IhNG062Hn0dACeMLI5/zhpWpHBIVeAAquYfx2XFeA=
59 7f5094bb3f423fc799e471aac2aee81a7ce57a0b 0 iD8DBQBQGiL8ywK+sNU5EO8RAq5oAJ4rMMCPx6O+OuzNXVOexogedWz/QgCeIiIxLd76I4pXO48tdXhr0hQcBuM=
60 072209ae4ddb654eb2d5fd35bff358c738414432 0 iD8DBQBQQkq0ywK+sNU5EO8RArDTAJ9nk5CySnNAjAXYvqvx4uWCw9ThZwCgqmFRehH/l+oTwj3f8nw8u8qTCdc=
61 b3f0f9a39c4e1d0250048cd803ab03542d6f140a 0 iD8DBQBQamltywK+sNU5EO8RAlsqAJ4qF/m6aFu4mJCOKTiAP5RvZFK02ACfawYShUZO6OXEFfveU0aAxDR0M1k=
62 d118a4f4fd16d9b558ec3f3e87bfee772861d2b7 0 iD8DBQBQgPV5ywK+sNU5EO8RArylAJ0abcx5NlDjyv3ZDWpAfRIHyRsJtQCgn4TMuEayqgxzrvadQZHdTEU2g38=
63 195ad823b5d58c68903a6153a25e3fb4ed25239d 0 iD8DBQBQkuT9ywK+sNU5EO8RAhB4AKCeerItoK2Jipm2cVf4euGofAa/WACeJj3TVd4pFILpb+ogj7ebweFLJi0=
64 0c10cf8191469e7c3c8844922e17e71a176cb7cb 0 iD8DBQBQvQWoywK+sNU5EO8RAnq3AJoCn98u4geFx5YaQaeh99gFhCd7bQCgjoBwBSUyOvGd0yBy60E3Vv3VZhM=
65 a4765077b65e6ae29ba42bab7834717b5072d5ba 0 iD8DBQBQ486sywK+sNU5EO8RAhmJAJ90aLfLKZhmcZN7kqphigQJxiFOQACeJ5IUZxjGKH4xzi3MrgIcx9n+dB0=
66 f5fbe15ca7449f2c9a3cf817c86d0ae68b307214 0 iD8DBQBQ+yuYywK+sNU5EO8RAm9JAJoD/UciWvpGeKBcpGtZJBFJVcL/HACghDXSgQ+xQDjB+6uGrdgAQsRR1Lg=
67 a6088c05e43a8aee0472ca3a4f6f8d7dd914ebbf 0 iD8DBQBRDDROywK+sNU5EO8RAh75AJ9uJCGoCWnP0Lv/+XuYs4hvUl+sAgCcD36QgAnuw8IQXrvv684BAXAnHcA=
68 7511d4df752e61fe7ae4f3682e0a0008573b0402 0 iD8DBQBRFYaoywK+sNU5EO8RAuErAJoDyhXn+lptU3+AevVdwAIeNFyR2gCdHzPHyWd+JDeWCUR+pSOBi8O2ppM=
69 5b7175377babacce80a6c1e12366d8032a6d4340 0 iD8DBQBRMCYgywK+sNU5EO8RAq1/AKCWKlt9ysibyQgYwoxxIOZv5J8rpwCcDSHQaaf1fFZUTnQsOePwcM2Y/Sg=
70 50c922c1b5145dab8baefefb0437d363b6a6c21c 0 iD8DBQBRWnUnywK+sNU5EO8RAuQRAJwM42cJqJPeqJ0jVNdMqKMDqr4dSACeP0cRVGz1gitMuV0x8f3mrZrqc7I=
71 8a7bd2dccd44ed571afe7424cd7f95594f27c092 0 iD8DBQBRXfBvywK+sNU5EO8RAn+LAKCsMmflbuXjYRxlzFwId5ptm8TZcwCdGkyLbZcASBOkzQUm/WW1qfknJHU=
72 292cd385856d98bacb2c3086f8897bc660c2beea 0 iD8DBQBRcM0BywK+sNU5EO8RAjp4AKCJBykQbvXhKuvLSMxKx3a2TBiXcACfbr/kLg5GlZTF/XDPmY+PyHgI/GM=
73 23f785b38af38d2fca6b8f3db56b8007a84cd73a 0 iD8DBQBRgZwNywK+sNU5EO8RAmO4AJ4u2ILGuimRP6MJgE2t65LZ5dAdkACgiENEstIdrlFC80p+sWKD81kKIYI=
74 ddc7a6be20212d18f3e27d9d7e6f079a66d96f21 0 iD8DBQBRkswvywK+sNU5EO8RAiYYAJsHTHyHbJeAgmGvBTmDrfcKu4doUgCeLm7eGBjx7yAPUvEtxef8rAkQmXI=
75 cceaf7af4c9e9e6fa2dbfdcfe9856c5da69c4ffd 0 iD8DBQBRqnFLywK+sNU5EO8RAsWNAJ9RR6t+y1DLFc2HeH0eN9VfZAKF9gCeJ8ezvhtKq/LMs0/nvcgKQc/d5jk=
76 009794acc6e37a650f0fae37872e733382ac1c0c 0 iD8DBQBR0guxywK+sNU5EO8RArNkAKCq9pMihVzP8Os5kCmgbWpe5C37wgCgqzuPZTHvAsXF5wTyaSTMVa9Ccq4=
77 f0d7721d7322dcfb5af33599c2543f27335334bb 0 iD8DBQBR8taaywK+sNU5EO8RAqeEAJ4idDhhDuEsgsUjeQgWNj498matHACfT67gSF5w0ylsrBx1Hb52HkGXDm0=
78 f37b5a17e6a0ee17afde2cdde5393dd74715fb58 0 iD8DBQBR+ymFywK+sNU5EO8RAuSdAJkBMcd9DAZ3rWE9WGKPm2YZ8LBoXACfXn/wbEsVy7ZgJoUwiWmHSnQaWCI=
79 335a558f81dc73afeab4d7be63617392b130117f 0 iQIVAwUAUiZrIyBXgaxoKi1yAQK2iw//cquNqqSkc8Re5/TZT9I6NH+lh6DbOKjJP0Xl1Wqq0K+KSIUgZG4G32ovaEb2l5X0uY+3unRPiZ0ebl0YSw4Fb2ZiPIADXLBTOYRrY2Wwd3tpJeGI6wEgZt3SfcITV/g7NJrCjT3FlYoSOIayrExM80InSdcEM0Q3Rx6HKzY2acyxzgZeAtAW5ohFvHilSvY6p5Gcm4+QptMxvw45GPdreUmjeXZxNXNXZ8P+MjMz/QJbai/N7PjmK8lqnhkBsT48Ng/KhhmOkGntNJ2/ImBWLFGcWngSvJ7sfWwnyhndvGhe0Hq1NcCf7I8TjNDxU5TR+m+uW7xjXdLoDbUjBdX4sKXnh8ZjbYiODKBOrrDq25cf8nA/tnpKyE/qsVy60kOk6loY4XKiYmn1V49Ta0emmDx0hqo3HgxHHsHX0NDnGdWGol7cPRET0RzVobKq1A0jnrhPooWidvLh9bPzLonrWDo+ib+DuySoRkuYUK4pgZJ2mbg6daFOBEZygkSyRB8bo1UQUP7EgQDrWe4khb/5GHEfDkrQz3qu/sXvc0Ir1mOUWBFPHC2DjjCn/oMJuUkG1SwM8l2Bfv7h67ssES6YQ2+RjOix4yid7EXS/Ogl45PzCIPSI5+BbNs10JhE0w5uErBHlF53EDTe/TSLc+GU6DB6PP6dH912Njdr3jpNSUQ=
80 e7fa36d2ad3a7944a52dca126458d6f482db3524 0 iQIVAwUAUktg4yBXgaxoKi1yAQLO0g//du/2ypYYUfmM/yZ4zztNKIvgMSGTDVbCCGB2y2/wk2EcolpjpGTkcgnJT413ksYtw78ZU+mvv0RjgrFCm8DQ8kroJaQZ2qHmtSUb42hPBPvtg6kL9YaA4yvp87uUBpFRavGS5uX4hhEIyvZKzhXUBvqtL3TfwR7ld21bj8j00wudqELyyU9IrojIY9jkJ3XL/4shBGgP7u6OK5g8yJ6zTnWgysUetxHBPrYjG25lziiiZQFvZqK1B3PUqAOaFPltQs0PB8ipOCAHQgJsjaREj8VmC3+rskmSSy66NHm6gAB9+E8oAgOcU7FzWbdYgnz4kR3M7TQvHX9U61NinPXC6Q9d1VPhO3E6sIGvqJ4YeQOn65V9ezYuIpFSlgQzCHMmLVnOV96Uv1R/Z39I4w7D3S5qoZcQT/siQwGbsZoPMGFYmqOK1da5TZWrrJWkYzc9xvzT9m3q3Wds5pmCmo4b/dIqDifWwYEcNAZ0/YLHwCN5SEZWuunkEwtU5o7TZAv3bvDDA6WxUrrHI/y9/qvvhXxsJnY8IueNhshdmWZfXKz+lJi2Dvk7DUlEQ1zZWSsozi1E+3biMPJO47jsxjoT/jmE5+GHLCgcnXXDVBeaVal99IOaTRFukiz2EMsry1s8fnwEE5XKDKRlU/dOPfsje0gc7bgE0QD/u3E4NJ99g9A=
81 1596f2d8f2421314b1ddead8f7d0c91009358994 0 iQIVAwUAUmRq+yBXgaxoKi1yAQLolhAAi+l4ZFdQTu9yJDv22YmkmHH4fI3d5VBYgvfJPufpyaj7pX626QNW18UNcGSw2BBpYHIJzWPkk/4XznLVKr4Ciw2N3/yqloEFV0V2SSrTbMWiR9qXI4KJH+Df3KZnKs3FgiYpXkErL4GWkc1jLVR50xQ5RnkMljjtCd0NTeV2PHZ6gP2qbu6CS+5sm3AFhTDGnx8GicbMw76ZNw5M2G+T48yH9jn5KQi2SBThfi4H9Bpr8FDuR7PzQLgw9SbtYxtdQxNkK55k0nG4oLDxduNakU6SH9t8n8tdCfMt58kTzlQVrPFiTFjKu2n2JioDTz2HEivbZ5H757cu7SvpX8gW3paeBc57e+GOLMisMZABXLICq59c3QnrMwFY4FG+5cpiHVXoaZz/0bYCJx+IhU4QLWqZuzb18KSyHUCqQRzXlzS6QV5O7dY5YNQXFC44j/dS5zdgWMYo2mc6mVP2OaPUn7F6aQh5MCDYorPIOkcNjOg7ytajo7DXbzWt5Al8qt6386BJksyR3GAonc09+l8IFeNxk8HZNP4ETQ8aWj0dC9jgBDPK43T2Bju/i84s+U/bRe4tGSQalZUEv06mkIH/VRJp5w2izYTsdIjA4FT9d36OhaxlfoO1X6tHR9AyA3bF/g/ozvBwuo3kTRUUqo+Ggvx/DmcPQdDiZZQIqDBXch0=
82 d825e4025e39d1c39db943cdc89818abd0a87c27 0 iQIVAwUAUnQlXiBXgaxoKi1yAQJd3BAAi7LjMSpXmdR7B8K98C3/By4YHsCOAocMl3JXiLd7SXwKmlta1zxtkgWwWJnNYE3lVJvGCl+l4YsGKmFu755MGXlyORh1x4ohckoC1a8cqnbNAgD6CSvjSaZfnINLGZQP1wIP4yWj0FftKVANQBjj/xkkxO530mjBYnUvyA4PeDd5A1AOUUu6qHzX6S5LcprEt7iktLI+Ae1dYTkiCpckDtyYUKIk3RK/4AGWwGCPddVWeV5bDxLs8GHyMbqdBwx+2EAMtyZfXT+z6MDRsL/gEBVOXHb/UR0qpYED+qFnbtTlxqQkRE/wBhwDoRzUgcSuukQ9iPn79WNDSdT5b6Jd393uEO5BNF/DB6rrOiWmlpoooWgTY9kcwGB02v0hhLrH5r1wkv8baaPl+qjCjBxf4CNKm/83KN5/umGbZlORqPSN5JVxK6vDNwFFmHLaZbMT1g27GsGOWm84VH+dgolgk4nmRNSO37eTNM5Y1C3Zf2amiqDSRcAxCgseg0Jh10G7i52SSTcZPI2MqrwT9eIyg8PTIxT1D5bPcCzkg5nTTL6S7bet7OSwynRnHslhvVUBly8aIj4eY/5cQqAucUUa5sq6xLD8N27Tl+sQi+kE6KtWu2c0ZhpouflYp55XNMHgU4KeFcVcDtHfJRF6THT6tFcHFNauCHbhfN2F33ANMP4=
83 209e04a06467e2969c0cc6501335be0406d46ef0 0 iQIVAwUAUpv1oCBXgaxoKi1yAQKOFBAAma2wlsr3w/5NvDwq2rmOrgtNDq1DnNqcXloaOdwegX1z3/N++5uVjLjI0VyguexnwK+7E8rypMZ+4glaiZvIiGPnGMYbG9iOoz5XBhtUHzI5ECYfm5QU81by9VmCIvArDFe5Hlnz4XaXpEGnAwPywD+yzV3/+tyoV7MgsVinCMtbX9OF84/ubWKNzq2810FpQRfYoCOrF8sUed/1TcQrSm1eMB/PnuxjFCFySiR6J7Urd9bJoJIDtdZOQeeHaL5Z8Pcsyzjoe/9oTwJ3L3tl/NMZtRxiQUWtfRA0zvEnQ4QEkZSDMd/JnGiWHPVeP4P92+YN15za9yhneEAtustrTNAmVF2Uh92RIlmkG475HFhvwPJ4DfCx0vU1OOKX/U4c1rifW7H7HaipoaMlsDU2VFsAHcc3YF8ulVt27bH2yUaLGJz7eqpt+3DzZTKp4d/brZA2EkbVgsoYP+XYLbzxfwWlaMwiN3iCnlTFbNogH8MxhfHFWBj6ouikqOz8HlNl6BmSQiUCBnz5fquVpXmW2Md+TDekk+uOW9mvk1QMU62br+Z6PEZupkdTrqKaz+8ZMWvTRct8SiOcu7R11LpfERyrwYGGPei0P2YrEGIWGgXvEobXoPTSl7J+mpOA/rp2Q1zA3ihjgzwtGZZF+ThQXZGIMGaA2YPgzuYRqY8l5oc=
84 ca387377df7a3a67dbb90b6336b781cdadc3ef41 0 iQIVAwUAUsThISBXgaxoKi1yAQJpvRAAkRkCWLjHBZnWxX9Oe6t2HQgkSsmn9wMHvXXGFkcAmrqJ86yfyrxLq2Ns0X7Qwky37kOwKsywM53FQlsx9j//Y+ncnGZoObFTz9YTuSbOHGVsTbAruXWxBrGOf1nFTlg8afcbH0jPfQXwxf3ptfBhgsFCzORcqc8HNopAW+2sgXGhHnbVtq6LF90PWkbKjCCQLiX3da1uETGAElrl4jA5Y2i64S1Q/2X+UFrNslkIIRCGmAJ6BnE6KLJaUftpfbN7Br7a3z9xxWqxRYDOinxDgfAPAucOJPLgMVQ0bJIallaRu7KTmIWKIuSBgg1/hgfoX8I1w49WrTGp0gGY140kl8RWwczAz/SB03Xtbl2+h6PV7rUV2K/5g61DkwdVbWqXM9wmJZmvjEKK0qQbBT0By4QSEDNcKKqtaFFwhFzx4dkXph0igHOtXhSNzMd8PsFx/NRn9NLFIpirxfqVDwakpDNBZw4Q9hUAlTPxSFL3vD9/Zs7lV4/dAvvl+tixJEi2k/iv248b/AI1PrPIQEqDvjrozzzYvrS4HtbkUn+IiHiepQaYnpqKoXvBu6btK/nv0GTxB5OwVJzMA1RPDcxIFfZA2AazHjrXiPAl5uWYEddEvRjaCiF8xkQkfiXzLOoqhKQHdwPGcfMFEs9lNR8BrB2ZOajBJc8RPsFDswhT5h4=
85 8862469e16f9236208581b20de5f96bd13cc039d 0 iQIVAwUAUt7cLSBXgaxoKi1yAQLOkRAAidp501zafqe+JnDwlf7ORcJc+FgCE6mK1gxDfReCbkMsY7AzspogU7orqfSmr6XXdrDwmk3Y5x3mf44OGzNQjvuNWhqnTgJ7sOcU/lICGQUc8WiGNzHEMFGX9S+K4dpUaBf8Tcl8pU3iArhlthDghW6SZeDFB/FDBaUx9dkdFp6eXrmu4OuGRZEvwUvPtCGxIL7nKNnufI1du/MsWQxvC2ORHbMNtRq6tjA0fLZi4SvbySuYifQRS32BfHkFS5Qu4/40+1k7kd0YFyyQUvIsVa17lrix3zDqMavG8x7oOlqM/axDMBT6DhpdBMAdc5qqf8myz8lwjlFjyDUL6u3Z4/yE0nUrmEudXiXwG0xbVoEN8SCNrDmmvFMt6qdCpdDMkHr2TuSh0Hh4FT5CDkzPI8ZRssv/01j/QvIO3c/xlbpGRPWpsPXEVOz3pmjYN4qyQesnBKWCENsQLy/8s2rey8iQgx2GtsrNw8+wGX6XE4v3QtwUrRe12hWoNrEHWl0xnLv2mvAFqdMAMpFY6EpOKLlE4hoCs2CmTJ2dv6e2tiGTXGU6/frI5iuNRK61OXnH5OjEc8DCGH/GC7NXyDOXOB+7BdBvvf50l2C/vxR2TKgTncLtHeLCrR0GHNHsxqRo1UDwOWur0r7fdfCRvb2tIr5LORCqKYVKd60/BAXjHWc=
86 3cec5134e9c4bceab6a00c60f52a4f80677a78f2 0 iQIVAwUAUu1lIyBXgaxoKi1yAQIzCBAAizSWvTkWt8+tReM9jUetoSToF+XahLhn381AYdErFCBErX4bNL+vyEj+Jt2DHsAfabkvNBe3k7rtFlXHwpq6POa/ciFGPDhFlplNv6yN1jOKBlMsgdjpn7plZKcLHODOigU7IMlgg70Um8qVrRgQ8FhvbVgR2I5+CD6bucFzqo78wNl9mCIHIQCpGKIUoz56GbwT+rUpEB182Z3u6rf4NWj35RZLGAicVV2A2eAAFh4ZvuC+Z0tXMkp6Gq9cINawZgqfLbzVYJeXBtJC39lHPyp5P3LaEVRhntc9YTwbfkVGjyJZR60iYrieeKpOYRnzgHauPVdgVhkTkBxshmEPY7svKYSQqlj8hLuFa+a3ajbIPrpQAAi1MgtamA991atNqGiSTjdZa9kLQvfdn0k80+gkCxpuO56PhvtdjKsYVRgQMTYmQVQdh3x4WbQOSqTADXXIZUaWxx4RmNSlxY7KD+3lPP09teOD+A3B2cP60bC5NsCfULtQFXQzdC7NvfIyYfYBTZa+Pv6HFkVe10cbnqTt83hBy0D77vdaegPRe56qDNU+GrIG2/rosnlKGFjFoK/pTYkR9uzfkrhEjLwyfkoXlBqY+376W0PC5fP10pJeQBS9DuXpCPlgtyW0Jy1ayCT1YR4QJC4n75vZwTFBFRBhSi0HqFquOgy83+O0Q/k=
87 b96cb15ec9e04d8ac5ee08b34fcbbe4200588965 0 iQIVAwUAUxJPlyBXgaxoKi1yAQLIRA//Qh9qzoYthPAWAUNbzybWXC/oMBI2X89NQC7l1ivKhv7cn9L79D8SWXM18q7LTwLdlwOkV/a0NTE3tkQTLvxJpfnRLCBbMOcGiIn/PxsAae8IhMAUbR7qz+XOynHOs60ZhK9X8seQHJRf1YtOI9gYTL/WYk8Cnpmc6xZQ90TNhoPPkpdfe8Y236V11SbYtN14fmrPaWQ3GXwyrvQaqM1F7BxSnC/sbm9+/wprsTa8gRQo7YQL/T5jJQgFiatG3yayrDdJtoRq3TZKtsxw8gtQdfVCrrBibbysjM8++dnwA92apHNUY8LzyptPy7rSDXRrIpPUWGGTQTD+6HQwkcLFtIuUpw4I75SV3z2r6LyOLKzDJUIunKOOYFS/rEIQGxZHxZOBAvbI+73mHAn3pJqm+UAA7R1n7tk3JyQncg50qJlm9zIUPGpNFcdEqak5iXzGYx292VlcE+fbJYeIPWggpilaVUgdmXtMCG0O0uX6C8MDmzVDCjd6FzDJ4GTZwgmWJaamvls85CkZgyN/UqlisfFXub0A1h7qAzBSVpP1+Ti+UbBjlrGX8BMRYHRGYIeIq16elcWwSpLgshjDwNn2r2EdwX8xKU5mucgTzSLprbOYGdQaqnvf6e8IX5WMBgwVW9YdY9yJKSLF7kE1AlM9nfVcXwOK4mHoMvnNgiX3zsw=
88 3f83fc5cfe715d292069ee8417c83804f6c6c1e4 0 iQIVAwUAUztENyBXgaxoKi1yAQIpkhAAmJj5JRTSn0Dn/OTAHggalw8KYFbAck1X35Wg9O7ku7sd+cOnNnkYfqAdz2m5ikqWHP7aWMiNkNy7Ree2110NqkQVYG/2AJStXBdIOmewqnjDlNt+rbJQN/JsjeKSCy+ToNvhqX5cTM9DF2pwRjMsTXVff307S6/3pga244i+RFAeG3WCUrzfDu641MGFLjG4atCj8ZFLg9DcW5bsRiOs5ZK5Il+UAb2yyoS2KNQ70VLhYULhGtqq9tuO4nLRGN3DX/eDcYfncPCav1GckW4OZKakcbLtAdW0goSgGWloxcM+j2E6Z1JZ9tOTTkFN77EvX0ZWZLmYM7sUN1meFnKbVxrtGKlMelwKwlT252c65PAKa9zsTaRUKvN7XclyxZAYVCsiCQ/V08NXhNgXJXcoKUAeGNf6wruOyvRU9teia8fAiuHJoY58WC8jC4nYG3iZTnl+zNj2A5xuEUpYHhjUfe3rNJeK7CwUpJKlbxopu5mnW9AE9ITfI490eaapRLTojOBDJNqCORAtbggMD46fLeCOzzB8Gl70U2p5P34F92Sn6mgERFKh/10XwJcj4ZIeexbQK8lqQ2cIanDN9dAmbvavPTY8grbANuq+vXDGxjIjfxapqzsSPqUJ5KnfTQyLq5NWwquR9t38XvHZfktkd140BFKwIUAIlKKaFfYXXtM=
89 564f55b251224f16508dd1311452db7780dafe2b 0 iQIVAwUAU1BmFSBXgaxoKi1yAQJ2Aw//bjK++xJuZCIdktg/i5FxBwoxdbipfTkKsN/YjUwrEmroYM8IkqIsO+U54OGCYWr3NPJ3VS8wUQeJ+NF3ffcjmjC297R9J+X0c5G90DdQUYX44jG/tP8Tqpev4Q7DLCXT26aRwEMdJQpq0eGaqv55E5Cxnyt3RrLCqe7RjPresZFg7iYrro5nq8TGYwBhessHXnCix9QI0HtXiLpms+0UGz8Sbi9nEYW+M0OZCyO1TvykCpFzEsLNwqqtFvhOMD/AMiWcTKNUpjmOn3V83xjWl+jnDUt7BxJ7n1efUnlwl4IeWlSUb73q/durtaymb97cSdKFmXHv4pdAShQEuEpVVGO1WELsKoXmbj30ItTW2V3KvNbjFsvIdDo7zLCpXyTq1HC56W7QCIMINX2qT+hrAMWC12tPQ05f89Cv1+jpk6eOPFqIHFdi663AjyrnGll8nwN7HJWwtA5wTXisu3bec51FAq4yJTzPMtOE9spz36E+Go2hZ1cAv9oCSceZcM0wB8KiMfaZJKNZNZk1jvsdiio4CcdASOFQPOspz07GqQxVP7W+F1Oz32LgwcNAEAS/f3juwDj45GYfAWJrTh3dnJy5DTD2LVC7KtkxxUVkWkqxivnDB9anj++FN9eyekxzut5eFED+WrCfZMcSPW0ai7wbslhKUhCwSf/v3DgGwsM=
90 2195ac506c6ababe86985b932f4948837c0891b5 0 iQIVAwUAU2LO/CBXgaxoKi1yAQI/3w/7BT/VRPyxey6tYp7i5cONIlEB3gznebGYwm0SGYNE6lsvS2VLh6ztb+j4eqOadr8Ssna6bslBx+dVsm+VuJ+vrNLMucD5Uc+fhn6dAfVqg+YBzUEaedI5yNsJizcJUDI7hUVsxiPiiYd9hchCWJ+z2tVt2jCyG2lMV2rbW36AM89sgz/wn5/AaAFsgoS6up/uzA3Tmw+qZSO6dZChb4Q8midIUWEbNzVhokgYcw7/HmjmvkvV9RJYiG8aBnMdQmxTE69q2dTjnnDL6wu61WU2FpTN09HRFbemUqzAfoJp8MmXq6jWgfLcm0cI3kRo7ZNpnEkmVKsfKQCXXiaR4alt9IQpQ6Jl7LSYsYI+D4ejpYysIsZyAE8qzltYhBKJWqO27A5V4WdJsoTgA/RwKfPRlci4PY8I4N466S7PBXVz/Cc5EpFkecvrgceTmBafb8JEi+gPiD2Po4vtW3bCeV4xldiEXHeJ77byUz7fZU7jL78SjJVOCCQTJfKZVr36kTz3KlaOz3E700RxzEFDYbK7I41mdANeQBmNNbcvRTy5ma6W6I3McEcAH4wqM5fFQ8YS+QWJxk85Si8KtaDPqoEdC/0dQPavuU/jAVjhV8IbmmkOtO7WvOHQDBtrR15yMxGMnUwMrPHaRNKdHNYRG0LL7lpCtdMi1mzLQgHYY9SRYvI=
91 269c80ee5b3cb3684fa8edc61501b3506d02eb10 0 iQIVAwUAU4uX5CBXgaxoKi1yAQLpdg/+OxulOKwZN+Nr7xsRhUijYjyAElRf2mGDvMrbAOA2xNf85DOXjOrX5TKETumf1qANA5cHa1twA8wYgxUzhx30H+w5EsLjyeSsOncRnD5WZNqSoIq2XevT0T4c8xdyNftyBqK4h/SC/t2h3vEiSCUaGcfNK8yk4XO45MIk4kk9nlA9jNWdA5ZMLgEFBye2ggz0JjEAPUkVDqlr9sNORDEbnwZxGPV8CK9HaL/I8VWClaFgjKQmjqV3SQsNFe2XPffzXmIipFJ+ODuXVxYpAsvLiGmcfuUfSDHQ4L9QvjBsWe1PgYMr/6CY/lPYmR+xW5mJUE9eIdN4MYcXgicLrmMpdF5pToNccNCMtfa6CDvEasPRqe2bDzL/Q9dQbdOVE/boaYBlgmYLL+/u+dpqip9KkyGgbSo9uJzst1mLTCzJmr5bw+surul28i9HM+4+Lewg4UUdHLz46no1lfTlB5o5EAhiOZBTEVdoBaKfewVpDa/aBRvtWX7UMVRG5qrtA0sXwydN00Jaqkr9m20W0jWjtc1ZC72QCrynVHOyfIb2rN98rnuy2QN4bTvjNpNjHOhhhPTOoVo0YYPdiUupm46vymUTQCmWsglU4Rlaa3vXneP7JenL5TV8WLPs9J28lF0IkOnyBXY7OFcpvYO1euu7iR1VdjfrQukMyaX18usymiA=
92 2d8cd3d0e83c7336c0cb45a9f88638363f993848 0 iQIVAwUAU7OLTCBXgaxoKi1yAQJ+pw/+M3yOesgf55eo3PUTZw02QZxDyEg9ElrRc6664/QFXaJuYdz8H3LGG/NYs8uEdYihiGpS1Qc70jwd1IoUlrCELsaSSZpzWQ+VpQFX29aooBoetfL+8WgqV8zJHCtY0E1EBg/Z3ZL3n2OS++fVeWlKtp5mwEq8uLTUmhIS7GseP3bIG/CwF2Zz4bzhmPGK8V2s74aUvELZLCfkBE1ULNs7Nou1iPDGnhYOD53eq1KGIPlIg1rnLbyYw5bhS20wy5IxkWf2eCaXfmQBTG61kO5m3nkzfVgtxmZHLqYggISTJXUovfGsWZcp5a71clCSMVal+Mfviw8L/UPHG0Ie1c36djJiFLxM0f2HlwVMjegQOZSAeMGg1YL1xnIys2zMMsKgEeR+JISTal1pJyLcT9x5mr1HCnUczSGXE5zsixN+PORRnZOqcEZTa2mHJ1h5jJeEm36B/eR57BMJG+i0QgZqTpLzYTFrp2eWokGMjFB1MvgAkL2YoRsw9h6TeIwqzK8mFwLi28bf1c90gX9uMbwY/NOqGzfQKBR9bvCjs2k/gmJ+qd5AbC3DvOxHnN6hRZUqNq76Bo4F+CUVcjQ/NXnfnOIVNbILpl5Un5kl+8wLFM+mNxDxduajaUwLhSHZofKmmCSLbuuaGmQTC7a/4wzhQM9e5dX0X/8sOo8CptW7uw4=
93 6c36dc6cd61a0e1b563f1d51e55bdf4dacf12162 0 iQIVAwUAU8n97yBXgaxoKi1yAQKqcA/+MT0VFoP6N8fHnlxj85maoM2HfZbAzX7oEW1B8F1WH6rHESHDexDWIYWJ2XnEeTD4GCXN0/1p+O/I0IMPNzqoSz8BU0SR4+ejhRkGrKG7mcFiF5G8enxaiISn9nmax6DyRfqtOQBzuXYGObXg9PGvMS6zbR0SorJK61xX7fSsUNN6BAvHJfpwcVkOrrFAIpEhs/Gh9wg0oUKCffO/Abs6oS+P6nGLylpIyXqC7rKZ4uPVc6Ljh9DOcpV4NCU6kQbNE7Ty79E0/JWWLsHOEY4F4WBzI7rVh7dOkRMmfNGaqvKkuNkJOEqTR1o1o73Hhbxn4NU7IPbVP/zFKC+/4QVtcPk2IPlpK1MqA1H2hBNYZhJlNhvAa7LwkIxM0916/zQ8dbFAzp6Ay/t/L0tSEcIrudTz2KTrY0WKw+pkzB/nTwaS3XZre6H2B+gszskmf1Y41clkIy/nH9K7zBuzANWyK3+bm40vmMoBbbnsweUAKkyCwqm4KTyQoYQWzu/ZiZcI+Uuk/ajJ9s7EhJbIlSnYG9ttWL/IZ1h+qPU9mqVO9fcaqkeL/NIRh+IsnzaWo0zmHU1bK+/E29PPGGf3v6+IEJmXg7lvNl5pHiMd2tb7RNO/UaNSv1Y2E9naD4FQwSWo38GRBcnRGuKCLdZNHGUR+6dYo6BJCGG8wtZvNXb3TOo=
94 3178e49892020336491cdc6945885c4de26ffa8b 0 iQIVAwUAU9whUCBXgaxoKi1yAQJDKxAAoGzdHXV/BvZ598VExEQ8IqkmBVIP1QZDVBr/orMc1eFM4tbGKxumMGbqgJsg+NetI0irkh/YWeJQ13lT4Og72iJ+4UC9eF9pcpUKr/0eBYdU2N/p2MIbVNWh3aF5QkbuQpSri0VbHOWkxqwoqrrwXEjgHaKYP4PKh+Dzukax4yzBUIyzAG38pt4a8hbjnozCl2uAikxk4Ojg+ZufhPoZWgFEuYzSfK5SrwVKOwuxKYFGbbVGTQMIXLvBhOipAmHp4JMEYHfG85kwuyx/DCDbGmXKPQYQfClwjJ4ob/IwG8asyMsPWs+09vrvpVO08HBuph3GjuiWJ1fhEef/ImWmZdQySI9Y4SjwP4dMVfzLCnY+PYPDM9Sq/5Iee13gI2lVM2NtAfQZPXh9l8u6SbCir1UhMNMx0qVMkqMAATmiZ+ETHCO75q4Wdcmnv5fk2PbvaGBVtrHGeiyuz5mK/j4cMbd0R9R0hR1PyC4dOhNqOnbqELNIe0rKNByG1RkpiQYsqZTU6insmnZrv4fVsxfA4JOObPfKNT4oa24MHS73ldLFCfQAuIxVE7RDJJ3bHeh/yO6Smo28FuVRldBl5e+wj2MykS8iVcuSa1smw6gJ14iLBH369nlR3fAAQxI0omVYPDHLr7SsH3vJasTaCD7V3SL4lW6vo/yaAh4ImlTAE+Y=
95 5dc91146f35369949ea56b40172308158b59063a 0 iQIVAwUAVAUgJyBXgaxoKi1yAQJkEg/9EXFZvPpuvU7AjII1dlIT8F534AXrO30+H6hweg+h2mUCSb/mZnbo3Jr1tATgBWbIKkYmmsiIKNlJMFNPZTWhImGcVA93t6v85tSFiNJRI2QP9ypl5wTt2KhiS/s7GbUYCtPDm6xyNYoSvDo6vXJ5mfGlgFZY5gYLwEHq/lIRWLWD4EWYWbk5yN+B7rHu6A1n3yro73UR8DudEhYYqC23KbWEqFOiNd1IGj3UJlxIHUE4AcDukxbfiMWrKvv1kuT/vXak3X7cLXlO56aUbMopvaUflA3PSr3XAqynDd69cxACo/T36fuwzCQN4ICpdzGTos0rQALSr7CKF5YP9LMhVhCsOn0pCsAkSiw4HxxbcHQLl+t+0rchNysc4dWGwDt6GAfYcdm3fPtGFtA3qsN8lOpCquFH3TAZ3TrIjLFoTOk6s1xX1x5rjP/DAHc/y3KZU0Ffx3TwdQEEEIFaAXaxQG848rdfzV42+dnFnXh1G/MIrKAmv3ZSUkQ3XJfGc7iu82FsYE1NLHriUQDmMRBzCoQ1Rn1Kji119Cxf5rsMcQ6ZISR1f0jDCUS/qxlHvSqETLp8H63NSUfvuKSC7uC6pGvq9XQm1JRNO5UuJfK6tHzy0jv9bt2IRo2xbmvpDu9L5oHHd3JePsAmFmbrFf/7Qem3JyzEvRcpdcdHtefxcxc=
96 f768c888aaa68d12dd7f509dcc7f01c9584357d0 0 iQIVAwUAVCxczSBXgaxoKi1yAQJYiA/9HnqKuU7IsGACgsUGt+YaqZQumg077Anj158kihSytmSts6xDxqVY1UQB38dqAKLJrQc7RbN0YK0NVCKZZrx/4OqgWvjiL5qWUJKqQzsDx4LGTUlbPlZNZawW2urmmYW6c9ZZDs1EVnVeZMDrOdntddtnBgtILDwrZ8o3U7FwSlfnm03vTkqUMj9okA3AsI8+lQIlo4qbqjQJYwvUC1ZezRdQwaT1LyoWUgjmhoZ1XWcWKOs9baikaJr6fMv8vZpwmaOY1+pztxYlROeSPVWt9P6yOf0Hi/2eg8AwSZLaX96xfk9IvXUSItg/wjTWP9BhnNs/ulwTnN8QOgSXpYxH4RXwsYOyU7BvwAekA9xi17wuzPrGEliScplxICIZ7jiiwv/VngMvM9AYw2mNBvZt2ZIGrrLaK6pq/zBm5tbviwqt5/8U5aqO8k1O0e4XYm5WmQ1c2AkXRO+xwvFpondlSF2y0flzf2FRXP82QMfsy7vxIP0KmaQ4ex+J8krZgMjNTwXh2M4tdYNtu5AehJQEP3l6giy2srkMDuFLqoe1yECjVlGdgA86ve3J/84I8KGgsufYMhfQnwHHGXCbONcNsDvO0QOee6CIQVcdKCG7dac3M89SC6Ns2CjuC8BIYDRnxbGQb7Fvn4ZcadyJKKbXQJzMgRV25K6BAwTIdvYAtgU=
97 7f8d16af8cae246fa5a48e723d48d58b015aed94 0 iQIVAwUAVEL0XyBXgaxoKi1yAQJLkRAAjZhpUju5nnSYtN9S0/vXS/tjuAtBTUdGwc0mz97VrM6Yhc6BjSCZL59tjeqQaoH7Lqf94pRAtZyIB2Vj/VVMDbM+/eaoSr1JixxppU+a4eqScaj82944u4C5YMSMC22PMvEwqKmy87RinZKJlFwSQ699zZ5g6mnNq8xeAiDlYhoF2QKzUXwnKxzpvjGsYhYGDMmVS1QPmky4WGvuTl6KeGkv8LidKf7r6/2RZeMcq+yjJ7R0RTtyjo1cM5dMcn/jRdwZxuV4cmFweCAeoy5guV+X6du022TpVndjOSDoKiRgdk7pTuaToXIy+9bleHpEo9bwKx58wvOMg7sirAYjrA4Xcx762RHiUuidTTPktm8sNsBQmgwJZ8Pzm+8TyHjFGLnBfeiDbQQEdLCXloz0jVOVRflDfMays1WpAYUV8XNOsgxnD2jDU8L0NLkJiX5Y0OerGq9AZ+XbgJFVBFhaOfsm2PEc3jq00GOLzrGzA+4b3CGpFzM3EyK9OnnwbP7SqCGb7PJgjmQ7IO8IWEmVYGaKtWONSm8zRLcKdH8xuk8iN1qCkBXMty/wfTEVTkIlMVEDbslYkVfj0rAPJ8B37bfe0Yz4CEMkCmARIB1rIOpMhnavXGuD50OP2PBBY/8DyC5aY97z9f04na/ffk+l7rWaHihjHufKIApt5OnfJ1w=
98 ced632394371a36953ce4d394f86278ae51a2aae 0 iQIVAwUAVFWpfSBXgaxoKi1yAQLCQw//cvCi/Di3z/2ZEDQt4Ayyxv18gzewqrYyoElgnEzr5uTynD9Mf25hprstKla/Y5C6q+y0K6qCHPimGOkz3H+wZ2GVUgLKAwMABkfSb5IZiLTGaB2DjAJKZRwB6h43wG/DSFggE3dYszWuyHW88c72ZzVF5CSNc4J1ARLjDSgnNYJQ6XdPw3C9KgiLFDXzynPpZbPg0AK5bdPUKJruMeIKPn36Hx/Tv5GXUrbc2/lcnyRDFWisaDl0X/5eLdA+r3ID0cSmyPLYOeCgszRiW++KGw+PPDsWVeM3ZaZ9SgaBWU7MIn9A7yQMnnSzgDbN+9v/VMT3zbk1WJXlQQK8oA+CCdHH9EY33RfZ6ST/lr3pSQbUG1hdK6Sw+H6WMkOnnEk6HtLwa4xZ3HjDpoPkhVV+S0C7D5WWOovbubxuBiW5v8tK4sIOS6bAaKevTBKRbo4Rs6qmS/Ish5Q+z5bKst80cyEdi4QSoPZ/W+6kh1KfOprMxynwPQhtEcDYW2gfLpgPIM7RdXPKukLlkV2qX3eF/tqApGU4KNdP4I3N80Ri0h+6tVU/K4TMYzlRV3ziLBumJ4TnBrTHU3X6AfZUfTgslQzokX8/7a3tbctX6kZuJPggLGisdFSdirHbrUc+y5VKuJtPr+LxxgZKRFbs2VpJRem6FvwGNyndWLv32v0GMtQ=
99 643c58303fb0ec020907af28b9e486be299ba043 0 iQIVAwUAVGKawCBXgaxoKi1yAQL7zxAAjpXKNvzm/PKVlTfDjuVOYZ9H8w9QKUZ0vfrNJrN6Eo6hULIostbdRc25FcMWocegTqvKbz3IG+L2TKOIdZJS9M9QS4URybUd37URq4Jai8kMiJY31KixNNnjO2G1B39aIXUhY+EPx12aY31/OVy4laXIVtN6qpSncjo9baXSOMZmx6RyA1dbyfwXRjT/aODCGHZXgLJHS/kHlkCsThVlqYQ4rUCDkXIeMqIGF1CR0KjfmKpp1fS14OMgpLgdnt9+pnBZ+qcf1YdpOeQob1zwunjMYOyYC74FyOTdwaynU2iDsuBrmkE8kgEedIn7+WWe9fp/6TQJMVOeTQPZBNSRRSUYCw5Tg/0L/+jLtzjc2mY4444sDPbR7scrtU+/GtvlR5z0Y5pofwEdFME7PZNOp9a4kMiSa7ZERyGdN7U1pDu9JU6BZRz+nPzW217PVnTF7YFV/GGUzMTk9i7EZb5M4T9r9gfxFSMPeT5ct712CdBfyRlsSbSWk8XclTXwW385kLVYNDtOukWrvEiwxpA14Xb/ZUXbIDZVf5rP2HrZHMkghzeUYPjRn/IlgYUt7sDNmqFZNIc9mRFrZC9uFQ/Nul5InZodNODQDM+nHpxaztt4xl4qKep8SDEPAQjNr8biC6T9MtLKbWbSKDlqYYNv0pb2PuGub3y9rvkF1Y05mgM=
100 902554884335e5ca3661d63be9978eb4aec3f68a 0 iQIVAwUAVH0KMyBXgaxoKi1yAQLUKxAAjgyYpmqD0Ji5OQ3995yX0dmwHOaaSuYpq71VUsOMYBskjH4xE2UgcTrX8RWUf0E+Ya91Nw3veTf+IZlYLaWuOYuJPRzw+zD1sVY8xprwqBOXNaA7n8SsTqZPSh6qgw4S0pUm0xJUOZzUP1l9S7BtIdJP7KwZ7hs9YZev4r9M3G15xOIPn5qJqBAtIeE6f5+ezoyOpSPZFtLFc4qKQ/YWzOT5uuSaYogXgVByXRFaO84+1TD93LR0PyVWxhwU9JrDU5d7P/bUTW1BXdjsxTbBnigWswKHC71EHpgz/HCYxivVL30qNdOm4Fow1Ec2GdUzGunSqTPrq18ScZDYW1x87f3JuqPM+ce/lxRWBBqP1yE30/8l/Us67m6enWXdGER8aL1lYTGOIWAhvJpfzv9KebaUq1gMFLo6j+OfwR3rYPiCHgi20nTNBa+LOceWFjCGzFa3T9UQWHW/MBElfAxK65uecbGRRYY9V1/+wxtTUiS6ixpmzL8S7uUd5n6oMaeeMiD82NLgPIbMyUHQv6eFEcCj0U9NT2uKbFRmclMs5V+8D+RTCsLJ55R9PD5OoRw/6K/coqqPShYmJvgYsFQPzXVpQdCRae31xdfGFmd5KUetqyrT+4GUdJWzSm0giSgovpEJNxXglrvNdvSO7fX3R1oahhwOwtGqMwNilcK+iDw=
101 6dad422ecc5adb63d9fa649eeb8e05a5f9bc4900 0 iQIVAwUAVJNALCBXgaxoKi1yAQKgmw/+OFbHHOMmN2zs2lI2Y0SoMALPNQBInMBq2E6RMCMbfcS9Cn75iD29DnvBwAYNWaWsYEGyheJ7JjGBiuNKPOrLaHkdjG+5ypbhAfNDyHDiteMsXfH7D1L+cTOAB8yvhimZHOTTVF0zb/uRyVIPNowAyervUVRjDptzdfcvjUS+X+/Ufgwms6Y4CcuzFLFCxpmryJhLtOpwUPLlzIqeNkFOYWkHanCgtZX03PNIWhorH3AWOc9yztwWPQ+kcKl3FMlyuNMPhS/ElxSF6GHGtreRbtP+ZLoSIOMb2QBKpGDpZLgJ3JQEHDcZ0h5CLZWL9dDUJR3M8pg1qglqMFSWMgRPTzxPS4QntPgT/Ewd3+U5oCZUh052fG41OeCZ0CnVCpqi5PjUIDhzQkONxRCN2zbjQ2GZY7glbXoqytissihEIVP9m7RmBVq1rbjOKr+yUetJ9gOZcsMtZiCEq4Uj2cbA1x32MQv7rxwAgQP1kgQ62b0sN08HTjQpI7/IkNALLIDHoQWWr45H97i34qK1dd5uCOnYk7juvhGNX5XispxNnC01/CUVNnqChfDHpgnDjgT+1H618LiTgUAD3zo4IVAhCqF5XWsS4pQEENOB3Msffi62fYowvJx7f/htWeRLZ2OA+B85hhDiD4QBdHCRoz3spVp0asNqDxX4f4ndj8RlzfM=
102 1265a3a71d75396f5d4cf6935ae7d9ba5407a547 0 iQIVAwUAVKXKYCBXgaxoKi1yAQIfsA/+PFfaWuZ6Jna12Y3MpKMnBCXYLWEJgMNlWHWzwU8lD26SKSlvMyHQsVZlkld2JmFugUCn1OV3OA4YWT6BA7VALq6Zsdcu5Dc8LRbyajBUkzGRpOUyWuFzjkCpGVbrQzbCR/bel/BBXzSqL4ipdtWgJ4y+WpZIhWkNXclBkR52b5hUTjN9vzhyhVVI7eURGwIEf7vVs1fDOcEGtaGY/ynzMTzyxIDsEEygCZau86wpKlYlqhCgxKDyzyGfpH3B1UlNGFt1afW8AWe1eHjdqC7TJZpMqmQ/Ju8vco8Xht6OXw4ZLHj7y39lpccfKTBLiK/cAKSg+xgyaH/BLhzoEkNAwYSFAB4i4IoV0KUC8nFxHfsoswBxJnMqU751ziMrpZ/XHZ1xQoEOdXgz2I04vlRn8xtynOVhcgjoAXwtbia7oNh/qCH/hl5/CdAtaawuCxJBf237F+cwur4PMAAvsGefRfZco/DInpr3qegr8rwInTxlO48ZG+o5xA4TPwT0QQTUjMdNfC146ZSbp65wG7VxJDocMZ8KJN/lqPaOvX+FVYWq4YnJhlldiV9DGgmym1AAaP0D3te2GcfHXpt/f6NYUPpgiBHy0GnOlNcQyGnnONg1A6oKVWB3k7WP28+PQbQEiCIFk2nkf5VZmye7OdHRGKOFfuprYFP1WwTWnVoNX9c=
103 db8e3f7948b1fdeb9ad12d448fc3525759908b9f 0 iQIVAwUAVLsaciBXgaxoKi1yAQKMIA//a90/GvySL9UID+iYvzV2oDaAPDD0T+4Xs43I7DT5NIoDz+3yq2VV54XevQe5lYiURmsb/Q9nX2VR/Qq1J9c/R6Gy+CIfmJ3HzMZ0aAX8ZlZgQPYZKh/2kY5Ojl++k6MTqbqcrICNs4+UE/4IAxPyOfu5gy7TpdJmRZo2J3lWVC2Jbhd02Mzb+tjtfbOM+QcQxPwt9PpqmQszJceyVYOSm3jvD1uJdSOC04tBQrQwrxktQ09Om0LUMMaB5zFXpJtqUzfw7l4U4AaddEmkd3vUfLtHxc21RB01c3cpe2dJnjifDfwseLsI8rS4jmi/91c74TeBatSOhvbqzEkm/p8xZFXE4Uh+EpWjTsVqmfQaRq6NfNCR7I/kvGv8Ps6w8mg8uX8fd8lx+GJbodj+Uy0X3oqHyqPMky/df5i79zADBDuz+yuxFfDD9i22DJPIYcilfGgwpIUuO2lER5nSMVmReuWTVBnT6SEN66Q4KR8zLtIRr+t1qUUCy6wYbgwrdHVCbgMF8RPOVZPjbs17RIqcHjch0Xc7bShKGhQg4WHDjXHK61w4tOa1Yp7jT6COkl01XC9BLcGxJYKFvNCbeDZQGvVgJNoEvHxBxD9rGMVRjfuxeJawc2fGzZJn0ySyLDW0pfd4EJNgTh9bLdPjWz2VlXqn4A6bgaLgTPqjmN0VBXw=
104 fbdd5195528fae4f41feebc1838215c110b25d6a 0 iQIVAwUAVM7fBCBXgaxoKi1yAQKoYw/+LeIGcjQmHIVFQULsiBtPDf+eGAADQoP3mKBy+eX/3Fa0qqUNfES2Q3Y6RRApyZ1maPRMt8BvvhZMgQsu9QIrmf3zsFxZGFwoyrIj4hM3xvAbEZXqmWiR85/Ywd4ImeLaZ0c7mkO1/HGF1n2Mv47bfM4hhNe7VGJSSrTY4srFHDfk4IG9f18DukJVzRD9/dZeBw6eUN1ukuLEgQAD5Sl47bUdKSetglOSR1PjXfZ1hjtz5ywUyBc5P9p3LC4wSvlcJKl22zEvB3L0hkoDcPsdIPEnJAeXxKlR1rQpoA3fEgrstGiSNUW/9Tj0VekAHLO95SExmQyoG/AhbjRRzIj4uQ0aevCJyiAhkv+ffOSf99PMW9L1k3tVjLhpMWEz9BOAWyX7cDFWj5t/iktI046O9HGN9SGVx18e9xM6pEgRcLA2TyjEmtkA4jX0JeN7WeCweMLiSxyGP7pSPSJdpJeXaFtRpSF62p/G0Z5wN9s05LHqDyqNVtCvg4WjkuV5LZSdLbMcYBWGBxQzCG6qowXFXIawmbaFiBZwTfOgNls9ndz5RGupAaxY317prxPFv/pXoesc1P8bdK09ZvjhbmmD66Q/BmS2dOMQ8rXRjuVdlR8j2QBtFZxekMcRD02nBAVnwHg1VWQMIRaGjdgmW4wOkirWVn7me177FnBxrxW1tG4=
105 5b4ed033390bf6e2879c8f5c28c84e1ee3b87231 0 iQIVAwUAVPQL9CBXgaxoKi1yAQJIXxAAtD2hWhaKa+lABmCOYG92FE/WdqY/91Xv5atTL8Xeko/MkirIKZiOuxNWX+J34TVevINZSWmMfDSc5TkGxktL9jW/pDB/CXn+CVZpxRabPYFH9HM2K3g8VaTV1MFtV2+feOMDIPCmq5ogMF9/kXjmifiEBrJcFsE82fdexJ3OHoOY4iHFxEhh3GzvNqEQygk4VeU6VYziNvSQj9G//PsK3Bmk7zm5ScsZcMVML3SIYFuej1b1PI1v0N8mmCRooVNBGhD/eA0iLtdh/hSb9s/8UgJ4f9HOcx9zqs8V4i14lpd/fo0+yvFuVrVbWGzrDrk5EKLENhVPwvc1KA32PTQ4Z9u7VQIBIxq3K5lL2VlCMIYc1BSaSQBjuiLm8VdN6iDuf5poNZhk1rvtpQgpxJzh362dlGtR/iTJuLCeW7gCqWUAorLTeHy0bLQ/jSOeTAGys8bUHtlRL4QbnhLbUmJmRYVvCJ+Yt1aTgTSNcoFjoLJarR1169BXgdCA38BgReUL6kB224UJSTzB1hJUyB2LvCWrXZMipZmR99Iwdq7MePD3+AoSIXQNUMY9blxuuF5x7W2ikNXmVWuab4Z8rQRtmGqEuIMBSunxAnZSn+i8057dFKlq+/yGy+WW3RQg+RnLnwZs1zCDTfu98/GT5k5hFpjXZeUWWiOVwQJ5HrqncCw=
106 07a92bbd02e5e3a625e0820389b47786b02b2cea 0 iQIVAwUAVPSP9SBXgaxoKi1yAQLkBQ//dRQExJHFepJfZ0gvGnUoYI4APsLmne5XtfeXJ8OtUyC4a6RylxA5BavDWgXwUh9BGhOX2cBSz1fyvzohrPrvNnlBrYKAvOIJGEAiBTXHYTxHINEKPtDF92Uz23T0Rn/wnSvvlbWF7Pvd+0DMJpFDEyr9n6jvVLR7mgxMaCqZbVaB1W/wTwDjni780WgVx8OPUXkLx3/DyarMcIiPeI5UN+FeHDovTsBWFC95msFLm80PMRPuHOejWp65yyEemGujZEPO2D5VVah7fshM2HTz63+bkEBYoqrftuv3vXKBRG78MIrUrKpqxmnCKNKDUUWJ4yk3+NwuOiHlKdly5kZ7MNFaL73XKo8HH287lDWz0lIazs91dQA9a9JOyTsp8YqGtIJGGCbhrUDtiQJ199oBU84mw3VH/EEzm4mPv4sW5fm7BnnoH/a+9vXySc+498rkdLlzFwxrQkWyJ/pFOx4UA3mCtGQK+OSwLPc+X4SRqA4fiyqKxVAL1kpLTSDL3QA82I7GzBaXsxUXzS4nmteMhUyzTdwAhKVydL0gC3d7NmkAFSyRjdGzutUUXshYxg0ywRgYebe8uzJcTj4nNRgaalYLdg3guuDulD+dJmILsrcLmA6KD/pvfDn8PYt+4ZjNIvN2E9GF6uXDu4Ux+AlOTLk9BChxUF8uBX9ev5cvWtQ=
107 2e2e9a0750f91a6fe0ad88e4de34f8efefdcab08 0 iQIVAwUAVRw4nyBXgaxoKi1yAQIFExAAkbCPtLjQlJvPaYCL1KhNR+ZVAmn7JrFH3XhvR26RayYbs4NxR3W1BhwhDy9+W+28szEx1kQvmr6t1bXAFywY0tNJOeuLU7uFfmbgAfYgkQ9kpsQNqFYkjbCyftw0S9vX9VOJ9DqUoDWuKfX7VzjkwE9dCfKI5F+dvzxnd6ZFjB85nyHBQuTZlzXl0+csY212RJ2G2j/mzEBVyeZj9l7Rm+1X8AC1xQMWRJGiyd0b7nhYqoOcceeJFAV1t9QO4+gjmkM5kL0orjxTnuVsxPTxcC5ca1BfidPWrZEto3duHWNiATGnCDylxxr52BxCAS+BWePW9J0PROtw1pYaZ9pF4N5X5LSXJzqX7ZiNGckxqIjry09+Tbsa8FS0VkkYBEiGotpuo4Jd05V6qpXfW2JqAfEVo6X6aGvPM2B7ZUtKi30I4J+WprrOP3WgZ/ZWHe1ERYKgjDqisn3t/D40q30WQUeQGltGsOX0Udqma2RjBugO5BHGzJ2yer4GdJXg7q1OMzrjAEuz1IoKvIB/o1pg86quVA4H2gQnL1B8t1M38/DIafyw7mrEY4Z3GL44Reev63XVvDE099Vbhqp7ufwq81Fpq7Xxa5vsr9SJ+8IqqQr8AcYSuK3G3L6BmIuSUAYMRqgl35FWoWkGyZIG5c6K6zI8w5Pb0aGi6Lb2Wfb9zbc=
108 e89f909edffad558b56f4affa8239e4832f88de0 0 iQIVAwUAVTBozCBXgaxoKi1yAQLHeg/+IvfpPmG7OSqCoHvMVETYdrqT7lKCwfCQWMFOC/2faWs1n4R/qQNm6ckE5OY888RK8tVQ7ue03Pg/iyWgQlYfS7Njd3WPjS4JsnEBxIvuGkIu6TPIXAUAH0PFTBh0cZEICDpPEVT2X3bPRwDHA+hUE9RrxM5zJ39Fpk/pTYCjQ9UKfEhXlEfka75YB39g2Y/ssaSbn5w/tAAx8sL72Y4G96D4IV2seLHZhB3VQ7UZKThEWn6UdVOoKj+urIwGaBYMeekGVtHSh6fnHOw3EtDO9mQ5HtAz2Bl4CwRYN8eSN+Dwgr+mdk8MWpQQJ+i1A8jUhUp8gn1Pe5GkIH4CWZ9+AvLLnshe2MkVaTT1g7EQk37tFkkdZDRBsOHIvpF71B9pEA1gMUlX4gKgh5YwukgpQlDmFCfY7XmX6eXw9Ub+EckEwYuGMz7Fbwe9J/Ce4DxvgJgq3/cu/jb3bmbewH6tZmcrlqziqqA8GySIwcURnF1c37e7+e7x1jhFJfCWpHzvCusjKhUp9tZsl9Rt1Bo/y41QY+avY7//ymhbwTMKgqjzCYoA+ipF4JfZlFiZF+JhvOSIFb0ltkfdqKD+qOjlkFaglvQU1bpGKLJ6cz4Xk2Jqt5zhcrpyDMGVv9aiWywCK2ZP34RNaJ6ZFwzwdpXihqgkm5dBGoZ4ztFUfmjXzIg=
109 8cc6036bca532e06681c5a8fa37efaa812de67b5 0 iQIVAwUAVUP0xCBXgaxoKi1yAQLIChAAme3kg1Z0V8t5PnWKDoIvscIeAsD2s6EhMy1SofmdZ4wvYD1VmGC6TgXMCY7ssvRBhxqwG3GxwYpwELASuw2GYfVot2scN7+b8Hs5jHtkQevKbxarYni+ZI9mw/KldnJixD1yW3j+LoJFh/Fu6GD2yrfGIhimFLozcwUu3EbLk7JzyHSn7/8NFjLJz0foAYfcbowU9/BFwNVLrQPnsUbWcEifsq5bYso9MBO9k+25yLgqHoqMbGpJcgjubNy1cWoKnlKS+lOJl0/waAk+aIjHXMzFpRRuJDjxEZn7V4VdV5d23nrBTcit1BfMzga5df7VrLPVRbom1Bi0kQ0BDeDex3hHNqHS5X+HSrd/njzP1xp8twG8hTE+njv85PWoGBTo1eUGW/esChIJKA5f3/F4B9ErgBNNOKnYmRgxixd562OWAwAQZK0r0roe2H/Mfg2VvgxT0kHd22NQLoAv0YI4jcXcCFrnV/80vHUQ8AsAYAbkLcz1jkfk3YwYDP8jbJCqcwJRt9ialYKJwvXlEe0TMeGdq7EjCO0z/pIpu82k2R/C0FtCFih3bUvJEmWoVVx8UGkDDQEORLbzxQCt0IOiQGFcoCCxgQmL0x9ZoljCWg5vZuuhU4uSOuRTuM+aa4xoLkeOcvgGRSOXrqfkV8JpWKoJB4dmY2qSuxw8LsAAzK0=
110 ed18f4acf435a2824c6f49fba40f42b9df5da7ad 0 iQIVAwUAVWy9mCBXgaxoKi1yAQIm+Q/+I/tV8DC51d4f/6T5OR+motlIx9U5za5p9XUUzfp3tzSY2PutVko/FclajVdFekZsK5pUzlh/GZhfe1jjyEEIr3UC3yWk8hMcvvS+2UDmfy81QxN7Uf0kz4mZOlME6d/fYDzf4cDKkkCXoec3kyZBw7L84mteUcrJoyb5K3fkQBrK5CG/CV7+uZN6b9+quKjtDhDEkAyc6phNanzWNgiHGucEbNgXsKM01HmV1TnN4GXTKx8y2UDalIJOPyes2OWHggibMHbaNnGnwSBAK+k29yaQ5FD0rsA+q0j3TijA1NfqvtluNEPbFOx/wJV4CxonYad93gWyEdgU34LRqqw1bx7PFUvew2/T3TJsxQLoCt67OElE7ScG8evuNEe8/4r3LDnzYFx7QMP5r5+B7PxVpj/DT+buS16BhYS8pXMMqLynFOQkX5uhEM7mNC0JTXQsBMHSDAcizVDrdFCF2OSfQjLpUfFP1VEWX7EInqj7hZrd+GE7TfBD8/rwSBSkkCX2aa9uKyt6Ius1GgQUuEETskAUvvpsNBzZxtvGpMMhqQLGlJYnBbhOmsbOyTSnXU66KJ5e/H3O0KRrF09i74v30DaY4uIH8xG6KpSkfw5s/oiLCtagfc0goUvvojk9pACDR3CKM/jVC63EVp2oUcjT72jUgSLxBgi7siLD8IW86wc=
111 540cd0ddac49c1125b2e013aa2ff18ecbd4dd954 0 iQIVAwUAVZRtzSBXgaxoKi1yAQJVLhAAtfn+8OzHIp6wRC4NUbkImAJRLsNTRPKeRSWPCF5O5XXQ84hp+86qjhndIE6mcJSAt4cVP8uky6sEa8ULd6b3ACRBvtgZtsecA9S/KtRjyE9CKr8nP+ogBNqJPaYlTz9RuwGedOd+8I9lYgsnRjfaHSByNMX08WEHtWqAWhSkAz/HO32ardS38cN97fckCgQtA8v7c77nBT7vcw4epgxyUQvMUxUhqmCVVhVfz8JXa5hyJxFrOtqgaVuQ1B5Y/EKxcyZT+JNHPtu3V1uc1awS/w16CEPstNBSFHax5MuT9UbY0mV2ZITP99EkM+vdomh82VHdnMo0i7Pz7XF45ychD4cteroO9gGqDDt9j7hd1rubBX1bfkPsd/APJlyeshusyTj+FqsUD/HDlvM9LRjY1HpU7i7yAlLQQ3851XKMLUPNFYu2r3bo8Wt/CCHtJvB4wYuH+7Wo3muudpU01ziJBxQrUWwPbUrG+7LvO1iEEVxB8l+8Vq0mU3Te7lJi1kGetm6xHNbtvQip5P2YUqvv+lLo/K8KoJDxsh63Y01JGwdmUDb8mnFlRx4J7hQJaoNEvz3cgnc4X8gDJD8sUOjGOPnbtz2QwTY+zj/5+FdLxWDCxNrHX5vvkVdJHcCqEfVvQTKfDMOUeKuhjI7GD7t3xRPfUxq19jjoLPe7aqn1Z1s=
112 96a38d44ba093bd1d1ecfd34119e94056030278b 0 iQIVAwUAVarUUyBXgaxoKi1yAQIfJw/+MG/0736F/9IvzgCTF6omIC+9kS8JH0n/JBGPhpbPAHK4xxjhOOz6m3Ia3c3HNoy+I6calwU6YV7k5dUzlyLhM0Z5oYpdrH+OBNxDEsD5SfhclfR63MK1kmgtD33izijsZ++6a+ZaVfyxpMTksKOktWSIDD63a5b/avb6nKY64KwJcbbeXPdelxvXV7TXYm0GvWc46BgvrHOJpYHCDaXorAn6BMq7EQF8sxdNK4GVMNMVk1njve0HOg3Kz8llPB/7QmddZXYLFGmWqICyUn1IsJDfePxzh8sOYVCbxAgitTJHJJmmH5gzVzw7t7ljtmxSJpcUGQJB2MphejmNFGfgvJPB9c6xOCfUqDjxN5m24V+UYesZntpfgs3lpfvE7785IpVnf6WfKG4PKty01ome/joHlDlrRTekKMlpiBapGMfv8EHvPBrOA+5yAHNfKsmcyCcjD1nvXYZ2/X9qY35AhdcBuNkyp55oPDOdtYIHfnOIxlYMKG1dusDx3Z4eveF0lQTzfRVoE5w+k9A2Ov3Zx0aiSkFFevJjrq5QBfs9dAiT8JYgBmWhaJzCtJm12lQirRMKR/br88Vwt/ry/UVY9cereMNvRYUGOGfC8CGGDCw4WDD+qWvyB3mmrXVuMlXxQRIZRJy5KazaQXsBWuIsx4kgGqC5Uo+yzpiQ1VMuCyI=
113 21aa1c313b05b1a85f8ffa1120d51579ddf6bf24 0 iQIVAwUAVbuouCBXgaxoKi1yAQL2ng//eI1w51F4YkDiUAhrZuc8RE/chEd2o4F6Jyu9laA03vbim598ntqGjX3+UkOyTQ/zGVeZfW2cNG8zkJjSLk138DHCYl2YPPD/yxqMOJp/a7U34+HrA0aE5Y2pcfx+FofZHRvRtt40UCngicjKivko8au7Ezayidpa/vQbc6dNvGrwwk4KMgOP2HYIfHgCirR5UmaWtNpzlLhf9E7JSNL5ZXij3nt6AgEPyn0OvmmOLyUARO/JTJ6vVyLEtwiXg7B3sF5RpmyFDhrkZ+MbFHgL4k/3y9Lb97WaZl8nXJIaNPOTPJqkApFY/56S12PKYK4js2OgU+QsX1XWvouAhEx6CC6Jk9EHhr6+9qxYFhBJw7RjbswUG6LvJy/kBe+Ei5UbYg9dATf3VxQ6Gqs19lebtzltERH2yNwaHyVeqqakPSonOaUyxGMRRosvNHyrTTor38j8d27KksgpocXzBPZcc1MlS3vJg2nIwZlc9EKM9z5R0J1KAi1Z/+xzBjiGRYg5EZY6ElAw30eCjGta7tXlBssJiKeHut7QTLxCZHQuX1tKxDDs1qlXlGCMbrFqo0EiF9hTssptRG3ZyLwMdzEjnh4ki6gzONZKDI8uayAS3N+CEtWcGUtiA9OwuiFXTwodmles/Mh14LEhiVZoDK3L9TPcY22o2qRuku/6wq6QKsg=
114 1a45e49a6bed023deb229102a8903234d18054d3 0 iQIVAwUAVeYa2SBXgaxoKi1yAQLWVA//Q7vU0YzngbxIbrTPvfFiNTJcT4bx9u1xMHRZf6QBIE3KtRHKTooJwH9lGR0HHM+8DWWZup3Vzo6JuWHMGoW0v5fzDyk2czwM9BgQQPfEmoJ/ZuBMevTkTZngjgHVwhP3tHFym8Rk9vVxyiZd35EcxP+4F817GCzD+K7XliIBqVggmv9YeQDXfEtvo7UZrMPPec79t8tzt2UadI3KC1jWUriTS1Fg1KxgXW6srD80D10bYyCkkdo/KfF6BGZ9SkF+U3b95cuqSmOfoyyQwUA3JbMXXOnIefnC7lqRC2QTC6mYDx5hIkBiwymXJBe8rpq/S94VVvPGfW6A5upyeCZISLEEnAz0GlykdpIy/NogzhmWpbAMOus05Xnen6xPdNig6c/M5ZleRxVobNrZSd7c5qI3aUUyfMKXlY1j9oiUTjSKH1IizwaI3aL/MM70eErBxXiLs2tpQvZeaVLn3kwCB5YhywO3LK0x+FNx4Gl90deAXMYibGNiLTq9grpB8fuLg9M90JBjFkeYkrSJ2yGYumYyP/WBA3mYEYGDLNstOby4riTU3WCqVl+eah6ss3l+gNDjLxiMtJZ/g0gQACaAvxQ9tYp5eeRMuLRTp79QQPxv97s8IyVwE/TlPlcSFlEXAzsBvqvsolQXRVi9AxA6M2davYabBYAgRf6rRfgujoU=
115 9a466b9f9792e3ad7ae3fc6c43c3ff2e136b718d 0 iQIVAwUAVg1oMSBXgaxoKi1yAQLPag/+Pv0+pR9b9Y5RflEcERUzVu92q+l/JEiP7PHP9pAZuXoQ0ikYBFo1Ygw8tkIG00dgEaLk/2b7E3OxaU9pjU3thoX//XpTcbkJtVhe7Bkjh9/S3dRpm2FWNL9n0qnywebziB45Xs8XzUwBZTYOkVRInYr/NzSo8KNbQH1B4u2g56veb8u/7GtEvBSGnMGVYKhVUZ3jxyDf371QkdafMOJPpogkZcVhXusvMZPDBYtTIzswyxBJ2jxHzjt8+EKs+FI3FxzvQ9Ze3M5Daa7xfiHI3sOgECO8GMVaJi0F49lttKx08KONw8xLlEof+cJ+qxLxQ42X5XOQglJ2/bv5ES5JiZYAti2XSXbZK96p4wexqL4hnaLVU/2iEUfqB9Sj6itEuhGOknPD9fQo1rZXYIS8CT5nGTNG4rEpLFN6VwWn1btIMNkEHw998zU7N3HAOk6adD6zGcntUfMBvQC3V4VK3o7hp8PGeySrWrOLcC/xLKM+XRonz46woJK5D8w8lCVYAxBWEGKAFtj9hv9R8Ye9gCW0Q8BvJ7MwGpn+7fLQ1BVZdV1LZQTSBUr5u8mNeDsRo4H2hITQRhUeElIwlMsUbbN078a4JPOUgPz1+Fi8oHRccBchN6I40QohL934zhcKXQ+NXYN8BgpCicPztSg8O8Y/qvhFP12Zu4tOH8P/dFY=
116 b66e3ca0b90c3095ea28dfd39aa24247bebf5c20 0 iQIVAwUAViarTyBXgaxoKi1yAQLZgRAAh7c7ebn7kUWI5M/b/T6qHGjFrU5azkjamzy9IG+KIa2hZgSMxyEM7JJUFqKP4TiWa3sW03bjKGSM/SjjDSSyheX+JIVSPNyKrBwneYhPq45Ius8eiHziClkt0CSsl2d9xDRpI0JmHbN0Pf8nh7rnbL+231GDAOT6dP+2S8K1HGa/0BgEcL9gpYs4/2GyjL+hBSUjyrabzvwe48DCN5W0tEJbGFw5YEADxdfbVbNEuXL81tR4PFGiJxPW0QKRLDB74MWmiWC0gi2ZC/IhbNBZ2sLb6694d4Bx4PVwtiARh63HNXVMEaBrFu1S9NcMQyHvAOc6Zw4izF/PCeTcdEnPk8J1t5PTz09Lp0EAKxe7CWIViy350ke5eiaxO3ySrNMX6d83BOHLDqEFMSWm+ad+KEMT4CJrK4X/n/XMgEFAaU5nWlIRqrLRIeU2Ifc625T0Xh4BgTqXPpytQxhgV5b+Fi6duNk4cy+QnHT4ymxI6BPD9HvSQwc+O7h37qjvJVZmpQX6AP8O75Yza8ZbcYKRIIxZzOkwNpzE5A/vpvP5bCRn7AGcT3ORWmAYr/etr3vxUvt2fQz6U/R4S915V+AeWBdcp+uExu6VZ42M0vhhh0lyzx1VRJGVdV+LoxFKkaC42d0yT+O1QEhSB7WL1D3/a/iWubv6ieB/cvNMhFaK9DA=
117 47dd34f2e7272be9e3b2a5a83cd0d20be44293f4 0 iQIVAwUAVjZiKiBXgaxoKi1yAQKBWQ/+JcE37vprSOA5e0ezs/avC7leR6hTlXy9O5bpFnvMpbVMTUp+KfBE4HxTT0KKXKh9lGtNaQ+lAmHuy1OQE1hBKPIaCUd8/1gunGsXgRM3TJ9LwjFd4qFpOMxvOouc6kW5kmea7V9W2fg6aFNjjc/4/0J3HMOIjmf2fFz87xqR1xX8iezJ57A4pUPNViJlOWXRzfa56cI6VUe5qOMD0NRXcY+JyI5qW25Y/aL5D9loeKflpzd53Ue+Pu3qlhddJd3PVkaAiVDH+DYyRb8sKgwuiEsyaBO18IBgC8eDmTohEJt6707A+WNhwBJwp9aOUhHC7caaKRYhEKuDRQ3op++VqwuxbFRXx22XYR9bEzQIlpsv9GY2k8SShU5MZqUKIhk8vppFI6RaID5bmALnLLmjmXfSPYSJDzDuCP5UTQgI3PKPOATorVrqMdKzfb7FiwtcTvtHAXpOgLaY9P9XIePbnei6Rx9TfoHYDvzFWRqzSjl21xR+ZUrJtG2fx7XLbMjEAZJcnjP++GRvNbHBOi57aX0l2LO1peQqZVMULoIivaoLFP3i16RuXXQ/bvKyHmKjJzGrLc0QCa0yfrvV2m30RRMaYlOv7ToJfdfZLXvSAP0zbAuDaXdjGnq7gpfIlNE3xM+kQ75Akcf4V4fK1p061EGBQvQz6Ov3PkPiWL/bxrQ=
118 1aa5083cbebbe7575c88f3402ab377539b484897 0 iQIVAwUAVkEdCCBXgaxoKi1yAQKdWg//crTr5gsnHQppuD1p+PPn3/7SMsWJ7bgbuaXgERDLC0zWMfhM2oMmu/4jqXnpangdBVvb0SojejgzxoBo9FfRQiIoKt0vxmmn+S8CrEwb99rpP4M7lgyMAInKPMXQdYxkoDNwL70Afmog6eBtlxjYnu8nmUE/swu6JoVns+tF8UOvIKFYbuCcGujo2pUOQC0xBGiHeHSGRDJOlWmY2d7D/PkQtQE/u/d4QZt7enTHMiV44XVJ8+0U0f1ZQE7V+hNWf+IjwcZtL95dnQzUKs6tXMIln/OwO+eJ3d61BfLvmABvCwUC9IepPssNSFBUfGqBAP5wXOzFIPSYn00IWpmZtCnpUNL99X1IV3RP+p99gnEDTScQFPYt5B0q5I1nFdRh1p48BSF/kjPA7V++UfBwMXrrYLKhUR9BjmrRzYnyXJKwbH6iCNj5hsXUkVrBdBi/FnMczgsVILfFcIXUfnJD3E/dG+1lmuObg6dEynxiGChTuaR4KkLa5ZRkUcUl6fWlSRsqSNbGEEbdwcI+nTCZqJUlLSghumhs0Z89Hs1nltBd1ALX2VLJEHrKMrFQ8NfEBeCB6ENqMJi5qPlq354MCdGOZ9RvisX/HlxE4Q61BW0+EwnyXSch6LFSOS3axOocUazMoK1XiOTJSv/5bAsnwb0ztDWeUj9fZEJL+SWtgB8=
119 2d437a0f3355834a9485bbbeb30a52a052c98f19 0 iQIVAwUAVl5U9CBXgaxoKi1yAQLocg//a4YFz9UVSIEzVEJMUPJnN2dBvEXRpwpb5CdKPd428+18K6VWZd5Mc6xNNRV5AV/hCYylgqDplIvyOvwCj7uN8nEOrLUQQ0Pp37M5ZIX8ZVCK/wgchJ2ltabUG1NrZ7/JA84U79VGLAECMnD0Z9WvZDESpVXmdXfxrk1eCc3omRB0ofNghEx+xpYworfZsu8aap1GHQuBsjPv4VyUWGpMq/KA01PdxRTELmrJnfSyr0nPKwxlI5KsbA1GOe+Mk3tp5HJ42DZqLtKSGPirf6E+6lRJeB0H7EpotN4wD3yZDsw6AgRb2C/ay/3T3Oz7CN+45mwuujV9Cxx5zs1EeOgZcqgA/hXMcwlQyvQDMrWpO8ytSBm6MhOuFOTB3HnUxfsnfSocLJsbNwGWKceAzACcXSqapveVAz/7h+InFgl/8Qce28UJdnX5wro5gP6UWt+xrvc7vfmVGgI3oxbiOUrfglhkjmrxBjEiDQy4BWH7HWMZUVxnqPQRcxIE10+dv0KtM/PBkbUtnbGJ88opFBGkFweje5vQcZy/duuPEIufRkPr8EV47QjOxlvldEjlLq3+QUdJZEgCIFw1X0y7Pix4dsPFjwOmAyo4El1ePrdFzG3dXSVA3eHvMDRnYnNlue9wHvKhYbBle5xTOZBgGuMzhDVe+54JLql5JYr4WrI1pvA=
120 ea389970c08449440587712117f178d33bab3f1e 0 iQIVAwUAVociGyBXgaxoKi1yAQJx9Q//TzMypcls5CQW3DM9xY1Q+RFeIw1LcDIev6NDBjUYxULb2WIK2qPw4Th5czF622SMd+XO/kiQeWYp9IW90MZOUVT1YGgUPKlKWMjkf0lZEPzprHjHq0+z/no1kBCBQg2uUOLsb6Y7zom4hFCyPsxXOk5nnxcFEK0VDbODa9zoKb/flyQ7rtzs+Z6BljIQ0TJAJsXs+6XgrW1XJ/f6nbeqsQyPklIBJuGKiaU1Pg8wQe6QqFaO1NYgM3hBETku6r3OTpUhu/2FTUZ7yDWGGzBqmifxzdHoj7/B+2qzRpII77PlZqoe6XF+UOObSFnhKvXKLjlGY5cy3SXBMbHkPcYtHua8wYR8LqO2bYYnsDd9qD0DJ+LlqH0ZMUkB2Cdk9q/cp1PGJWGlYYecHP87DLuWKwS+a6LhVI9TGkIUosVtLaIMsUUEz83RJFb4sSGOXtjk5DDznn9QW8ltXXMTdGQwFq1vmuiXATYenhszbvagrnbAnDyNFths4IhS1jG8237SB36nGmO3zQm5V7AMHfSrISB/8VPyY4Si7uvAV2kMWxuMhYuQbBwVx/KxbKrYjowuvJvCKaV101rWxvSeU2wDih20v+dnQKPveRNnO8AAK/ICflVVsISkd7hXcfk+SnhfxcPQTr+HQIJEW9wt5Q8WbgHk9wuR8kgXQEX6tCGpT/w=
121 158bdc8965720ca4061f8f8d806563cfc7cdb62e 0 iQIVAwUAVqBhFyBXgaxoKi1yAQLJpQ//S8kdgmVlS+CI0d2hQVGYWB/eK+tcntG+bZKLto4bvVy5d0ymlDL0x7VrJMOkwzkU1u/GaYo3L6CVEiM/JGCgB32bllrpx+KwQ0AyHswMZruo/6xrjDIYymLMEJ9yonXBZsG7pf2saYTHm3C5/ZIPkrDZSlssJHJDdeWqd75hUnx3nX8dZ4jIIxYDhtdB5/EmuEGOVlbeBHVpwfDXidSJUHJRwJvDqezUlN003sQdUvOHHtRqBrhsYEhHqPMOxDidAgCvjSfWZQKOTKaPE/gQo/BP3GU++Fg55jBz+SBXpdfQJI2Gd8FZfjLkhFa9vTTTcd10YCd4CZbYLpj/4R2xWj1U4oTVEFa6d+AA5Yyu8xG53XSCCPyzfagyuyfLqsaq5r1qDZO/Mh5KZCTvc9xSF5KXj57mKvzMDpiNeQcamGmsV4yXxymKJKGMQvbnzqp+ItIdbnfk38Nuac8rqNnGmFYwMIPa50680vSZT/NhrlPJ8FVTJlfHtSUZbdjPpsqw7BgjFWaVUdwgCKIGERiK7zfR0innj9rF5oVwT8EbKiaR1uVxOKnTwZzPCbdO1euNg/HutZLVQmugiLAv5Z38L3YZf5bH7zJdUydhiTI4mGn/mgncsKXoSarnnduhoYu9OsQZc9pndhxjAEuAslEIyBsLy81fR2HOhUzw5FGNgdY=
122 2408645de650d8a29a6ce9e7dce601d8dd0d1474 0 iQIVAwUAVq/xFSBXgaxoKi1yAQLsxhAAg+E6uJCtZZOugrrFi9S6C20SRPBwHwmw22PC5z3Ufp9Vf3vqSL/+zmWI9d/yezIVcTXgM9rKCvq58sZvo4FuO2ngPx7bL9LMJ3qx0IyHUKjwa3AwrzjSzvVhNIrRoimD+lVBI/GLmoszpMICM+Nyg3D41fNJKs6YpnwwsHNJkjMwz0n2SHAShWAgIilyANNVnwnzHE68AIkB/gBkUGtrjf6xB9mXQxAv4GPco/234FAkX9xSWsM0Rx+JLLrSBXoHmIlmu9LPjC0AKn8/DDke+fj7bFaF7hdJBUYOtlYH6f7NIvyZSpw0FHl7jPxoRCtXzIV+1dZEbbIMIXzNtzPFVDYDfMhLqpTgthkZ9x0UaMaHecCUWYYBp8G/IyVS40GJodl8xnRiXUkFejbK/NDdR1f9iZS0dtiFu66cATMdb6d+MG+zW0nDKiQmBt6bwynysqn4g3SIGQFEPyEoRy0bXiefHrlkeHbdfc4zgoejx3ywcRDMGvUbpWs5C43EPu44irKXcqC695vAny3A7nZpt/XP5meDdOF67DNQPvhFdjPPbJBpSsUi2hUlZ+599wUfr3lNVzeEzHT7XApTOf6ysuGtHH3qcVHpFqQSRL1MI0f2xL13UadgTVWYrnHEis7f+ncwlWiR0ucpJB3+dQQh3NVGVo89MfbIZPkA8iil03U=
123 b698abf971e7377d9b7ec7fc8c52df45255b0329 0 iQIVAwUAVrJ4YCBXgaxoKi1yAQJsKw/+JHSR0bIyarO4/VilFwsYxCprOnPxmUdS4qc4yjvpbf7Dqqr/OnOHJA29LrMoqWqsHgREepemjqiNindwNtlZec+KgmbF08ihSBBpls96UTTYTcytKRkkbrB+FhwB0iDl/o8RgGPniyG6M7gOp6p8pXQVRCOToIY1B/G0rtpkcU1N3GbiZntO5Fm/LPAVIE74VaDsamMopQ/wEB8qiERngX/M8SjO1ZSaVNW6KjRUsarLXQB9ziVJBolK/WnQsDwEeuWU2udpjBiOHnFC6h84uBpc8rLGhr419bKMJcjgl+0sl2zHGPY2edQYuJqVjVENzf4zzZA+xPgKw3GrSTpd37PEnGU/fufdJ0X+pp3kvmO1cV3TsvVMTCn7NvS6+w8SGdHdwKQQwelYI6vmJnjuOCATbafJiHMaOQ0GVYYk6PPoGrYcQ081x6dStCMaHIPOV1Wirwd2wq+SN9Ql8H6njftBf5Sa5tVWdW/zrhsltMsdZYZagZ/oFT3t83exL0rgZ96bZFs0j3HO3APELygIVuQ6ybPsFyToMDbURNDvr7ZqPKhQkkdHIUMqEez5ReuVgpbO9CWV/yWpB1/ZCpjNBZyDvw05kG2mOoC7AbHc8aLUS/8DetAmhwyb48LW4qjfUkO7RyxVSxqdnaBOMlsg1wsP2S+SlkZKsDHjcquZJ5U=
124 d493d64757eb45ada99fcb3693e479a51b7782da 0 iQIVAwUAVtYt4SBXgaxoKi1yAQL6TQ/9FzYE/xOSC2LYqPdPjCXNjGuZdN1WMf/8fUMYT83NNOoLEBGx37C0bAxgD4/P03FwYMuP37IjIcX8vN6fWvtG9Oo0o2n/oR3SKjpsheh2zxhAFX3vXhFD4U18wCz/DnM0O1qGJwJ49kk/99WNgDWeW4n9dMzTFpcaeZBCu1REbZQS40Z+ArXTDCr60g5TLN1XR1WKEzQJvF71rvaE6P8d3GLoGobTIJMLi5UnMwGsnsv2/EIPrWHQiAY9ZEnYq6deU/4RMh9c7afZie9I+ycIA/qVH6vXNt3/a2BP3Frmv8IvKPzqwnoWmIUamew9lLf1joD5joBy8Yu+qMW0/s6DYUGQ4Slk9qIfn6wh4ySgT/7FJUMcayx9ONDq7920RjRc+XFpD8B3Zhj2mM+0g9At1FgX2w2Gkf957oz2nlgTVh9sdPvP6UvWzhqszPMpdG5Vt0oc5vuyobW333qSkufCxi5gmH7do1DIzErMcy8b6IpZUDeQ/dakKwLQpZVVPF15IrNa/zsOW55SrGrL8/ErM/mXNQBBAqvRsOLq2njFqK2JaoG6biH21DMjHVZFw2wBRoLQxbOppfz2/e3mNkNy9HjgJTW3+0iHWvRzMSjwRbk9BlbkmH6kG5163ElHq3Ft3uuQyZBL9I5SQxlHi9s/CV0YSTYthpWR3ChKIMoqBQ0=
125 ae279d4a19e9683214cbd1fe8298cf0b50571432 0 iQIVAwUAVvqzViBXgaxoKi1yAQKUCxAAtctMD3ydbe+li3iYjhY5qT0wyHwPr9fcLqsQUJ4ZtD4sK3oxCRZFWFxNBk5bIIyiwusSEJPiPddoQ7NljSZlYDI0HR3R4vns55fmDwPG07Ykf7aSyqr+c2ppCGzn2/2ID476FNtzKqjF+LkVyadgI9vgZk5S4BgdSlfSRBL+1KtB1BlF5etIZnc5U9qs1uqzZJc06xyyF8HlrmMZkAvRUbsx/JzA5LgzZ2WzueaxZgYzYjDk0nPLgyPPBj0DVyWXnW/kdRNmKHNbaZ9aZlWmdPCEoq5iBm71d7Xoa61shmeuVZWvxHNqXdjVMHVeT61cRxjdfxTIkJwvlRGwpy7V17vTgzWFxw6QJpmr7kupRo3idsDydLDPHGUsxP3uMZFsp6+4rEe6qbafjNajkRyiw7kVGCxboOFN0rLVJPZwZGksEIkw58IHcPhZNT1bHHocWOA/uHJTAynfKsAdv/LDdGKcZWUCFOzlokw54xbPvdrBtEOnYNp15OY01IAJd2FCUki5WHvhELUggTjfank1Tc3/Rt1KrGOFhg80CWq6eMiuiWkHGvYq3fjNLbgjl3JJatUFoB+cX1ulDOGsLJEXQ4v5DNHgel0o2H395owNlStksSeW1UBVk0hUK/ADtVUYKAPEIFiboh1iDpEOl40JVnYdsGz3w5FLj2w+16/1vWs=
126 740156eedf2c450aee58b1a90b0e826f47c5da64 0 iQIVAwUAVxLGMCBXgaxoKi1yAQLhIg/8DDX+sCz7LmqO47/FfTo+OqGR+bTTqpfK3WebitL0Z6hbXPj7s45jijqIFGqKgMPqS5oom1xeuGTPHdYA0NNoc/mxSCuNLfuXYolpNWPN71HeSDRV9SnhMThG5HSxI+P0Ye4rbsCHrVV+ib1rV81QE2kZ9aZsJd0HnGd512xJ+2ML7AXweM/4lcLmMthN+oi/dv1OGLzfckrcr/fEATCLZt55eO7idx11J1Fk4ptQ6dQ/bKznlD4hneyy1HMPsGxw+bCXrMF2C/nUiRLHdKgGqZ+cDq6loQRfFlQoIhfoEnWC424qbjH4rvHgkZHqC59Oi/ti9Hi75oq9Tb79yzlCY/fGsdrlJpEzrTQdHFMHUoO9CC+JYObXHRo3ALnC5350ZBKxlkdpmucrHTgcDabfhRlx9vDxP4RDopm2hAjk2LJH7bdxnGEyZYkTOZ3hXKnVpt2hUQb4jyzzC9Kl47TFpPKNVKI+NLqRRZAIdXXiy24KD7WzzE6L0NNK0/IeqKBENLL8I1PmDQ6XmYTQVhTuad1jjm2PZDyGiXmJFZO1O/NGecVTvVynKsDT6XhEvzyEtjXqD98rrhbeMHTcmNSwwJMDvm9ws0075sLQyq2EYFG6ECWFypdA/jfumTmxOTkMtuy/V1Gyq7YJ8YaksZ7fXNY9VuJFP72grmlXc6Dvpr4=
127 f85de28eae32e7d3064b1a1321309071bbaaa069 0 iQIVAwUAVyZQaiBXgaxoKi1yAQJhCQ//WrRZ55k3VI/OgY+I/HvgFHOC0sbhe207Kedxvy00a3AtXM6wa5E95GNX04QxUfTWUf5ZHDfEgj0/mQywNrH1oJG47iPZSs+qXNLqtgAaXtrih6r4/ruUwFCRFxqK9mkhjG61SKicw3Q7uGva950g6ZUE5BsZ7XJWgoDcJzWKR+AH992G6H//Fhi4zFQAmB34++sm80wV6wMxVKA/qhQzetooTR2x9qrHpvCKMzKllleJe48yzPLJjQoaaVgXCDav0eIePFNw0WvVSldOEp/ADDdTGa65qsC1rO2BB1Cu5+frJ/vUoo0PwIgqgD6p2i41hfIKvkp6130TxmRVxUx+ma8gBYEpPIabV0flLU72gq8lMlGBBSnQ+fcZsfs/Ug0xRN0tzkEScmZFiDxRGk0y7IalXzv6irwOyC2fZCajXGJDzkROQXWMgy9eKkwuFhZBmPVYtrATSq3jHLVmJg5vfdeiVzA6NKxAgGm2z8AsRrijKK8WRqFYiH6xcWKG5u+FroPQdKa0nGCkPSTH3tvC6fAHTVm7JeXch5QE/LiS9Y575pM2PeIP+k+Fr1ugK0AEvYJAXa5UIIcdszPyI+TwPTtWaQ83X99qGAdmRWLvSYjqevOVr7F/fhO3XKFXRCcHA3EzVYnG7nWiVACYF3H2UgN4PWjStbx/Qhhdi9xAuks=
128 a56296f55a5e1038ea5016dace2076b693c28a56 0 iQIVAwUAVyZarCBXgaxoKi1yAQL87g/8D7whM3e08HVGDHHEkVUgqLIfueVy1mx0AkRvelmZmwaocFNGpZTd3AjSwy6qXbRNZFXrWU85JJvQCi3PSo/8bK43kwqLJ4lv+Hv2zVTvz30vbLWTSndH3oVRu38lIA7b5K9J4y50pMCwjKLG9iyp+aQG4RBz76fJMlhXy0gu38A8JZVKEeAnQCbtzxKXBzsC8k0/ku/bEQEoo9D4AAGlVTbl5AsHMp3Z6NWu7kEHAX/52/VKU2I0LxYqRxoL1tjTVGkAQfkOHz1gOhLXUgGSYmA9Fb265AYj9cnGWCfyNonlE0Rrk2kAsrjBTGiLyb8WvK/TZmRo4ZpNukzenS9UuAOKxA22Kf9+oN9kKBu1HnwqusYDH9pto1WInCZKV1al7DMBXbGFcnyTXk2xuiTGhVRG5LzCO2QMByBLXiYl77WqqJnzxK3v5lAc/immJl5qa3ATUlTnVBjAs+6cbsbCoY6sjXCT0ClndA9+iZZ1TjPnmLrSeFh5AoE8WHmnFV6oqGN4caX6wiIW5vO+x5Q2ruSsDrwXosXIYzm+0KYKRq9O+MaTwR44Dvq3/RyeIu/cif/Nc7B8bR5Kf7OiRf2T5u97MYAomwGcQfXqgUfm6y7D3Yg+IdAdAJKitxhRPsqqdxIuteXMvOvwukXNDiWP1zsKoYLI37EcwzvbGLUlZvg=
129 aaabed77791a75968a12b8c43ad263631a23ee81 0 iQIVAwUAVzpH4CBXgaxoKi1yAQLm5A/9GUYv9CeIepjcdWSBAtNhCBJcqgk2cBcV0XaeQomfxqYWfbW2fze6eE+TrXPKTX1ajycgqquMyo3asQolhHXwasv8+5CQxowjGfyVg7N/kyyjgmJljI+rCi74VfnsEhvG/J4GNr8JLVQmSICfALqQjw7XN8doKthYhwOfIY2vY419613v4oeBQXSsItKC/tfKw9lYvlk4qJKDffJQFyAekgv43ovWqHNkl4LaR6ubtjOsxCnxHfr7OtpX3muM9MLT/obBax5I3EsmiDTQBOjbvI6TcLczs5tVCnTa1opQsPUcEmdA4WpUEiTnLl9lk9le/BIImfYfEP33oVYmubRlKhJYnUiu89ao9L+48FBoqCY88HqbjQI1GO6icfRJN/+NLVeE9wubltbWFETH6e2Q+Ex4+lkul1tQMLPcPt10suMHnEo3/FcOTPt6/DKeMpsYgckHSJq5KzTg632xifyySmb9qkpdGGpY9lRal6FHw3rAhRBqucMgxso4BwC51h04RImtCUQPoA3wpb4BvCHba/thpsUFnHefOvsu3ei4JyHXZK84LPwOj31PcucNFdGDTW6jvKrF1vVUIVS9uMJkJXPu0V4i/oEQSUKifJZivROlpvj1eHy3KeMtjq2kjGyXY2KdzxpT8wX/oYJhCtm1XWMui5f24XBjE6xOcjjm8k4=
130 a9764ab80e11bcf6a37255db7dd079011f767c6c 0 iQIVAwUAV09KHyBXgaxoKi1yAQJBWg/+OywRrqU+zvnL1tHJ95PgatsF7S4ZAHZFR098+oCjUDtKpvnm71o2TKiY4D5cckyD2KNwLWg/qW6V+5+2EYU0Y/ViwPVcngib/ZeJP+Nr44TK3YZMRmfFuUEEzA7sZ2r2Gm8eswv//W79I0hXJeFd/o6FgLnn7AbOjcOn3IhWdGAP6jUHv9zyJigQv6K9wgyvAnK1RQE+2CgMcoyeqao/zs23IPXI6XUHOwfrQ7XrQ83+ciMqN7XNRx+TKsUQoYeUew4AanoDSMPAQ4kIudsP5tOgKeLRPmHX9zg6Y5S1nTpLRNdyAxuNuyZtkQxDYcG5Hft/SIx27tZUo3gywHL2U+9RYD2nvXqaWzT3sYB2sPBOiq7kjHRgvothkXemAFsbq2nKFrN0PRua9WG4l3ny0xYmDFPlJ/s0E9XhmQaqy+uXtVbA2XdLEvE6pQ0YWbHEKMniW26w6LJkx4IV6RX/7Kpq7byw/bW65tu/BzgISKau5FYLY4CqZJH7f8QBg3XWpzB91AR494tdsD+ugM45wrY/6awGQx9CY5SAzGqTyFuSFQxgB2rBurb01seZPf8nqG8V13UYXfX/O3/WMOBMr7U/RVqmAA0ZMYOyEwfVUmHqrFjkxpXX+JdNKRiA1GJp5sdRpCxSeXdQ/Ni6AAGZV2IyRb4G4Y++1vP4yPBalas=
131 26a5d605b8683a292bb89aea11f37a81b06ac016 0 iQIVAwUAV3bOsSBXgaxoKi1yAQLiDg//fxmcNpTUedsXqEwNdGFJsJ2E25OANgyv1saZHNfbYFWXIR8g4nyjNaj2SjtXF0wzOq5aHlMWXjMZPOT6pQBdTnOYDdgv+O8DGpgHs5x/f+uuxtpVkdxR6uRP0/ImlTEtDix8VQiN3nTu5A0N3C7E2y+D1JIIyTp6vyjzxvGQTY0MD/qgB55Dn6khx8c3phDtMkzmVEwL4ItJxVRVNw1m+2FOXHu++hJEruJdeMV0CKOV6LVbXHho+yt3jQDKhlIgJ65EPLKrf+yRalQtSWpu7y/vUMcEUde9XeQ5x05ebCiI4MkJ0ULQro/Bdx9vBHkAstUC7D+L5y45ZnhHjOwxz9c3GQMZQt1HuyORqbBhf9hvOkUQ2GhlDHc5U04nBe0VhEoCw9ra54n+AgUyqWr4CWimSW6pMTdquCzAAbcJWgdNMwDHrMalCYHhJksKFARKq3uSTR1Noz7sOCSIEQvOozawKSQfOwGxn/5bNepKh4uIRelC1uEDoqculqCLgAruzcMNIMndNVYaJ09IohJzA9jVApa+SZVPAeREg71lnS3d8jaWh1Lu5JFlAAKQeKGVJmNm40Y3HBjtHQDrI67TT59oDAhjo420Wf9VFCaj2k0weYBLWSeJhfUZ5x3PVpAHUvP/rnHPwNYyY0wVoQEvM/bnQdcpICmKhqcK+vKjDrM=
132 519bb4f9d3a47a6e83c2b414d58811ed38f503c2 0 iQIVAwUAV42tNyBXgaxoKi1yAQI/Iw//V0NtxpVD4sClotAwffBVW42Uv+SG+07CJoOuFYnmHZv/plOzXuuJlmm95L00/qyRCCTUyAGxK/eP5cAKP2V99ln6rNhh8gpgvmZlnYjU3gqFv8tCQ+fkwgRiWmgKjRL6/bK9FY5cO7ATLVu3kCkFd8CEgzlAaUqBfkNFxZxLDLvKqRlhXxVXhKjvkKg5DZ6eJqRQY7w3UqqR+sF1rMLtVyt490Wqv7YQKwcvY7MEKTyH4twGLx/RhBpBi+GccVKvWC011ffjSjxqAfQqrrSVt0Ld1Khj2/p1bDDYpTgtdDgCzclSXWEQpmSdFRBF5wYs/pDMUreI/E6mlWkB4hfZZk1NBRPRWYikXwnhU3ziubCGesZDyBYLrK1vT+tf6giseo22YQmDnOftbS999Pcn04cyCafeFuOjkubYaINB25T20GS5Wb4a0nHPRAOOVxzk/m/arwYgF0ZZZDDvJ48TRMDf3XOc1jc5qZ7AN/OQKbvh2B08vObnnPm3lmBY1qOnhwzJxpNiq+Z/ypokGXQkGBfKUo7rWHJy5iXLb3Biv9AhxY9d5pSTjBmTAYJEic3q03ztzlnfMyi+C13+YxFAbSSNGBP8Hejkkz0NvmB1TBuCKpnZA8spxY5rhZ/zMx+cCw8hQvWHHDUURps7SQvZEfrJSCGJFPDHL3vbfK+LNwI=
133 299546f84e68dbb9bd026f0f3a974ce4bdb93686 0 iQIcBAABCAAGBQJXn3rFAAoJELnJ3IJKpb3VmZoQAK0cdOfi/OURglnN0vYYGwdvSXTPpZauPEYEpwML3dW1j6HRnl5L+H8D8vlYzahK95X4+NNBhqtyyB6wmIVI0NkYfXfd6ACntJE/EnTdLIHIP2NAAoVsggIjiNr26ubRegaD5ya63Ofxz+Yq5iRsUUfHet7o+CyFhExyzdu+Vcz1/E9GztxNfTDVpC/mf+RMLwQTfHOhoTVbaamLCmGAIjw39w72X+vRMJoYNF44te6PvsfI67+6uuC0+9DjMnp5eL/hquSQ1qfks71rnWwxuiPcUDZloIueowVmt0z0sO4loSP1nZ5IP/6ZOoAzSjspqsxeay9sKP0kzSYLGsmCi29otyVSnXiKtyMCW5z5iM6k8XQcMi5mWy9RcpqlNYD7RUTn3g0+a8u7F6UEtske3/qoweJLPhtTmBNOfDNw4JXwOBSZea0QnIIjCeCc4ZGqfojPpbvcA4rkRpxI23YoMrT2v/kp4wgwrqK9fi8ctt8WbXpmGoAQDXWj2bWcuzj94HsAhLduFKv6sxoDz871hqjmjjnjQSU7TSNNnVzdzwqYkMB+BvhcNYxk6lcx3Aif3AayGdrWDubtU/ZRNoLzBwe6gm0udRMXBj4D/60GD6TIkYeL7HjJwfBb6Bf7qvQ6y7g0zbYG9uwBmMeduU7XchErGqQGSEyyJH3DG9OLaFOj
134 ccd436f7db6d5d7b9af89715179b911d031d44f1 0 iQIVAwUAV8h7F0emf/qjRqrOAQjmdhAAgYhom8fzL/YHeVLddm71ZB+pKDviKASKGSrBHY4D5Szrh/pYTedmG9IptYue5vzXpspHAaGvZN5xkwrz1/5nmnCsLA8DFaYT9qCkize6EYzxSBtA/W1S9Mv5tObinr1EX9rCSyI4HEJYE8i1IQM5h07SqUsMKDoasd4e29t6gRWg5pfOYq1kc2MTck35W9ff1Fii8S28dqbO3cLU6g5K0pT0JLCZIq7hyTNQdxHAYfebxkVl7PZrZR383IrnyotXVKFFc44qinv94T50uR4yUNYPQ8Gu0TgoGQQjBjk1Lrxot2xpgPQAy8vx+EOJgpg/yNZnYkmJZMxjDkTGVrwvXtOXZzmy2jti7PniET9hUBCU7aNHnoJJLzIf+Vb1CIRP0ypJl8GYCZx6HIYwOQH6EtcaeUqq3r+WXWv74ijIE7OApotmutM9buTvdOLdZddBzFPIjykc6cXO+W4E0kl6u9/OHtaZ3Nynh0ejBRafRWAVw2yU3T9SgQyICsmYWJCThkj14WqCJr2b7jfGlg9MkQOUG6/3f4xz2R3SgyUD8KiGsq/vdBE53zh0YA9gppLoum6AY+z61G1NhVGlrtps90txZBehuARUUz2dJC0pBMRy8XFwXMewDSIe6ATg25pHZsxHfhcalBpJncBl8pORs7oQl+GKBVxlnV4jm1pCzLU=
135 149433e68974eb5c63ccb03f794d8b57339a80c4 0 iQIcBAABAgAGBQJX8AfCAAoJELnJ3IJKpb3VnNAP/3umS8tohcZTr4m6DJm9u4XGr2m3FWQmjTEfimGpsOuBC8oCgsq0eAlORYcV68zDax+vQHQu3pqfPXaX+y4ZFDuz0ForNRiPJn+Q+tj1+NrOT1e8h4gH0nSK4rDxEGaa6x01fyC/xQMqN6iNfzbLLB7+WadZlyBRbHaUeZFDlPxPDf1rjDpu1vqwtOrVzSxMasRGEceiUegwsFdFMAefCq0ya/pKe9oV+GgGfR4qNrP7BfpOBcN/Po/ctkFCbLOhHbu6M7HpBSiD57BUy5lfhQQtSjzCKEVTyrWEH0ApjjXKuJzLSyq7xsHKQSOPMgGQprGehyzdCETlZOdauGrC0t9vBCr7kXEhXtycqxBC03vknA2eNeV610VX+HgO9VpCVZWHtENiArhALCcpoEsJvT29xCBYpSii/wnTpYJFT9yW8tjQCxH0zrmEZJvO1/nMINEBQFScB/nzUELn9asnghNf6vMpSGy0fSM27j87VAXCzJ5lqa6WCL/RrKgvYflow/m5AzUfMQhpqpH1vmh4ba1zZ4123lgnW4pNZDV9kmwXrEagGbWe1rnmsMzHugsECiYQyIngjWzHfpHgyEr49Uc5bMM1MlTypeHYYL4kV1jJ8Ou0SC4aV+49p8Onmb2NlVY7JKV7hqDCuZPI164YXMxhPNst4XK0/ENhoOE+8iB6
136 438173c415874f6ac653efc1099dec9c9150e90f 0 iQIVAwUAWAZ3okemf/qjRqrOAQj89xAAw/6QZ07yqvH+aZHeGQfgJ/X1Nze/hSMzkqbwGkuUOWD5ztN8+c39EXCn8JlqyLUPD7uGzhTV0299k5fGRihLIseXr0hy/cvVW16uqfeKJ/4/qL9zLS3rwSAgWbaHd1s6UQZVfGCb8V6oC1dkJxfrE9h6kugBqV97wStIRxmCpMDjsFv/zdNwsv6eEdxbiMilLn2/IbWXFOVKJzzv9iEY5Pu5McFR+nnrMyUZQhyGtVPLSkoEPsOysorfCZaVLJ6MnVaJunp9XEv94Pqx9+k+shsQvJHWkc0Nnb6uDHZYkLR5v2AbFsbJ9jDHsdr9A7qeQTiZay7PGI0uPoIrkmLya3cYbU1ADhwloAeQ/3gZLaJaKEjrXcFSsz7AZ9yq74rTwiPulF8uqZxJUodk2m/zy83HBrxxp/vgxWJ5JP2WXPtB8qKY+05umAt4rQS+fd2H/xOu2V2d5Mq1WmgknLBLC0ItaNaf91sSHtgEy22GtcvWQE7S6VWU1PoSYmOLITdJKAsmb7Eq+yKDW9nt0lOpUu2wUhBGctlgXgcWOmJP6gL6edIg66czAkVBp/fpKNl8Z/A0hhpuH7nW7GW/mzLVQnc+JW4wqUVkwlur3NRfvSt5ZyTY/SaR++nRf62h7PHIjU+f0kWQRdCcEQ0X38b8iAjeXcsOW8NCOPpm0zcz3i8=
137 eab27446995210c334c3d06f1a659e3b9b5da769 0 iQIcBAABCAAGBQJYGNsXAAoJELnJ3IJKpb3Vf30QAK/dq5vEHEkufLGiYxxkvIyiRaswS+8jamXeHMQrdK8CuokcQYhEv9xiUI6FMIoX4Zc0xfoFCBc+X4qE+Ed9SFYWgQkDs/roJq1C1mTYA+KANMqJkDt00QZq536snFQvjCXAA5fwR/DpgGOOuGMRfvbjh7x8mPyVoPr4HDQCGFXnTYdn193HpTOqUsipzIV5OJqQ9p0sfJjwKP4ZfD0tqqdjTkNwMyJuwuRaReXFvGGCjH2PqkZE/FwQG0NJJjt0xaMUmv5U5tXHC9tEVobVV/qEslqfbH2v1YPF5d8Jmdn7F76FU5J0nTd+3rIVjYGYSt01cR6wtGnzvr/7kw9kbChw4wYhXxnmIALSd48FpA1qWjlPcAdHfUUwObxOxfqmlnBGtAQFK+p5VXCsxDZEIT9MSxscfCjyDQZpkY5S5B3PFIRg6V9bdl5a4rEt27aucuKTHj1Ok2vip4WfaIKk28YMjjzuOQRbr6Pp7mJcCC1/ERHUJdLsaQP+dy18z6XbDjX3O2JDRNYbCBexQyV/Kfrt5EOS5fXiByQUHv+PyR+9Ju6QWkkcFBfgsxq25kFl+eos4V9lxPOY5jDpw2BWu9TyHtTWkjL/YxDUGwUO9WA/WzrcT4skr9FYrFV/oEgi8MkwydC0cFICDfd6tr9upqkkr1W025Im1UBXXJ89bTVj
138 b3b1ae98f6a0e14c1e1ba806a6c18e193b6dae5c 0 iQIVAwUAWECEaEemf/qjRqrOAQjuZw/+IWJKnKOsaUMcB9ly3Fo/eskqDL6A0j69IXTJDeBDGMoyGbQU/gZyX2yc6Sw3EhwTSCXu5vKpzg3a6e8MNrC1iHqli4wJ/jPY7XtmiqTYDixdsBLNk46VfOi73ooFe08wVDSNB65xpZsrtPDSioNmQ2kSJwSHb71UlauS4xGkM74vuDpWvX5OZRSfBqMh6NjG5RwBBnS8mzA0SW2dCI2jSc5SCGIzIZpzM0xUN21xzq0YQbrk9qEsmi7ks0eowdhUjeET2wSWwhOK4jS4IfMyRO7KueUB05yHs4mChj9kNFNWtSzXKwKBQbZzwO/1Y7IJjU+AsbWkiUu+6ipqBPQWzS28gCwGOrv5BcIJS+tzsvLUKWgcixyfy5UAqJ32gCdzKC54FUpT2zL6Ad0vXGM6WkpZA7yworN4RCFPexXbi0x2GSTLG8PyIoZ4Iwgtj5NtsEDHrz0380FxgnKUIC3ny2SVuPlyD+9wepD3QYcxdRk1BIzcFT9ZxNlgil3IXRVPwVejvQ/zr6/ILdhBnZ8ojjvVCy3b86B1OhZj/ZByYo5QaykVqWl0V9vJOZlZfvOpm2HiDhm/2uNrVWxG4O6EwhnekAdaJYmeLq1YbhIfGA6KVOaB9Yi5A5BxK9QGXBZ6sLj+dIUD3QR47r9yAqVQE8Gr/Oh6oQXBQqOQv7WzBBs=
139 e69874dc1f4e142746ff3df91e678a09c6fc208c 0 iQIVAwUAWG0oGUemf/qjRqrOAQh3uhAAu4TN7jkkgH7Hxn8S1cB6Ru0x8MQutzzzpjShhsE/G7nzCxsZ5eWdJ5ItwXmKhunb7T0og54CGcTxfmdPtCI7AhhHh9/TM2Hv1EBcsXCiwjG8E+P6X1UJkijgTGjNWuCvEDOsQAvgywslECBNnXp2QA5I5UdCMeqDdTAb8ujvbD8I4pxUx1xXKY18DgQGJh13mRlfkEVnPxUi2n8emnwPLjbVVkVISkMFUkaOl8a4fOeZC1xzDpoQocoH2Q8DYa9RCPPSHHSYPNMWGCdNGN2CoAurcHWWvc7jNU28/tBhTazfFv8LYh63lLQ8SIIPZHJAOxo45ufMspzUfNgoD6y3vlF5aW7DpdxwYHnueh7S1Fxgtd9cOnxmxQsgiF4LK0a+VXOi/Tli/fivZHDRCGHJvJgsMQm7pzkay9sGohes6jAnsOv2E8DwFC71FO/btrAp07IRFxH9WhUeMsXLMS9oBlubMxMM58M+xzSKApK6bz2MkLsx9cewmfmfbJnRIK1xDv+J+77pWWNGlxCCjl1WU+aA3M7G8HzwAqjL75ASOWtBrJlFXvlLgzobwwetg6cm44Rv1P39i3rDySZvi4BDlOQHWFupgMKiXnZ1PeL7eBDs/aawrE0V2ysNkf9An+XJZkos2JSLPWcoNigfXNUu5c1AqsERvHA246XJzqvCEK8=
140 a1dd2c0c479e0550040542e392e87bc91262517e 0 iQIcBAABCAAGBQJYgBBEAAoJELnJ3IJKpb3VJosP/10rr3onsVbL8E+ri1Q0TJc8uhqIsBVyD/vS1MJtbxRaAdIV92o13YOent0o5ASFF/0yzVKlOWPQRjsYYbYY967k1TruDaWxJAnpeFgMni2Afl/qyWrW4AY2xegZNZCfMmwJA+uSJDdAn+jPV40XbuCZ+OgyZo5S05dfclHFxdc8rPKeUsJtvs5PMmCL3iQl1sulp1ASjuhRtFWZgSFsC6rb2Y7evD66ikL93+0/BPEB4SVX17vB/XEzdmh4ntyt4+d1XAznLHS33IU8UHbTkUmLy+82WnNH7HBB2V7gO47m/HhvaYjEfeW0bqMzN3aOUf30Vy/wB4HHsvkBGDgL5PYVHRRovGcAuCmnYbOkawqbRewW5oDs7UT3HbShNpxCxfsYpo7deHr11zWA3ooWCSlIRRREU4BfwVmn+Ds1hT5HM28Q6zr6GQZegDUbiT9i1zU0EpyfTpH7gc6NTVQrO1z1p70NBnQMqXcHjWJwjSwLER2Qify9MjrGXTL6ofD5zVZKobeRmq94mf3lDq26H7coraM9X5h9xa49VgAcRHzn/WQ6wcFCKDQr6FT67hTUOlF7Jriv8/5h/ziSZr10fCObKeKWN8Skur29VIAHHY4NuUqbM55WohD+jZ2O3d4tze1eWm5MDgWD8RlrfYhQ+cLOwH65AOtts0LNZwlvJuC7
141 e1526da1e6d84e03146151c9b6e6950fe9a83d7d 0 iQIVAwUAWJIKpUemf/qjRqrOAQjjThAAvl1K/GZBrkanwEPXomewHkWKTEy1s5d5oWmPPGrSb9G4LM/3/abSbQ7fnzkS6IWi4Ao0za68w/MohaVGKoMAslRbelaTqlus0wE3zxb2yQ/j2NeZzFnFEuR/vbUug7uzH+onko2jXrt7VcPNXLOa1/g5CWwaf/YPfJO4zv+atlzBHvuFcQCkdbcOJkccCnBUoR7y0PJoBJX6K7wJQ+hWLdcY4nVaxkGPRmsZJo9qogXZMw1CwJVjofxRI0S/5vMtEqh8srYsg7qlTNv8eYnwdpfuunn2mI7Khx10Tz85PZDnr3SGRiFvdfmT30pI7jL3bhOHALkaoy2VevteJjIyMxANTvjIUBNQUi+7Kj3VIKmkL9NAMAQBbshiQL1wTrXdqOeC8Nm1BfCQEox2yiC6pDFbXVbguwJZ5VKFizTTK6f6BdNYKTVx8lNEdjAsWH8ojgGWwGXBbTkClULHezJ/sODaZzK/+M/IzbGmlF27jJYpdJX8fUoybZNw9lXwIfQQWHmQHEOJYCljD9G1tvYY70+xAFexgBX5Ib48UK4DRITVNecyQZL7bLTzGcM0TAE0EtD4M42wawsYP3Cva9UxShFLICQdPoa4Wmfs6uLbXG1DDLol/j7b6bL+6W8E3AlW+aAPc8GZm51/w3VlYqqciWTc12OJpu8FiD0pZ/iBw+E=
142 25703b624d27e3917d978af56d6ad59331e0464a 0 iQIcBAABCAAGBQJYuMSwAAoJELnJ3IJKpb3VL3YP/iKWY3+K3cLUBD3Ne5MhfS7N3t6rlk9YD4kmU8JnVeV1oAfg36VCylpbJLBnmQdvC8AfBJOkXi6DHp9RKXXmlsOeoppdWYGX5RMOzuwuGPBii6cA6KFd+WBpBJlRtklz61qGCAtv4q8V1mga0yucihghzt4lD/PPz7mk6yUBL8s3rK+bIHGdEhnK2dfnn/U2G0K/vGgsYZESORISuBclCrrc7M3/v1D+FBMCEYX9FXYU4PhYkKXK1mSqzCB7oENu/WP4ijl1nRnEIyzBV9pKO4ylnXTpbZAr/e4PofzjzPXb0zume1191C3wvgJ4eDautGide/Pxls5s6fJRaIowf5XVYQ5srX/NC9N3K77Hy01t5u8nwcyAhjmajZYuB9j37nmiwFawqS/y2eHovrUjkGdelV8OM7/iAexPRC8i2NcGk0m6XuzWy1Dxr8453VD8Hh3tTeafd6v5uHXSLjwogpu/th5rk/i9/5GBzc1MyJgRTwBhVHi/yFxfyakrSU7HT2cwX/Lb5KgWccogqfvrFYQABIBanxLIeZxTv8OIjC75EYknbxYtvvgb35ZdJytwrTHSZN0S7Ua2dHx2KUnHB6thbLu/v9fYrCgFF76DK4Ogd22Cbvv6NqRoglG26d0bqdwz/l1n3o416YjupteW8LMxHzuwiJy69WP1yi10eNDq
143 ed5b25874d998ababb181a939dd37a16ea644435 0 iQIcBAABCAAGBQJY4r/gAAoJELnJ3IJKpb3VtwYP/RuTmo252ExXQk/n5zGJZvZQnI86vO1+yGuyOlGFFBwf1v3sOLW1HD7fxF6/GdT8CSQrRqtC17Ya3qtayfY/0AEiSuH2bklBXSB1H5wPyguS5iLqyilCJY0SkHYBIDhJ0xftuIjsa805wdMm3OdclnTOkYT+K1WL8Ylbx/Ni2Lsx1rPpYdcQ/HlTkr5ca1ZbNOOSxSNI4+ilGlKbdSYeEsmqB2sDEiSaDEoxGGoSgzAE9+5Q2FfCGXV0bq4vfmEPoT9lhB4kANE+gcFUvsJTu8Z7EdF8y3CJLiy8+KHO/VLKTGJ1pMperbig9nAXl1AOt+izBFGJGTolbR/ShkkDWB/QVcqIF5CysAWMgnHAx7HjnMDBOANcKzhMMfOi3GUvOCNNIqIIoJHKRHaRk0YbMdt7z2mKpTrRQ9Zadz764jXOqqrPgQFM3jkBHzAvZz9yShrHGh42Y+iReAF9pAN0xPjyZ5Y2qp+DSl0bIQqrAet6Zd3QuoJtXczAeRrAvgn7O9MyLnMyE5s7xxI7o8M7zfWtChLF8ytJUzmRo3iVJNOJH+Zls9N30PGw6vubQAnB5ieaVTv8lnNpcAnEQD/i0tmRSxzyyqoOQbnItIPKFOsaYW+eX9sgJmObU3yDc5k3cs+yAFD2CM/uiUsLcTKyxPNcP1JHBYpwhOjIGczSHVS1
144 77eaf9539499a1b8be259ffe7ada787d07857f80 0 iQIcBAABCAAGBQJY9iz9AAoJELnJ3IJKpb3VYqEQAJNkB09sXgYRLA4kGQv3p4v02q9WZ1lHkAhOlNwIh7Zp+pGvT33nHZffByA0v+xtJNV9TNMIFFjkCg3jl5Z42CCe33ZlezGBAzXU+70QPvOR0ojlYk+FdMfeSyCBzWYokIpImwNmwNGKVrUAfywdikCsUC2aRjKg4Mn7GnqWl9WrBG6JEOOUamdx8qV2f6g/utRiqj4YQ86P0y4K3yakwc1LMM+vRfrwvsf1+DZ9t7QRENNKQ6gRnUdfryqSFIWn1VkBVMwIN5W3yIrTMfgH1wAZxbnYHrN5qDK7mcbP7bOA3XWJuEC+3QRnheRFd/21O1dMFuYjaKApXPHRlTGRMOaz2eydbfBopUS1BtfYEh4/B/1yJb9/HDw6LiAjea7ACHiaNec83z643005AvtUuWhjX3QTPkYlQzWaosanGy1IOGtXCPp1L0A+9gUpqyqycfPjQCbST5KRzYSZn3Ngmed5Bb6jsgvg5e5y0En/SQgK/pTKnxemAmFFVvIIrrWGRKj0AD0IFEHEepmwprPRs97EZPoBPFAGmVRuASBeIhFQxSDIXV0ebHJoUmz5w1rTy7U3Eq0ff6nW14kjWOUplatXz5LpWJ3VkZKrI+4gelto5xpTI6gJl2nmezhXQIlInk17cPuxmiHjeMdlOHZRh/zICLhQNL5fGne0ZL+qlrXY
145 616e788321cc4ae9975b7f0c54c849f36d82182b 0 iQIVAwUAWPZuQkemf/qjRqrOAQjFlg/9HXEegJMv8FP+uILPoaiA2UCiqWUL2MVJ0K1cvafkwUq+Iwir8sTe4VJ1v6V+ZRiOuzs4HMnoGJrIks4vHRbAxJ3J6xCfvrsbHdl59grv54vuoL5FlZvkdIe8L7/ovKrUmNwPWZX2v+ffFPrsEBeVlVrXpp4wOPhDxCKTmjYVOp87YqXfJsud7EQFPqpV4jX8DEDtJWT95OE9x0srBg0HpSE95d/BM4TuXTVNI8fV41YEqearKeFIhLxu37HxUmGmkAALCi8RJmm4hVpUHgk3tAVzImI8DglUqnC6VEfaYb+PKzIqHelhb66JO/48qN2S/JXihpNHAVUBysBT0b1xEnc6eNsF2fQEB+bEcf8IGj7/ILee1cmwPtoK2OXR2+xWWWjlu2keVcKeI0yAajJw/dP21yvVzVq0ypst7iD+EGHLJWJSmZscbyH5ICr+TJ5yQvIGZJtfsAdAUUTM2xpqSDW4mT5kYyg75URbQ3AKI7lOhJBmkkGQErE4zIQMkaAqcWziVF20xiRWfJoFxT2fK5weaRGIjELH49NLlyvZxYc4LlRo9lIdC7l/6lYDdTx15VuEj1zx/91y/d7OtPm+KCA2Bbdqth8m/fMD8trfQ6jSG/wgsvjZ+S0eoXa92qIR/igsCI+6EwP7duuzL2iyKOPXupQVNN10PKI7EuKv4Lk=
146 bb96d4a497432722623ae60d9bc734a1e360179e 0 iQIVAwUAWQkDfEemf/qjRqrOAQierQ/7BuQ0IW0T0cglgqIgkLuYLx2VXJCTEtRNCWmrH2UMK7fAdpAhN0xf+xedv56zYHrlyHpbskDbWvsKIHJdw/4bQitXaIFTyuMMtSR5vXy4Nly34O/Xs2uGb3Y5qwdubeK2nZr4lSPgiRHb/zI/B1Oy8GX830ljmIOY7B0nUWy4DrXcy/M41SnAMLFyD1K6T/8tkv7M4Fai7dQoF9EmIIkShVPktI3lqp3m7infZ4XnJqcqUB0NSfQZwZaUaoalOdCvEIe3ab5ewgl/CuvlDI4oqMQGjXCtNLbtiZSwo6hvudO6ewT+Zn/VdabkZyRtXUxu56ajjd6h22nU1+vknqDzo5tzw6oh1Ubzf8tzyv3Gmmr+tlOjzfK7tXXnT3vR9aEGli0qri0DzOpsDSY0pDC7EsS4LINPoNdsGQrGQdoX++AISROlNjvyuo4Vrp26tPHCSupkKOXuZaiozycAa2Q+aI1EvkPZSXe8SAXKDVtFn05ZB58YVkFzZKAYAxkE/ven59zb4aIbOgR12tZbJoZZsVHrlf/TcDtiXVfIMEMsCtJ1tPgD1rAsEURWRxK3mJ0Ev6KTHgNz4PeBhq1gIP/Y665aX2+cCjc4+vApPUienh5aOr1bQFpIDyYZsafHGMUFNCwRh8bX98oTGa0hjqz4ypwXE4Wztjdc+48UiHARp/Y=
147 c850f0ed54c1d42f9aa079ad528f8127e5775217 0 iQIVAwUAWTQINUemf/qjRqrOAQjZDw//b4pEgHYfWRVDEmLZtevysfhlJzbSyLAnWgNnRUVdSwl4WRF1r6ds/q7N4Ege5wQHjOpRtx4jC3y/riMbrLUlaeUXzCdqKgm4JcINS1nXy3IfkeDdUKyOR9upjaVhIEzCMRpyzabdYuflh5CoxayO7GFk2iZ8c1oAl4QzuLSspn9w+znqDg0HrMDbRNijStSulNjkqutih9UqT/PYizhE1UjL0NSnpYyD1vDljsHModJc2dhSzuZ1c4VFZHkienk+CNyeLtVKg8aC+Ej/Ppwq6FlE461T/RxOEzf+WFAc9F4iJibSN2kAFB4ySJ43y+OKkvzAwc5XbUx0y6OlWn2Ph+5T54sIwqasG3DjXyVrwVtAvCrcWUmOyS0RfkKoDVepMPIhFXyrhGqUYSq25Gt6tHVtIrlcWARIGGWlsE+PSHi87qcnSjs4xUzZwVvJWz4fuM1AUG/GTpyt4w3kB85XQikIINkmSTmsM/2/ar75T6jBL3kqOCGOL3n7bVZsGXllhkkQ7e/jqPPWnNXm8scDYdT3WENNu34zZp5ZmqdTXPAIIaqGswnU04KfUSEoYtOMri3E2VvrgMkiINm9BOKpgeTsMb3dkYRw2ZY3UAH9QfdX9BZywk6v3kkE5ghLWMUoQ4sqRlTo7mJKA8+EodjmIGRV/kAv1f7pigg6pIWWEyo=
148 26c49ed51a698ec016d2b4c6b44ca3c3f73cc788 0 iQIcBAABCAAGBQJZXQSmAAoJELnJ3IJKpb3VmTwP/jsxFTlKzWU8EnEhEViiP2YREOD3AXU7685DIMnoyVAsZgxrt0CG6Y92b5sINCeh5B0ORPQ7+xi2Xmz6tX8EeAR+/Dpdx6K623yExf8kq91zgfMvYkatNMu6ZVfywibYZAASq02oKoX7WqSPcQG/OwgtdFiGacCrG5iMH7wRv0N9hPc6D5vAV8/H/Inq8twpSG5SGDpCdKj7KPZiY8DFu/3OXatJtl+byg8zWT4FCYKkBPvmZp8/sRhDKBgwr3RvF1p84uuw/QxXjt+DmGxgtjvObjHr+shCMcKBAuZ4RtZmyEo/0L81uaTElHu1ejsEzsEKxs+8YifnH070PTFoV4VXQyXfTc8AyaqHE6rzX96a/HjQiJnL4dFeTZIrUhGK3AkObFLWJxVTo4J8+oliBQQldIh1H2yb1ZMfwapLnUGIqSieHDGZ6K2ccNJK8Q7IRhTCvYc0cjsnbwTpV4cebGqf3WXZhX0cZN+TNfhh/HGRzR1EeAAavjJqpDam1OBA5TmtJd/lHLIRVR5jyG+r4SK0XDlJ8uSfah7MpVH6aQ6UrycPyFusGXQlIqJ1DYQaBrI/SRJfIvRUmvVz9WgKLe83oC3Ui3aWR9rNjMb2InuQuXjeZaeaYfBAUYACcGfCZpZZvoEkMHCqtTng1rbbFnKMFk5kVy9YWuVgK9Iuh0O5
149 857876ebaed4e315f63157bd157d6ce553c7ab73 0 iQIVAwUAWW9XW0emf/qjRqrOAQhI7A//cKXIM4l8vrWWsc1Os4knXm/2UaexmAwV70TpviKL9RxCy5zBP/EapCaGRCH8uNPOQTkWGR9Aucm3CtxhggCMzULQxxeH86mEpWf1xILWLySPXW/t2f+2zxrwLSAxxqFJtuYv83Pe8CnS3y4BlgHnBKYXH8XXuW8uvfc0lHKblhrspGBIAinx7vPLoGQcpYrn9USWUKq5d9FaCLQCDT9501FHKf5dlYQajevCUDnewtn5ohelOXjTJQClW3aygv/z+98Kq7ZhayeIiZu+SeP+Ay7lZPklXcy6eyRiQtGCa1yesb9v53jKtgxWewV4o6zyuUesdknZ/IBeNUgw8LepqTIJo6/ckyvBOsSQcda81DuYNUChZLYTSXYPHEUmYiz6CvNoLEgHF/oO5p6CZXOPWbmLWrAFd+0+1Tuq8BSh+PSdEREM3ZLOikkXoVzTKBgu4zpMvmBnjliBg7WhixkcG0v5WunlV9/oHAIpsKdL7AatU+oCPulp+xDpTKzRazEemYiWG9zYKzwSMk9Nc17e2tk+EtFSPsPo4iVCXMgdIZSTNBvynKEFXZQVPWVa+bYRdAmbSY8awiX7exxYL10UcpnN2q/AH/F7rQzAmo8eZ3OtD0+3Nk3JRx0/CMyzKLPYDpdUgwmaPb+s2Bsy7f7TfmA7jTa69YqB1/zVwlWULr0=
150 5544af8622863796a0027566f6b646e10d522c4c 0 iQIcBAABCAAGBQJZjJflAAoJELnJ3IJKpb3V19kQALCvTdPrpce5+rBNbFtLGNFxTMDol1dUy87EUAWiArnfOzW3rKBdYxvxDL23BpgUfjRm1fAXdayVvlj6VC6Dyb195OLmc/I9z7SjFxsfmxWilF6U0GIa3W0x37i05EjfcccrBIuSLrvR6AWyJhjLOBCcyAqD/HcEom00/L+o2ry9CDQNLEeVuNewJiupcUqsTIG2yS26lWbtLZuoqS2T4Nlg8wjJhiSXlsZSuAF55iUJKlTQP6KyWReiaYuEVfm/Bybp0A2bFcZCYpWPwnwKBdSCHhIalH8PO57gh9J7xJVnyyBg5PU6n4l6PrGOmKhNiU/xyNe36tEAdMW6svcVvt8hiY0dnwWqR6wgnFFDu0lnTMUcjsy5M5FBY6wSw9Fph8zcNRzYyaeUbasNonPvrIrk21nT3ET3RzVR3ri2nJDVF+0GlpogGfk9k7wY3808091BMsyV3448ZPKQeWiK4Yy4UOUwbKV7YAsS5MdDnC1uKjl4GwLn9UCY/+Q2/2R0CBZ13Tox+Nbo6hBRuRGtFIbLK9j7IIUhhZrIZFSh8cDNkC+UMaS52L5z7ECvoYIUpw+MJ7NkMLHIVGZ2Nxn0C7IbGO6uHyR7D6bdNpxilU+WZStHk0ppZItRTm/htar4jifnaCI8F8OQNYmZ3cQhxx6qV2Tyow8arvWb1NYXrocG
151 943c91326b23954e6e1c6960d0239511f9530258 0 iQIcBAABCAAGBQJZjKKZAAoJELnJ3IJKpb3VGQkP/0iF6Khef0lBaRhbSAPwa7RUBb3iaBeuwmeic/hUjMoU1E5NR36bDDaF3u2di5mIYPBONFIeCPf9/DKyFkidueX1UnlAQa3mjh/QfKTb4/yO2Nrk7eH+QtrYxVUUYYjwgp4rS0Nd/++I1IUOor54vqJzJ7ZnM5O1RsE7VI1esAC/BTlUuO354bbm08B0owsZBwVvcVvpV4zeTvq5qyPxBJ3M0kw83Pgwh3JZB9IYhOabhSUBcA2fIPHgYGYnJVC+bLOeMWI1HJkJeoYfClNUiQUjAmi0cdTC733eQnHkDw7xyyFi+zkKu6JmU1opxkHSuj4Hrjul7Gtw3vVWWUPufz3AK7oymNp2Xr5y1HQLDtNJP3jicTTG1ae2TdX5Az3ze0I8VGbpR81/6ShAvY2cSKttV3I+2k4epxTTTf0xaZS1eUdnFOox6acElG2reNzx7EYYxpHj17K8N2qNzyY78iPgbJ+L39PBFoiGXMZJqWCxxIHoK1MxlXa8WwSnsXAU768dJvEn2N1x3fl+aeaWzeM4/5Qd83YjFuCeycuRnIo3rejSX3rWFAwZE0qQHKI5YWdKDLxIfdHTjdfMP7np+zLcHt0DV/dHmj2hKQgU0OK04fx7BrmdS1tw67Y9bL3H3TDohn7khU1FrqrKVuqSLbLsxnNyWRbZQF+DCoYrHlIW
152 3fee7f7d2da04226914c2258cc2884dc27384fd7 0 iQIcBAABCAAGBQJZjOJfAAoJELnJ3IJKpb3VvikP/iGjfahwkl2BDZYGq6Ia64a0bhEh0iltoWTCCDKMbHuuO+7h07fHpBl/XX5XPnS7imBUVWLOARhVL7aDPb0tu5NZzMKN57XUC/0FWFyf7lXXAVaOapR4kP8RtQvnoxfNSLRgiZQL88KIRBgFc8pbl8hLA6UbcHPsOk4dXKvmfPfHBHnzdUEDcSXDdyOBhuyOSzRs8egXVi3WeX6OaXG3twkw/uCF3pgOMOSyWVDwD+KvK+IBmSxCTKXzsb+pqpc7pPOFWhSXjpbuYUcI5Qy7mpd0bFL3qNqgvUNq2gX5mT6zH/TsVD10oSUjYYqKMO+gi34OgTVWRRoQfWBwrQwxsC/MxH6ZeOetl2YkS13OxdmYpNAFNQ8ye0vZigJRA+wHoC9dn0h8c5X4VJt/dufHeXc887EGJpLg6GDXi5Emr2ydAUhBJKlpi2yss22AmiQ4G9NE1hAjxqhPvkgBK/hpbr3FurV4hjTG6XKsF8I0WdbYz2CW/FEbp1+4T49ChhrwW0orZdEQX7IEjXr45Hs5sTInT90Hy2XG3Kovi0uVMt15cKsSEYDoFHkR4NgCZX2Y+qS5ryH8yqor3xtel3KsBIy6Ywn8pAo2f8flW3nro/O6x+0NKGV+ZZ0uo/FctuQLBrQVs025T1ai/6MbscQXvFVZVPKrUzlQaNPf/IwNOaRa
153 920977f72c7b70acfdaf56ab35360584d7845827 0 iQIcBAABCAAGBQJZv+wSAAoJELnJ3IJKpb3VH3kQAJp3OkV6qOPXBnlOSSodbVZveEQ5dGJfG9hk+VokcK6MFnieAFouROoGNlQXQtzj6cMqK+LGCP/NeJEG323gAxpxMzc32g7TqbVEhKNqNK8HvQSt04aCVZXtBmP0cPzc348UPP1X1iPTkyZxaJ0kHulaHVptwGbFZZyhwGefauU4eMafJsYqwgiGmvDpjUFu6P8YJXliYeTo1HX2lNChS1xmvJbop1YHfBYACsi8Eron0vMuhaQ+TKYq8Zd762u2roRYnaQ23ubEaVsjGDUYxXXVmit2gdaEKk+6Rq2I+EgcI5XvFzK8gvoP7siz6FL1jVf715k9/UYoWj9KDNUm8cweiyiUpjHQt0S+Ro9ryKvQy6tQVunRZqBN/kZWVth/FlMbUENbxVyXZcXv+m7OLvk+vyK7UZ7yT+OBzgRr0PyUuafzSVW3e+RZJtGxYGM5ew2bWQ8L6wuBucRYZOSnXXtCw7cKEMlK3BTjfAfpHUdIZIG492R9d6aOECUK/MpNvCiXXaZoh5Kj4a0dARiuWFCZxWwt3bmOg13oQ841zLdzOi/YZe15vCm8OB4Ffg6CkmPKhZhnMwVbFmlaBcoaeMzzpMuog91J1M2zgEUBTYwe/HKiNr/0iilJMPFRpZ+zEb2GvVoc8FMttXi8aomlXf/6LHCC9ndexGC29jIzl41+
154 2f427b57bf9019c6dc3750baa539dc22c1be50f6 0 iQJEBAABCAAuFiEEK8zhT1xnJaouqK63ucncgkqlvdUFAlnQtVIQHHJhZkBkdXJpbjQyLmNvbQAKCRC5ydyCSqW91TTkD/409sWTM9vUH2qkqNTb1IXyGpqzb9UGOSVDioz6rvgZEBgh9D1oBTWnfBXW8sOWR0A7iCL6qZh2Yi7g7p0mKGXh9LZViLtSwwMSXpNiGBO7RVPW+NQ6DOY5Rhr0i08UBiVEkZXHeIVCd2Bd6mhAiUsm5iUh9Jne10wO8cIxeAUnsx4DBdHBMWLg6AZKWllSgN+r9H+7wnOhDbkvj1Cu6+ugKpEs+xvbTh47OTyM+w9tC1aoZD4HhfR5w5O16FC+TIoE6wmWut6e2pxIMHDB3H08Dky6gNjucY/ntJXvOZW5kYrQA3LHKks8ebpjsIXesOAvReOAsDz0drwzbWZan9Cbj8yWoYz/HCgHCnX3WqKKORSP5pvdrsqYua9DXtJwBeSWY4vbIM2kECAiyw1SrOGudxlyWBlW1f1jhGR2DsBlwoieeAvUVoaNwO7pYirwxR4nFPdLDRCQ4hLK/GFiuyr+lGoc1WUzVRNBYD3udcOZAbqq4JhWLf0Gvd5xP0rn1cJNhHMvrPH4Ki4a5KeeK6gQI7GT9/+PPQzTdpxXj6KwofktJtVNqm5sJmJ+wMIddnobFlNNLZ/F7OMONWajuVhh+vSOV34YLdhqzAR5XItkeJL6qyAJjNH5PjsnhT7nMqjgwriPz6xxYOLJWgtK5ZqcSCx4gWy9KJVVja8wJ7rRUg==
155 1e2454b60e5936f5e77498cab2648db469504487 0 iQJVBAABCAA/FiEEOoFVFj0OIKUw/LeGR6Z/+qNGqs4FAlnqRBUhHGtidWxsb2NrK21lcmN1cmlhbEByaW5nd29ybGQub3JnAAoJEEemf/qjRqrOAQQP/28EzmTKFL/RxmNYePdzqrmcdJ2tn+s7OYmGdtneN2sESZ4MK0xb5Q8Mkm+41aXS52zzJdz9ynwdun8DG4wZ3sE5MOG+GgK6K0ecOv1XTKS3a2DkUM0fl5hlcXN7Zz7m7m5M6sy6vSxHP7kTyzQWt//z175ZLSQEu1a0nm/BLH+HP9e8DfnJ2Nfcnwp32kV0Nj1xTqjRV1Yo/oCnXfVvsxEJU+CDUGBiLc29ZcoWVbTw9c1VcxihJ6k0pK711KZ+bedSk7yc1OudiJF7idjB0bLQY6ESHNNNjK8uLppok0RsyuhvvDTAoTsl1rMKGmXMM0Ela3/5oxZ/5lUZB73vEJhzEi48ULvstpq82EO39KylkEfQxwMBPhnBIHQaGRkl7QPLXGOYUDMY6gT08Sm3e8/NqEJc/AgckXehpH3gSS2Ji2xg7/E8H5plGsswFidw//oYTTwm0j0halWpB521TD2wmjkjRHXzk1mj0EoFQUMfwHTIZU3E8flUBasD3mZ9XqZJPr66RV7QCrXayH75B/i0CyNqd/Hv5Tkf2TlC3EkEBZwZyAjqw7EyL1LuS936sc7fWuMFsH5k/fwjVwzIc1LmP+nmk2Dd9hIC66vec4w1QZeeAXuDKgOJjvQzj2n+uYRuObl4kKcxvoXqgQN0glGuB1IW7lPllGHR1kplhoub
156 0ccb43d4cf01d013ae05917ec4f305509f851b2d 0 iQJVBAABCAA/FiEEOoFVFj0OIKUw/LeGR6Z/+qNGqs4FAln6Qp8hHGtidWxsb2NrK21lcmN1cmlhbEByaW5nd29ybGQub3JnAAoJEEemf/qjRqrOJ8MP/2ufm/dbrFoE0F8hewhztG1vS4stus13lZ9lmM9kza8OKeOgY/MDH8GaV3O8GnRiCNUFsVD8JEIexE31c84H2Ie7VQO0GQSUHSyMCRrbED6IvfrWp6EZ6RDNPk4LHBfxCuPmuVHGRoGZtsLKJBPIxIHJKWMlEJlj9BZuUxZp/8kurQ6CXwblVbFzXdOaZQlioOBH27Bk3S0+gXfJ+wA2ed5XOQvT9jwjqC8y/1t8obaoPTpzyAvb9NArG+9RT9vfNN42aWISZNwg6RW5oLJISqoGrAes6EoG7dZfOC0UoKMVYXoNvZzJvVlMHyjugIoid+WI+V8y9bPrRTfbPCmocCzEzCOLEHQta8roNijB0bKcq8hmQPHcMyXlj1Srnqlco49jbhftgJoPTwzb10wQyU0VFvaZDPW/EQUT3M/k4j3sVESjANdyG1iu6EDV080LK1LgAdhjpKMBbf6mcgAe06/07XFMbKNrZMEislOcVFp98BSKjdioUNpy91rCeSmkEsASJ3yMArRnSkuVgpyrtJaGWl79VUcmOwKhUOA/8MXMz/Oqu7hvve/sgv71xlnim460nnLw6YHPyeeCsz6KSoUK3knFXAbTk/0jvU1ixUZbI122aMzX04UgPGeTukCOUw49XfaOdN+x0YXlkl4PsrnRQhIoixY2gosPpK4YO73G
157 cabc840ffdee8a72f3689fb77dd74d04fdc2bc04 0 iQJEBAABCAAuFiEEK8zhT1xnJaouqK63ucncgkqlvdUFAloB+EYQHHJhZkBkdXJpbjQyLmNvbQAKCRC5ydyCSqW91TfwEAC/pYW7TC8mQnqSJzde4yiv2+zgflfJzRlg5rbvlUQl1gSBla3sFADZcic0ebAc+8XUu8eIzyPX+oa4wjsHvL13silUCkUzTEEQLqfKPX1bhA4mwfSDb5A7v2VZ5q8qhRGnlhTsB79ML8uBOhR/Bigdm2ixURPEZ37pWljiMp9XWBMtxPxXn/m0n5CDViibX6QqQCR4k3orcsIGd72YXU6B8NGbBN8qlqMSd0pGvSF4vM2cgVhz7D71+zU4XL/HVP97aU9GsOwN9QWW029DOJu6KG6x51WWtfD/tzyNDu7+lZ5/IKyqHX4tyqCIXEGAsQ3XypeHgCq5hV3E6LJLRqPcLpUNDiQlCg6tNPRaOuMC878MRIlffKqMH+sWo8Z7zHrut+LfRh5/k1aCh4J+FIlE6Hgbvbvv2Z8JxDpUKl0Tr+i0oHNTapbGXIecq1ZFR4kcdchodUHXBC2E6HWR50/ek5YKPddzw8WPGsBtzXMfkhFr3WkvyP2Gbe2XJnkuYptTJA+u2CfhrvgmWsYlvt/myTaMZQEzZ+uir4Xoo5NvzqTL30SFqPrP4Nh0n9G6vpVJl/eZxoYK9jL3VC0vDhnZXitkvDpjXZuJqw/HgExXWKZFfiQ3X2HY48v1gvJiSegZ5rX+uGGJtW2/Mp5FidePEgnFIqZW/yhBfs2Hzj1D2A==
158 a92b9f8e11ba330614cdfd6af0e03b15c1ff3797 0 iQJVBAABCAA/FiEEOoFVFj0OIKUw/LeGR6Z/+qNGqs4FAlohslshHGtidWxsb2NrK21lcmN1cmlhbEByaW5nd29ybGQub3JnAAoJEEemf/qjRqrO7P8P/1qGts96acEdB9BZbK/Eesalb1wUByLXZoP8j+1wWwqh/Kq/q7V4Qe0z1jw/92oZbmnLy2C8sDhWv/XKxACKv69oPrcqQix1E8M+07u88ZXqHJMSxkOmvA2Vimp9EG1qgje+qchgOVgvhEhysA96bRpEnc6V0RnBqI5UdfbKtlfBmX5mUE/qsoBZhly1FTmzV1bhYlGgNLyqtJQpcbA34wyPoywsp8DRBiHWrIzz5XNR+DJFTOe4Kqio1i5r8R4QSIM5vtTbj5pbsmtGcP2CsFC9S3xTSAU6AEJKxGpubPk3ckNj3P9zolvR7krU5Jt8LIgXSVaKLt9rPhmxCbPrLtORgXkUupJcrwzQl+oYz5bkl9kowFa959waIPYoCuuW402mOTDq/L3xwDH9AKK5rELPl3fNo+5OIDKAKRIu6zRSAzBtyGT6kkfb1NSghumP4scR7cgUmLaNibZBa8eJj92gwf+ucSGoB/dF/YHWNe0jY09LFK3nyCoftmyLzxcRk1JLGNngw8MCIuisHTskhxSm/qlX7qjunoZnA3yy9behhy/YaFt4YzYZbMTivt2gszX5ktToaDqfxWDYdIa79kp8G68rYPeybelTS74LwbK3blXPI3I1nddkW52znHYLvW6BYyi+QQ5jPZLkiOC+AF0q+c4gYmPaLVN/mpMZjjmB
159 27b6df1b5adbdf647cf5c6675b40575e1b197c60 0 iQJEBAABCAAuFiEEK8zhT1xnJaouqK63ucncgkqlvdUFAlpmbwIQHHJhZkBkdXJpbjQyLmNvbQAKCRC5ydyCSqW91W4BD/4h+y7QH7FkNcueOBrmdci7w1apkPX7KuknKxf8+FmA1QDGWYATnqD6IcAk3+f4reO4n9qc0y2BGrIz/pyTSIHvJW+ORrbPCKVrXlfUgkUK3TumtRObt8B75BVBBNaJ93r1yOALpo/K8wSwRrBF+Yl6aCoFiibUEbfcfaOAHVqZXKC1ZPtLRwq5NHIw0wWB0qNoAXj+FJV1EHO7SEjj2lXqw/r0HriQMdObWLgAb6QVUq7oVMpAumUeuQtZ169qHdqYfF1OLdCnsVBcwYEz/cBLC43bvYiwFxSkbAFyl656caWiwA3PISFSzP9Co0zWU/Qf8f7dTdAdT/orzCfUq8YoXqryfRSxi+8L8/EMxankzdW73Rx5X+0539pSq+gDDtTOyNuW6+CZwa5D84b31rsd+jTx8zVm3SRHRKsoGF2EEMQkWmDbhIFjX5W1fE84Ul3umypv+lPSvCPlQpIqv2hZmcTR12sgjdBjU8z+Zcq22SHFybqiYNmWpkVUtiMvTlHMoJfi5PI6xF8D2dxV4ErG+NflqdjaXydgnbO6D3/A1FCASig0wL4jMxSeRqnRRqLihN3VaGG2QH6MLJ+Ty6YuoonKtopw9JNOZydr/XN7K5LcjX1T3+31qmnHZyBXRSejWl9XN93IDbQcnMBWHkz/cJLN0kKu4pvnV8UGUcyXfA==
160 d334afc585e29577f271c5eda03378736a16ca6b 0 iQJEBAABCAAuFiEEK8zhT1xnJaouqK63ucncgkqlvdUFAlpzZuUQHHJhZkBkdXJpbjQyLmNvbQAKCRC5ydyCSqW91TiDEADDD6Tn04UjgrZ36nAqOcHaG1ZT2Cm1/sbTw+6duAhf3+uKWFqi2bgcdCBkdfRH7KfEU0GNsPpiC6mzWw3PDWmGhnLJAkR+9FTBU0edK01hkNW8RelDTL5J9IzIGwrP4KFfcUue6yrxU8GnSxnf5Vy/N5ZZzLV/P3hdBte5We9PD5KHPAwTzzcZ9Wiog700rFDDChyFq7hNQ3H0GpknF6+Ck5XmJ3DOqt1MFHk9V4Z/ASU59cQXKOeaMChlBpTb1gIIWjOE99v5aY06dc1WlwttuHtCZvZgtAduRAB6XYWyniS/7nXBv0MXD3EWbpH1pkOaWUxw217HpNP4g9Yo3u/i8UW+NkSJOeXtC1CFjWmUNj138IhS1pogaiPPnIs+H6eOJsmnGhN2KbOMjA5Dn9vSTi6s/98TarfUSiwxA4L7fJy5qowFETftuBO0fJpbB8+ZtpnjNp0MMKed27OUSv69i6BmLrP+eqk+MVO6PovvIySlWAP9/REM/I5/mFkqoI+ruT4a9osNGDZ4Jqb382b7EmpEMDdgb7+ezsybgDfizuaTs/LBae7h79o1m30DxZ/EZ5C+2LY8twbGSORvZN4ViMVhIhWBTlOE/iVBOj807Y2OaUURcuLfHRmaCcfF1uIzg0uNB/aM/WSE0+AXh2IX+mipoTS3eh/V2EKldBHcOQ==
161 369aadf7a3264b03c8b09efce715bc41e6ab4a9b 0 iQJVBAABCAA/FiEEOoFVFj0OIKUw/LeGR6Z/+qNGqs4FAlqe5w8hHGtidWxsb2NrK21lcmN1cmlhbEByaW5nd29ybGQub3JnAAoJEEemf/qjRqrO1lUQAK6+S26rE3AMt6667ClT+ubPl+nNMRkWJXa8EyPplBUGTPdMheViOe+28dCsveJxqUF7A4TMLMA/eIj4cRIwmVbBaivfQKnG5GMZ+9N6j6oqE/OAJujdHzzZ3+o9KJGtRgJP2tzdY/6qkXwL3WN6KULz7pSkrKZLOiNfj4k2bf3bXeB7d3N5erxJYlhddlPBlHXImRkWiPR/bdaAaYJq+EEWCbia6MWXlSAqEjIgQi+ytuh/9Z+QSsJCsECDRqEExZClqHGkCLYhST99NqqdYCGJzAFMgh+xWxZxI0LO08pJxYctHGoHm+vvRVMfmdbxEydEy01H6jX+1e7Yq44bovIiIOkaXCTSuEBol+R5aPKJhgvqgZ5IlcTLoIYQBE3MZMKZ89NWy3TvgcNkQiOPCCkKs1+DukXKqTt62zOTxfa6mIZDCXdGai6vZBJ5b0yeEd3HV96yHb9dFlS5w1cG7prIBRv5BkqEaFbRMGZGV31Ri7BuVu0O68Pfdq+R+4A1YLdJ0H5DySe2dGlwE2DMKhdtVu1bie4UWHK10TphmqhBk6B9Ew2+tASCU7iczAqRzyzMLBTHIfCYO2R+5Yuh0CApt47KV23OcLje9nORyE2yaDTbVUPiXzdOnbRaCQf7eW5/1y/LLjG6OwtuETTcHKh7ruko+u7rFL96a4DNlNdk
162 8bba684efde7f45add05f737952093bb2aa07155 0 iQJVBAABCAA/FiEEOoFVFj0OIKUw/LeGR6Z/+qNGqs4FAlqe6dkhHGtidWxsb2NrK21lcmN1cmlhbEByaW5nd29ybGQub3JnAAoJEEemf/qjRqrOJmIQALUVCoWUFYYaRxGH4OpmIQ2o1JrMefvarFhaPY1r3+G87sjXgw15uobEQDtoybTUYbcdSxJQT1KE1FOm3wU0VyN6PY9c1PMEAVgJlve0eDiXNNlBsoYMXnpq1HidZknkjpXgUPdE/LElxpJJRlJQZlS29bkGmEDZQBoOvlcZoBRDSYcbM07wn7d+1gmJkcHViDBMAbSrudfO0OYzDC1BjtGyKm7Mes2WB1yFYw+ySa8hF/xPKEDvoZINOE5n3PBJiCvPuTw3PqsHvWgKOA1Obx9fATlxj7EHBLfKBTNfpUwPMRSH1cmA+qUS9mRDrdLvrThwalr6D3r2RJ2ntOipcZpKMmxARRV+VUAI1K6H0/Ws3XAxENqhF7RgRruJFVq8G8EcHJLZEoVHsR+VOnd/pzgkFKS+tIsYYRcMpL0DdMF8pV3xrEFahgRhaEZOh4jsG3Z+sGLVFFl7DdMqeGs6m/TwDrvfuYtGczfGRB0wqu8KOwhR1BjNJKcr4lk35GKwSXmI1vk6Z1gAm0e13995lqbCJwkuOKynQlHWVOR6hu3ypvAgV/zXLF5t8HHtL48sOJ8a33THuJT4whbXSIb9BQXu/NQnNhK8G3Kly5UN88vL4a3sZi/Y86h4R2fKOSib/txJ3ydLbMeS8LlJMqeF/hrBanVF0r15NZ2CdmL1Qxim
163 7de7bd407251af2bc98e5b809c8598ee95830daf 0 iQJEBAABCAAuFiEEK8zhT1xnJaouqK63ucncgkqlvdUFAlrE4p0QHHJhZkBkdXJpbjQyLmNvbQAKCRC5ydyCSqW91c4UD/4tC+mBWxBw/JYm4vlFTKWLHopLEa1/uhFRK/uGsdgcCyexbCDbisjJpl3JTQb+wQDlZnUorm8zB206y418YqhJ7lCauRgcoqKka0e3kvKnwmklwmuGkwOIoruWxxhCcgRCT4C+jZ/ZE3Kre0CKnUvlASsHtbkqrCqFClEcIlPVohlccmjbpQXN+akB40tkMF5Xf0AMBPYG7UievmeHhz3pO/yex/Uc6RhgWAqD4zjA1bh+3REGs3CaoYgKUTXZw/XYI9cqAI0FobRuXSVbq2dqkXCFLfD+WizxUz55rZA+CP4pqLndwxGm4fLy4gk2iLHxKfrHsAul7n5e4tHmxDcOOa1K0fIJDBijuXoNfXN7nF4NQUlfpmtOxUxfniVohvXJeYV8ecepsDMSFqDtEtbdhsep5QDx85lGLNLQAA1f36swJzLBSqGw688Hjql2c9txK2eVrVxNp+M8tqn9qU/h2/firgu9a2DxQB45M7ISfkutmpizN5TNlEyElH0htHnKG7+AIbRAm4novCXfSzP8eepk0kVwj9QMIx/rw4aeicRdPWBTcDIG0gWELb0skunTQqeZwPPESwimntdmwCxfFksgT0t79ZEDAWWfxNLhJP/HWO2mYG5GUJOzNQ4rj/YXLcye6A4KkhvuZlVCaKAbnm60ivoG082HYuozV4qPOQ==
164 ed5448edcbfa747b9154099e18630e49024fd47b 0 iQJEBAABCAAuFiEEK8zhT1xnJaouqK63ucncgkqlvdUFAlrXnuoQHHJhZkBkdXJpbjQyLmNvbQAKCRC5ydyCSqW91fSHEACBVg4FsCE2nN5aEKAQb7l7rG4XTQ9FbvoTYB3tkvmsLQSRfh2GB2ZDBOI7Vswo2UxXupr4qSkUQbeHrwrk9A1s5b/T5e4wSKZuFJOrkwLVZDFfUHumKomqdoVj/D8+LDt7Rz+Wm7OClO/4dTAsl2E4rkl7XPtqjC3jESGad8IBANlPVBhNUMER4eFcPZzq1qi2MrlJKEKpdeZEWJ/ow7gka/aTLqHMfRwhA3kS5X34Yai17kLQZGQdWISWYiM9Zd2b/FSTHZGy8rf9cvjXs3EXfEB5nePveDrFOfmuubVRDplO+/naJjNBqwxeB99jb7Fk3sekPZNW/NqR/w1jvQFA3OP9fS2g1OwfXMWyx6DvBJNfQwppNH3JUvA5PEiorul4GJ2nuubXk+Or1yzoRJtwOGz/GQi2BcsPKaL6niewrInFw18jMVhx/4Jbpu+glaim4EvT/PfJ5KdSwF7pJxsoiqvw7A2C2/DsZRbCeal9GrTulkNf/hgpCJOBK1DqVVq1O5MI/oYQ69HxgMq9Ip1OGJJhse3qjevBJbpNCosCpjb3htlo4go29H8yyGJb09i05WtNW2EQchrTHrlruFr7mKJ5h1mAYket74QQyaGzqwgD5kwSVnIcwHpfb8oiJTwA5R+LtbAQXWC/fFu1g1KEp/4hGOQoRU04+mYuPsrzaA==
165 1ec874717d8a93b19e0d50628443e0ee5efab3a9 0 iQJEBAABCAAuFiEEK8zhT1xnJaouqK63ucncgkqlvdUFAlraM3wQHHJhZkBkdXJpbjQyLmNvbQAKCRC5ydyCSqW91RAJEACSnf/HWwS0/OZaqz4Hfh0UBgkXDmH1IC90Pc/kczf//WuXu5AVnnRHDziOlCYYZAnZ2iKu0EQI6GT2K2garaWkaEhukOnjz4WADVys6DAzJyw5iOXeEpIOlZH6hbYbsW3zVcPjiMPo8cY5tIYEy4E/8RcVly1SDtWxvt/nWYQd2MxObLrpU7bPP6a2Db4Vy8WpGRbZRJmOvDNworld5rB5M/OGgHyMa9hg2Hjn+cLtQSEJY4O92A6h2hix9xpDC7zzfoluD2piDslocTm/gyeln2BJJBAtr+aRoHO9hI0baq5yFRQLO8aqQRJJP8dXgYZIWgSU/9oVGPZoGotJyw24iiB37R/YCisKE+cEUjfVclHTDFCkzmYP2ZMbGaktohJeF7EMau0ZJ8II5F0ja3bj6GrwfpGGY5OOcQrzIYW7nB0msFWTljb34qN3nd7m+hQ5hji3Hp9CFXEbCboVmm46LqwukSDWTmnfcP8knxWbBlJ4xDxySwTtcHAJhnUmKxu7oe3D/0Ttdv7HscI40eeMdr01pLQ0Ee3a4OumQ1hn+oL+o+tlqg8PKT20q528CMHgSJp6aIlU7pEK81b+Zj6B57us4P97qSL6XLNUIfubADCaf/KUDwh1HvKhHXV2aRli1GX1REFsy0ItGZn0yhQxIDJKc/FKsEMBKvlVIHGQFw==
166 6614cac550aea66d19c601e45efd1b7bd08d7c40 0 iQJVBAABCAA/FiEEOoFVFj0OIKUw/LeGR6Z/+qNGqs4FAlruOCQhHGtidWxsb2NrK21lcmN1cmlhbEByaW5nd29ybGQub3JnAAoJEEemf/qjRqrOENQQAI1ttaffqYucUEyBARP1GDlZMIGDJgNG7smPMU4Sw7YEzB9mcmxnBFlPx/9n973ucEnLJVONBSZq0VWIKJwPp1RMBpAHuGrMlhkMvYIAukg5EBN3YpA1UogHYycwLj2Ye7fNgiN5FIkaodt9++c4d1Lfu658A2pAeg8qUn5uJ77vVcZRp988u9eVDQfubS8P6bB4KZc87VDAUUeXy+AcS9KHGBmdRAabwU4m09VPZ4h8NEj3+YUPnKXBaNK9pXK5pnkmB8uFePayimnw6St6093oylQTVw/tfxGLBImnHw+6KCu2ut9r5PxXEVxVYpranGbS4jYqpzRtpQBxyo/Igu7fqrioR2rGLQL5NcHsoUEdOC7VW+0HgHjXKtRy7agmcFcgjFco47D3hor7Y16lwgm+RV2EWQ/u2M4Bbo1EWj1oxQ/0j5DOM5UeAJ3Jh64gb4sCDqJfADR8NQaxh7QiqYhn69IcjsEfzU/11VuqWXlQgghJhEEP/bojRyM0qee87CKLiTescafIfnRsNQhyhsKqdHU1QAp29cCqh3mzNxJH3PDYg4fjRaGW4PM7K5gmSXFn/Ifeza0cuZ4XLdYZ76Z1BG80pqBpKZy1unGob+RpItlSmO5jQw7OoRuf0q3Id92gawUDDLuQ7Xg3zOVqV8/wJBlHM7ZUz162bnNsO5Hn
167 9c5ced5276d6e7d54f7c3dadf5247b7ee98ec79c 0 iQJEBAABCAAuFiEEK8zhT1xnJaouqK63ucncgkqlvdUFAlsYGdAQHHJhZkBkdXJpbjQyLmNvbQAKCRC5ydyCSqW91S3fEACmrG3S5eAUhnKqkXFe+HZUwmUvLKRhyWDLlWQzEHaJZQCFWxqSM1ag7JtAx3WkWwmWrOZ0+T/w/xMv81h9JAv9RsoszUT/RH4RsnWoc2ddcK93Q/PrNJ29kFjvC8j3LF42WfHEIeNqAki5c3GbprUL86KG7XVYuMvpPI/SeNSz8siPaKjXo6sg6bAupPCyapisTmeRHcCUc5UfeTTq4YQdS9UI0p9Fo8/vcqmnWY6XnQCRYs2U8Y2I2QCJBHBE5p4KrxrFsAdPWMCg0dJT0goSbzpfDjukPHQaAnUKjCtXCwrzA/KY8fDH9hm5tt1FnC6nl6BRpEHRoHqTfE1ag2QktJZTn5+JWpzz85qFDl5ktmxj1gS80jkOUJ2699RykBy7NACu+TtLJdBk+E1TN0pAU+zsrTSGiteuikEBjQP/8i4whUZCFIHLPgVlxrHWwn0/oszj1Q/u86sCxnYTflR2GLZs3fbSGBEKDDrjqwetxMlwi/3Qhf0PN9aAI7S13YnA89tGLGRLTsVsOoKiQoTExQaCUpE5jFYBLVjsTPh2AjPhG3Zaf7R5ZIvW4CbVYORNTMaYhFNnFyczILJLRid+INHLVifNiJuaLiAFD5Izq9Me4H+GpwB5AI7aG1r+01Si2KbqqpdfoK430UeDV+U/MvEU7v0RoeF30M7uVYv+kg==
168 0b63a6743010dfdbf8a8154186e119949bdaa1cc 0 iQJEBAABCAAuFiEEK8zhT1xnJaouqK63ucncgkqlvdUFAls7n+0QHHJhZkBkdXJpbjQyLmNvbQAKCRC5ydyCSqW91XVGEAC1aPuUmW9R0QjWUmyY4vMO7AOT4F1sHKrkgNaoG/RCvczuZOCz/fGliEKQ52pkvThrOgOvNfJlIGOu91noLKsYUybO8eeTksCzc7agUjk6/Xsed35D8gNEPuiVTNu379sTQRnOA2T/plQnVCY2PjMzBe6nQ2DJYnggJelCUxuqUsLM76OvMEeNlXvyxZmyAcFT5dfSBYbjAt0kklRRQWgaug3GwLJY/+0tmXhq0tCpAF6myXoVQm/ynSxjR+5+2/+F5nudOQmDnL0zGayOAQU97RLAAxf1L+3DTRfbtxams9ZrGfRzQGcI1d4I4ernfnFYI19kSzMPcW4qI7gQQlTfOzs8X5d2fKiqUFjlgOO42hgM6cQv2Hx3u+bxF00sAvrW8sWRjfMQACuNH3FJoeIubpohN5o1Madv4ayGAZkcyskYRCs9X40gn+Q9gv34uknjaF/mep7BBl08JC9zFqwGaLyCssSsHV7ncekkUZfcWfq4TNNEUZFIu7UtsnZYz0aYrueAKMp+4udTjfKKnSZL2o0n1g11iH9KTQO/dWP7rVbu/OIbLeE+D87oXOWGfDNBRyHLItrM70Vum0HxtFuWc1clj8qzF61Mx0umFfUmdGQcl9DGivmc7TLNzBKG11ElDuDIey6Yxc6nwWiAJ6v1H5bO3WBi/klbT2fWguOo5w==
169 e90130af47ce8dd53a3109aed9d15876b3e7dee8 0 iQJEBAABCAAuFiEEK8zhT1xnJaouqK63ucncgkqlvdUFAltQ1bUQHHJhZkBkdXJpbjQyLmNvbQAKCRC5ydyCSqW91RQVD/9NA5t2mlt7pFc0Sswktc5dI8GaSYxgeknacLkEdkYx9L+mzg77G7TGueeu5duovjdI/vDIzdadGtJJ+zJE5icCqeUFDfNZNZLQ+7StuC8/f+4i/DaCzjHJ4tDYd0x6R5efisLWRKkWoodI1Iit7gCL493gj1HZaIzRLaqYkbOk3PhOEkTcov2cnhb4h54OKm07qlg6PYH507WGmmTDDnhL9SwdfBXHA2ps9dCe52NzPMyebXoZYA9T5Yz67eQ8D+YCh9bLauA59dW0Iyx59yGJ0tmLwVKBgbUkynAknwk/hdNlF7r6wLqbR00NLKmAZl8crdVSqFUU/vAsPQLn3BkbtpzqjmisIq2BWEt/YWYZOHUvJoK81cRcsVpPuAOIQM/rTm9pprTq7RFtuVnCj+QnmWwEPZJcS/7pnnIXte3gQt76ovLuFxr7dq99anEA7gnTbSdADIzgZhJMM8hJcrcgvbI4xz0H1qKn3webTNl/jPgTsNjAPYcmRZcoU2wUIR+OPhZvfwhvreRX0dGUV6gqxWnx3u3dsWE9jcBIGlNfYnIkLXyqBdOL6f4yQoxaVjRg/ScEt3hU17TknuPIDOXE/iMgWnYpnTqKBolt/Vbx7qB1OiK7AmQvXY1bnhtkIfOoIwZ9X1Zi2vmV1Wz4G0a5Vxq5eNKpQgACA2HE0MS2HQ==
170 33ac6a72308a215e6086fbced347ec10aa963b0a 0 iQJEBAABCAAuFiEEK8zhT1xnJaouqK63ucncgkqlvdUFAlthwaIQHHJhZkBkdXJpbjQyLmNvbQAKCRC5ydyCSqW91atOD/0de4nA55WJpiQzAqTg4xWIRZB6y0pkQ8D4cKNQkNiwPQAdDEPf85RuYmoPusNxhM40qfJlmHOw8sbRaqqabhVBPEzL1DpKe4GBucagLZqoL3pycyMzhkhzMka2RJT6nekCchTKJTIs2gx4FOA/QwaFYNkXFfguAEvi01isVdMo0GFLQ7pf7wU8UO1PPdkYphH0xPUvsreQ3pR3+6WwMLovk4JYW4cSaM4YkLlqJQPSO2YAlyXAwiQRvu2A227ydVqHOgLeV5zMQPy2v2zTgl2AoMdWp8+g2lJrYwclkNR+LAk5OlGYamyZwlmsTO7OX3n7xJYtfjbqdoqEKhO1igMi3ZSjqwkaBxxkXxArrteD19bpUyInTjbwTRO3mSe5aNkEDGoOYWn8UOn5ZkeEo7NyhP4OTXqyxQs9rwjD79xZk+6fGB777vuZDUdLZYRQFOPEximpmCGJDrZWj5PeIALWkrRGWBl2eFJ5sl6/pFlUJDjDEstnrsfosp6NJ3VFiD9EunFWsTlV2qXaueh9+TfaSRmGHVuwFCDt7nATVEzTt8l74xsL3xUPS4u9EcNPuEhCRu1zLojCGjemEA29R9tJS8oWd6SwXKryzjo8SyN7yQVSM/yl212IOiOHTQF8vVZuJnailtcWc3D4NoOxntnnv8fnd1nr8M5QSjYQVzSkHw==
171 ede3bf31fe63677fdf5bd8db687977d4e3d792ed 0 iQJEBAABCAAuFiEEK8zhT1xnJaouqK63ucncgkqlvdUFAluOq84QHHJhZkBkdXJpbjQyLmNvbQAKCRC5ydyCSqW91ao3D/oC9zKNbk+MMUP0cSfl+ESRbP/sAI466IYDkr9f1klooIFMsdqCd16eS36DVwIwrBYapRaNszC6Pg0KCFKCdeAWJLcgeIawwOkZPrLKQmS3I9GTl9gxtExeFvRryaAdP1DAPEU6JkyHo3xmURkJB58VjuBquZz4cYnL2aE1ag04CWAoRFiLu6bt1hEZ8pONU6cbDpHaJVyUZmJRB+llpybgdLnlBTrhfWjNofTh8MM6+vz67lIienYoSbepY+029J98phBTV+UEfWSBWw1hcNT/+QmOBGWWTLfBARsNDZFeYgQQOo3gRghKO7qUA/hqzDTmMG4/a2obs0LGsBlcMZ1Ky//zhdAJ/EN7uH9svM1t1fkw1RgvftmybptK5KiusZ9AWhnggHSwZtj1I6i/sojqsj9MrtdrD+1LfiKuAv/FtcMHSeff8IfItrd2B67JIj4wCzU8vDrAbAAqODHx7AnssvNbYrH2iOigSINFMNJoLU/xLxBhTxitU2Zf8puHA4CQ3+BybgOH9HPqCtGcVAB7bcp4hiezGrachM+2oec2YwcGCpIobMPl43cmWkLhtGF5qfl7APVfbo18UXk8ZGmBY8YAYwEyksk2SBMJV6+XHw9J7uaaugc3uN8PuMVLqvSMpWN1ZdRsSkxrOJK+UNW7kbUi0wHnsV1rN0U0BIfVOQ==
172 5405cb1a79010ac50c58cd84e6f50c4556bf2a4c 0 iQJEBAABCAAuFiEEK8zhT1xnJaouqK63ucncgkqlvdUFAluyfokQHHJhZkBkdXJpbjQyLmNvbQAKCRC5ydyCSqW91eWpD/0eu/JfD6SfaT4Ozd2767ojNIW4M9BgcRH/FehFBd/3iQ/YQmaMVd6GmdaagM5YUpD9U+rDK95l8rUstuTglXeKD2SVcDM4Oq9ToyZyp5aizWjkxRxHT60W95G5FQO/tBbs63jfNrVDWDElbkpcn/gUG6JbX+q/S/mKd6WsuwNQC1N4VOWp0OWCmFGBWN7t/DqxGLGEajJM0NB97/r/IV6TzrGtaPf1CXaepDVvZwIIeas/eQgGInyqry7WBSn5sCUq4opIh1UigMABUAgzIZbgTg8NLGSmEgRgk0Vb4K+pLejLLDb5YD7ZwuUCkbd8oJImKQfU6++Ajd70TbNQRvVhMtd15iCtOOjLR+VNkUiDXm0g1U53sREMLdj/+SMJZB6Z18DotdgpaeCmwA/wWijXOdt76xwUKjByioxyQilPrzrWGaoSG4ynjiD2Y+eSRS1DxbpDgt4YEuiVA6U3ay99oW7KkhFjQsUtKl4SJ5SQWiEofvgtb2maNrXkPtKOtNRHhc61v73zYnsxtl2qduC99YOTin90FykD80XvgJZfyow/LICb77MNGwYBsJJMDQ3jG1YyUC2CQsb8wyrWM4TO3tspKAQPyMegUaVtBqw7ZhgiC3OXEes+z+AL5YRSZXALfurXPYbja8M8uGL2TYB3/5bKYvBXxvfmSGIeY6VieQ==
173 956ec6f1320df26f3133ec40f3de866ea0695fd7 0 iQJEBAABCAAuFiEEK8zhT1xnJaouqK63ucncgkqlvdUFAlvOG20QHHJhZkBkdXJpbjQyLmNvbQAKCRC5ydyCSqW91eZ+EACb/XfPWaMkwIX54JaFWtL/nVkDcaL8xLVzlI+PxL0ZtHdQTGVQNp5f1BnZU9RKPZ9QOuz+QKNvb4hOOXBwmCi2AAjmTYUqtKThHmOT50ZRICkllY+YlZ3tI6JXRDhh7pSXaus8jBFG/VwuUlVmK5sA2TP+lIJijOgV9rThszfS4Q2I8sBTIaeZS1hyujFxGRO++tjYR+jPuo/98FhqJ5EylVYvKmnflWkOYLFNFqgDI6DQs7Dl+u2nrNAzZJQlgk+1ekd66T3WyK8U3tcFLZGRQ+gpzINH0Syn6USaaE+0nGi4we1hJS8JK0txWyHXJGNZYaWQAC2l1hIBfA38azwVLSe2w9JatXhS3HWByILy8JkEQ2kSo1xTD4mBkszZo/kWZpZRsAWydxCnzhNgKmTJYxASFTTX1mpdX4EzJBOs/++52y1OjVc0Ko0+6vSwxsC6zgIGJx1Os7vVgWHql0XbDmJ1NDdNmz7q5HjFcbNOWScKf6UGcBKV4dpW1w+7CvdoMFHUsVTa2zn6YOki3NEt0GWLXq+0aXbHSw8XETcyunQKjDi9ddKOw0rYGip6EKUKhOILZimQ0lgYRE23RDdT5Tl2D8s66SUuipgP9vGjbMaE/FhO3OAb7406jyCrOVfDis7sK0Hvw074GhIfZUjA4W4Ey2TeExCZHHhBdoPTrg==
174 a91a2837150bdcb27ae76b3646e6c93cd6a15904 0 iQJEBAABCAAuFiEEK8zhT1xnJaouqK63ucncgkqlvdUFAlvclPMQHHJhZkBkdXJpbjQyLmNvbQAKCRC5ydyCSqW91fc0EADF/62jqCARFaQRRcKpobPNBZupwSbnQ7E296ZRwHdZvT8CVGfkWBUIStyh+r8bfmBzzea6d9/SUoRqCoV9rwCXuRbeCZZRMMkqx9IblV3foaIOxyQi0KE2lpzGJAHxPiNxD3czZV4B+P6X2wNmG9OLjmHyQ7o64GvPAJ+Ko/EsND1tkx4qB16mEuEHVxtfaG6hbjgpLekIA3+3xur3E8cWBsNO28HtQBK83r2qURwv6eG3TfkbmiE+Ie5TNC15LPVhAOHVSD7miZdI82uk2063puCKZxIJXsy7EMjHfChTM9c7B4+TdEBjms3y+Byz2EV7kRfjplGOnBbYvfY7qiteTn/22+rLrTTQNkndDN/Sqr1DjwsvxKDeIfsqgXzGQPupLOrGdGf4ILAtA0Reme7VKNN5Px6dNxnjKKwsnSrKTQ7ZcmD+W1LKlL63lBEQvEy+TLmmFLfM2xvvBxL5177AKZrj/8gMUzEi1K2MelDGrasA7OSjTlABoleDvZzVOf1nC0Bv83tFc8FeMHLwNOxkFSsjORvZuIH/G9BYUTAd96iLwQRBxXLOVNitxAOQT+s3hs7JEaUzTHlAY+lNeFAxUujb4H0V40Xgr20O1u7PJ53tzApIrg9JQPgvUXntmRs8fpNo6f3P6Sg8XtaCCHIUAB6qTHiose56llf6bzl66A==
175 1c8c54cf97256f4468da2eb4dbee24f7f3888e71 0 iQJEBAABCAAuFiEEK8zhT1xnJaouqK63ucncgkqlvdUFAlwG+eIQHHJhZkBkdXJpbjQyLmNvbQAKCRC5ydyCSqW91YqSD/9IAwdaPrOeiT+DVBW2x33oFeY1X1f5CBG/vCJptalOd2QDIsD0ANEzQHmzV25RKD851v155Txt/BPlkuBfO/kg0BbOoqTpGZk+5CcoFWeyhJct2CxtCLdEpyZ/98/htMR4VfWprCX2GHXPjS813l9pebsN3WgBUOc2VaUdHNRoAGsMVgWC5BWwNP4XSA9oixFL/O4aGLQ6pPfP3vmMFySWXWnIN8gUZ4sm53eKaT0QCICAgzFh+GzRd81uACDfoJn1d8RS9GK+h6j8x0crLY5CpQQy8lRVkokvc0h6XK44ofc57p9GHAOfprHY3DbBhD9H6fLAf5raUsqPkLRYVGqhg8bOsBr3vJ56hiXJYOYPZSYXGjnHRcUrgfPVrY+6mPTeCIQMPmWBHwYH5Tc5TLrPuxxCL4wVywqGbfmIVP+WFUikkykAAwuPOZAswxJJOB0gsnnxcApmTeXRznBXyvzscMlWVZiMjzflKRRJ9V5RI4Fdc6n1wQ4vuLSO4AUnIypIsV6ZFAOBuFKH7x6nPG0tP3FYzcICaMOPbxEx3LStnuU+UuEs6TIxM6IiR3LPiiDGZ2BA2gjJhDxQFV8hAl8KDO3LsYuyUQCv3RTAP+YejH21bIXdnwDlNqy8Hrd53rq7jZsdb2pMVvOZZ3VmIu64f+jVkD/r5msDUkQL3M9jwg==
@@ -1,187 +1,188 @@
1 d40cc5aacc31ed673d9b5b24f98bee78c283062c 0.4f
1 d40cc5aacc31ed673d9b5b24f98bee78c283062c 0.4f
2 1c590d34bf61e2ea12c71738e5a746cd74586157 0.4e
2 1c590d34bf61e2ea12c71738e5a746cd74586157 0.4e
3 7eca4cfa8aad5fce9a04f7d8acadcd0452e2f34e 0.4d
3 7eca4cfa8aad5fce9a04f7d8acadcd0452e2f34e 0.4d
4 b4d0c3786ad3e47beacf8412157326a32b6d25a4 0.4c
4 b4d0c3786ad3e47beacf8412157326a32b6d25a4 0.4c
5 f40273b0ad7b3a6d3012fd37736d0611f41ecf54 0.5
5 f40273b0ad7b3a6d3012fd37736d0611f41ecf54 0.5
6 0a28dfe59f8fab54a5118c5be4f40da34a53cdb7 0.5b
6 0a28dfe59f8fab54a5118c5be4f40da34a53cdb7 0.5b
7 12e0fdbc57a0be78f0e817fd1d170a3615cd35da 0.6
7 12e0fdbc57a0be78f0e817fd1d170a3615cd35da 0.6
8 4ccf3de52989b14c3d84e1097f59e39a992e00bd 0.6b
8 4ccf3de52989b14c3d84e1097f59e39a992e00bd 0.6b
9 eac9c8efcd9bd8244e72fb6821f769f450457a32 0.6c
9 eac9c8efcd9bd8244e72fb6821f769f450457a32 0.6c
10 979c049974485125e1f9357f6bbe9c1b548a64c3 0.7
10 979c049974485125e1f9357f6bbe9c1b548a64c3 0.7
11 3a56574f329a368d645853e0f9e09472aee62349 0.8
11 3a56574f329a368d645853e0f9e09472aee62349 0.8
12 6a03cff2b0f5d30281e6addefe96b993582f2eac 0.8.1
12 6a03cff2b0f5d30281e6addefe96b993582f2eac 0.8.1
13 35fb62a3a673d5322f6274a44ba6456e5e4b3b37 0.9
13 35fb62a3a673d5322f6274a44ba6456e5e4b3b37 0.9
14 2be3001847cb18a23c403439d9e7d0ace30804e9 0.9.1
14 2be3001847cb18a23c403439d9e7d0ace30804e9 0.9.1
15 36a957364b1b89c150f2d0e60a99befe0ee08bd3 0.9.2
15 36a957364b1b89c150f2d0e60a99befe0ee08bd3 0.9.2
16 27230c29bfec36d5540fbe1c976810aefecfd1d2 0.9.3
16 27230c29bfec36d5540fbe1c976810aefecfd1d2 0.9.3
17 fb4b6d5fe100b0886f8bc3d6731ec0e5ed5c4694 0.9.4
17 fb4b6d5fe100b0886f8bc3d6731ec0e5ed5c4694 0.9.4
18 23889160905a1b09fffe1c07378e9fc1827606eb 0.9.5
18 23889160905a1b09fffe1c07378e9fc1827606eb 0.9.5
19 bae2e9c838e90a393bae3973a7850280413e091a 1.0
19 bae2e9c838e90a393bae3973a7850280413e091a 1.0
20 d5cbbe2c49cee22a9fbeb9ea41daa0ac4e26b846 1.0.1
20 d5cbbe2c49cee22a9fbeb9ea41daa0ac4e26b846 1.0.1
21 d2375bbee6d47e62ba8e415c86e83a465dc4dce9 1.0.2
21 d2375bbee6d47e62ba8e415c86e83a465dc4dce9 1.0.2
22 2a67430f92f15ea5159c26b09ec4839a0c549a26 1.1
22 2a67430f92f15ea5159c26b09ec4839a0c549a26 1.1
23 3773e510d433969e277b1863c317b674cbee2065 1.1.1
23 3773e510d433969e277b1863c317b674cbee2065 1.1.1
24 11a4eb81fb4f4742451591489e2797dc47903277 1.1.2
24 11a4eb81fb4f4742451591489e2797dc47903277 1.1.2
25 11efa41037e280d08cfb07c09ad485df30fb0ea8 1.2
25 11efa41037e280d08cfb07c09ad485df30fb0ea8 1.2
26 02981000012e3adf40c4849bd7b3d5618f9ce82d 1.2.1
26 02981000012e3adf40c4849bd7b3d5618f9ce82d 1.2.1
27 196d40e7c885fa6e95f89134809b3ec7bdbca34b 1.3
27 196d40e7c885fa6e95f89134809b3ec7bdbca34b 1.3
28 3ef6c14a1e8e83a31226f5881b7fe6095bbfa6f6 1.3.1
28 3ef6c14a1e8e83a31226f5881b7fe6095bbfa6f6 1.3.1
29 31ec469f9b556f11819937cf68ee53f2be927ebf 1.4
29 31ec469f9b556f11819937cf68ee53f2be927ebf 1.4
30 439d7ea6fe3aa4ab9ec274a68846779153789de9 1.4.1
30 439d7ea6fe3aa4ab9ec274a68846779153789de9 1.4.1
31 296a0b14a68621f6990c54fdba0083f6f20935bf 1.4.2
31 296a0b14a68621f6990c54fdba0083f6f20935bf 1.4.2
32 4aa619c4c2c09907034d9824ebb1dd0e878206eb 1.4.3
32 4aa619c4c2c09907034d9824ebb1dd0e878206eb 1.4.3
33 ff2704a8ded37fbebd8b6eb5ec733731d725da8a 1.5
33 ff2704a8ded37fbebd8b6eb5ec733731d725da8a 1.5
34 2b01dab594167bc0dd33331dbaa6dca3dca1b3aa 1.5.1
34 2b01dab594167bc0dd33331dbaa6dca3dca1b3aa 1.5.1
35 39f725929f0c48c5fb3b90c071fc3066012456ca 1.5.2
35 39f725929f0c48c5fb3b90c071fc3066012456ca 1.5.2
36 fdcf80f26604f233dc4d8f0a5ef9d7470e317e8a 1.5.3
36 fdcf80f26604f233dc4d8f0a5ef9d7470e317e8a 1.5.3
37 24fe2629c6fd0c74c90bd066e77387c2b02e8437 1.5.4
37 24fe2629c6fd0c74c90bd066e77387c2b02e8437 1.5.4
38 f786fc4b8764cd2a5526d259cf2f94d8a66924d9 1.6
38 f786fc4b8764cd2a5526d259cf2f94d8a66924d9 1.6
39 bf1774d95bde614af3956d92b20e2a0c68c5fec7 1.6.1
39 bf1774d95bde614af3956d92b20e2a0c68c5fec7 1.6.1
40 c00f03a4982e467fb6b6bd45908767db6df4771d 1.6.2
40 c00f03a4982e467fb6b6bd45908767db6df4771d 1.6.2
41 ff5cec76b1c5b6be9c3bb923aae8c3c6d079d6b9 1.6.3
41 ff5cec76b1c5b6be9c3bb923aae8c3c6d079d6b9 1.6.3
42 93d8bff78c96fe7e33237b257558ee97290048a4 1.6.4
42 93d8bff78c96fe7e33237b257558ee97290048a4 1.6.4
43 333421b9e0f96c7bc788e5667c146a58a9440a55 1.7
43 333421b9e0f96c7bc788e5667c146a58a9440a55 1.7
44 4438875ec01bd0fc32be92b0872eb6daeed4d44f 1.7.1
44 4438875ec01bd0fc32be92b0872eb6daeed4d44f 1.7.1
45 6aff4f144ad356311318b0011df0bb21f2c97429 1.7.2
45 6aff4f144ad356311318b0011df0bb21f2c97429 1.7.2
46 e3bf16703e2601de99e563cdb3a5d50b64e6d320 1.7.3
46 e3bf16703e2601de99e563cdb3a5d50b64e6d320 1.7.3
47 a6c855c32ea081da3c3b8ff628f1847ff271482f 1.7.4
47 a6c855c32ea081da3c3b8ff628f1847ff271482f 1.7.4
48 2b2155623ee2559caf288fd333f30475966c4525 1.7.5
48 2b2155623ee2559caf288fd333f30475966c4525 1.7.5
49 2616325766e3504c8ae7c84bd15ee610901fe91d 1.8
49 2616325766e3504c8ae7c84bd15ee610901fe91d 1.8
50 aa1f3be38ab127280761889d2dca906ca465b5f4 1.8.1
50 aa1f3be38ab127280761889d2dca906ca465b5f4 1.8.1
51 b032bec2c0a651ca0ddecb65714bfe6770f67d70 1.8.2
51 b032bec2c0a651ca0ddecb65714bfe6770f67d70 1.8.2
52 3cb1e95676ad089596bd81d0937cad37d6e3b7fb 1.8.3
52 3cb1e95676ad089596bd81d0937cad37d6e3b7fb 1.8.3
53 733af5d9f6b22387913e1d11350fb8cb7c1487dd 1.8.4
53 733af5d9f6b22387913e1d11350fb8cb7c1487dd 1.8.4
54 de9eb6b1da4fc522b1cab16d86ca166204c24f25 1.9
54 de9eb6b1da4fc522b1cab16d86ca166204c24f25 1.9
55 4a43e23b8c55b4566b8200bf69fe2158485a2634 1.9.1
55 4a43e23b8c55b4566b8200bf69fe2158485a2634 1.9.1
56 d629f1e89021103f1753addcef6b310e4435b184 1.9.2
56 d629f1e89021103f1753addcef6b310e4435b184 1.9.2
57 351a9292e430e35766c552066ed3e87c557b803b 1.9.3
57 351a9292e430e35766c552066ed3e87c557b803b 1.9.3
58 384082750f2c51dc917d85a7145748330fa6ef4d 2.0-rc
58 384082750f2c51dc917d85a7145748330fa6ef4d 2.0-rc
59 41453d55b481ddfcc1dacb445179649e24ca861d 2.0
59 41453d55b481ddfcc1dacb445179649e24ca861d 2.0
60 195dbd1cef0c2f9f8bcf4ea303238105f716bda3 2.0.1
60 195dbd1cef0c2f9f8bcf4ea303238105f716bda3 2.0.1
61 6344043924497cd06d781d9014c66802285072e4 2.0.2
61 6344043924497cd06d781d9014c66802285072e4 2.0.2
62 db33555eafeaf9df1e18950e29439eaa706d399b 2.1-rc
62 db33555eafeaf9df1e18950e29439eaa706d399b 2.1-rc
63 2aa5b51f310fb3befd26bed99c02267f5c12c734 2.1
63 2aa5b51f310fb3befd26bed99c02267f5c12c734 2.1
64 53e2cd303ecf8ca7c7eeebd785c34e5ed6b0f4a4 2.1.1
64 53e2cd303ecf8ca7c7eeebd785c34e5ed6b0f4a4 2.1.1
65 b9bd95e61b49c221c4cca24e6da7c946fc02f992 2.1.2
65 b9bd95e61b49c221c4cca24e6da7c946fc02f992 2.1.2
66 d9e2f09d5488c395ae9ddbb320ceacd24757e055 2.2-rc
66 d9e2f09d5488c395ae9ddbb320ceacd24757e055 2.2-rc
67 00182b3d087909e3c3ae44761efecdde8f319ef3 2.2
67 00182b3d087909e3c3ae44761efecdde8f319ef3 2.2
68 5983de86462c5a9f42a3ad0f5e90ce5b1d221d25 2.2.1
68 5983de86462c5a9f42a3ad0f5e90ce5b1d221d25 2.2.1
69 85a358df5bbbe404ca25730c9c459b34263441dc 2.2.2
69 85a358df5bbbe404ca25730c9c459b34263441dc 2.2.2
70 b013baa3898e117959984fc64c29d8c784d2f28b 2.2.3
70 b013baa3898e117959984fc64c29d8c784d2f28b 2.2.3
71 a06e2681dd1786e2354d84a5fa9c1c88dd4fa3e0 2.3-rc
71 a06e2681dd1786e2354d84a5fa9c1c88dd4fa3e0 2.3-rc
72 7f5094bb3f423fc799e471aac2aee81a7ce57a0b 2.3
72 7f5094bb3f423fc799e471aac2aee81a7ce57a0b 2.3
73 072209ae4ddb654eb2d5fd35bff358c738414432 2.3.1
73 072209ae4ddb654eb2d5fd35bff358c738414432 2.3.1
74 b3f0f9a39c4e1d0250048cd803ab03542d6f140a 2.3.2
74 b3f0f9a39c4e1d0250048cd803ab03542d6f140a 2.3.2
75 d118a4f4fd16d9b558ec3f3e87bfee772861d2b7 2.4-rc
75 d118a4f4fd16d9b558ec3f3e87bfee772861d2b7 2.4-rc
76 195ad823b5d58c68903a6153a25e3fb4ed25239d 2.4
76 195ad823b5d58c68903a6153a25e3fb4ed25239d 2.4
77 0c10cf8191469e7c3c8844922e17e71a176cb7cb 2.4.1
77 0c10cf8191469e7c3c8844922e17e71a176cb7cb 2.4.1
78 a4765077b65e6ae29ba42bab7834717b5072d5ba 2.4.2
78 a4765077b65e6ae29ba42bab7834717b5072d5ba 2.4.2
79 f5fbe15ca7449f2c9a3cf817c86d0ae68b307214 2.5-rc
79 f5fbe15ca7449f2c9a3cf817c86d0ae68b307214 2.5-rc
80 a6088c05e43a8aee0472ca3a4f6f8d7dd914ebbf 2.5
80 a6088c05e43a8aee0472ca3a4f6f8d7dd914ebbf 2.5
81 7511d4df752e61fe7ae4f3682e0a0008573b0402 2.5.1
81 7511d4df752e61fe7ae4f3682e0a0008573b0402 2.5.1
82 5b7175377babacce80a6c1e12366d8032a6d4340 2.5.2
82 5b7175377babacce80a6c1e12366d8032a6d4340 2.5.2
83 50c922c1b5145dab8baefefb0437d363b6a6c21c 2.5.3
83 50c922c1b5145dab8baefefb0437d363b6a6c21c 2.5.3
84 8a7bd2dccd44ed571afe7424cd7f95594f27c092 2.5.4
84 8a7bd2dccd44ed571afe7424cd7f95594f27c092 2.5.4
85 292cd385856d98bacb2c3086f8897bc660c2beea 2.6-rc
85 292cd385856d98bacb2c3086f8897bc660c2beea 2.6-rc
86 23f785b38af38d2fca6b8f3db56b8007a84cd73a 2.6
86 23f785b38af38d2fca6b8f3db56b8007a84cd73a 2.6
87 ddc7a6be20212d18f3e27d9d7e6f079a66d96f21 2.6.1
87 ddc7a6be20212d18f3e27d9d7e6f079a66d96f21 2.6.1
88 cceaf7af4c9e9e6fa2dbfdcfe9856c5da69c4ffd 2.6.2
88 cceaf7af4c9e9e6fa2dbfdcfe9856c5da69c4ffd 2.6.2
89 009794acc6e37a650f0fae37872e733382ac1c0c 2.6.3
89 009794acc6e37a650f0fae37872e733382ac1c0c 2.6.3
90 f0d7721d7322dcfb5af33599c2543f27335334bb 2.7-rc
90 f0d7721d7322dcfb5af33599c2543f27335334bb 2.7-rc
91 f37b5a17e6a0ee17afde2cdde5393dd74715fb58 2.7
91 f37b5a17e6a0ee17afde2cdde5393dd74715fb58 2.7
92 335a558f81dc73afeab4d7be63617392b130117f 2.7.1
92 335a558f81dc73afeab4d7be63617392b130117f 2.7.1
93 e7fa36d2ad3a7944a52dca126458d6f482db3524 2.7.2
93 e7fa36d2ad3a7944a52dca126458d6f482db3524 2.7.2
94 1596f2d8f2421314b1ddead8f7d0c91009358994 2.8-rc
94 1596f2d8f2421314b1ddead8f7d0c91009358994 2.8-rc
95 d825e4025e39d1c39db943cdc89818abd0a87c27 2.8
95 d825e4025e39d1c39db943cdc89818abd0a87c27 2.8
96 209e04a06467e2969c0cc6501335be0406d46ef0 2.8.1
96 209e04a06467e2969c0cc6501335be0406d46ef0 2.8.1
97 ca387377df7a3a67dbb90b6336b781cdadc3ef41 2.8.2
97 ca387377df7a3a67dbb90b6336b781cdadc3ef41 2.8.2
98 8862469e16f9236208581b20de5f96bd13cc039d 2.9-rc
98 8862469e16f9236208581b20de5f96bd13cc039d 2.9-rc
99 3cec5134e9c4bceab6a00c60f52a4f80677a78f2 2.9
99 3cec5134e9c4bceab6a00c60f52a4f80677a78f2 2.9
100 b96cb15ec9e04d8ac5ee08b34fcbbe4200588965 2.9.1
100 b96cb15ec9e04d8ac5ee08b34fcbbe4200588965 2.9.1
101 3f83fc5cfe715d292069ee8417c83804f6c6c1e4 2.9.2
101 3f83fc5cfe715d292069ee8417c83804f6c6c1e4 2.9.2
102 564f55b251224f16508dd1311452db7780dafe2b 3.0-rc
102 564f55b251224f16508dd1311452db7780dafe2b 3.0-rc
103 2195ac506c6ababe86985b932f4948837c0891b5 3.0
103 2195ac506c6ababe86985b932f4948837c0891b5 3.0
104 269c80ee5b3cb3684fa8edc61501b3506d02eb10 3.0.1
104 269c80ee5b3cb3684fa8edc61501b3506d02eb10 3.0.1
105 2d8cd3d0e83c7336c0cb45a9f88638363f993848 3.0.2
105 2d8cd3d0e83c7336c0cb45a9f88638363f993848 3.0.2
106 6c36dc6cd61a0e1b563f1d51e55bdf4dacf12162 3.1-rc
106 6c36dc6cd61a0e1b563f1d51e55bdf4dacf12162 3.1-rc
107 3178e49892020336491cdc6945885c4de26ffa8b 3.1
107 3178e49892020336491cdc6945885c4de26ffa8b 3.1
108 5dc91146f35369949ea56b40172308158b59063a 3.1.1
108 5dc91146f35369949ea56b40172308158b59063a 3.1.1
109 f768c888aaa68d12dd7f509dcc7f01c9584357d0 3.1.2
109 f768c888aaa68d12dd7f509dcc7f01c9584357d0 3.1.2
110 7f8d16af8cae246fa5a48e723d48d58b015aed94 3.2-rc
110 7f8d16af8cae246fa5a48e723d48d58b015aed94 3.2-rc
111 ced632394371a36953ce4d394f86278ae51a2aae 3.2
111 ced632394371a36953ce4d394f86278ae51a2aae 3.2
112 643c58303fb0ec020907af28b9e486be299ba043 3.2.1
112 643c58303fb0ec020907af28b9e486be299ba043 3.2.1
113 902554884335e5ca3661d63be9978eb4aec3f68a 3.2.2
113 902554884335e5ca3661d63be9978eb4aec3f68a 3.2.2
114 6dad422ecc5adb63d9fa649eeb8e05a5f9bc4900 3.2.3
114 6dad422ecc5adb63d9fa649eeb8e05a5f9bc4900 3.2.3
115 1265a3a71d75396f5d4cf6935ae7d9ba5407a547 3.2.4
115 1265a3a71d75396f5d4cf6935ae7d9ba5407a547 3.2.4
116 db8e3f7948b1fdeb9ad12d448fc3525759908b9f 3.3-rc
116 db8e3f7948b1fdeb9ad12d448fc3525759908b9f 3.3-rc
117 fbdd5195528fae4f41feebc1838215c110b25d6a 3.3
117 fbdd5195528fae4f41feebc1838215c110b25d6a 3.3
118 5b4ed033390bf6e2879c8f5c28c84e1ee3b87231 3.3.1
118 5b4ed033390bf6e2879c8f5c28c84e1ee3b87231 3.3.1
119 07a92bbd02e5e3a625e0820389b47786b02b2cea 3.3.2
119 07a92bbd02e5e3a625e0820389b47786b02b2cea 3.3.2
120 2e2e9a0750f91a6fe0ad88e4de34f8efefdcab08 3.3.3
120 2e2e9a0750f91a6fe0ad88e4de34f8efefdcab08 3.3.3
121 e89f909edffad558b56f4affa8239e4832f88de0 3.4-rc
121 e89f909edffad558b56f4affa8239e4832f88de0 3.4-rc
122 8cc6036bca532e06681c5a8fa37efaa812de67b5 3.4
122 8cc6036bca532e06681c5a8fa37efaa812de67b5 3.4
123 ed18f4acf435a2824c6f49fba40f42b9df5da7ad 3.4.1
123 ed18f4acf435a2824c6f49fba40f42b9df5da7ad 3.4.1
124 540cd0ddac49c1125b2e013aa2ff18ecbd4dd954 3.4.2
124 540cd0ddac49c1125b2e013aa2ff18ecbd4dd954 3.4.2
125 96a38d44ba093bd1d1ecfd34119e94056030278b 3.5-rc
125 96a38d44ba093bd1d1ecfd34119e94056030278b 3.5-rc
126 21aa1c313b05b1a85f8ffa1120d51579ddf6bf24 3.5
126 21aa1c313b05b1a85f8ffa1120d51579ddf6bf24 3.5
127 1a45e49a6bed023deb229102a8903234d18054d3 3.5.1
127 1a45e49a6bed023deb229102a8903234d18054d3 3.5.1
128 9a466b9f9792e3ad7ae3fc6c43c3ff2e136b718d 3.5.2
128 9a466b9f9792e3ad7ae3fc6c43c3ff2e136b718d 3.5.2
129 b66e3ca0b90c3095ea28dfd39aa24247bebf5c20 3.6-rc
129 b66e3ca0b90c3095ea28dfd39aa24247bebf5c20 3.6-rc
130 47dd34f2e7272be9e3b2a5a83cd0d20be44293f4 3.6
130 47dd34f2e7272be9e3b2a5a83cd0d20be44293f4 3.6
131 1aa5083cbebbe7575c88f3402ab377539b484897 3.6.1
131 1aa5083cbebbe7575c88f3402ab377539b484897 3.6.1
132 2d437a0f3355834a9485bbbeb30a52a052c98f19 3.6.2
132 2d437a0f3355834a9485bbbeb30a52a052c98f19 3.6.2
133 ea389970c08449440587712117f178d33bab3f1e 3.6.3
133 ea389970c08449440587712117f178d33bab3f1e 3.6.3
134 158bdc8965720ca4061f8f8d806563cfc7cdb62e 3.7-rc
134 158bdc8965720ca4061f8f8d806563cfc7cdb62e 3.7-rc
135 2408645de650d8a29a6ce9e7dce601d8dd0d1474 3.7
135 2408645de650d8a29a6ce9e7dce601d8dd0d1474 3.7
136 b698abf971e7377d9b7ec7fc8c52df45255b0329 3.7.1
136 b698abf971e7377d9b7ec7fc8c52df45255b0329 3.7.1
137 d493d64757eb45ada99fcb3693e479a51b7782da 3.7.2
137 d493d64757eb45ada99fcb3693e479a51b7782da 3.7.2
138 ae279d4a19e9683214cbd1fe8298cf0b50571432 3.7.3
138 ae279d4a19e9683214cbd1fe8298cf0b50571432 3.7.3
139 740156eedf2c450aee58b1a90b0e826f47c5da64 3.8-rc
139 740156eedf2c450aee58b1a90b0e826f47c5da64 3.8-rc
140 f85de28eae32e7d3064b1a1321309071bbaaa069 3.8
140 f85de28eae32e7d3064b1a1321309071bbaaa069 3.8
141 a56296f55a5e1038ea5016dace2076b693c28a56 3.8.1
141 a56296f55a5e1038ea5016dace2076b693c28a56 3.8.1
142 aaabed77791a75968a12b8c43ad263631a23ee81 3.8.2
142 aaabed77791a75968a12b8c43ad263631a23ee81 3.8.2
143 a9764ab80e11bcf6a37255db7dd079011f767c6c 3.8.3
143 a9764ab80e11bcf6a37255db7dd079011f767c6c 3.8.3
144 26a5d605b8683a292bb89aea11f37a81b06ac016 3.8.4
144 26a5d605b8683a292bb89aea11f37a81b06ac016 3.8.4
145 519bb4f9d3a47a6e83c2b414d58811ed38f503c2 3.9-rc
145 519bb4f9d3a47a6e83c2b414d58811ed38f503c2 3.9-rc
146 299546f84e68dbb9bd026f0f3a974ce4bdb93686 3.9
146 299546f84e68dbb9bd026f0f3a974ce4bdb93686 3.9
147 ccd436f7db6d5d7b9af89715179b911d031d44f1 3.9.1
147 ccd436f7db6d5d7b9af89715179b911d031d44f1 3.9.1
148 149433e68974eb5c63ccb03f794d8b57339a80c4 3.9.2
148 149433e68974eb5c63ccb03f794d8b57339a80c4 3.9.2
149 438173c415874f6ac653efc1099dec9c9150e90f 4.0-rc
149 438173c415874f6ac653efc1099dec9c9150e90f 4.0-rc
150 eab27446995210c334c3d06f1a659e3b9b5da769 4.0
150 eab27446995210c334c3d06f1a659e3b9b5da769 4.0
151 b3b1ae98f6a0e14c1e1ba806a6c18e193b6dae5c 4.0.1
151 b3b1ae98f6a0e14c1e1ba806a6c18e193b6dae5c 4.0.1
152 e69874dc1f4e142746ff3df91e678a09c6fc208c 4.0.2
152 e69874dc1f4e142746ff3df91e678a09c6fc208c 4.0.2
153 a1dd2c0c479e0550040542e392e87bc91262517e 4.1-rc
153 a1dd2c0c479e0550040542e392e87bc91262517e 4.1-rc
154 e1526da1e6d84e03146151c9b6e6950fe9a83d7d 4.1
154 e1526da1e6d84e03146151c9b6e6950fe9a83d7d 4.1
155 25703b624d27e3917d978af56d6ad59331e0464a 4.1.1
155 25703b624d27e3917d978af56d6ad59331e0464a 4.1.1
156 ed5b25874d998ababb181a939dd37a16ea644435 4.1.2
156 ed5b25874d998ababb181a939dd37a16ea644435 4.1.2
157 77eaf9539499a1b8be259ffe7ada787d07857f80 4.1.3
157 77eaf9539499a1b8be259ffe7ada787d07857f80 4.1.3
158 616e788321cc4ae9975b7f0c54c849f36d82182b 4.2-rc
158 616e788321cc4ae9975b7f0c54c849f36d82182b 4.2-rc
159 bb96d4a497432722623ae60d9bc734a1e360179e 4.2
159 bb96d4a497432722623ae60d9bc734a1e360179e 4.2
160 c850f0ed54c1d42f9aa079ad528f8127e5775217 4.2.1
160 c850f0ed54c1d42f9aa079ad528f8127e5775217 4.2.1
161 26c49ed51a698ec016d2b4c6b44ca3c3f73cc788 4.2.2
161 26c49ed51a698ec016d2b4c6b44ca3c3f73cc788 4.2.2
162 857876ebaed4e315f63157bd157d6ce553c7ab73 4.3-rc
162 857876ebaed4e315f63157bd157d6ce553c7ab73 4.3-rc
163 5544af8622863796a0027566f6b646e10d522c4c 4.3
163 5544af8622863796a0027566f6b646e10d522c4c 4.3
164 943c91326b23954e6e1c6960d0239511f9530258 4.2.3
164 943c91326b23954e6e1c6960d0239511f9530258 4.2.3
165 3fee7f7d2da04226914c2258cc2884dc27384fd7 4.3.1
165 3fee7f7d2da04226914c2258cc2884dc27384fd7 4.3.1
166 920977f72c7b70acfdaf56ab35360584d7845827 4.3.2
166 920977f72c7b70acfdaf56ab35360584d7845827 4.3.2
167 2f427b57bf9019c6dc3750baa539dc22c1be50f6 4.3.3
167 2f427b57bf9019c6dc3750baa539dc22c1be50f6 4.3.3
168 1e2454b60e5936f5e77498cab2648db469504487 4.4-rc
168 1e2454b60e5936f5e77498cab2648db469504487 4.4-rc
169 0ccb43d4cf01d013ae05917ec4f305509f851b2d 4.4
169 0ccb43d4cf01d013ae05917ec4f305509f851b2d 4.4
170 cabc840ffdee8a72f3689fb77dd74d04fdc2bc04 4.4.1
170 cabc840ffdee8a72f3689fb77dd74d04fdc2bc04 4.4.1
171 a92b9f8e11ba330614cdfd6af0e03b15c1ff3797 4.4.2
171 a92b9f8e11ba330614cdfd6af0e03b15c1ff3797 4.4.2
172 27b6df1b5adbdf647cf5c6675b40575e1b197c60 4.5-rc
172 27b6df1b5adbdf647cf5c6675b40575e1b197c60 4.5-rc
173 d334afc585e29577f271c5eda03378736a16ca6b 4.5
173 d334afc585e29577f271c5eda03378736a16ca6b 4.5
174 369aadf7a3264b03c8b09efce715bc41e6ab4a9b 4.5.1
174 369aadf7a3264b03c8b09efce715bc41e6ab4a9b 4.5.1
175 8bba684efde7f45add05f737952093bb2aa07155 4.5.2
175 8bba684efde7f45add05f737952093bb2aa07155 4.5.2
176 7de7bd407251af2bc98e5b809c8598ee95830daf 4.5.3
176 7de7bd407251af2bc98e5b809c8598ee95830daf 4.5.3
177 ed5448edcbfa747b9154099e18630e49024fd47b 4.6rc0
177 ed5448edcbfa747b9154099e18630e49024fd47b 4.6rc0
178 1ec874717d8a93b19e0d50628443e0ee5efab3a9 4.6rc1
178 1ec874717d8a93b19e0d50628443e0ee5efab3a9 4.6rc1
179 6614cac550aea66d19c601e45efd1b7bd08d7c40 4.6
179 6614cac550aea66d19c601e45efd1b7bd08d7c40 4.6
180 9c5ced5276d6e7d54f7c3dadf5247b7ee98ec79c 4.6.1
180 9c5ced5276d6e7d54f7c3dadf5247b7ee98ec79c 4.6.1
181 0b63a6743010dfdbf8a8154186e119949bdaa1cc 4.6.2
181 0b63a6743010dfdbf8a8154186e119949bdaa1cc 4.6.2
182 e90130af47ce8dd53a3109aed9d15876b3e7dee8 4.7rc0
182 e90130af47ce8dd53a3109aed9d15876b3e7dee8 4.7rc0
183 33ac6a72308a215e6086fbced347ec10aa963b0a 4.7
183 33ac6a72308a215e6086fbced347ec10aa963b0a 4.7
184 ede3bf31fe63677fdf5bd8db687977d4e3d792ed 4.7.1
184 ede3bf31fe63677fdf5bd8db687977d4e3d792ed 4.7.1
185 5405cb1a79010ac50c58cd84e6f50c4556bf2a4c 4.7.2
185 5405cb1a79010ac50c58cd84e6f50c4556bf2a4c 4.7.2
186 956ec6f1320df26f3133ec40f3de866ea0695fd7 4.8rc0
186 956ec6f1320df26f3133ec40f3de866ea0695fd7 4.8rc0
187 a91a2837150bdcb27ae76b3646e6c93cd6a15904 4.8
187 a91a2837150bdcb27ae76b3646e6c93cd6a15904 4.8
188 1c8c54cf97256f4468da2eb4dbee24f7f3888e71 4.8.1
@@ -1,436 +1,437 @@
1 # extdiff.py - external diff program support for mercurial
1 # extdiff.py - external diff program support for mercurial
2 #
2 #
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''command to allow external programs to compare revisions
8 '''command to allow external programs to compare revisions
9
9
10 The extdiff Mercurial extension allows you to use external programs
10 The extdiff Mercurial extension allows you to use external programs
11 to compare revisions, or a revision with the working directory. The external
12 diff programs are called with a configurable set of options and two
12 diff programs are called with a configurable set of options and two
13 non-option arguments: paths to directories containing snapshots of
13 non-option arguments: paths to directories containing snapshots of
14 files to compare.
14 files to compare.
15
15
16 If there is more than one file being compared and the "child" revision
16 If there is more than one file being compared and the "child" revision
17 is the working directory, any modifications made in the external diff
17 is the working directory, any modifications made in the external diff
18 program will be copied back to the working directory from the temporary
18 program will be copied back to the working directory from the temporary
19 directory.
19 directory.
20
20
21 The extdiff extension also allows you to configure new diff commands, so
21 The extdiff extension also allows you to configure new diff commands, so
22 you do not need to type :hg:`extdiff -p kdiff3` always. ::
22 you do not need to type :hg:`extdiff -p kdiff3` always. ::
23
23
24 [extdiff]
24 [extdiff]
25 # add new command that runs GNU diff(1) in 'context diff' mode
25 # add new command that runs GNU diff(1) in 'context diff' mode
26 cdiff = gdiff -Nprc5
26 cdiff = gdiff -Nprc5
27 ## or the old way:
27 ## or the old way:
28 #cmd.cdiff = gdiff
28 #cmd.cdiff = gdiff
29 #opts.cdiff = -Nprc5
29 #opts.cdiff = -Nprc5
30
30
31 # add new command called meld, runs meld (no need to name twice). If
31 # add new command called meld, runs meld (no need to name twice). If
32 # the meld executable is not available, the meld tool in [merge-tools]
32 # the meld executable is not available, the meld tool in [merge-tools]
33 # will be used, if available
33 # will be used, if available
34 meld =
34 meld =
35
35
36 # add new command called vimdiff, runs gvimdiff with DirDiff plugin
36 # add new command called vimdiff, runs gvimdiff with DirDiff plugin
37 # (see http://www.vim.org/scripts/script.php?script_id=102). Non-English
38 # users should be sure to put "let g:DirDiffDynamicDiffText = 1" in
39 # their .vimrc
40 vimdiff = gvim -f "+next" \\
40 vimdiff = gvim -f "+next" \\
41 "+execute 'DirDiff' fnameescape(argv(0)) fnameescape(argv(1))"
41 "+execute 'DirDiff' fnameescape(argv(0)) fnameescape(argv(1))"
42
42
43 Tool arguments can include variables that are expanded at runtime::
43 Tool arguments can include variables that are expanded at runtime::
44
44
45 $parent1, $plabel1 - filename, descriptive label of first parent
45 $parent1, $plabel1 - filename, descriptive label of first parent
46 $child, $clabel - filename, descriptive label of child revision
46 $child, $clabel - filename, descriptive label of child revision
47 $parent2, $plabel2 - filename, descriptive label of second parent
47 $parent2, $plabel2 - filename, descriptive label of second parent
48 $root - repository root
48 $root - repository root
49 $parent is an alias for $parent1.
49 $parent is an alias for $parent1.
50
50
51 The extdiff extension will look in your [diff-tools] and [merge-tools]
51 The extdiff extension will look in your [diff-tools] and [merge-tools]
52 sections for diff tool arguments, when none are specified in [extdiff].
52 sections for diff tool arguments, when none are specified in [extdiff].
53
53
54 ::
54 ::
55
55
56 [extdiff]
56 [extdiff]
57 kdiff3 =
57 kdiff3 =
58
58
59 [diff-tools]
59 [diff-tools]
60 kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child
60 kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child
61
61
62 You can use -I/-X and a list of file or directory names as with the normal
63 :hg:`diff` command. The extdiff extension makes snapshots of only
64 needed files, so running the external diff program will actually be
64 needed files, so running the external diff program will actually be
65 pretty fast (at least faster than having to compare the entire tree).
65 pretty fast (at least faster than having to compare the entire tree).
66 '''
66 '''
67
67
68 from __future__ import absolute_import
68 from __future__ import absolute_import
69
69
70 import os
70 import os
71 import re
71 import re
72 import shutil
72 import shutil
73 import stat
73 import stat
74
74
75 from mercurial.i18n import _
75 from mercurial.i18n import _
76 from mercurial.node import (
76 from mercurial.node import (
77 nullid,
77 nullid,
78 short,
78 short,
79 )
79 )
80 from mercurial import (
80 from mercurial import (
81 archival,
81 archival,
82 cmdutil,
82 cmdutil,
83 error,
83 error,
84 filemerge,
84 filemerge,
85 formatter,
85 formatter,
86 pycompat,
86 pycompat,
87 registrar,
87 registrar,
88 scmutil,
88 scmutil,
89 util,
89 util,
90 )
90 )
91 from mercurial.utils import (
91 from mercurial.utils import (
92 procutil,
92 procutil,
93 stringutil,
93 stringutil,
94 )
94 )
95
95
96 cmdtable = {}
96 cmdtable = {}
97 command = registrar.command(cmdtable)
97 command = registrar.command(cmdtable)
98
98
99 configtable = {}
99 configtable = {}
100 configitem = registrar.configitem(configtable)
100 configitem = registrar.configitem(configtable)
101
101
102 configitem('extdiff', br'opts\..*',
102 configitem('extdiff', br'opts\..*',
103 default='',
103 default='',
104 generic=True,
104 generic=True,
105 )
105 )
106
106
107 configitem('diff-tools', br'.*\.diffargs$',
107 configitem('diff-tools', br'.*\.diffargs$',
108 default=None,
108 default=None,
109 generic=True,
109 generic=True,
110 )
110 )
111
111
112 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
112 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
113 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
113 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
114 # be specifying the version(s) of Mercurial they are tested with, or
114 # be specifying the version(s) of Mercurial they are tested with, or
115 # leave the attribute unspecified.
115 # leave the attribute unspecified.
116 testedwith = 'ships-with-hg-core'
116 testedwith = 'ships-with-hg-core'
117
117
118 def snapshot(ui, repo, files, node, tmproot, listsubrepos):
118 def snapshot(ui, repo, files, node, tmproot, listsubrepos):
119 '''snapshot files as of some revision
119 '''snapshot files as of some revision
120 if not using snapshot, -I/-X does not work and recursive diff
120 if not using snapshot, -I/-X does not work and recursive diff
121 in tools like kdiff3 and meld displays too many files.'''
121 in tools like kdiff3 and meld displays too many files.'''
122 dirname = os.path.basename(repo.root)
122 dirname = os.path.basename(repo.root)
123 if dirname == "":
123 if dirname == "":
124 dirname = "root"
124 dirname = "root"
125 if node is not None:
125 if node is not None:
126 dirname = '%s.%s' % (dirname, short(node))
126 dirname = '%s.%s' % (dirname, short(node))
127 base = os.path.join(tmproot, dirname)
127 base = os.path.join(tmproot, dirname)
128 os.mkdir(base)
128 os.mkdir(base)
129 fnsandstat = []
129 fnsandstat = []
130
130
131 if node is not None:
131 if node is not None:
132 ui.note(_('making snapshot of %d files from rev %s\n') %
132 ui.note(_('making snapshot of %d files from rev %s\n') %
133 (len(files), short(node)))
133 (len(files), short(node)))
134 else:
134 else:
135 ui.note(_('making snapshot of %d files from working directory\n') %
135 ui.note(_('making snapshot of %d files from working directory\n') %
136 (len(files)))
136 (len(files)))
137
137
138 if files:
138 if files:
139 repo.ui.setconfig("ui", "archivemeta", False)
139 repo.ui.setconfig("ui", "archivemeta", False)
140
140
141 archival.archive(repo, base, node, 'files',
141 archival.archive(repo, base, node, 'files',
142 match=scmutil.matchfiles(repo, files),
142 match=scmutil.matchfiles(repo, files),
143 subrepos=listsubrepos)
143 subrepos=listsubrepos)
144
144
145 for fn in sorted(files):
145 for fn in sorted(files):
146 wfn = util.pconvert(fn)
146 wfn = util.pconvert(fn)
147 ui.note(' %s\n' % wfn)
147 ui.note(' %s\n' % wfn)
148
148
149 if node is None:
149 if node is None:
150 dest = os.path.join(base, wfn)
150 dest = os.path.join(base, wfn)
151
151
152 fnsandstat.append((dest, repo.wjoin(fn), os.lstat(dest)))
152 fnsandstat.append((dest, repo.wjoin(fn), os.lstat(dest)))
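# Only populated when snapshotting the working directory (node is None): each
# fnsandstat entry pairs the snapshot copy with its working-directory path and the
# file's stat taken before the external tool runs; dodiff() re-stats the copy
# afterwards and, if mtime, size or the exec bit changed, writes the edit back to
# the working copy.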
153 return dirname, fnsandstat
153 return dirname, fnsandstat
154
154
155 def dodiff(ui, repo, cmdline, pats, opts):
155 def dodiff(ui, repo, cmdline, pats, opts):
156 '''Do the actual diff:
156 '''Do the actual diff:
157
157
158 - copy to a temp structure if diffing 2 internal revisions
158 - copy to a temp structure if diffing 2 internal revisions
159 - copy to a temp structure if diffing working revision with
159 - copy to a temp structure if diffing working revision with
160 another one and more than 1 file is changed
160 another one and more than 1 file is changed
161 - just invoke the diff for a single file in the working dir
161 - just invoke the diff for a single file in the working dir
162 '''
162 '''
163
163
164 revs = opts.get('rev')
164 revs = opts.get('rev')
165 change = opts.get('change')
165 change = opts.get('change')
166 do3way = '$parent2' in cmdline
166 do3way = '$parent2' in cmdline
167
167
168 if revs and change:
168 if revs and change:
169 msg = _('cannot specify --rev and --change at the same time')
169 msg = _('cannot specify --rev and --change at the same time')
170 raise error.Abort(msg)
170 raise error.Abort(msg)
171 elif change:
171 elif change:
172 ctx2 = scmutil.revsingle(repo, change, None)
172 ctx2 = scmutil.revsingle(repo, change, None)
173 ctx1a, ctx1b = ctx2.p1(), ctx2.p2()
173 ctx1a, ctx1b = ctx2.p1(), ctx2.p2()
174 else:
174 else:
175 ctx1a, ctx2 = scmutil.revpair(repo, revs)
175 ctx1a, ctx2 = scmutil.revpair(repo, revs)
176 if not revs:
176 if not revs:
177 ctx1b = repo[None].p2()
177 ctx1b = repo[None].p2()
178 else:
178 else:
179 ctx1b = repo[nullid]
179 ctx1b = repo[nullid]
180
180
181 node1a = ctx1a.node()
181 node1a = ctx1a.node()
182 node1b = ctx1b.node()
182 node1b = ctx1b.node()
183 node2 = ctx2.node()
183 node2 = ctx2.node()
184
184
185 # Disable 3-way merge if there is only one parent
185 # Disable 3-way merge if there is only one parent
186 if do3way:
186 if do3way:
187 if node1b == nullid:
187 if node1b == nullid:
188 do3way = False
188 do3way = False
189
189
190 subrepos = opts.get('subrepos')
191
191
192 matcher = scmutil.match(repo[node2], pats, opts)
192 matcher = scmutil.match(repo[node2], pats, opts)
193
193
194 if opts.get('patch'):
194 if opts.get('patch'):
195 if subrepos:
195 if subrepos:
196 raise error.Abort(_('--patch cannot be used with --subrepos'))
196 raise error.Abort(_('--patch cannot be used with --subrepos'))
197 if node2 is None:
197 if node2 is None:
198 raise error.Abort(_('--patch requires two revisions'))
198 raise error.Abort(_('--patch requires two revisions'))
199 else:
199 else:
200 mod_a, add_a, rem_a = map(set, repo.status(node1a, node2, matcher,
200 mod_a, add_a, rem_a = map(set, repo.status(node1a, node2, matcher,
201 listsubrepos=subrepos)[:3])
201 listsubrepos=subrepos)[:3])
202 if do3way:
202 if do3way:
203 mod_b, add_b, rem_b = map(set,
203 mod_b, add_b, rem_b = map(set,
204 repo.status(node1b, node2, matcher,
204 repo.status(node1b, node2, matcher,
205 listsubrepos=subrepos)[:3])
205 listsubrepos=subrepos)[:3])
206 else:
206 else:
207 mod_b, add_b, rem_b = set(), set(), set()
207 mod_b, add_b, rem_b = set(), set(), set()
208 modadd = mod_a | add_a | mod_b | add_b
208 modadd = mod_a | add_a | mod_b | add_b
209 common = modadd | rem_a | rem_b
209 common = modadd | rem_a | rem_b
210 if not common:
210 if not common:
211 return 0
211 return 0
212
212
213 tmproot = pycompat.mkdtemp(prefix='extdiff.')
213 tmproot = pycompat.mkdtemp(prefix='extdiff.')
214 try:
214 try:
215 if not opts.get('patch'):
215 if not opts.get('patch'):
216 # Always make a copy of node1a (and node1b, if applicable)
216 # Always make a copy of node1a (and node1b, if applicable)
217 dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
217 dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
218 dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot,
218 dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot,
219 subrepos)[0]
219 subrepos)[0]
220 rev1a = '@%d' % repo[node1a].rev()
220 rev1a = '@%d' % repo[node1a].rev()
221 if do3way:
221 if do3way:
222 dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
222 dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
223 dir1b = snapshot(ui, repo, dir1b_files, node1b, tmproot,
223 dir1b = snapshot(ui, repo, dir1b_files, node1b, tmproot,
224 subrepos)[0]
224 subrepos)[0]
225 rev1b = '@%d' % repo[node1b].rev()
225 rev1b = '@%d' % repo[node1b].rev()
226 else:
226 else:
227 dir1b = None
227 dir1b = None
228 rev1b = ''
228 rev1b = ''
229
229
230 fnsandstat = []
230 fnsandstat = []
231
231
232 # If node2 is not the wc or there is >1 change, copy it
233 dir2root = ''
233 dir2root = ''
234 rev2 = ''
234 rev2 = ''
235 if node2:
235 if node2:
236 dir2 = snapshot(ui, repo, modadd, node2, tmproot, subrepos)[0]
236 dir2 = snapshot(ui, repo, modadd, node2, tmproot, subrepos)[0]
237 rev2 = '@%d' % repo[node2].rev()
237 rev2 = '@%d' % repo[node2].rev()
238 elif len(common) > 1:
238 elif len(common) > 1:
239 # we only actually need to get the files to copy back to
240 # the working dir in this case (because the other cases
241 # are: diffing 2 revisions or single file -- in which case
242 # the file is already directly passed to the diff tool).
243 dir2, fnsandstat = snapshot(ui, repo, modadd, None, tmproot,
243 dir2, fnsandstat = snapshot(ui, repo, modadd, None, tmproot,
244 subrepos)
244 subrepos)
245 else:
245 else:
246 # This lets the diff tool open the changed file directly
246 # This lets the diff tool open the changed file directly
247 dir2 = ''
247 dir2 = ''
248 dir2root = repo.root
248 dir2root = repo.root
249
249
250 label1a = rev1a
250 label1a = rev1a
251 label1b = rev1b
251 label1b = rev1b
252 label2 = rev2
252 label2 = rev2
253
253
254 # If only one change, diff the files instead of the directories
254 # If only one change, diff the files instead of the directories
255 # Handle bogus modifies correctly by checking if the files exist
255 # Handle bogus modifies correctly by checking if the files exist
256 if len(common) == 1:
256 if len(common) == 1:
257 common_file = util.localpath(common.pop())
257 common_file = util.localpath(common.pop())
258 dir1a = os.path.join(tmproot, dir1a, common_file)
258 dir1a = os.path.join(tmproot, dir1a, common_file)
259 label1a = common_file + rev1a
259 label1a = common_file + rev1a
260 if not os.path.isfile(dir1a):
260 if not os.path.isfile(dir1a):
261 dir1a = os.devnull
261 dir1a = os.devnull
262 if do3way:
262 if do3way:
263 dir1b = os.path.join(tmproot, dir1b, common_file)
263 dir1b = os.path.join(tmproot, dir1b, common_file)
264 label1b = common_file + rev1b
264 label1b = common_file + rev1b
265 if not os.path.isfile(dir1b):
265 if not os.path.isfile(dir1b):
266 dir1b = os.devnull
266 dir1b = os.devnull
267 dir2 = os.path.join(dir2root, dir2, common_file)
267 dir2 = os.path.join(dir2root, dir2, common_file)
268 label2 = common_file + rev2
268 label2 = common_file + rev2
269 else:
269 else:
270 template = 'hg-%h.patch'
270 template = 'hg-%h.patch'
271 with formatter.nullformatter(ui, 'extdiff', {}) as fm:
271 with formatter.nullformatter(ui, 'extdiff', {}) as fm:
272 cmdutil.export(repo, [repo[node1a].rev(), repo[node2].rev()],
272 cmdutil.export(repo, [repo[node1a].rev(), repo[node2].rev()],
273 fm,
273 fm,
274 fntemplate=repo.vfs.reljoin(tmproot, template),
274 fntemplate=repo.vfs.reljoin(tmproot, template),
275 match=matcher)
275 match=matcher)
276 label1a = cmdutil.makefilename(repo[node1a], template)
276 label1a = cmdutil.makefilename(repo[node1a], template)
277 label2 = cmdutil.makefilename(repo[node2], template)
277 label2 = cmdutil.makefilename(repo[node2], template)
278 dir1a = repo.vfs.reljoin(tmproot, label1a)
278 dir1a = repo.vfs.reljoin(tmproot, label1a)
279 dir2 = repo.vfs.reljoin(tmproot, label2)
279 dir2 = repo.vfs.reljoin(tmproot, label2)
280 dir1b = None
280 dir1b = None
281 label1b = None
281 label1b = None
282 fnsandstat = []
282 fnsandstat = []
283
283
284 # Function to quote file/dir names in the argument string.
284 # Function to quote file/dir names in the argument string.
285 # When not operating in 3-way mode, an empty string is
285 # When not operating in 3-way mode, an empty string is
286 # returned for parent2
286 # returned for parent2
287 replace = {'parent': dir1a, 'parent1': dir1a, 'parent2': dir1b,
287 replace = {'parent': dir1a, 'parent1': dir1a, 'parent2': dir1b,
288 'plabel1': label1a, 'plabel2': label1b,
288 'plabel1': label1a, 'plabel2': label1b,
289 'clabel': label2, 'child': dir2,
289 'clabel': label2, 'child': dir2,
290 'root': repo.root}
290 'root': repo.root}
291 def quote(match):
291 def quote(match):
292 pre = match.group(2)
292 pre = match.group(2)
293 key = match.group(3)
293 key = match.group(3)
294 if not do3way and key == 'parent2':
294 if not do3way and key == 'parent2':
295 return pre
295 return pre
296 return pre + procutil.shellquote(replace[key])
296 return pre + procutil.shellquote(replace[key])
297
297
298 # Match parent2 first, so 'parent1?' will match both parent1 and parent
298 # Match parent2 first, so 'parent1?' will match both parent1 and parent
299 regex = (br'''(['"]?)([^\s'"$]*)'''
299 regex = (br'''(['"]?)([^\s'"$]*)'''
300 br'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1')
300 br'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1')
301 if not do3way and not re.search(regex, cmdline):
301 if not do3way and not re.search(regex, cmdline):
302 cmdline += ' $parent1 $child'
302 cmdline += ' $parent1 $child'
303 cmdline = re.sub(regex, quote, cmdline)
303 cmdline = re.sub(regex, quote, cmdline)
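# Illustrative expansion (hypothetical names): with do3way False, a configured command
#   kdiff3 $parent $child
# becomes something like
#   kdiff3 'repo.1234567890ab' 'repo'
# where both names are snapshot directories created above, and the command is run with
# the temporary directory as its working directory (see the ui.system call below).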
304
304
305 ui.debug('running %r in %s\n' % (pycompat.bytestr(cmdline), tmproot))
305 ui.debug('running %r in %s\n' % (pycompat.bytestr(cmdline), tmproot))
306 ui.system(cmdline, cwd=tmproot, blockedtag='extdiff')
306 ui.system(cmdline, cwd=tmproot, blockedtag='extdiff')
307
307
308 for copy_fn, working_fn, st in fnsandstat:
308 for copy_fn, working_fn, st in fnsandstat:
309 cpstat = os.lstat(copy_fn)
309 cpstat = os.lstat(copy_fn)
310 # Some tools copy the file and attributes, so mtime may not detect
310 # Some tools copy the file and attributes, so mtime may not detect
311 # all changes. A size check will detect more cases, but not all.
311 # all changes. A size check will detect more cases, but not all.
312 # The only certain way to detect every case is to diff all files,
312 # The only certain way to detect every case is to diff all files,
313 # which could be expensive.
313 # which could be expensive.
314 # copyfile() carries over the permission, so the mode check could
314 # copyfile() carries over the permission, so the mode check could
315 # be in an 'elif' branch, but for the case where the file has
315 # be in an 'elif' branch, but for the case where the file has
316 # changed without affecting mtime or size.
316 # changed without affecting mtime or size.
317 if (cpstat[stat.ST_MTIME] != st[stat.ST_MTIME]
317 if (cpstat[stat.ST_MTIME] != st[stat.ST_MTIME]
318 or cpstat.st_size != st.st_size
318 or cpstat.st_size != st.st_size
319 or (cpstat.st_mode & 0o100) != (st.st_mode & 0o100)):
319 or (cpstat.st_mode & 0o100) != (st.st_mode & 0o100)):
320 ui.debug('file changed while diffing. '
320 ui.debug('file changed while diffing. '
321 'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn))
321 'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn))
322 util.copyfile(copy_fn, working_fn)
322 util.copyfile(copy_fn, working_fn)
323
323
324 return 1
324 return 1
325 finally:
325 finally:
326 ui.note(_('cleaning up temp directory\n'))
326 ui.note(_('cleaning up temp directory\n'))
327 shutil.rmtree(tmproot)
327 shutil.rmtree(tmproot)
328
328
329 extdiffopts = [
329 extdiffopts = [
330 ('o', 'option', [],
330 ('o', 'option', [],
331 _('pass option to comparison program'), _('OPT')),
331 _('pass option to comparison program'), _('OPT')),
332 ('r', 'rev', [], _('revision'), _('REV')),
332 ('r', 'rev', [], _('revision'), _('REV')),
333 ('c', 'change', '', _('change made by revision'), _('REV')),
333 ('c', 'change', '', _('change made by revision'), _('REV')),
334 ('', 'patch', None, _('compare patches for two revisions'))
334 ('', 'patch', None, _('compare patches for two revisions'))
335 ] + cmdutil.walkopts + cmdutil.subrepoopts
335 ] + cmdutil.walkopts + cmdutil.subrepoopts
336
336
337 @command('extdiff',
337 @command('extdiff',
338 [('p', 'program', '', _('comparison program to run'), _('CMD')),
338 [('p', 'program', '', _('comparison program to run'), _('CMD')),
339 ] + extdiffopts,
339 ] + extdiffopts,
340 _('hg extdiff [OPT]... [FILE]...'),
340 _('hg extdiff [OPT]... [FILE]...'),
341 helpcategory=command.CATEGORY_FILE_CONTENTS,
341 helpcategory=command.CATEGORY_FILE_CONTENTS,
342 inferrepo=True)
342 inferrepo=True)
343 def extdiff(ui, repo, *pats, **opts):
343 def extdiff(ui, repo, *pats, **opts):
344 '''use external program to diff repository (or selected files)
344 '''use external program to diff repository (or selected files)
345
345
346 Show differences between revisions for the specified files, using
346 Show differences between revisions for the specified files, using
347 an external program. The default program used is diff, with
347 an external program. The default program used is diff, with
348 default options "-Npru".
348 default options "-Npru".
349
349
350 To select a different program, use the -p/--program option. The
350 To select a different program, use the -p/--program option. The
351 program will be passed the names of two directories to compare. To
351 program will be passed the names of two directories to compare. To
352 pass additional options to the program, use -o/--option. These
352 pass additional options to the program, use -o/--option. These
353 will be passed before the names of the directories to compare.
353 will be passed before the names of the directories to compare.
354
354
355 When two revision arguments are given, then changes are shown
355 When two revision arguments are given, then changes are shown
356 between those revisions. If only one revision is specified then
356 between those revisions. If only one revision is specified then
357 that revision is compared to the working directory, and, when no
357 that revision is compared to the working directory, and, when no
358 revisions are specified, the working directory files are compared
358 revisions are specified, the working directory files are compared
359 to its parent.'''
359 to its parent.'''
360 opts = pycompat.byteskwargs(opts)
360 opts = pycompat.byteskwargs(opts)
361 program = opts.get('program')
361 program = opts.get('program')
362 option = opts.get('option')
362 option = opts.get('option')
363 if not program:
363 if not program:
364 program = 'diff'
364 program = 'diff'
365 option = option or ['-Npru']
365 option = option or ['-Npru']
366 cmdline = ' '.join(map(procutil.shellquote, [program] + option))
366 cmdline = ' '.join(map(procutil.shellquote, [program] + option))
367 return dodiff(ui, repo, cmdline, pats, opts)
367 return dodiff(ui, repo, cmdline, pats, opts)
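# Typical invocations (sketch; kdiff3 is just an example tool that must be installed
# separately, and the tag names refer to releases listed in .hgtags above):
#   hg extdiff                          # working directory vs. its parent, using 'diff -Npru'
#   hg extdiff -p kdiff3 -r 4.7 -r 4.8  # compare two tagged releases with kdiff3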
368
368
369 class savedcmd(object):
369 class savedcmd(object):
370 """use external program to diff repository (or selected files)
370 """use external program to diff repository (or selected files)
371
371
372 Show differences between revisions for the specified files, using
372 Show differences between revisions for the specified files, using
373 the following program::
373 the following program::
374
374
375 %(path)s
375 %(path)s
376
376
377 When two revision arguments are given, then changes are shown
377 When two revision arguments are given, then changes are shown
378 between those revisions. If only one revision is specified then
378 between those revisions. If only one revision is specified then
379 that revision is compared to the working directory, and, when no
379 that revision is compared to the working directory, and, when no
380 revisions are specified, the working directory files are compared
380 revisions are specified, the working directory files are compared
381 to its parent.
381 to its parent.
382 """
382 """
383
383
384 def __init__(self, path, cmdline):
384 def __init__(self, path, cmdline):
385 # We can't pass non-ASCII through docstrings (and path is
385 # We can't pass non-ASCII through docstrings (and path is
386 # in an unknown encoding anyway), but avoid double separators on
386 # in an unknown encoding anyway), but avoid double separators on
387 # Windows
387 # Windows
388 docpath = stringutil.escapestr(path).replace(b'\\\\', b'\\')
388 docpath = stringutil.escapestr(path).replace(b'\\\\', b'\\')
389 self.__doc__ %= {r'path': pycompat.sysstr(stringutil.uirepr(docpath))}
389 self.__doc__ %= {r'path': pycompat.sysstr(stringutil.uirepr(docpath))}
390 self._cmdline = cmdline
390 self._cmdline = cmdline
391
391
392 def __call__(self, ui, repo, *pats, **opts):
392 def __call__(self, ui, repo, *pats, **opts):
393 opts = pycompat.byteskwargs(opts)
393 opts = pycompat.byteskwargs(opts)
394 options = ' '.join(map(procutil.shellquote, opts['option']))
394 options = ' '.join(map(procutil.shellquote, opts['option']))
395 if options:
395 if options:
396 options = ' ' + options
396 options = ' ' + options
397 return dodiff(ui, repo, self._cmdline + options, pats, opts)
397 return dodiff(ui, repo, self._cmdline + options, pats, opts)
398
398
399 def uisetup(ui):
399 def uisetup(ui):
400 for cmd, path in ui.configitems('extdiff'):
400 for cmd, path in ui.configitems('extdiff'):
401 path = util.expandpath(path)
401 path = util.expandpath(path)
402 if cmd.startswith('cmd.'):
402 if cmd.startswith('cmd.'):
403 cmd = cmd[4:]
403 cmd = cmd[4:]
404 if not path:
404 if not path:
405 path = procutil.findexe(cmd)
405 path = procutil.findexe(cmd)
406 if path is None:
406 if path is None:
407 path = filemerge.findexternaltool(ui, cmd) or cmd
407 path = filemerge.findexternaltool(ui, cmd) or cmd
408 diffopts = ui.config('extdiff', 'opts.' + cmd)
408 diffopts = ui.config('extdiff', 'opts.' + cmd)
409 cmdline = procutil.shellquote(path)
409 cmdline = procutil.shellquote(path)
410 if diffopts:
410 if diffopts:
411 cmdline += ' ' + diffopts
411 cmdline += ' ' + diffopts
412 elif cmd.startswith('opts.'):
412 elif cmd.startswith('opts.'):
413 continue
413 continue
414 else:
414 else:
415 if path:
415 if path:
416 # case "cmd = path opts"
416 # case "cmd = path opts"
417 cmdline = path
417 cmdline = path
418 diffopts = len(pycompat.shlexsplit(cmdline)) > 1
418 diffopts = len(pycompat.shlexsplit(cmdline)) > 1
419 else:
419 else:
420 # case "cmd ="
420 # case "cmd ="
421 path = procutil.findexe(cmd)
421 path = procutil.findexe(cmd)
422 if path is None:
422 if path is None:
423 path = filemerge.findexternaltool(ui, cmd) or cmd
423 path = filemerge.findexternaltool(ui, cmd) or cmd
424 cmdline = procutil.shellquote(path)
424 cmdline = procutil.shellquote(path)
425 diffopts = False
425 diffopts = False
426 # look for diff arguments in [diff-tools] then [merge-tools]
426 # look for diff arguments in [diff-tools] then [merge-tools]
427 if not diffopts:
427 if not diffopts:
428 args = ui.config('diff-tools', cmd+'.diffargs') or \
428 args = ui.config('diff-tools', cmd+'.diffargs') or \
429 ui.config('merge-tools', cmd+'.diffargs')
429 ui.config('merge-tools', cmd+'.diffargs')
430 if args:
430 if args:
431 cmdline += ' ' + args
431 cmdline += ' ' + args
432 command(cmd, extdiffopts[:], _('hg %s [OPTION]... [FILE]...') % cmd,
432 command(cmd, extdiffopts[:], _('hg %s [OPTION]... [FILE]...') % cmd,
433 helpcategory=command.CATEGORY_FILE_CONTENTS,
433 inferrepo=True)(savedcmd(path, cmdline))
434 inferrepo=True)(savedcmd(path, cmdline))
434
435
435 # tell hggettext to extract docstrings from these functions:
436 # tell hggettext to extract docstrings from these functions:
436 i18nfunctions = [savedcmd]
437 i18nfunctions = [savedcmd]
@@ -1,1951 +1,1955 @@
1 # rebase.py - rebasing feature for mercurial
1 # rebase.py - rebasing feature for mercurial
2 #
2 #
3 # Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com>
3 # Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''command to move sets of revisions to a different ancestor
8 '''command to move sets of revisions to a different ancestor
9
9
10 This extension lets you rebase changesets in an existing Mercurial
10 This extension lets you rebase changesets in an existing Mercurial
11 repository.
11 repository.
12
12
13 For more information:
13 For more information:
14 https://mercurial-scm.org/wiki/RebaseExtension
14 https://mercurial-scm.org/wiki/RebaseExtension
15 '''
15 '''
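# Typical invocations of this extension's command (revision and branch names are
# illustrative):
#   hg rebase -s 42 -d default    # move revision 42 and its descendants onto 'default'
#   hg rebase --continue          # resume an interrupted rebase after resolving conflicts
#   hg rebase --abort             # abandon the rebase and restore the original state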
16
16
17 from __future__ import absolute_import
17 from __future__ import absolute_import
18
18
19 import errno
19 import errno
20 import os
20 import os
21
21
22 from mercurial.i18n import _
22 from mercurial.i18n import _
23 from mercurial.node import (
23 from mercurial.node import (
24 nullrev,
24 nullrev,
25 short,
25 short,
26 )
26 )
27 from mercurial import (
27 from mercurial import (
28 bookmarks,
28 bookmarks,
29 cmdutil,
29 cmdutil,
30 commands,
30 commands,
31 copies,
31 copies,
32 destutil,
32 destutil,
33 dirstateguard,
33 dirstateguard,
34 error,
34 error,
35 extensions,
35 extensions,
36 hg,
36 hg,
37 merge as mergemod,
37 merge as mergemod,
38 mergeutil,
38 mergeutil,
39 obsolete,
39 obsolete,
40 obsutil,
40 obsutil,
41 patch,
41 patch,
42 phases,
42 phases,
43 pycompat,
43 pycompat,
44 registrar,
44 registrar,
45 repair,
45 repair,
46 revset,
46 revset,
47 revsetlang,
47 revsetlang,
48 scmutil,
48 scmutil,
49 smartset,
49 smartset,
50 state as statemod,
50 state as statemod,
51 util,
51 util,
52 )
52 )
53
53
54 # The following constants are used throughout the rebase module. The ordering of
54 # The following constants are used throughout the rebase module. The ordering of
55 # their values must be maintained.
55 # their values must be maintained.
56
56
57 # Indicates that a revision needs to be rebased
57 # Indicates that a revision needs to be rebased
58 revtodo = -1
58 revtodo = -1
59 revtodostr = '-1'
59 revtodostr = '-1'
60
60
61 # legacy revstates no longer needed in current code
61 # legacy revstates no longer needed in current code
62 # -2: nullmerge, -3: revignored, -4: revprecursor, -5: revpruned
62 # -2: nullmerge, -3: revignored, -4: revprecursor, -5: revpruned
63 legacystates = {'-2', '-3', '-4', '-5'}
63 legacystates = {'-2', '-3', '-4', '-5'}
64
64
65 cmdtable = {}
65 cmdtable = {}
66 command = registrar.command(cmdtable)
66 command = registrar.command(cmdtable)
67 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
67 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
68 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
68 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
69 # be specifying the version(s) of Mercurial they are tested with, or
69 # be specifying the version(s) of Mercurial they are tested with, or
70 # leave the attribute unspecified.
70 # leave the attribute unspecified.
71 testedwith = 'ships-with-hg-core'
71 testedwith = 'ships-with-hg-core'
72
72
73 def _nothingtorebase():
73 def _nothingtorebase():
74 return 1
74 return 1
75
75
76 def _savegraft(ctx, extra):
76 def _savegraft(ctx, extra):
77 s = ctx.extra().get('source', None)
77 s = ctx.extra().get('source', None)
78 if s is not None:
78 if s is not None:
79 extra['source'] = s
79 extra['source'] = s
80 s = ctx.extra().get('intermediate-source', None)
80 s = ctx.extra().get('intermediate-source', None)
81 if s is not None:
81 if s is not None:
82 extra['intermediate-source'] = s
82 extra['intermediate-source'] = s
83
83
84 def _savebranch(ctx, extra):
84 def _savebranch(ctx, extra):
85 extra['branch'] = ctx.branch()
85 extra['branch'] = ctx.branch()
86
86
87 def _destrebase(repo, sourceset, destspace=None):
87 def _destrebase(repo, sourceset, destspace=None):
88 """small wrapper around destmerge to pass the right extra args
88 """small wrapper around destmerge to pass the right extra args
89
89
90 Please wrap destutil.destmerge instead."""
90 Please wrap destutil.destmerge instead."""
91 return destutil.destmerge(repo, action='rebase', sourceset=sourceset,
91 return destutil.destmerge(repo, action='rebase', sourceset=sourceset,
92 onheadcheck=False, destspace=destspace)
92 onheadcheck=False, destspace=destspace)
93
93
94 revsetpredicate = registrar.revsetpredicate()
94 revsetpredicate = registrar.revsetpredicate()
95
95
96 @revsetpredicate('_destrebase')
96 @revsetpredicate('_destrebase')
97 def _revsetdestrebase(repo, subset, x):
97 def _revsetdestrebase(repo, subset, x):
98 # ``_rebasedefaultdest()``
98 # ``_rebasedefaultdest()``
99
99
100 # default destination for rebase.
100 # default destination for rebase.
101 # # XXX: Currently private because I expect the signature to change.
101 # # XXX: Currently private because I expect the signature to change.
102 # # XXX: - bailing out in case of ambiguity vs returning all data.
102 # # XXX: - bailing out in case of ambiguity vs returning all data.
103 # i18n: "_rebasedefaultdest" is a keyword
103 # i18n: "_rebasedefaultdest" is a keyword
104 sourceset = None
104 sourceset = None
105 if x is not None:
105 if x is not None:
106 sourceset = revset.getset(repo, smartset.fullreposet(repo), x)
106 sourceset = revset.getset(repo, smartset.fullreposet(repo), x)
107 return subset & smartset.baseset([_destrebase(repo, sourceset)])
107 return subset & smartset.baseset([_destrebase(repo, sourceset)])
108
108
109 @revsetpredicate('_destautoorphanrebase')
109 @revsetpredicate('_destautoorphanrebase')
110 def _revsetdestautoorphanrebase(repo, subset, x):
110 def _revsetdestautoorphanrebase(repo, subset, x):
111 """automatic rebase destination for a single orphan revision"""
111 """automatic rebase destination for a single orphan revision"""
112 unfi = repo.unfiltered()
112 unfi = repo.unfiltered()
113 obsoleted = unfi.revs('obsolete()')
113 obsoleted = unfi.revs('obsolete()')
114
114
115 src = revset.getset(repo, subset, x).first()
115 src = revset.getset(repo, subset, x).first()
116
116
117 # Empty src or already obsoleted - Do not return a destination
117 # Empty src or already obsoleted - Do not return a destination
118 if not src or src in obsoleted:
118 if not src or src in obsoleted:
119 return smartset.baseset()
119 return smartset.baseset()
120 dests = destutil.orphanpossibledestination(repo, src)
120 dests = destutil.orphanpossibledestination(repo, src)
121 if len(dests) > 1:
121 if len(dests) > 1:
122 raise error.Abort(
122 raise error.Abort(
123 _("ambiguous automatic rebase: %r could end up on any of %r") % (
123 _("ambiguous automatic rebase: %r could end up on any of %r") % (
124 src, dests))
124 src, dests))
125 # We have zero or one destination, so we can just return here.
125 # We have zero or one destination, so we can just return here.
126 return smartset.baseset(dests)
126 return smartset.baseset(dests)
127
127
128 def _ctxdesc(ctx):
128 def _ctxdesc(ctx):
129 """short description for a context"""
129 """short description for a context"""
130 desc = '%d:%s "%s"' % (ctx.rev(), ctx,
130 desc = '%d:%s "%s"' % (ctx.rev(), ctx,
131 ctx.description().split('\n', 1)[0])
131 ctx.description().split('\n', 1)[0])
132 repo = ctx.repo()
132 repo = ctx.repo()
133 names = []
133 names = []
134 for nsname, ns in repo.names.iteritems():
134 for nsname, ns in repo.names.iteritems():
135 if nsname == 'branches':
135 if nsname == 'branches':
136 continue
136 continue
137 names.extend(ns.names(repo, ctx.node()))
137 names.extend(ns.names(repo, ctx.node()))
138 if names:
138 if names:
139 desc += ' (%s)' % ' '.join(names)
139 desc += ' (%s)' % ' '.join(names)
140 return desc
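# Example result (hypothetical changeset): 123:1234567890ab "fix parsing bug" (mybookmark)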
140 return desc
141
141
142 class rebaseruntime(object):
142 class rebaseruntime(object):
143 """This class is a container for rebase runtime state"""
143 """This class is a container for rebase runtime state"""
144 def __init__(self, repo, ui, inmemory=False, opts=None):
144 def __init__(self, repo, ui, inmemory=False, opts=None):
145 if opts is None:
145 if opts is None:
146 opts = {}
146 opts = {}
147
147
148 # prepared: whether we have rebasestate prepared or not. Currently it
148 # prepared: whether we have rebasestate prepared or not. Currently it
149 # decides whether "self.repo" is unfiltered or not.
149 # decides whether "self.repo" is unfiltered or not.
150 # The rebasestate has explicit hash to hash instructions not depending
150 # The rebasestate has explicit hash to hash instructions not depending
151 # on visibility. If rebasestate exists (in-memory or on-disk), use
151 # on visibility. If rebasestate exists (in-memory or on-disk), use
152 # unfiltered repo to avoid visibility issues.
152 # unfiltered repo to avoid visibility issues.
153 # Before knowing rebasestate (i.e. when starting a new rebase (not
153 # Before knowing rebasestate (i.e. when starting a new rebase (not
154 # --continue or --abort)), the original repo should be used so
154 # --continue or --abort)), the original repo should be used so
155 # visibility-dependent revsets are correct.
155 # visibility-dependent revsets are correct.
156 self.prepared = False
156 self.prepared = False
157 self._repo = repo
157 self._repo = repo
158
158
159 self.ui = ui
159 self.ui = ui
160 self.opts = opts
160 self.opts = opts
161 self.originalwd = None
161 self.originalwd = None
162 self.external = nullrev
162 self.external = nullrev
163 # Mapping between the old revision id and either what is the new rebased
163 # Mapping between the old revision id and either what is the new rebased
164 # revision or what needs to be done with the old revision. The state
164 # revision or what needs to be done with the old revision. The state
165 # dict will be what contains most of the rebase progress state.
165 # dict will be what contains most of the rebase progress state.
166 self.state = {}
166 self.state = {}
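# (Values are the revision numbers of the rebased results, or negative
# sentinels such as revtodo for revisions not yet processed; see how
# _writestatus and _read below serialize and restore them.)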
167 self.activebookmark = None
167 self.activebookmark = None
168 self.destmap = {}
168 self.destmap = {}
169 self.skipped = set()
169 self.skipped = set()
170
170
171 self.collapsef = opts.get('collapse', False)
171 self.collapsef = opts.get('collapse', False)
172 self.collapsemsg = cmdutil.logmessage(ui, opts)
172 self.collapsemsg = cmdutil.logmessage(ui, opts)
173 self.date = opts.get('date', None)
173 self.date = opts.get('date', None)
174
174
175 e = opts.get('extrafn') # internal, used by e.g. hgsubversion
175 e = opts.get('extrafn') # internal, used by e.g. hgsubversion
176 self.extrafns = [_savegraft]
176 self.extrafns = [_savegraft]
177 if e:
177 if e:
178 self.extrafns = [e]
178 self.extrafns = [e]
179
179
180 self.backupf = ui.configbool('ui', 'history-editing-backup')
180 self.backupf = ui.configbool('ui', 'history-editing-backup')
181 self.keepf = opts.get('keep', False)
181 self.keepf = opts.get('keep', False)
182 self.keepbranchesf = opts.get('keepbranches', False)
182 self.keepbranchesf = opts.get('keepbranches', False)
183 self.obsoletenotrebased = {}
183 self.obsoletenotrebased = {}
184 self.obsoletewithoutsuccessorindestination = set()
184 self.obsoletewithoutsuccessorindestination = set()
185 self.inmemory = inmemory
185 self.inmemory = inmemory
186 self.stateobj = statemod.cmdstate(repo, 'rebasestate')
186 self.stateobj = statemod.cmdstate(repo, 'rebasestate')
187
187
188 @property
188 @property
189 def repo(self):
189 def repo(self):
190 if self.prepared:
190 if self.prepared:
191 return self._repo.unfiltered()
191 return self._repo.unfiltered()
192 else:
192 else:
193 return self._repo
193 return self._repo
194
194
195 def storestatus(self, tr=None):
195 def storestatus(self, tr=None):
196 """Store the current status to allow recovery"""
196 """Store the current status to allow recovery"""
197 if tr:
197 if tr:
198 tr.addfilegenerator('rebasestate', ('rebasestate',),
198 tr.addfilegenerator('rebasestate', ('rebasestate',),
199 self._writestatus, location='plain')
199 self._writestatus, location='plain')
200 else:
200 else:
201 with self.repo.vfs("rebasestate", "w") as f:
201 with self.repo.vfs("rebasestate", "w") as f:
202 self._writestatus(f)
202 self._writestatus(f)
203
203
204 def _writestatus(self, f):
204 def _writestatus(self, f):
205 repo = self.repo
205 repo = self.repo
206 assert repo.filtername is None
206 assert repo.filtername is None
207 f.write(repo[self.originalwd].hex() + '\n')
207 f.write(repo[self.originalwd].hex() + '\n')
208 # was "dest". we now write dest per src root below.
208 # was "dest". we now write dest per src root below.
209 f.write('\n')
209 f.write('\n')
210 f.write(repo[self.external].hex() + '\n')
210 f.write(repo[self.external].hex() + '\n')
211 f.write('%d\n' % int(self.collapsef))
211 f.write('%d\n' % int(self.collapsef))
212 f.write('%d\n' % int(self.keepf))
212 f.write('%d\n' % int(self.keepf))
213 f.write('%d\n' % int(self.keepbranchesf))
213 f.write('%d\n' % int(self.keepbranchesf))
214 f.write('%s\n' % (self.activebookmark or ''))
214 f.write('%s\n' % (self.activebookmark or ''))
215 destmap = self.destmap
215 destmap = self.destmap
216 for d, v in self.state.iteritems():
216 for d, v in self.state.iteritems():
217 oldrev = repo[d].hex()
217 oldrev = repo[d].hex()
218 if v >= 0:
218 if v >= 0:
219 newrev = repo[v].hex()
219 newrev = repo[v].hex()
220 else:
220 else:
221 newrev = "%d" % v
221 newrev = "%d" % v
222 destnode = repo[destmap[d]].hex()
222 destnode = repo[destmap[d]].hex()
223 f.write("%s:%s:%s\n" % (oldrev, newrev, destnode))
223 f.write("%s:%s:%s\n" % (oldrev, newrev, destnode))
224 repo.ui.debug('rebase status stored\n')
224 repo.ui.debug('rebase status stored\n')
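# For orientation, the .hg/rebasestate layout written above is roughly:
# originalwd hex, an empty legacy-destination line, external hex, the
# collapse/keep/keepbranches flags as 0/1, the active bookmark name
# (possibly empty), then one oldrev:newrev:destnode triple per rebased
# revision.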
225
225
226 def restorestatus(self):
226 def restorestatus(self):
227 """Restore a previously stored status"""
227 """Restore a previously stored status"""
228 if not self.stateobj.exists():
228 if not self.stateobj.exists():
229 cmdutil.wrongtooltocontinue(self.repo, _('rebase'))
229 cmdutil.wrongtooltocontinue(self.repo, _('rebase'))
230
230
231 data = self._read()
231 data = self._read()
232 self.repo.ui.debug('rebase status resumed\n')
232 self.repo.ui.debug('rebase status resumed\n')
233
233
234 self.originalwd = data['originalwd']
234 self.originalwd = data['originalwd']
235 self.destmap = data['destmap']
235 self.destmap = data['destmap']
236 self.state = data['state']
236 self.state = data['state']
237 self.skipped = data['skipped']
237 self.skipped = data['skipped']
238 self.collapsef = data['collapse']
238 self.collapsef = data['collapse']
239 self.keepf = data['keep']
239 self.keepf = data['keep']
240 self.keepbranchesf = data['keepbranches']
240 self.keepbranchesf = data['keepbranches']
241 self.external = data['external']
241 self.external = data['external']
242 self.activebookmark = data['activebookmark']
242 self.activebookmark = data['activebookmark']
243
243
244 def _read(self):
244 def _read(self):
245 self.prepared = True
245 self.prepared = True
246 repo = self.repo
246 repo = self.repo
247 assert repo.filtername is None
247 assert repo.filtername is None
248 data = {'keepbranches': None, 'collapse': None, 'activebookmark': None,
248 data = {'keepbranches': None, 'collapse': None, 'activebookmark': None,
249 'external': nullrev, 'keep': None, 'originalwd': None}
249 'external': nullrev, 'keep': None, 'originalwd': None}
250 legacydest = None
250 legacydest = None
251 state = {}
251 state = {}
252 destmap = {}
252 destmap = {}
253
253
254 if True:
254 if True:
255 f = repo.vfs("rebasestate")
255 f = repo.vfs("rebasestate")
256 for i, l in enumerate(f.read().splitlines()):
256 for i, l in enumerate(f.read().splitlines()):
257 if i == 0:
257 if i == 0:
258 data['originalwd'] = repo[l].rev()
258 data['originalwd'] = repo[l].rev()
259 elif i == 1:
259 elif i == 1:
260 # this line should be empty in newer versions, but legacy
260 # this line should be empty in newer versions, but legacy
261 # clients may still use it
261 # clients may still use it
262 if l:
262 if l:
263 legacydest = repo[l].rev()
263 legacydest = repo[l].rev()
264 elif i == 2:
264 elif i == 2:
265 data['external'] = repo[l].rev()
265 data['external'] = repo[l].rev()
266 elif i == 3:
266 elif i == 3:
267 data['collapse'] = bool(int(l))
267 data['collapse'] = bool(int(l))
268 elif i == 4:
268 elif i == 4:
269 data['keep'] = bool(int(l))
269 data['keep'] = bool(int(l))
270 elif i == 5:
270 elif i == 5:
271 data['keepbranches'] = bool(int(l))
271 data['keepbranches'] = bool(int(l))
272 elif i == 6 and not (len(l) == 81 and ':' in l):
272 elif i == 6 and not (len(l) == 81 and ':' in l):
273 # line 6 is a recent addition, so for backwards
273 # line 6 is a recent addition, so for backwards
274 # compatibility check that the line doesn't look like the
274 # compatibility check that the line doesn't look like the
275 # oldrev:newrev lines
275 # oldrev:newrev lines
276 data['activebookmark'] = l
276 data['activebookmark'] = l
277 else:
277 else:
278 args = l.split(':')
278 args = l.split(':')
279 oldrev = repo[args[0]].rev()
279 oldrev = repo[args[0]].rev()
280 newrev = args[1]
280 newrev = args[1]
281 if newrev in legacystates:
281 if newrev in legacystates:
282 continue
282 continue
283 if len(args) > 2:
283 if len(args) > 2:
284 destrev = repo[args[2]].rev()
284 destrev = repo[args[2]].rev()
285 else:
285 else:
286 destrev = legacydest
286 destrev = legacydest
287 destmap[oldrev] = destrev
287 destmap[oldrev] = destrev
288 if newrev == revtodostr:
288 if newrev == revtodostr:
289 state[oldrev] = revtodo
289 state[oldrev] = revtodo
290 # Legacy compat special case
290 # Legacy compat special case
291 else:
291 else:
292 state[oldrev] = repo[newrev].rev()
292 state[oldrev] = repo[newrev].rev()
293
293
294 if data['keepbranches'] is None:
294 if data['keepbranches'] is None:
295 raise error.Abort(_('.hg/rebasestate is incomplete'))
295 raise error.Abort(_('.hg/rebasestate is incomplete'))
296
296
297 data['destmap'] = destmap
297 data['destmap'] = destmap
298 data['state'] = state
298 data['state'] = state
299 skipped = set()
299 skipped = set()
300 # recompute the set of skipped revs
300 # recompute the set of skipped revs
301 if not data['collapse']:
301 if not data['collapse']:
302 seen = set(destmap.values())
302 seen = set(destmap.values())
303 for old, new in sorted(state.items()):
303 for old, new in sorted(state.items()):
304 if new != revtodo and new in seen:
304 if new != revtodo and new in seen:
305 skipped.add(old)
305 skipped.add(old)
306 seen.add(new)
306 seen.add(new)
307 data['skipped'] = skipped
307 data['skipped'] = skipped
308 repo.ui.debug('computed skipped revs: %s\n' %
308 repo.ui.debug('computed skipped revs: %s\n' %
309 (' '.join('%d' % r for r in sorted(skipped)) or ''))
309 (' '.join('%d' % r for r in sorted(skipped)) or ''))
310
310
311 return data
311 return data
312
312
313 def _handleskippingobsolete(self, obsoleterevs, destmap):
313 def _handleskippingobsolete(self, obsoleterevs, destmap):
314 """Compute structures necessary for skipping obsolete revisions
314 """Compute structures necessary for skipping obsolete revisions
315
315
316 obsoleterevs: iterable of all obsolete revisions in rebaseset
316 obsoleterevs: iterable of all obsolete revisions in rebaseset
317 destmap: {srcrev: destrev} destination revisions
317 destmap: {srcrev: destrev} destination revisions
318 """
318 """
319 self.obsoletenotrebased = {}
319 self.obsoletenotrebased = {}
320 if not self.ui.configbool('experimental', 'rebaseskipobsolete'):
320 if not self.ui.configbool('experimental', 'rebaseskipobsolete'):
321 return
321 return
322 obsoleteset = set(obsoleterevs)
322 obsoleteset = set(obsoleterevs)
323 (self.obsoletenotrebased,
323 (self.obsoletenotrebased,
324 self.obsoletewithoutsuccessorindestination,
324 self.obsoletewithoutsuccessorindestination,
325 obsoleteextinctsuccessors) = _computeobsoletenotrebased(
325 obsoleteextinctsuccessors) = _computeobsoletenotrebased(
326 self.repo, obsoleteset, destmap)
326 self.repo, obsoleteset, destmap)
327 skippedset = set(self.obsoletenotrebased)
327 skippedset = set(self.obsoletenotrebased)
328 skippedset.update(self.obsoletewithoutsuccessorindestination)
328 skippedset.update(self.obsoletewithoutsuccessorindestination)
329 skippedset.update(obsoleteextinctsuccessors)
329 skippedset.update(obsoleteextinctsuccessors)
330 _checkobsrebase(self.repo, self.ui, obsoleteset, skippedset)
330 _checkobsrebase(self.repo, self.ui, obsoleteset, skippedset)
331
331
332 def _prepareabortorcontinue(self, isabort, backup=True, suppwarns=False):
332 def _prepareabortorcontinue(self, isabort, backup=True, suppwarns=False):
333 try:
333 try:
334 self.restorestatus()
334 self.restorestatus()
335 self.collapsemsg = restorecollapsemsg(self.repo, isabort)
335 self.collapsemsg = restorecollapsemsg(self.repo, isabort)
336 except error.RepoLookupError:
336 except error.RepoLookupError:
337 if isabort:
337 if isabort:
338 clearstatus(self.repo)
338 clearstatus(self.repo)
339 clearcollapsemsg(self.repo)
339 clearcollapsemsg(self.repo)
340 self.repo.ui.warn(_('rebase aborted (no revision is removed,'
340 self.repo.ui.warn(_('rebase aborted (no revision is removed,'
341 ' only broken state is cleared)\n'))
341 ' only broken state is cleared)\n'))
342 return 0
342 return 0
343 else:
343 else:
344 msg = _('cannot continue inconsistent rebase')
344 msg = _('cannot continue inconsistent rebase')
345 hint = _('use "hg rebase --abort" to clear broken state')
345 hint = _('use "hg rebase --abort" to clear broken state')
346 raise error.Abort(msg, hint=hint)
346 raise error.Abort(msg, hint=hint)
347
347
348 if isabort:
348 if isabort:
349 backup = backup and self.backupf
349 backup = backup and self.backupf
350 return abort(self.repo, self.originalwd, self.destmap, self.state,
350 return abort(self.repo, self.originalwd, self.destmap, self.state,
351 activebookmark=self.activebookmark, backup=backup,
351 activebookmark=self.activebookmark, backup=backup,
352 suppwarns=suppwarns)
352 suppwarns=suppwarns)
353
353
354 def _preparenewrebase(self, destmap):
354 def _preparenewrebase(self, destmap):
355 if not destmap:
355 if not destmap:
356 return _nothingtorebase()
356 return _nothingtorebase()
357
357
358 rebaseset = destmap.keys()
358 rebaseset = destmap.keys()
359 allowunstable = obsolete.isenabled(self.repo, obsolete.allowunstableopt)
359 allowunstable = obsolete.isenabled(self.repo, obsolete.allowunstableopt)
360 if (not (self.keepf or allowunstable)
360 if (not (self.keepf or allowunstable)
361 and self.repo.revs('first(children(%ld) - %ld)',
361 and self.repo.revs('first(children(%ld) - %ld)',
362 rebaseset, rebaseset)):
362 rebaseset, rebaseset)):
363 raise error.Abort(
363 raise error.Abort(
364 _("can't remove original changesets with"
364 _("can't remove original changesets with"
365 " unrebased descendants"),
365 " unrebased descendants"),
366 hint=_('use --keep to keep original changesets'))
366 hint=_('use --keep to keep original changesets'))
367
367
368 result = buildstate(self.repo, destmap, self.collapsef)
368 result = buildstate(self.repo, destmap, self.collapsef)
369
369
370 if not result:
370 if not result:
371 # Empty state built, nothing to rebase
371 # Empty state built, nothing to rebase
372 self.ui.status(_('nothing to rebase\n'))
372 self.ui.status(_('nothing to rebase\n'))
373 return _nothingtorebase()
373 return _nothingtorebase()
374
374
375 for root in self.repo.set('roots(%ld)', rebaseset):
375 for root in self.repo.set('roots(%ld)', rebaseset):
376 if not self.keepf and not root.mutable():
376 if not self.keepf and not root.mutable():
377 raise error.Abort(_("can't rebase public changeset %s")
377 raise error.Abort(_("can't rebase public changeset %s")
378 % root,
378 % root,
379 hint=_("see 'hg help phases' for details"))
379 hint=_("see 'hg help phases' for details"))
380
380
381 (self.originalwd, self.destmap, self.state) = result
381 (self.originalwd, self.destmap, self.state) = result
382 if self.collapsef:
382 if self.collapsef:
383 dests = set(self.destmap.values())
383 dests = set(self.destmap.values())
384 if len(dests) != 1:
384 if len(dests) != 1:
385 raise error.Abort(
385 raise error.Abort(
386 _('--collapse does not work with multiple destinations'))
386 _('--collapse does not work with multiple destinations'))
387 destrev = next(iter(dests))
387 destrev = next(iter(dests))
388 destancestors = self.repo.changelog.ancestors([destrev],
388 destancestors = self.repo.changelog.ancestors([destrev],
389 inclusive=True)
389 inclusive=True)
390 self.external = externalparent(self.repo, self.state, destancestors)
390 self.external = externalparent(self.repo, self.state, destancestors)
391
391
392 for destrev in sorted(set(destmap.values())):
392 for destrev in sorted(set(destmap.values())):
393 dest = self.repo[destrev]
393 dest = self.repo[destrev]
394 if dest.closesbranch() and not self.keepbranchesf:
394 if dest.closesbranch() and not self.keepbranchesf:
395 self.ui.status(_('reopening closed branch head %s\n') % dest)
395 self.ui.status(_('reopening closed branch head %s\n') % dest)
396
396
397 self.prepared = True
397 self.prepared = True
398
398
399 def _assignworkingcopy(self):
399 def _assignworkingcopy(self):
400 if self.inmemory:
400 if self.inmemory:
401 from mercurial.context import overlayworkingctx
401 from mercurial.context import overlayworkingctx
402 self.wctx = overlayworkingctx(self.repo)
402 self.wctx = overlayworkingctx(self.repo)
403 self.repo.ui.debug("rebasing in-memory\n")
403 self.repo.ui.debug("rebasing in-memory\n")
404 else:
404 else:
405 self.wctx = self.repo[None]
405 self.wctx = self.repo[None]
406 self.repo.ui.debug("rebasing on disk\n")
406 self.repo.ui.debug("rebasing on disk\n")
407 self.repo.ui.log("rebase",
407 self.repo.ui.log("rebase",
408 "using in-memory rebase: %r\n", self.inmemory,
408 "using in-memory rebase: %r\n", self.inmemory,
409 rebase_imm_used=self.inmemory)
409 rebase_imm_used=self.inmemory)
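# (In-memory mode routes every change through the overlayworkingctx above
# and leaves the on-disk working copy untouched; on-disk mode uses
# repo[None], the regular working directory context.)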
410
410
411 def _performrebase(self, tr):
411 def _performrebase(self, tr):
412 self._assignworkingcopy()
412 self._assignworkingcopy()
413 repo, ui = self.repo, self.ui
413 repo, ui = self.repo, self.ui
414 if self.keepbranchesf:
414 if self.keepbranchesf:
415 # insert _savebranch at the start of extrafns so if
415 # insert _savebranch at the start of extrafns so if
416 # there's a user-provided extrafn it can clobber branch if
416 # there's a user-provided extrafn it can clobber branch if
417 # desired
417 # desired
418 self.extrafns.insert(0, _savebranch)
418 self.extrafns.insert(0, _savebranch)
419 if self.collapsef:
419 if self.collapsef:
420 branches = set()
420 branches = set()
421 for rev in self.state:
421 for rev in self.state:
422 branches.add(repo[rev].branch())
422 branches.add(repo[rev].branch())
423 if len(branches) > 1:
423 if len(branches) > 1:
424 raise error.Abort(_('cannot collapse multiple named '
424 raise error.Abort(_('cannot collapse multiple named '
425 'branches'))
425 'branches'))
426
426
427 # Calculate self.obsoletenotrebased
427 # Calculate self.obsoletenotrebased
428 obsrevs = _filterobsoleterevs(self.repo, self.state)
428 obsrevs = _filterobsoleterevs(self.repo, self.state)
429 self._handleskippingobsolete(obsrevs, self.destmap)
429 self._handleskippingobsolete(obsrevs, self.destmap)
430
430
431 # Keep track of the active bookmarks in order to reset them later
431 # Keep track of the active bookmarks in order to reset them later
432 self.activebookmark = self.activebookmark or repo._activebookmark
432 self.activebookmark = self.activebookmark or repo._activebookmark
433 if self.activebookmark:
433 if self.activebookmark:
434 bookmarks.deactivate(repo)
434 bookmarks.deactivate(repo)
435
435
436 # Store the state before we begin so users can run 'hg rebase --abort'
436 # Store the state before we begin so users can run 'hg rebase --abort'
437 # if we fail before the transaction closes.
437 # if we fail before the transaction closes.
438 self.storestatus()
438 self.storestatus()
439 if tr:
439 if tr:
440 # When using single transaction, store state when transaction
440 # When using single transaction, store state when transaction
441 # commits.
441 # commits.
442 self.storestatus(tr)
442 self.storestatus(tr)
443
443
444 cands = [k for k, v in self.state.iteritems() if v == revtodo]
444 cands = [k for k, v in self.state.iteritems() if v == revtodo]
445 p = repo.ui.makeprogress(_("rebasing"), unit=_('changesets'),
445 p = repo.ui.makeprogress(_("rebasing"), unit=_('changesets'),
446 total=len(cands))
446 total=len(cands))
447 def progress(ctx):
447 def progress(ctx):
448 p.increment(item=("%d:%s" % (ctx.rev(), ctx)))
448 p.increment(item=("%d:%s" % (ctx.rev(), ctx)))
449 allowdivergence = self.ui.configbool(
449 allowdivergence = self.ui.configbool(
450 'experimental', 'evolution.allowdivergence')
450 'experimental', 'evolution.allowdivergence')
451 for subset in sortsource(self.destmap):
451 for subset in sortsource(self.destmap):
452 sortedrevs = self.repo.revs('sort(%ld, -topo)', subset)
452 sortedrevs = self.repo.revs('sort(%ld, -topo)', subset)
453 if not allowdivergence:
453 if not allowdivergence:
454 sortedrevs -= self.repo.revs(
454 sortedrevs -= self.repo.revs(
455 'descendants(%ld) and not %ld',
455 'descendants(%ld) and not %ld',
456 self.obsoletewithoutsuccessorindestination,
456 self.obsoletewithoutsuccessorindestination,
457 self.obsoletewithoutsuccessorindestination,
457 self.obsoletewithoutsuccessorindestination,
458 )
458 )
459 for rev in sortedrevs:
459 for rev in sortedrevs:
460 self._rebasenode(tr, rev, allowdivergence, progress)
460 self._rebasenode(tr, rev, allowdivergence, progress)
461 p.complete()
461 p.complete()
462 ui.note(_('rebase merging completed\n'))
462 ui.note(_('rebase merging completed\n'))
463
463
464 def _concludenode(self, rev, p1, p2, editor, commitmsg=None):
464 def _concludenode(self, rev, p1, p2, editor, commitmsg=None):
465 '''Commit the wd changes with parents p1 and p2.
465 '''Commit the wd changes with parents p1 and p2.
466
466
467 Reuse commit info from rev but also store useful information in extra.
467 Reuse commit info from rev but also store useful information in extra.
468 Return node of committed revision.'''
468 Return node of committed revision.'''
469 repo = self.repo
469 repo = self.repo
470 ctx = repo[rev]
470 ctx = repo[rev]
471 if commitmsg is None:
471 if commitmsg is None:
472 commitmsg = ctx.description()
472 commitmsg = ctx.description()
473 date = self.date
473 date = self.date
474 if date is None:
474 if date is None:
475 date = ctx.date()
475 date = ctx.date()
476 extra = {'rebase_source': ctx.hex()}
476 extra = {'rebase_source': ctx.hex()}
477 for c in self.extrafns:
477 for c in self.extrafns:
478 c(ctx, extra)
478 c(ctx, extra)
479 keepbranch = self.keepbranchesf and repo[p1].branch() != ctx.branch()
479 keepbranch = self.keepbranchesf and repo[p1].branch() != ctx.branch()
480 destphase = max(ctx.phase(), phases.draft)
480 destphase = max(ctx.phase(), phases.draft)
481 overrides = {('phases', 'new-commit'): destphase}
481 overrides = {('phases', 'new-commit'): destphase}
482 if keepbranch:
482 if keepbranch:
483 overrides[('ui', 'allowemptycommit')] = True
483 overrides[('ui', 'allowemptycommit')] = True
484 with repo.ui.configoverride(overrides, 'rebase'):
484 with repo.ui.configoverride(overrides, 'rebase'):
485 if self.inmemory:
485 if self.inmemory:
486 newnode = commitmemorynode(repo, p1, p2,
486 newnode = commitmemorynode(repo, p1, p2,
487 wctx=self.wctx,
487 wctx=self.wctx,
488 extra=extra,
488 extra=extra,
489 commitmsg=commitmsg,
489 commitmsg=commitmsg,
490 editor=editor,
490 editor=editor,
491 user=ctx.user(),
491 user=ctx.user(),
492 date=date)
492 date=date)
493 mergemod.mergestate.clean(repo)
493 mergemod.mergestate.clean(repo)
494 else:
494 else:
495 newnode = commitnode(repo, p1, p2,
495 newnode = commitnode(repo, p1, p2,
496 extra=extra,
496 extra=extra,
497 commitmsg=commitmsg,
497 commitmsg=commitmsg,
498 editor=editor,
498 editor=editor,
499 user=ctx.user(),
499 user=ctx.user(),
500 date=date)
500 date=date)
501
501
502 if newnode is None:
502 if newnode is None:
503 # If it ended up being a no-op commit, then the normal
503 # If it ended up being a no-op commit, then the normal
504 # merge state clean-up path doesn't happen, so do it
504 # merge state clean-up path doesn't happen, so do it
505 # here. Fix issue5494
505 # here. Fix issue5494
506 mergemod.mergestate.clean(repo)
506 mergemod.mergestate.clean(repo)
507 return newnode
507 return newnode
508
508
509 def _rebasenode(self, tr, rev, allowdivergence, progressfn):
509 def _rebasenode(self, tr, rev, allowdivergence, progressfn):
510 repo, ui, opts = self.repo, self.ui, self.opts
510 repo, ui, opts = self.repo, self.ui, self.opts
511 dest = self.destmap[rev]
511 dest = self.destmap[rev]
512 ctx = repo[rev]
512 ctx = repo[rev]
513 desc = _ctxdesc(ctx)
513 desc = _ctxdesc(ctx)
514 if self.state[rev] == rev:
514 if self.state[rev] == rev:
515 ui.status(_('already rebased %s\n') % desc)
515 ui.status(_('already rebased %s\n') % desc)
516 elif (not allowdivergence
516 elif (not allowdivergence
517 and rev in self.obsoletewithoutsuccessorindestination):
517 and rev in self.obsoletewithoutsuccessorindestination):
518 msg = _('note: not rebasing %s and its descendants as '
518 msg = _('note: not rebasing %s and its descendants as '
519 'this would cause divergence\n') % desc
519 'this would cause divergence\n') % desc
520 repo.ui.status(msg)
520 repo.ui.status(msg)
521 self.skipped.add(rev)
521 self.skipped.add(rev)
522 elif rev in self.obsoletenotrebased:
522 elif rev in self.obsoletenotrebased:
523 succ = self.obsoletenotrebased[rev]
523 succ = self.obsoletenotrebased[rev]
524 if succ is None:
524 if succ is None:
525 msg = _('note: not rebasing %s, it has no '
525 msg = _('note: not rebasing %s, it has no '
526 'successor\n') % desc
526 'successor\n') % desc
527 else:
527 else:
528 succdesc = _ctxdesc(repo[succ])
528 succdesc = _ctxdesc(repo[succ])
529 msg = (_('note: not rebasing %s, already in '
529 msg = (_('note: not rebasing %s, already in '
530 'destination as %s\n') % (desc, succdesc))
530 'destination as %s\n') % (desc, succdesc))
531 repo.ui.status(msg)
531 repo.ui.status(msg)
532 # Make clearrebased aware that state[rev] is not a true successor
532 # Make clearrebased aware that state[rev] is not a true successor
533 self.skipped.add(rev)
533 self.skipped.add(rev)
534 # Record rev as moved to its desired destination in self.state.
534 # Record rev as moved to its desired destination in self.state.
535 # This helps bookmark and working parent movement.
535 # This helps bookmark and working parent movement.
536 dest = max(adjustdest(repo, rev, self.destmap, self.state,
536 dest = max(adjustdest(repo, rev, self.destmap, self.state,
537 self.skipped))
537 self.skipped))
538 self.state[rev] = dest
538 self.state[rev] = dest
539 elif self.state[rev] == revtodo:
539 elif self.state[rev] == revtodo:
540 ui.status(_('rebasing %s\n') % desc)
540 ui.status(_('rebasing %s\n') % desc)
541 progressfn(ctx)
541 progressfn(ctx)
542 p1, p2, base = defineparents(repo, rev, self.destmap,
542 p1, p2, base = defineparents(repo, rev, self.destmap,
543 self.state, self.skipped,
543 self.state, self.skipped,
544 self.obsoletenotrebased)
544 self.obsoletenotrebased)
545 if len(repo[None].parents()) == 2:
545 if not self.inmemory and len(repo[None].parents()) == 2:
546 repo.ui.debug('resuming interrupted rebase\n')
546 repo.ui.debug('resuming interrupted rebase\n')
547 else:
547 else:
548 overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
548 overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
549 with ui.configoverride(overrides, 'rebase'):
549 with ui.configoverride(overrides, 'rebase'):
550 stats = rebasenode(repo, rev, p1, base, self.collapsef,
550 stats = rebasenode(repo, rev, p1, base, self.collapsef,
551 dest, wctx=self.wctx)
551 dest, wctx=self.wctx)
552 if stats.unresolvedcount > 0:
552 if stats.unresolvedcount > 0:
553 if self.inmemory:
553 if self.inmemory:
554 raise error.InMemoryMergeConflictsError()
554 raise error.InMemoryMergeConflictsError()
555 else:
555 else:
556 raise error.InterventionRequired(
556 raise error.InterventionRequired(
557 _('unresolved conflicts (see hg '
557 _('unresolved conflicts (see hg '
558 'resolve, then hg rebase --continue)'))
558 'resolve, then hg rebase --continue)'))
559 if not self.collapsef:
559 if not self.collapsef:
560 merging = p2 != nullrev
560 merging = p2 != nullrev
561 editform = cmdutil.mergeeditform(merging, 'rebase')
561 editform = cmdutil.mergeeditform(merging, 'rebase')
562 editor = cmdutil.getcommiteditor(editform=editform,
562 editor = cmdutil.getcommiteditor(editform=editform,
563 **pycompat.strkwargs(opts))
563 **pycompat.strkwargs(opts))
564 newnode = self._concludenode(rev, p1, p2, editor)
564 newnode = self._concludenode(rev, p1, p2, editor)
565 else:
565 else:
566 # Skip commit if we are collapsing
566 # Skip commit if we are collapsing
567 if self.inmemory:
567 if self.inmemory:
568 self.wctx.setbase(repo[p1])
568 self.wctx.setbase(repo[p1])
569 else:
569 else:
570 repo.setparents(repo[p1].node())
570 repo.setparents(repo[p1].node())
571 newnode = None
571 newnode = None
572 # Update the state
572 # Update the state
573 if newnode is not None:
573 if newnode is not None:
574 self.state[rev] = repo[newnode].rev()
574 self.state[rev] = repo[newnode].rev()
575 ui.debug('rebased as %s\n' % short(newnode))
575 ui.debug('rebased as %s\n' % short(newnode))
576 else:
576 else:
577 if not self.collapsef:
577 if not self.collapsef:
578 ui.warn(_('note: rebase of %d:%s created no changes '
578 ui.warn(_('note: rebase of %d:%s created no changes '
579 'to commit\n') % (rev, ctx))
579 'to commit\n') % (rev, ctx))
580 self.skipped.add(rev)
580 self.skipped.add(rev)
581 self.state[rev] = p1
581 self.state[rev] = p1
582 ui.debug('next revision set to %d\n' % p1)
582 ui.debug('next revision set to %d\n' % p1)
583 else:
583 else:
584 ui.status(_('already rebased %s as %s\n') %
584 ui.status(_('already rebased %s as %s\n') %
585 (desc, repo[self.state[rev]]))
585 (desc, repo[self.state[rev]]))
586 if not tr:
586 if not tr:
587 # When not using single transaction, store state after each
587 # When not using single transaction, store state after each
588 # commit is completely done. On InterventionRequired, we thus
588 # commit is completely done. On InterventionRequired, we thus
589 # won't store the status. Instead, we'll hit the "len(parents) == 2"
589 # won't store the status. Instead, we'll hit the "len(parents) == 2"
590 # case and realize that the commit was in progress.
590 # case and realize that the commit was in progress.
591 self.storestatus()
591 self.storestatus()
592
592
593 def _finishrebase(self):
593 def _finishrebase(self):
594 repo, ui, opts = self.repo, self.ui, self.opts
594 repo, ui, opts = self.repo, self.ui, self.opts
595 fm = ui.formatter('rebase', opts)
595 fm = ui.formatter('rebase', opts)
596 fm.startitem()
596 fm.startitem()
597 if self.collapsef:
597 if self.collapsef:
598 p1, p2, _base = defineparents(repo, min(self.state), self.destmap,
598 p1, p2, _base = defineparents(repo, min(self.state), self.destmap,
599 self.state, self.skipped,
599 self.state, self.skipped,
600 self.obsoletenotrebased)
600 self.obsoletenotrebased)
601 editopt = opts.get('edit')
601 editopt = opts.get('edit')
602 editform = 'rebase.collapse'
602 editform = 'rebase.collapse'
603 if self.collapsemsg:
603 if self.collapsemsg:
604 commitmsg = self.collapsemsg
604 commitmsg = self.collapsemsg
605 else:
605 else:
606 commitmsg = 'Collapsed revision'
606 commitmsg = 'Collapsed revision'
607 for rebased in sorted(self.state):
607 for rebased in sorted(self.state):
608 if rebased not in self.skipped:
608 if rebased not in self.skipped:
609 commitmsg += '\n* %s' % repo[rebased].description()
609 commitmsg += '\n* %s' % repo[rebased].description()
610 editopt = True
610 editopt = True
611 editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
611 editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
612 revtoreuse = max(self.state)
612 revtoreuse = max(self.state)
613
613
614 newnode = self._concludenode(revtoreuse, p1, self.external,
614 newnode = self._concludenode(revtoreuse, p1, self.external,
615 editor, commitmsg=commitmsg)
615 editor, commitmsg=commitmsg)
616
616
617 if newnode is not None:
617 if newnode is not None:
618 newrev = repo[newnode].rev()
618 newrev = repo[newnode].rev()
619 for oldrev in self.state:
619 for oldrev in self.state:
620 self.state[oldrev] = newrev
620 self.state[oldrev] = newrev
621
621
622 if 'qtip' in repo.tags():
622 if 'qtip' in repo.tags():
623 updatemq(repo, self.state, self.skipped,
623 updatemq(repo, self.state, self.skipped,
624 **pycompat.strkwargs(opts))
624 **pycompat.strkwargs(opts))
625
625
626 # restore original working directory
626 # restore original working directory
627 # (we do this before stripping)
627 # (we do this before stripping)
628 newwd = self.state.get(self.originalwd, self.originalwd)
628 newwd = self.state.get(self.originalwd, self.originalwd)
629 if newwd < 0:
629 if newwd < 0:
630 # original directory is a parent of rebase set root or ignored
630 # original directory is a parent of rebase set root or ignored
631 newwd = self.originalwd
631 newwd = self.originalwd
632 if newwd not in [c.rev() for c in repo[None].parents()]:
632 if newwd not in [c.rev() for c in repo[None].parents()]:
633 ui.note(_("update back to initial working directory parent\n"))
633 ui.note(_("update back to initial working directory parent\n"))
634 hg.updaterepo(repo, newwd, overwrite=False)
634 hg.updaterepo(repo, newwd, overwrite=False)
635
635
636 collapsedas = None
636 collapsedas = None
637 if self.collapsef and not self.keepf:
637 if self.collapsef and not self.keepf:
638 collapsedas = newnode
638 collapsedas = newnode
639 clearrebased(ui, repo, self.destmap, self.state, self.skipped,
639 clearrebased(ui, repo, self.destmap, self.state, self.skipped,
640 collapsedas, self.keepf, fm=fm, backup=self.backupf)
640 collapsedas, self.keepf, fm=fm, backup=self.backupf)
641
641
642 clearstatus(repo)
642 clearstatus(repo)
643 clearcollapsemsg(repo)
643 clearcollapsemsg(repo)
644
644
645 ui.note(_("rebase completed\n"))
645 ui.note(_("rebase completed\n"))
646 util.unlinkpath(repo.sjoin('undo'), ignoremissing=True)
646 util.unlinkpath(repo.sjoin('undo'), ignoremissing=True)
647 if self.skipped:
647 if self.skipped:
648 skippedlen = len(self.skipped)
648 skippedlen = len(self.skipped)
649 ui.note(_("%d revisions have been skipped\n") % skippedlen)
649 ui.note(_("%d revisions have been skipped\n") % skippedlen)
650 fm.end()
650 fm.end()
651
651
652 if (self.activebookmark and self.activebookmark in repo._bookmarks and
652 if (self.activebookmark and self.activebookmark in repo._bookmarks and
653 repo['.'].node() == repo._bookmarks[self.activebookmark]):
653 repo['.'].node() == repo._bookmarks[self.activebookmark]):
654 bookmarks.activate(repo, self.activebookmark)
654 bookmarks.activate(repo, self.activebookmark)
655
655
656 @command('rebase',
656 @command('rebase',
657 [('s', 'source', '',
657 [('s', 'source', '',
658 _('rebase the specified changeset and descendants'), _('REV')),
658 _('rebase the specified changeset and descendants'), _('REV')),
659 ('b', 'base', '',
659 ('b', 'base', '',
660 _('rebase everything from branching point of specified changeset'),
660 _('rebase everything from branching point of specified changeset'),
661 _('REV')),
661 _('REV')),
662 ('r', 'rev', [],
662 ('r', 'rev', [],
663 _('rebase these revisions'),
663 _('rebase these revisions'),
664 _('REV')),
664 _('REV')),
665 ('d', 'dest', '',
665 ('d', 'dest', '',
666 _('rebase onto the specified changeset'), _('REV')),
666 _('rebase onto the specified changeset'), _('REV')),
667 ('', 'collapse', False, _('collapse the rebased changesets')),
667 ('', 'collapse', False, _('collapse the rebased changesets')),
668 ('m', 'message', '',
668 ('m', 'message', '',
669 _('use text as collapse commit message'), _('TEXT')),
669 _('use text as collapse commit message'), _('TEXT')),
670 ('e', 'edit', False, _('invoke editor on commit messages')),
670 ('e', 'edit', False, _('invoke editor on commit messages')),
671 ('l', 'logfile', '',
671 ('l', 'logfile', '',
672 _('read collapse commit message from file'), _('FILE')),
672 _('read collapse commit message from file'), _('FILE')),
673 ('k', 'keep', False, _('keep original changesets')),
673 ('k', 'keep', False, _('keep original changesets')),
674 ('', 'keepbranches', False, _('keep original branch names')),
674 ('', 'keepbranches', False, _('keep original branch names')),
675 ('D', 'detach', False, _('(DEPRECATED)')),
675 ('D', 'detach', False, _('(DEPRECATED)')),
676 ('i', 'interactive', False, _('(DEPRECATED)')),
676 ('i', 'interactive', False, _('(DEPRECATED)')),
677 ('t', 'tool', '', _('specify merge tool')),
677 ('t', 'tool', '', _('specify merge tool')),
678 ('', 'stop', False, _('stop interrupted rebase')),
678 ('', 'stop', False, _('stop interrupted rebase')),
679 ('c', 'continue', False, _('continue an interrupted rebase')),
679 ('c', 'continue', False, _('continue an interrupted rebase')),
680 ('a', 'abort', False, _('abort an interrupted rebase')),
680 ('a', 'abort', False, _('abort an interrupted rebase')),
681 ('', 'auto-orphans', '', _('automatically rebase orphan revisions '
681 ('', 'auto-orphans', '', _('automatically rebase orphan revisions '
682 'in the specified revset (EXPERIMENTAL)')),
682 'in the specified revset (EXPERIMENTAL)')),
683 ] + cmdutil.dryrunopts + cmdutil.formatteropts + cmdutil.confirmopts,
683 ] + cmdutil.dryrunopts + cmdutil.formatteropts + cmdutil.confirmopts,
684 _('[-s REV | -b REV] [-d REV] [OPTION]'),
684 _('[-s REV | -b REV] [-d REV] [OPTION]'),
685 helpcategory=command.CATEGORY_CHANGE_MANAGEMENT)
685 helpcategory=command.CATEGORY_CHANGE_MANAGEMENT)
686 def rebase(ui, repo, **opts):
686 def rebase(ui, repo, **opts):
687 """move changeset (and descendants) to a different branch
687 """move changeset (and descendants) to a different branch
688
688
689 Rebase uses repeated merging to graft changesets from one part of
689 Rebase uses repeated merging to graft changesets from one part of
690 history (the source) onto another (the destination). This can be
690 history (the source) onto another (the destination). This can be
691 useful for linearizing *local* changes relative to a master
691 useful for linearizing *local* changes relative to a master
692 development tree.
692 development tree.
693
693
694 Published commits cannot be rebased (see :hg:`help phases`).
694 Published commits cannot be rebased (see :hg:`help phases`).
695 To copy commits, see :hg:`help graft`.
695 To copy commits, see :hg:`help graft`.
696
696
697 If you don't specify a destination changeset (``-d/--dest``), rebase
697 If you don't specify a destination changeset (``-d/--dest``), rebase
698 will use the same logic as :hg:`merge` to pick a destination. If
698 will use the same logic as :hg:`merge` to pick a destination. If
699 the current branch contains exactly one other head, the other head
699 the current branch contains exactly one other head, the other head
700 is merged with by default. Otherwise, an explicit revision with
700 is merged with by default. Otherwise, an explicit revision with
701 which to merge must be provided. (The destination changeset is not
701 which to merge must be provided. (The destination changeset is not
702 modified by rebasing, but new changesets are added as its
702 modified by rebasing, but new changesets are added as its
703 descendants.)
703 descendants.)
704
704
705 Here are the ways to select changesets:
705 Here are the ways to select changesets:
706
706
707 1. Explicitly select them using ``--rev``.
707 1. Explicitly select them using ``--rev``.
708
708
709 2. Use ``--source`` to select a root changeset and include all of its
709 2. Use ``--source`` to select a root changeset and include all of its
710 descendants.
710 descendants.
711
711
712 3. Use ``--base`` to select a changeset; rebase will find ancestors
712 3. Use ``--base`` to select a changeset; rebase will find ancestors
713 and their descendants which are not also ancestors of the destination.
713 and their descendants which are not also ancestors of the destination.
714
714
715 4. If you do not specify any of ``--rev``, ``--source``, or ``--base``,
715 4. If you do not specify any of ``--rev``, ``--source``, or ``--base``,
716 rebase will use ``--base .`` as above.
716 rebase will use ``--base .`` as above.
717
717
718 If ``--source`` or ``--rev`` is used, special names ``SRC`` and ``ALLSRC``
718 If ``--source`` or ``--rev`` is used, special names ``SRC`` and ``ALLSRC``
719 can be used in ``--dest``. Destination would be calculated per source
719 can be used in ``--dest``. Destination would be calculated per source
720 revision with ``SRC`` substituted by that single source revision and
720 revision with ``SRC`` substituted by that single source revision and
721 ``ALLSRC`` substituted by all source revisions.
721 ``ALLSRC`` substituted by all source revisions.
722
722
723 Rebase will destroy original changesets unless you use ``--keep``.
723 Rebase will destroy original changesets unless you use ``--keep``.
724 It will also move your bookmarks (even if you do).
724 It will also move your bookmarks (even if you do).
725
725
726 Some changesets may be dropped if they do not contribute changes
726 Some changesets may be dropped if they do not contribute changes
727 (e.g. merges from the destination branch).
727 (e.g. merges from the destination branch).
728
728
729 Unlike ``merge``, rebase will do nothing if you are at the branch tip of
729 Unlike ``merge``, rebase will do nothing if you are at the branch tip of
730 a named branch with two heads. You will need to explicitly specify source
730 a named branch with two heads. You will need to explicitly specify source
731 and/or destination.
731 and/or destination.
732
732
733 If you need to use a tool to automate merge/conflict decisions, you
733 If you need to use a tool to automate merge/conflict decisions, you
734 can specify one with ``--tool``, see :hg:`help merge-tools`.
734 can specify one with ``--tool``, see :hg:`help merge-tools`.
735 As a caveat: the tool will not be used to mediate when a file was
735 As a caveat: the tool will not be used to mediate when a file was
736 deleted; there is no hook presently available for this.
736 deleted; there is no hook presently available for this.
737
737
738 If a rebase is interrupted to manually resolve a conflict, it can be
738 If a rebase is interrupted to manually resolve a conflict, it can be
739 continued with --continue/-c, aborted with --abort/-a, or stopped with
739 continued with --continue/-c, aborted with --abort/-a, or stopped with
740 --stop.
740 --stop.
741
741
742 .. container:: verbose
742 .. container:: verbose
743
743
744 Examples:
744 Examples:
745
745
746 - move "local changes" (current commit back to branching point)
746 - move "local changes" (current commit back to branching point)
747 to the current branch tip after a pull::
747 to the current branch tip after a pull::
748
748
749 hg rebase
749 hg rebase
750
750
751 - move a single changeset to the stable branch::
751 - move a single changeset to the stable branch::
752
752
753 hg rebase -r 5f493448 -d stable
753 hg rebase -r 5f493448 -d stable
754
754
755 - splice a commit and all its descendants onto another part of history::
755 - splice a commit and all its descendants onto another part of history::
756
756
757 hg rebase --source c0c3 --dest 4cf9
757 hg rebase --source c0c3 --dest 4cf9
758
758
759 - rebase everything on a branch marked by a bookmark onto the
759 - rebase everything on a branch marked by a bookmark onto the
760 default branch::
760 default branch::
761
761
762 hg rebase --base myfeature --dest default
762 hg rebase --base myfeature --dest default
763
763
764 - collapse a sequence of changes into a single commit::
764 - collapse a sequence of changes into a single commit::
765
765
766 hg rebase --collapse -r 1520:1525 -d .
766 hg rebase --collapse -r 1520:1525 -d .
767
767
768 - move a named branch while preserving its name::
768 - move a named branch while preserving its name::
769
769
770 hg rebase -r "branch(featureX)" -d 1.3 --keepbranches
770 hg rebase -r "branch(featureX)" -d 1.3 --keepbranches
771
771
772 - stabilize orphaned changesets so history looks linear::
772 - stabilize orphaned changesets so history looks linear::
773
773
774 hg rebase -r 'orphan()-obsolete()'\
774 hg rebase -r 'orphan()-obsolete()'\
775 -d 'first(max((successors(max(roots(ALLSRC) & ::SRC)^)-obsolete())::) +\
775 -d 'first(max((successors(max(roots(ALLSRC) & ::SRC)^)-obsolete())::) +\
776 max(::((roots(ALLSRC) & ::SRC)^)-obsolete()))'
776 max(::((roots(ALLSRC) & ::SRC)^)-obsolete()))'
777
777
778 Configuration Options:
778 Configuration Options:
779
779
780 You can make rebase require a destination if you set the following config
780 You can make rebase require a destination if you set the following config
781 option::
781 option::
782
782
783 [commands]
783 [commands]
784 rebase.requiredest = True
784 rebase.requiredest = True
785
785
786 By default, rebase will close the transaction after each commit. For
786 By default, rebase will close the transaction after each commit. For
787 performance purposes, you can configure rebase to use a single transaction
787 performance purposes, you can configure rebase to use a single transaction
788 across the entire rebase. WARNING: This setting introduces a significant
788 across the entire rebase. WARNING: This setting introduces a significant
789 risk of losing the work you've done in a rebase if the rebase aborts
789 risk of losing the work you've done in a rebase if the rebase aborts
790 unexpectedly::
790 unexpectedly::
791
791
792 [rebase]
792 [rebase]
793 singletransaction = True
793 singletransaction = True
794
794
795 By default, rebase writes to the working copy, but you can configure it to
795 By default, rebase writes to the working copy, but you can configure it to
796 run in-memory for better performance, and to allow it to run if the
796 run in-memory for better performance, and to allow it to run if the
797 working copy is dirty::
797 working copy is dirty::
798
798
799 [rebase]
799 [rebase]
800 experimental.inmemory = True
800 experimental.inmemory = True
801
801
802 Return Values:
802 Return Values:
803
803
804 Returns 0 on success, 1 if nothing to rebase or there are
804 Returns 0 on success, 1 if nothing to rebase or there are
805 unresolved conflicts.
805 unresolved conflicts.
806
806
807 """
807 """
808 opts = pycompat.byteskwargs(opts)
808 opts = pycompat.byteskwargs(opts)
809 inmemory = ui.configbool('rebase', 'experimental.inmemory')
809 inmemory = ui.configbool('rebase', 'experimental.inmemory')
810 dryrun = opts.get('dry_run')
810 dryrun = opts.get('dry_run')
811 confirm = opts.get('confirm')
811 confirm = opts.get('confirm')
812 selactions = [k for k in ['abort', 'stop', 'continue'] if opts.get(k)]
812 selactions = [k for k in ['abort', 'stop', 'continue'] if opts.get(k)]
813 if len(selactions) > 1:
813 if len(selactions) > 1:
814 raise error.Abort(_('cannot use --%s with --%s')
814 raise error.Abort(_('cannot use --%s with --%s')
815 % tuple(selactions[:2]))
815 % tuple(selactions[:2]))
816 action = selactions[0] if selactions else None
816 action = selactions[0] if selactions else None
817 if dryrun and action:
817 if dryrun and action:
818 raise error.Abort(_('cannot specify both --dry-run and --%s') % action)
818 raise error.Abort(_('cannot specify both --dry-run and --%s') % action)
819 if confirm and action:
819 if confirm and action:
820 raise error.Abort(_('cannot specify both --confirm and --%s') % action)
820 raise error.Abort(_('cannot specify both --confirm and --%s') % action)
821 if dryrun and confirm:
821 if dryrun and confirm:
822 raise error.Abort(_('cannot specify both --confirm and --dry-run'))
822 raise error.Abort(_('cannot specify both --confirm and --dry-run'))
823
823
824 if action or repo.currenttransaction() is not None:
824 if action or repo.currenttransaction() is not None:
825 # in-memory rebase is not compatible with resuming rebases.
825 # in-memory rebase is not compatible with resuming rebases.
826 # (Or if it is run within a transaction, since the restart logic can
826 # (Or if it is run within a transaction, since the restart logic can
827 # fail the entire transaction.)
827 # fail the entire transaction.)
828 inmemory = False
828 inmemory = False
829
829
830 if opts.get('auto_orphans'):
830 if opts.get('auto_orphans'):
831 for key in opts:
831 for key in opts:
832 if key != 'auto_orphans' and opts.get(key):
832 if key != 'auto_orphans' and opts.get(key):
833 raise error.Abort(_('--auto-orphans is incompatible with %s') %
833 raise error.Abort(_('--auto-orphans is incompatible with %s') %
834 ('--' + key))
834 ('--' + key))
835 userrevs = list(repo.revs(opts.get('auto_orphans')))
835 userrevs = list(repo.revs(opts.get('auto_orphans')))
836 opts['rev'] = [revsetlang.formatspec('%ld and orphan()', userrevs)]
836 opts['rev'] = [revsetlang.formatspec('%ld and orphan()', userrevs)]
837 opts['dest'] = '_destautoorphanrebase(SRC)'
837 opts['dest'] = '_destautoorphanrebase(SRC)'
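# With these rewritten options, the normal --rev/--dest path below rebases
# each selected orphan onto the destination computed per source revision by
# the _destautoorphanrebase revset predicate defined near the top of the file.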
838
838
839 if dryrun or confirm:
839 if dryrun or confirm:
840 return _dryrunrebase(ui, repo, action, opts)
840 return _dryrunrebase(ui, repo, action, opts)
841 elif action == 'stop':
841 elif action == 'stop':
842 rbsrt = rebaseruntime(repo, ui)
842 rbsrt = rebaseruntime(repo, ui)
843 with repo.wlock(), repo.lock():
843 with repo.wlock(), repo.lock():
844 rbsrt.restorestatus()
844 rbsrt.restorestatus()
845 if rbsrt.collapsef:
845 if rbsrt.collapsef:
846 raise error.Abort(_("cannot stop in --collapse session"))
846 raise error.Abort(_("cannot stop in --collapse session"))
847 allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
847 allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
848 if not (rbsrt.keepf or allowunstable):
848 if not (rbsrt.keepf or allowunstable):
849 raise error.Abort(_("cannot remove original changesets with"
849 raise error.Abort(_("cannot remove original changesets with"
850 " unrebased descendants"),
850 " unrebased descendants"),
851 hint=_('either enable obsmarkers to allow unstable '
851 hint=_('either enable obsmarkers to allow unstable '
852 'revisions or use --keep to keep original '
852 'revisions or use --keep to keep original '
853 'changesets'))
853 'changesets'))
854 if needupdate(repo, rbsrt.state):
854 if needupdate(repo, rbsrt.state):
855 # update to the current working revision
855 # update to the current working revision
856 # to clear interrupted merge
856 # to clear interrupted merge
857 hg.updaterepo(repo, rbsrt.originalwd, overwrite=True)
857 hg.updaterepo(repo, rbsrt.originalwd, overwrite=True)
858 rbsrt._finishrebase()
858 rbsrt._finishrebase()
859 return 0
859 return 0
860 elif inmemory:
860 elif inmemory:
861 try:
861 try:
862 # in-memory merge doesn't support conflicts, so if we hit any, abort
862 # in-memory merge doesn't support conflicts, so if we hit any, abort
863 # and re-run as an on-disk merge.
863 # and re-run as an on-disk merge.
864 overrides = {('rebase', 'singletransaction'): True}
864 overrides = {('rebase', 'singletransaction'): True}
865 with ui.configoverride(overrides, 'rebase'):
865 with ui.configoverride(overrides, 'rebase'):
866 return _dorebase(ui, repo, action, opts, inmemory=inmemory)
866 return _dorebase(ui, repo, action, opts, inmemory=inmemory)
867 except error.InMemoryMergeConflictsError:
867 except error.InMemoryMergeConflictsError:
868 ui.warn(_('hit merge conflicts; re-running rebase without in-memory'
868 ui.warn(_('hit merge conflicts; re-running rebase without in-memory'
869 ' merge\n'))
869 ' merge\n'))
870 _dorebase(ui, repo, action='abort', opts={})
870 # TODO: Make in-memory merge not use the on-disk merge state, so
871 # we don't have to clean it here
872 mergemod.mergestate.clean(repo)
873 clearstatus(repo)
874 clearcollapsemsg(repo)
871 return _dorebase(ui, repo, action, opts, inmemory=False)
875 return _dorebase(ui, repo, action, opts, inmemory=False)
872 else:
876 else:
873 return _dorebase(ui, repo, action, opts)
877 return _dorebase(ui, repo, action, opts)
874
878
875 def _dryrunrebase(ui, repo, action, opts):
879 def _dryrunrebase(ui, repo, action, opts):
876 rbsrt = rebaseruntime(repo, ui, inmemory=True, opts=opts)
880 rbsrt = rebaseruntime(repo, ui, inmemory=True, opts=opts)
877 confirm = opts.get('confirm')
881 confirm = opts.get('confirm')
878 if confirm:
882 if confirm:
879 ui.status(_('starting in-memory rebase\n'))
883 ui.status(_('starting in-memory rebase\n'))
880 else:
884 else:
881 ui.status(_('starting dry-run rebase; repository will not be '
885 ui.status(_('starting dry-run rebase; repository will not be '
882 'changed\n'))
886 'changed\n'))
883 with repo.wlock(), repo.lock():
887 with repo.wlock(), repo.lock():
884 needsabort = True
888 needsabort = True
885 try:
889 try:
886 overrides = {('rebase', 'singletransaction'): True}
890 overrides = {('rebase', 'singletransaction'): True}
887 with ui.configoverride(overrides, 'rebase'):
891 with ui.configoverride(overrides, 'rebase'):
888 _origrebase(ui, repo, action, opts, rbsrt, inmemory=True,
892 _origrebase(ui, repo, action, opts, rbsrt, inmemory=True,
889 leaveunfinished=True)
893 leaveunfinished=True)
890 except error.InMemoryMergeConflictsError:
894 except error.InMemoryMergeConflictsError:
891 ui.status(_('hit a merge conflict\n'))
895 ui.status(_('hit a merge conflict\n'))
892 return 1
896 return 1
893 else:
897 else:
894 if confirm:
898 if confirm:
895 ui.status(_('rebase completed successfully\n'))
899 ui.status(_('rebase completed successfully\n'))
896 if not ui.promptchoice(_(b'apply changes (yn)?'
900 if not ui.promptchoice(_(b'apply changes (yn)?'
897 b'$$ &Yes $$ &No')):
901 b'$$ &Yes $$ &No')):
898 # finish unfinished rebase
902 # finish unfinished rebase
899 rbsrt._finishrebase()
903 rbsrt._finishrebase()
900 else:
904 else:
901 rbsrt._prepareabortorcontinue(isabort=True, backup=False,
905 rbsrt._prepareabortorcontinue(isabort=True, backup=False,
902 suppwarns=True)
906 suppwarns=True)
903 needsabort = False
907 needsabort = False
904 else:
908 else:
905 ui.status(_('dry-run rebase completed successfully; run without'
909 ui.status(_('dry-run rebase completed successfully; run without'
906 ' -n/--dry-run to perform this rebase\n'))
910 ' -n/--dry-run to perform this rebase\n'))
907 return 0
911 return 0
908 finally:
912 finally:
909 if needsabort:
913 if needsabort:
910 # no need to store backup in case of dryrun
914 # no need to store backup in case of dryrun
911 rbsrt._prepareabortorcontinue(isabort=True, backup=False,
915 rbsrt._prepareabortorcontinue(isabort=True, backup=False,
912 suppwarns=True)
916 suppwarns=True)
913
917
914 def _dorebase(ui, repo, action, opts, inmemory=False):
918 def _dorebase(ui, repo, action, opts, inmemory=False):
915 rbsrt = rebaseruntime(repo, ui, inmemory, opts)
919 rbsrt = rebaseruntime(repo, ui, inmemory, opts)
916 return _origrebase(ui, repo, action, opts, rbsrt, inmemory=inmemory)
920 return _origrebase(ui, repo, action, opts, rbsrt, inmemory=inmemory)
917
921
def _origrebase(ui, repo, action, opts, rbsrt, inmemory=False,
                leaveunfinished=False):
    assert action != 'stop'
    with repo.wlock(), repo.lock():
        # Validate input and define rebasing points
        destf = opts.get('dest', None)
        srcf = opts.get('source', None)
        basef = opts.get('base', None)
        revf = opts.get('rev', [])
        # search default destination in this space
        # used in the 'hg pull --rebase' case, see issue 5214.
        destspace = opts.get('_destspace')
        if opts.get('interactive'):
            try:
                if extensions.find('histedit'):
                    enablehistedit = ''
            except KeyError:
                enablehistedit = " --config extensions.histedit="
            help = "hg%s help -e histedit" % enablehistedit
            msg = _("interactive history editing is supported by the "
                    "'histedit' extension (see \"%s\")") % help
            raise error.Abort(msg)

        if rbsrt.collapsemsg and not rbsrt.collapsef:
            raise error.Abort(
                _('message can only be specified with collapse'))

        if action:
            if rbsrt.collapsef:
                raise error.Abort(
                    _('cannot use collapse with continue or abort'))
            if srcf or basef or destf:
                raise error.Abort(
                    _('abort and continue do not allow specifying revisions'))
            if action == 'abort' and opts.get('tool', False):
                ui.warn(_('tool option will be ignored\n'))
            if action == 'continue':
                ms = mergemod.mergestate.read(repo)
                mergeutil.checkunresolved(ms)

            retcode = rbsrt._prepareabortorcontinue(isabort=(action == 'abort'))
            if retcode is not None:
                return retcode
        else:
            destmap = _definedestmap(ui, repo, inmemory, destf, srcf, basef,
                                     revf, destspace=destspace)
            retcode = rbsrt._preparenewrebase(destmap)
            if retcode is not None:
                return retcode
            storecollapsemsg(repo, rbsrt.collapsemsg)

        tr = None

        singletr = ui.configbool('rebase', 'singletransaction')
        if singletr:
            tr = repo.transaction('rebase')

        # If `rebase.singletransaction` is enabled, wrap the entire operation in
        # one transaction here. Otherwise, transactions are obtained when
        # committing each node, which is slower but allows partial success.
        with util.acceptintervention(tr):
            # Same logic for the dirstate guard, except we don't create one when
            # rebasing in-memory (it's not needed).
            dsguard = None
            if singletr and not inmemory:
                dsguard = dirstateguard.dirstateguard(repo, 'rebase')
            with util.acceptintervention(dsguard):
                rbsrt._performrebase(tr)
                if not leaveunfinished:
                    rbsrt._finishrebase()

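# Illustrative note: the single-transaction mode handled above can be enabled
# for one run, e.g.
#
#   hg rebase -s SRC -d DEST --config rebase.singletransaction=true
#
# As the comment in the code says, one big transaction is faster but gives up
# the partial-success behaviour of the default per-node transactions.
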
def _definedestmap(ui, repo, inmemory, destf=None, srcf=None, basef=None,
                   revf=None, destspace=None):
    """use revisions argument to define destmap {srcrev: destrev}"""
    if revf is None:
        revf = []

    # destspace is here to work around issues with `hg pull --rebase` see
    # issue5214 for details
    if srcf and basef:
        raise error.Abort(_('cannot specify both a source and a base'))
    if revf and basef:
        raise error.Abort(_('cannot specify both a revision and a base'))
    if revf and srcf:
        raise error.Abort(_('cannot specify both a revision and a source'))

    if not inmemory:
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

    if ui.configbool('commands', 'rebase.requiredest') and not destf:
        raise error.Abort(_('you must specify a destination'),
                          hint=_('use: hg rebase -d REV'))

    dest = None

    if revf:
        rebaseset = scmutil.revrange(repo, revf)
        if not rebaseset:
            ui.status(_('empty "rev" revision set - nothing to rebase\n'))
            return None
    elif srcf:
        src = scmutil.revrange(repo, [srcf])
        if not src:
            ui.status(_('empty "source" revision set - nothing to rebase\n'))
            return None
        rebaseset = repo.revs('(%ld)::', src)
        assert rebaseset
    else:
        base = scmutil.revrange(repo, [basef or '.'])
        if not base:
            ui.status(_('empty "base" revision set - '
                        "can't compute rebase set\n"))
            return None
        if destf:
            # --base does not support multiple destinations
            dest = scmutil.revsingle(repo, destf)
        else:
            dest = repo[_destrebase(repo, base, destspace=destspace)]
            destf = bytes(dest)

        roots = [] # selected children of branching points
        bpbase = {} # {branchingpoint: [origbase]}
        for b in base: # group bases by branching points
            bp = repo.revs('ancestor(%d, %d)', b, dest.rev()).first()
            bpbase[bp] = bpbase.get(bp, []) + [b]
        if None in bpbase:
            # emulate the old behavior, showing "nothing to rebase" (a better
            # behavior may be abort with "cannot find branching point" error)
            bpbase.clear()
        for bp, bs in bpbase.iteritems(): # calculate roots
            roots += list(repo.revs('children(%d) & ancestors(%ld)', bp, bs))

        rebaseset = repo.revs('%ld::', roots)

        if not rebaseset:
            # transform to list because smartsets are not comparable to
            # lists. This should be improved to honor laziness of
            # smartset.
            if list(base) == [dest.rev()]:
                if basef:
                    ui.status(_('nothing to rebase - %s is both "base"'
                                ' and destination\n') % dest)
                else:
                    ui.status(_('nothing to rebase - working directory '
                                'parent is also destination\n'))
            elif not repo.revs('%ld - ::%d', base, dest.rev()):
                if basef:
                    ui.status(_('nothing to rebase - "base" %s is '
                                'already an ancestor of destination '
                                '%s\n') %
                              ('+'.join(bytes(repo[r]) for r in base),
                               dest))
                else:
                    ui.status(_('nothing to rebase - working '
                                'directory parent is already an '
                                'ancestor of destination %s\n') % dest)
            else: # can it happen?
                ui.status(_('nothing to rebase from %s to %s\n') %
                          ('+'.join(bytes(repo[r]) for r in base), dest))
            return None

    rebasingwcp = repo['.'].rev() in rebaseset
    ui.log("rebase", "rebasing working copy parent: %r\n", rebasingwcp,
           rebase_rebasing_wcp=rebasingwcp)
    if inmemory and rebasingwcp:
        # Check these since we did not before.
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

    if not destf:
        dest = repo[_destrebase(repo, rebaseset, destspace=destspace)]
        destf = bytes(dest)

    allsrc = revsetlang.formatspec('%ld', rebaseset)
    alias = {'ALLSRC': allsrc}

    if dest is None:
        try:
            # fast path: try to resolve dest without SRC alias
            dest = scmutil.revsingle(repo, destf, localalias=alias)
        except error.RepoLookupError:
            # multi-dest path: resolve dest for each SRC separately
            destmap = {}
            for r in rebaseset:
                alias['SRC'] = revsetlang.formatspec('%d', r)
                # use repo.anyrevs instead of scmutil.revsingle because we
                # don't want to abort if destset is empty.
                destset = repo.anyrevs([destf], user=True, localalias=alias)
                size = len(destset)
                if size == 1:
                    destmap[r] = destset.first()
                elif size == 0:
                    ui.note(_('skipping %s - empty destination\n') % repo[r])
                else:
                    raise error.Abort(_('rebase destination for %s is not '
                                        'unique') % repo[r])

    if dest is not None:
        # single-dest case: assign dest to each rev in rebaseset
        destrev = dest.rev()
        destmap = {r: destrev for r in rebaseset} # {srcrev: destrev}

    if not destmap:
        ui.status(_('nothing to rebase - empty destination\n'))
        return None

    return destmap

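# Example of the mapping returned above (hypothetical revision numbers): for a
# single destination such as `hg rebase -r 5+6 -d 9`, destmap == {5: 9, 6: 9}.
# When the destination is a revset that mentions the SRC alias, each source
# revision may resolve to a different destination, which is why the mapping is
# keyed per source revision.
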
def externalparent(repo, state, destancestors):
    """Return the revision that should be used as the second parent
    when the revisions in state are collapsed on top of destancestors.
    Abort if there is more than one parent.
    """
    parents = set()
    source = min(state)
    for rev in state:
        if rev == source:
            continue
        for p in repo[rev].parents():
            if (p.rev() not in state
                and p.rev() not in destancestors):
                parents.add(p.rev())
    if not parents:
        return nullrev
    if len(parents) == 1:
        return parents.pop()
    raise error.Abort(_('unable to collapse on top of %d, there is more '
                        'than one external parent: %s') %
                      (max(destancestors),
                       ', '.join("%d" % p for p in sorted(parents))))

def commitmemorynode(repo, p1, p2, wctx, editor, extra, user, date, commitmsg):
    '''Commit the memory changes with parents p1 and p2.
    Return node of committed revision.'''
    # Replicates the empty check in ``repo.commit``.
    if wctx.isempty() and not repo.ui.configbool('ui', 'allowemptycommit'):
        return None

    # By convention, ``extra['branch']`` (set by extrafn) clobbers
    # ``branch`` (used when passing ``--keepbranches``).
    branch = repo[p1].branch()
    if 'branch' in extra:
        branch = extra['branch']

    memctx = wctx.tomemctx(commitmsg, parents=(p1, p2), date=date,
                           extra=extra, user=user, branch=branch, editor=editor)
    commitres = repo.commitctx(memctx)
    wctx.clean() # Might be reused
    return commitres

def commitnode(repo, p1, p2, editor, extra, user, date, commitmsg):
    '''Commit the wd changes with parents p1 and p2.
    Return node of committed revision.'''
    dsguard = util.nullcontextmanager()
    if not repo.ui.configbool('rebase', 'singletransaction'):
        dsguard = dirstateguard.dirstateguard(repo, 'rebase')
    with dsguard:
        repo.setparents(repo[p1].node(), repo[p2].node())

        # Commit might fail if unresolved files exist
        newnode = repo.commit(text=commitmsg, user=user, date=date,
                              extra=extra, editor=editor)

        repo.dirstate.setbranch(repo[newnode].branch())
        return newnode

def rebasenode(repo, rev, p1, base, collapse, dest, wctx):
    'Rebase a single revision rev on top of p1 using base as merge ancestor'
    # Merge phase
    # Update to destination and merge it with local
    if wctx.isinmemory():
        wctx.setbase(repo[p1])
    else:
        if repo['.'].rev() != p1:
            repo.ui.debug(" update to %d:%s\n" % (p1, repo[p1]))
            mergemod.update(repo, p1, branchmerge=False, force=True)
        else:
            repo.ui.debug(" already in destination\n")
        # This is, alas, necessary to invalidate workingctx's manifest cache,
        # as well as other data we litter on it in other places.
        wctx = repo[None]
        repo.dirstate.write(repo.currenttransaction())
    repo.ui.debug(" merge against %d:%s\n" % (rev, repo[rev]))
    if base is not None:
        repo.ui.debug(" detach base %d:%s\n" % (base, repo[base]))
    # When collapsing in-place, the parent is the common ancestor, we
    # have to allow merging with it.
    stats = mergemod.update(repo, rev, branchmerge=True, force=True,
                            ancestor=base, mergeancestor=collapse,
                            labels=['dest', 'source'], wc=wctx)
    if collapse:
        copies.duplicatecopies(repo, wctx, rev, dest)
    else:
        # If we're not using --collapse, we need to
        # duplicate copies between the revision we're
        # rebasing and its first parent, but *not*
        # duplicate any copies that have already been
        # performed in the destination.
        p1rev = repo[rev].p1().rev()
        copies.duplicatecopies(repo, wctx, rev, p1rev, skiprev=dest)
    return stats

def adjustdest(repo, rev, destmap, state, skipped):
    """adjust rebase destination given the current rebase state

    rev is what is being rebased. Return a list of two revs, which are the
    adjusted destinations for rev's p1 and p2, respectively. If a parent is
    nullrev, return dest without adjustment for it.

    For example, when rebasing B+E to F and C to G, rebase will first move B
    to B1, and E's destination will be adjusted from F to B1.

        B1 <- written during rebasing B
        |
        F <- original destination of B, E
        |
        | E <- rev, which is being rebased
        | |
        | D <- prev, one parent of rev being checked
        | |
        | x <- skipped, ex. no successor or successor in (::dest)
        | |
        | C <- rebased as C', different destination
        | |
        | B <- rebased as B1     C'
        |/                       |
        A                        G <- destination of C, different

    Another example about merge changeset, rebase -r C+G+H -d K, rebase will
    first move C to C1, G to G1, and when it's checking H, the adjusted
    destinations will be [C1, G1].

        H       C1 G1
       /|       | /
      F G       |/
    K | |  ->   K
    | C D       |
    | |/        |
    | B         | ...
    |/          |/
    A           A

    Besides, adjust dest according to existing rebase information. For example,

      B C D    B needs to be rebased on top of C, C needs to be rebased on top
       \|/     of D. We will rebase C first.
        A

          C'   After rebasing C, when considering B's destination, use C'
          |    instead of the original C.
      B   D
       \ /
        A
    """
    # pick already rebased revs with same dest from state as interesting source
    dest = destmap[rev]
    source = [s for s, d in state.items()
              if d > 0 and destmap[s] == dest and s not in skipped]

    result = []
    for prev in repo.changelog.parentrevs(rev):
        adjusted = dest
        if prev != nullrev:
            candidate = repo.revs('max(%ld and (::%d))', source, prev).first()
            if candidate is not None:
                adjusted = state[candidate]
        if adjusted == dest and dest in state:
            adjusted = state[dest]
            if adjusted == revtodo:
                # sortsource should produce an order that makes this impossible
                raise error.ProgrammingError(
                    'rev %d should be rebased already at this time' % dest)
        result.append(adjusted)
    return result

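# Worked example (hypothetical revision numbers): with destmap = {10: 20, 11: 20}
# and state = {10: 25, 11: revtodo} (revision 10 already rebased to 25),
# adjusting revision 11, whose first parent is 10, yields 25 rather than 20 for
# that parent, the same adjustment as the B/B1 case in the docstring above.
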
def _checkobsrebase(repo, ui, rebaseobsrevs, rebaseobsskipped):
    """
    Abort if rebase will create divergence or rebase is a no-op because of
    markers

    `rebaseobsrevs`: set of obsolete revisions in source
    `rebaseobsskipped`: set of revisions from source skipped because they have
    successors in destination or no non-obsolete successor.
    """
    # Obsolete node with successors not in dest leads to divergence
    divergenceok = ui.configbool('experimental',
                                 'evolution.allowdivergence')
    divergencebasecandidates = rebaseobsrevs - rebaseobsskipped

    if divergencebasecandidates and not divergenceok:
        divhashes = (bytes(repo[r])
                     for r in divergencebasecandidates)
        msg = _("this rebase will cause "
                "divergences from: %s")
        h = _("to force the rebase please set "
              "experimental.evolution.allowdivergence=True")
        raise error.Abort(msg % (",".join(divhashes),), hint=h)

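# Illustrative: when this check aborts, divergence can be explicitly allowed
# for a single run, e.g.
#
#   hg rebase -r REV -d DEST --config experimental.evolution.allowdivergence=True
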
def successorrevs(unfi, rev):
    """yield revision numbers for successors of rev"""
    assert unfi.filtername is None
    nodemap = unfi.changelog.nodemap
    for s in obsutil.allsuccessors(unfi.obsstore, [unfi[rev].node()]):
        if s in nodemap:
            yield nodemap[s]

def defineparents(repo, rev, destmap, state, skipped, obsskipped):
    """Return new parents and optionally a merge base for rev being rebased

    The destination specified by "dest" cannot always be used directly because
    a previous rebase result could affect the destination. For example,

          D E    rebase -r C+D+E -d B
          |/     C will be rebased to C'
        B C      D's new destination will be C' instead of B
        |/       E's new destination will be C' instead of B
        A

    The new parents of a merge are slightly more complicated. See the comment
    block below.
    """
    # use unfiltered changelog since successorrevs may return filtered nodes
    assert repo.filtername is None
    cl = repo.changelog
    isancestor = cl.isancestorrev

    dest = destmap[rev]
    oldps = repo.changelog.parentrevs(rev) # old parents
    newps = [nullrev, nullrev] # new parents
    dests = adjustdest(repo, rev, destmap, state, skipped)
    bases = list(oldps) # merge base candidates, initially just old parents

    if all(r == nullrev for r in oldps[1:]):
        # For non-merge changeset, just move p to adjusted dest as requested.
        newps[0] = dests[0]
    else:
        # For merge changeset, if we move p to dests[i] unconditionally, both
        # parents may change and the end result looks like "the merge loses a
        # parent", which is a surprise. This is a limit because "--dest" only
        # accepts one dest per src.
        #
        # Therefore, only move p with reasonable conditions (in this order):
        #   1. use dest, if dest is a descendant of (p or one of p's successors)
        #   2. use p's rebased result, if p is rebased (state[p] > 0)
        #
        # Comparing with adjustdest, the logic here does some additional work:
        #   1. decide which parents will not be moved towards dest
        #   2. if the above decision is "no", should a parent still be moved
        #      because it was rebased?
        #
        # For example:
        #
        #     C    # "rebase -r C -d D" is an error since none of the parents
        #    /|    # can be moved. "rebase -r B+C -d D" will move C's parent
        #   A B D  # B (using rule "2."), since B will be rebased.
        #
        # The loop tries not to rely on the fact that a Mercurial node has
        # at most 2 parents.
        for i, p in enumerate(oldps):
            np = p # new parent
            if any(isancestor(x, dests[i]) for x in successorrevs(repo, p)):
                np = dests[i]
            elif p in state and state[p] > 0:
                np = state[p]

            # "bases" only record "special" merge bases that cannot be
            # calculated from changelog DAG (i.e. isancestor(p, np) is False).
            # For example:
            #
            #      B'   # rebase -s B -d D, when B was rebased to B'. dest for C
            #      | C  # is B', but merge base for C is B, instead of
            #      D |  # changelog.ancestor(C, B') == A. If changelog DAG and
            #      | B  # "state" edges are merged (so there will be an edge from
            #      |/   # B to B'), the merge base is still ancestor(C, B') in
            #      A    # the merged graph.
            #
            # Also see https://bz.mercurial-scm.org/show_bug.cgi?id=1950#c8
            # which uses "virtual null merge" to explain this situation.
            if isancestor(p, np):
                bases[i] = nullrev

            # If one parent becomes an ancestor of the other, drop the ancestor
            for j, x in enumerate(newps[:i]):
                if x == nullrev:
                    continue
                if isancestor(np, x): # CASE-1
                    np = nullrev
                elif isancestor(x, np): # CASE-2
                    newps[j] = np
                    np = nullrev
                    # New parents forming an ancestor relationship does not
                    # mean the old parents have a similar relationship. Do not
                    # set bases[x] to nullrev.
                    bases[j], bases[i] = bases[i], bases[j]

            newps[i] = np

        # "rebasenode" updates to new p1, and the old p1 will be used as merge
        # base. If only p2 changes, merging using unchanged p1 as merge base is
        # suboptimal. Therefore swap parents to make the merge sane.
        if newps[1] != nullrev and oldps[0] == newps[0]:
            assert len(newps) == 2 and len(oldps) == 2
            newps.reverse()
            bases.reverse()

        # No parent change might be an error because we fail to make rev a
        # descendant of requested dest. This can happen, for example:
        #
        #     C    # rebase -r C -d D
        #    /|    # None of A and B will be changed to D and rebase fails.
        #   A B D
        if set(newps) == set(oldps) and dest not in newps:
            raise error.Abort(_('cannot rebase %d:%s without '
                                'moving at least one of its parents')
                              % (rev, repo[rev]))

    # Source should not be ancestor of dest. The check here guarantees it's
    # impossible. With multi-dest, the initial check does not cover complex
    # cases since we don't have abstractions to dry-run rebase cheaply.
    if any(p != nullrev and isancestor(rev, p) for p in newps):
        raise error.Abort(_('source is ancestor of destination'))

    # "rebasenode" updates to new p1, use the corresponding merge base.
    if bases[0] != nullrev:
        base = bases[0]
    else:
        base = None

    # Check if the merge will contain unwanted changes. That may happen if
    # there are multiple special (non-changelog ancestor) merge bases, which
    # cannot be handled well by the 3-way merge algorithm. For example:
    #
    #     F
    #    /|
    #   D E  # "rebase -r D+E+F -d Z", when rebasing F, if "D" was chosen
    #   | |  # as merge base, the difference between D and F will include
    #   B C  # C, so the rebased F will contain C surprisingly. If "E" was
    #   |/   # chosen, the rebased F will contain B.
    #   A Z
    #
    # But our merge base candidates (D and E in above case) could still be
    # better than the default (ancestor(F, Z) == null). Therefore still
    # pick one (so choose p1 above).
    if sum(1 for b in bases if b != nullrev) > 1:
        unwanted = [None, None] # unwanted[i]: unwanted revs if we choose bases[i]
        for i, base in enumerate(bases):
            if base == nullrev:
                continue
            # Revisions in the side (not chosen as merge base) branch that
            # might contain "surprising" contents
            siderevs = list(repo.revs('((%ld-%d) %% (%d+%d))',
                                      bases, base, base, dest))

            # If those revisions are covered by rebaseset, the result is good.
            # A merge in rebaseset would be considered to cover its ancestors.
            if siderevs:
                rebaseset = [r for r, d in state.items()
                             if d > 0 and r not in obsskipped]
                merges = [r for r in rebaseset
                          if cl.parentrevs(r)[1] != nullrev]
                unwanted[i] = list(repo.revs('%ld - (::%ld) - %ld',
                                             siderevs, merges, rebaseset))

        # Choose a merge base that has a minimal number of unwanted revs.
        l, i = min((len(revs), i)
                   for i, revs in enumerate(unwanted) if revs is not None)
        base = bases[i]

        # newps[0] should match merge base if possible. Currently, if newps[i]
        # is nullrev, the only case is newps[i] and newps[j] (j < i), one is
        # the other's ancestor. In that case, it's fine to not swap newps here.
        # (see CASE-1 and CASE-2 above)
        if i != 0 and newps[i] != nullrev:
            newps[0], newps[i] = newps[i], newps[0]

        # The merge will include unwanted revisions. Abort now. Revisit this if
        # we have a more advanced merge algorithm that handles multiple bases.
        if l > 0:
            unwanteddesc = _(' or ').join(
                (', '.join('%d:%s' % (r, repo[r]) for r in revs)
                 for revs in unwanted if revs is not None))
            raise error.Abort(
                _('rebasing %d:%s will include unwanted changes from %s')
                % (rev, repo[rev], unwanteddesc))

    repo.ui.debug(" future parents are %d and %d\n" % tuple(newps))

    return newps[0], newps[1], base

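# Informal reading of the return value (not additional behaviour): for a
# non-merge changeset the result is (adjusted destination, nullrev, old p1),
# i.e. the old parent serves as the merge base (or None for a root); for a
# merge, a parent may instead be its own rebased copy, and `base` is only a
# "special" value when it cannot be derived from the changelog DAG, as the
# comments above explain.
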
def isagitpatch(repo, patchname):
    'Return true if the given patch is in git format'
    mqpatch = os.path.join(repo.mq.path, patchname)
    for line in patch.linereader(open(mqpatch, 'rb')):
        if line.startswith('diff --git'):
            return True
    return False

def updatemq(repo, state, skipped, **opts):
    'Update rebased mq patches - finalize and then import them'
    mqrebase = {}
    mq = repo.mq
    original_series = mq.fullseries[:]
    skippedpatches = set()

    for p in mq.applied:
        rev = repo[p.node].rev()
        if rev in state:
            repo.ui.debug('revision %d is an mq patch (%s), finalize it.\n' %
                          (rev, p.name))
            mqrebase[rev] = (p.name, isagitpatch(repo, p.name))
        else:
            # Applied but not rebased, not sure this should happen
            skippedpatches.add(p.name)

    if mqrebase:
        mq.finish(repo, mqrebase.keys())

        # We must start import from the newest revision
        for rev in sorted(mqrebase, reverse=True):
            if rev not in skipped:
                name, isgit = mqrebase[rev]
                repo.ui.note(_('updating mq patch %s to %d:%s\n') %
                             (name, state[rev], repo[state[rev]]))
                mq.qimport(repo, (), patchname=name, git=isgit,
                           rev=["%d" % state[rev]])
            else:
                # Rebased and skipped
                skippedpatches.add(mqrebase[rev][0])

        # Patches were either applied and rebased and imported in
        # order, applied and removed or unapplied. Discard the removed
        # ones while preserving the original series order and guards.
        newseries = [s for s in original_series
                     if mq.guard_re.split(s, 1)[0] not in skippedpatches]
        mq.fullseries[:] = newseries
        mq.seriesdirty = True
        mq.savedirty()

def storecollapsemsg(repo, collapsemsg):
    'Store the collapse message to allow recovery'
    collapsemsg = collapsemsg or ''
    f = repo.vfs("last-message.txt", "w")
    f.write("%s\n" % collapsemsg)
    f.close()

def clearcollapsemsg(repo):
    'Remove collapse message file'
    repo.vfs.unlinkpath("last-message.txt", ignoremissing=True)

def restorecollapsemsg(repo, isabort):
    'Restore previously stored collapse message'
    try:
        f = repo.vfs("last-message.txt")
        collapsemsg = f.readline().strip()
        f.close()
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
        if isabort:
            # Oh well, just abort like normal
            collapsemsg = ''
        else:
            raise error.Abort(_('missing .hg/last-message.txt for rebase'))
    return collapsemsg

def clearstatus(repo):
    'Remove the status files'
    # Make sure the active transaction won't write the state file
    tr = repo.currenttransaction()
    if tr:
        tr.removefilegenerator('rebasestate')
    repo.vfs.unlinkpath("rebasestate", ignoremissing=True)

def needupdate(repo, state):
    '''check whether we should `update --clean` away from a merge, or if
    somehow the working dir got forcibly updated, e.g. by older hg'''
    parents = [p.rev() for p in repo[None].parents()]

    # Are we in a merge state at all?
    if len(parents) < 2:
        return False

    # We should be standing on the first as-of-yet unrebased commit.
    firstunrebased = min([old for old, new in state.iteritems()
                          if new == nullrev])
    if firstunrebased in parents:
        return True

    return False

def abort(repo, originalwd, destmap, state, activebookmark=None, backup=True,
          suppwarns=False):
    '''Restore the repository to its original state. Additional args:

    activebookmark: the name of the bookmark that should be active after the
    restore'''

    try:
        # If the first commits in the rebased set get skipped during the rebase,
        # their values within the state mapping will be the dest rev id. The
        # rebased list must not contain the dest rev (issue4896)
        rebased = [s for r, s in state.items()
                   if s >= 0 and s != r and s != destmap[r]]
        immutable = [d for d in rebased if not repo[d].mutable()]
        cleanup = True
        if immutable:
            repo.ui.warn(_("warning: can't clean up public changesets %s\n")
                         % ', '.join(bytes(repo[r]) for r in immutable),
                         hint=_("see 'hg help phases' for details"))
            cleanup = False

        descendants = set()
        if rebased:
            descendants = set(repo.changelog.descendants(rebased))
        if descendants - set(rebased):
            repo.ui.warn(_("warning: new changesets detected on destination "
                           "branch, can't strip\n"))
            cleanup = False

        if cleanup:
            shouldupdate = False
            if rebased:
                strippoints = [
                    c.node() for c in repo.set('roots(%ld)', rebased)]

            updateifonnodes = set(rebased)
            updateifonnodes.update(destmap.values())
            updateifonnodes.add(originalwd)
            shouldupdate = repo['.'].rev() in updateifonnodes

            # Update away from the rebase if necessary
            if shouldupdate or needupdate(repo, state):
                mergemod.update(repo, originalwd, branchmerge=False, force=True)

            # Strip from the first rebased revision
            if rebased:
                repair.strip(repo.ui, repo, strippoints, backup=backup)

        if activebookmark and activebookmark in repo._bookmarks:
            bookmarks.activate(repo, activebookmark)

    finally:
        clearstatus(repo)
        clearcollapsemsg(repo)
        if not suppwarns:
            repo.ui.warn(_('rebase aborted\n'))
    return 0

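# Illustrative: this is the path behind `hg rebase --abort`: it strips the
# already rebased revisions when that is safe, updates the working directory
# back to `originalwd` if it was left on a rebased or destination revision,
# and reactivates the previously active bookmark.
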
def sortsource(destmap):
    """yield source revisions in an order such that we only rebase things once

    If source and destination overlap, we should filter out revisions
    depending on other revisions which haven't been rebased yet.

    Yield a sorted list of revisions each time.

    For example, when rebasing A to B, B to C. This function yields [B], then
    [A], indicating B needs to be rebased first.

    Raise if there is a cycle so the rebase is impossible.
    """
    srcset = set(destmap)
    while srcset:
        srclist = sorted(srcset)
        result = []
        for r in srclist:
            if destmap[r] not in srcset:
                result.append(r)
        if not result:
            raise error.Abort(_('source and destination form a cycle'))
        srcset -= set(result)
        yield result

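# Illustrative doctest-style example (plain integers standing in for
# revisions):
#
#   >>> list(sortsource({1: 2, 2: 3}))
#   [[2], [1]]
#
# revision 2 must be rebased before revision 1 (its destination), while a
# mapping such as {1: 2, 2: 1} raises "source and destination form a cycle".
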
1691 def buildstate(repo, destmap, collapse):
1695 def buildstate(repo, destmap, collapse):
1692 '''Define which revisions are going to be rebased and where
1696 '''Define which revisions are going to be rebased and where
1693
1697
1694 repo: repo
1698 repo: repo
1695 destmap: {srcrev: destrev}
1699 destmap: {srcrev: destrev}
1696 '''
1700 '''
1697 rebaseset = destmap.keys()
1701 rebaseset = destmap.keys()
1698 originalwd = repo['.'].rev()
1702 originalwd = repo['.'].rev()
1699
1703
1700 # This check isn't strictly necessary, since mq detects commits over an
1704 # This check isn't strictly necessary, since mq detects commits over an
1701 # applied patch. But it prevents messing up the working directory when
1705 # applied patch. But it prevents messing up the working directory when
1702 # a partially completed rebase is blocked by mq.
1706 # a partially completed rebase is blocked by mq.
1703 if 'qtip' in repo.tags():
1707 if 'qtip' in repo.tags():
1704 mqapplied = set(repo[s.node].rev() for s in repo.mq.applied)
1708 mqapplied = set(repo[s.node].rev() for s in repo.mq.applied)
1705 if set(destmap.values()) & mqapplied:
1709 if set(destmap.values()) & mqapplied:
1706 raise error.Abort(_('cannot rebase onto an applied mq patch'))
1710 raise error.Abort(_('cannot rebase onto an applied mq patch'))
1707
1711
1708 # Get "cycle" error early by exhausting the generator.
1712 # Get "cycle" error early by exhausting the generator.
1709 sortedsrc = list(sortsource(destmap)) # a list of sorted revs
1713 sortedsrc = list(sortsource(destmap)) # a list of sorted revs
1710 if not sortedsrc:
1714 if not sortedsrc:
1711 raise error.Abort(_('no matching revisions'))
1715 raise error.Abort(_('no matching revisions'))
1712
1716
1713 # Only check the first batch of revisions to rebase, i.e. those not depending
1717 # Only check the first batch of revisions to rebase, i.e. those not depending
1714 # on other revisions in the rebase set. This means the "source is ancestor of
1718 # on other revisions in the rebase set. This means the "source is ancestor of
1715 # destination" check for the second (and following) batches is not done here.
1719 # destination" check for the second (and following) batches is not done here.
1716 # We rely on "defineparents" to do that check.
1720 # We rely on "defineparents" to do that check.
1717 roots = list(repo.set('roots(%ld)', sortedsrc[0]))
1721 roots = list(repo.set('roots(%ld)', sortedsrc[0]))
1718 if not roots:
1722 if not roots:
1719 raise error.Abort(_('no matching revisions'))
1723 raise error.Abort(_('no matching revisions'))
1720 def revof(r):
1724 def revof(r):
1721 return r.rev()
1725 return r.rev()
1722 roots = sorted(roots, key=revof)
1726 roots = sorted(roots, key=revof)
1723 state = dict.fromkeys(rebaseset, revtodo)
1727 state = dict.fromkeys(rebaseset, revtodo)
1724 emptyrebase = (len(sortedsrc) == 1)
1728 emptyrebase = (len(sortedsrc) == 1)
1725 for root in roots:
1729 for root in roots:
1726 dest = repo[destmap[root.rev()]]
1730 dest = repo[destmap[root.rev()]]
1727 commonbase = root.ancestor(dest)
1731 commonbase = root.ancestor(dest)
1728 if commonbase == root:
1732 if commonbase == root:
1729 raise error.Abort(_('source is ancestor of destination'))
1733 raise error.Abort(_('source is ancestor of destination'))
1730 if commonbase == dest:
1734 if commonbase == dest:
1731 wctx = repo[None]
1735 wctx = repo[None]
1732 if dest == wctx.p1():
1736 if dest == wctx.p1():
1733 # when rebasing to '.', it will use the current wd branch name
1737 # when rebasing to '.', it will use the current wd branch name
1734 samebranch = root.branch() == wctx.branch()
1738 samebranch = root.branch() == wctx.branch()
1735 else:
1739 else:
1736 samebranch = root.branch() == dest.branch()
1740 samebranch = root.branch() == dest.branch()
1737 if not collapse and samebranch and dest in root.parents():
1741 if not collapse and samebranch and dest in root.parents():
1738 # mark the revision as done by setting its new revision
1742 # mark the revision as done by setting its new revision
1739 # equal to its old (current) revision
1743 # equal to its old (current) revision
1740 state[root.rev()] = root.rev()
1744 state[root.rev()] = root.rev()
1741 repo.ui.debug('source is a child of destination\n')
1745 repo.ui.debug('source is a child of destination\n')
1742 continue
1746 continue
1743
1747
1744 emptyrebase = False
1748 emptyrebase = False
1745 repo.ui.debug('rebase onto %s starting from %s\n' % (dest, root))
1749 repo.ui.debug('rebase onto %s starting from %s\n' % (dest, root))
1746 if emptyrebase:
1750 if emptyrebase:
1747 return None
1751 return None
1748 for rev in sorted(state):
1752 for rev in sorted(state):
1749 parents = [p for p in repo.changelog.parentrevs(rev) if p != nullrev]
1753 parents = [p for p in repo.changelog.parentrevs(rev) if p != nullrev]
1750 # if all parents of this revision are done, then so is this revision
1754 # if all parents of this revision are done, then so is this revision
1751 if parents and all((state.get(p) == p for p in parents)):
1755 if parents and all((state.get(p) == p for p in parents)):
1752 state[rev] = rev
1756 state[rev] = rev
1753 return originalwd, destmap, state
1757 return originalwd, destmap, state
1754
1758
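A minimal sketch of the final propagation loop in buildstate, assuming a hypothetical parents table and using -1 as the "still to do" placeholder (an assumption of this sketch, not a statement about the extension's revtodo constant): a revision whose parents all stayed in place (state[p] == p) is itself marked as already done.

revtodo = -1                        # assumed placeholder for "not rebased yet"
parents = {3: [2], 4: [3]}          # hypothetical rev -> parent revs
state = {2: 2, 3: revtodo, 4: revtodo}

for rev in sorted(state):
    ps = parents.get(rev, [])
    if ps and all(state.get(p) == p for p in ps):
        state[rev] = rev            # all parents done in place -> rev is done too

print(state)                        # {2: 2, 3: 3, 4: 4}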
1755 def clearrebased(ui, repo, destmap, state, skipped, collapsedas=None,
1759 def clearrebased(ui, repo, destmap, state, skipped, collapsedas=None,
1756 keepf=False, fm=None, backup=True):
1760 keepf=False, fm=None, backup=True):
1757 """dispose of rebased revision at the end of the rebase
1761 """dispose of rebased revision at the end of the rebase
1758
1762
1759 If `collapsedas` is not None, the rebase was a collapse whose result is the
1763 If `collapsedas` is not None, the rebase was a collapse whose result is the
1760 `collapsedas` node.
1764 `collapsedas` node.
1761
1765
1762 If `keepf` is True, the rebase has --keep set and no nodes should be
1766 If `keepf` is True, the rebase has --keep set and no nodes should be
1763 removed (but bookmarks still need to be moved).
1767 removed (but bookmarks still need to be moved).
1764
1768
1765 If `backup` is False, no backup will be stored when stripping rebased
1769 If `backup` is False, no backup will be stored when stripping rebased
1766 revisions.
1770 revisions.
1767 """
1771 """
1768 tonode = repo.changelog.node
1772 tonode = repo.changelog.node
1769 replacements = {}
1773 replacements = {}
1770 moves = {}
1774 moves = {}
1771 stripcleanup = not obsolete.isenabled(repo, obsolete.createmarkersopt)
1775 stripcleanup = not obsolete.isenabled(repo, obsolete.createmarkersopt)
1772
1776
1773 collapsednodes = []
1777 collapsednodes = []
1774 for rev, newrev in sorted(state.items()):
1778 for rev, newrev in sorted(state.items()):
1775 if newrev >= 0 and newrev != rev:
1779 if newrev >= 0 and newrev != rev:
1776 oldnode = tonode(rev)
1780 oldnode = tonode(rev)
1777 newnode = collapsedas or tonode(newrev)
1781 newnode = collapsedas or tonode(newrev)
1778 moves[oldnode] = newnode
1782 moves[oldnode] = newnode
1779 if not keepf:
1783 if not keepf:
1780 succs = None
1784 succs = None
1781 if rev in skipped:
1785 if rev in skipped:
1782 if stripcleanup or not repo[rev].obsolete():
1786 if stripcleanup or not repo[rev].obsolete():
1783 succs = ()
1787 succs = ()
1784 elif collapsedas:
1788 elif collapsedas:
1785 collapsednodes.append(oldnode)
1789 collapsednodes.append(oldnode)
1786 else:
1790 else:
1787 succs = (newnode,)
1791 succs = (newnode,)
1788 if succs is not None:
1792 if succs is not None:
1789 replacements[(oldnode,)] = succs
1793 replacements[(oldnode,)] = succs
1790 if collapsednodes:
1794 if collapsednodes:
1791 replacements[tuple(collapsednodes)] = (collapsedas,)
1795 replacements[tuple(collapsednodes)] = (collapsedas,)
1792 scmutil.cleanupnodes(repo, replacements, 'rebase', moves, backup=backup)
1796 scmutil.cleanupnodes(repo, replacements, 'rebase', moves, backup=backup)
1793 if fm:
1797 if fm:
1794 hf = fm.hexfunc
1798 hf = fm.hexfunc
1795 fl = fm.formatlist
1799 fl = fm.formatlist
1796 fd = fm.formatdict
1800 fd = fm.formatdict
1797 changes = {}
1801 changes = {}
1798 for oldns, newn in replacements.iteritems():
1802 for oldns, newn in replacements.iteritems():
1799 for oldn in oldns:
1803 for oldn in oldns:
1800 changes[hf(oldn)] = fl([hf(n) for n in newn], name='node')
1804 changes[hf(oldn)] = fl([hf(n) for n in newn], name='node')
1801 nodechanges = fd(changes, key="oldnode", value="newnodes")
1805 nodechanges = fd(changes, key="oldnode", value="newnodes")
1802 fm.data(nodechanges=nodechanges)
1806 fm.data(nodechanges=nodechanges)
1803
1807
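For reference, the replacements mapping built in clearrebased has the following shape; the byte strings below are invented placeholders for 20-byte changelog node ids. Each key is a tuple of old nodes, each value the tuple of their successors: an empty tuple for a skipped revision, and a single shared collapsedas successor for all old nodes of a collapse.

replacements = {
    (b'\x11' * 20,): (b'\xaa' * 20,),               # rebased normally
    (b'\x22' * 20,): (),                            # skipped, no successor
    (b'\x33' * 20, b'\x44' * 20): (b'\xbb' * 20,),  # collapsed into one node
}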
1804 def pullrebase(orig, ui, repo, *args, **opts):
1808 def pullrebase(orig, ui, repo, *args, **opts):
1805 'Call rebase after pull if the latter has been invoked with --rebase'
1809 'Call rebase after pull if the latter has been invoked with --rebase'
1806 ret = None
1810 ret = None
1807 if opts.get(r'rebase'):
1811 if opts.get(r'rebase'):
1808 if ui.configbool('commands', 'rebase.requiredest'):
1812 if ui.configbool('commands', 'rebase.requiredest'):
1809 msg = _('rebase destination required by configuration')
1813 msg = _('rebase destination required by configuration')
1810 hint = _('use hg pull followed by hg rebase -d DEST')
1814 hint = _('use hg pull followed by hg rebase -d DEST')
1811 raise error.Abort(msg, hint=hint)
1815 raise error.Abort(msg, hint=hint)
1812
1816
1813 with repo.wlock(), repo.lock():
1817 with repo.wlock(), repo.lock():
1814 if opts.get(r'update'):
1818 if opts.get(r'update'):
1815 del opts[r'update']
1819 del opts[r'update']
1816 ui.debug('--update and --rebase are not compatible, ignoring '
1820 ui.debug('--update and --rebase are not compatible, ignoring '
1817 'the update flag\n')
1821 'the update flag\n')
1818
1822
1819 cmdutil.checkunfinished(repo)
1823 cmdutil.checkunfinished(repo)
1820 cmdutil.bailifchanged(repo, hint=_('cannot pull with rebase: '
1824 cmdutil.bailifchanged(repo, hint=_('cannot pull with rebase: '
1821 'please commit or shelve your changes first'))
1825 'please commit or shelve your changes first'))
1822
1826
1823 revsprepull = len(repo)
1827 revsprepull = len(repo)
1824 origpostincoming = commands.postincoming
1828 origpostincoming = commands.postincoming
1825 def _dummy(*args, **kwargs):
1829 def _dummy(*args, **kwargs):
1826 pass
1830 pass
1827 commands.postincoming = _dummy
1831 commands.postincoming = _dummy
1828 try:
1832 try:
1829 ret = orig(ui, repo, *args, **opts)
1833 ret = orig(ui, repo, *args, **opts)
1830 finally:
1834 finally:
1831 commands.postincoming = origpostincoming
1835 commands.postincoming = origpostincoming
1832 revspostpull = len(repo)
1836 revspostpull = len(repo)
1833 if revspostpull > revsprepull:
1837 if revspostpull > revsprepull:
1834 # the --rev option from pull conflicts with rebase's own --rev,
1838 # the --rev option from pull conflicts with rebase's own --rev,
1835 # so drop it
1839 # so drop it
1836 if r'rev' in opts:
1840 if r'rev' in opts:
1837 del opts[r'rev']
1841 del opts[r'rev']
1838 # positional argument from pull conflicts with rebase's own
1842 # positional argument from pull conflicts with rebase's own
1839 # --source.
1843 # --source.
1840 if r'source' in opts:
1844 if r'source' in opts:
1841 del opts[r'source']
1845 del opts[r'source']
1842 # revsprepull is the len of the repo, not revnum of tip.
1846 # revsprepull is the len of the repo, not revnum of tip.
1843 destspace = list(repo.changelog.revs(start=revsprepull))
1847 destspace = list(repo.changelog.revs(start=revsprepull))
1844 opts[r'_destspace'] = destspace
1848 opts[r'_destspace'] = destspace
1845 try:
1849 try:
1846 rebase(ui, repo, **opts)
1850 rebase(ui, repo, **opts)
1847 except error.NoMergeDestAbort:
1851 except error.NoMergeDestAbort:
1848 # we can maybe update instead
1852 # we can maybe update instead
1849 rev, _a, _b = destutil.destupdate(repo)
1853 rev, _a, _b = destutil.destupdate(repo)
1850 if rev == repo['.'].rev():
1854 if rev == repo['.'].rev():
1851 ui.status(_('nothing to rebase\n'))
1855 ui.status(_('nothing to rebase\n'))
1852 else:
1856 else:
1853 ui.status(_('nothing to rebase - updating instead\n'))
1857 ui.status(_('nothing to rebase - updating instead\n'))
1854 # not passing argument to get the bare update behavior
1858 # not passing argument to get the bare update behavior
1855 # with warning and trumpets
1859 # with warning and trumpets
1856 commands.update(ui, repo)
1860 commands.update(ui, repo)
1857 else:
1861 else:
1858 if opts.get(r'tool'):
1862 if opts.get(r'tool'):
1859 raise error.Abort(_('--tool can only be used with --rebase'))
1863 raise error.Abort(_('--tool can only be used with --rebase'))
1860 ret = orig(ui, repo, *args, **opts)
1864 ret = orig(ui, repo, *args, **opts)
1861
1865
1862 return ret
1866 return ret
1863
1867
1864 def _filterobsoleterevs(repo, revs):
1868 def _filterobsoleterevs(repo, revs):
1865 """returns a set of the obsolete revisions in revs"""
1869 """returns a set of the obsolete revisions in revs"""
1866 return set(r for r in revs if repo[r].obsolete())
1870 return set(r for r in revs if repo[r].obsolete())
1867
1871
1868 def _computeobsoletenotrebased(repo, rebaseobsrevs, destmap):
1872 def _computeobsoletenotrebased(repo, rebaseobsrevs, destmap):
1869 """Return (obsoletenotrebased, obsoletewithoutsuccessorindestination).
1873 """Return (obsoletenotrebased, obsoletewithoutsuccessorindestination).
1870
1874
1871 `obsoletenotrebased` is a mapping of obsolete => successor for all
1875 `obsoletenotrebased` is a mapping of obsolete => successor for all
1872 obsolete nodes to be rebased given in `rebaseobsrevs`.
1876 obsolete nodes to be rebased given in `rebaseobsrevs`.
1873
1877
1874 `obsoletewithoutsuccessorindestination` is a set with obsolete revisions
1878 `obsoletewithoutsuccessorindestination` is a set with obsolete revisions
1875 without a successor in destination.
1879 without a successor in destination.
1876
1880
1877 `obsoleteextinctsuccessors` is a set of obsolete revisions with only
1881 `obsoleteextinctsuccessors` is a set of obsolete revisions with only
1878 obsolete successors.
1882 obsolete successors.
1879 """
1883 """
1880 obsoletenotrebased = {}
1884 obsoletenotrebased = {}
1881 obsoletewithoutsuccessorindestination = set([])
1885 obsoletewithoutsuccessorindestination = set([])
1882 obsoleteextinctsuccessors = set([])
1886 obsoleteextinctsuccessors = set([])
1883
1887
1884 assert repo.filtername is None
1888 assert repo.filtername is None
1885 cl = repo.changelog
1889 cl = repo.changelog
1886 nodemap = cl.nodemap
1890 nodemap = cl.nodemap
1887 extinctrevs = set(repo.revs('extinct()'))
1891 extinctrevs = set(repo.revs('extinct()'))
1888 for srcrev in rebaseobsrevs:
1892 for srcrev in rebaseobsrevs:
1889 srcnode = cl.node(srcrev)
1893 srcnode = cl.node(srcrev)
1890 # XXX: more advanced APIs are required to handle split correctly
1894 # XXX: more advanced APIs are required to handle split correctly
1891 successors = set(obsutil.allsuccessors(repo.obsstore, [srcnode]))
1895 successors = set(obsutil.allsuccessors(repo.obsstore, [srcnode]))
1892 # obsutil.allsuccessors includes node itself
1896 # obsutil.allsuccessors includes node itself
1893 successors.remove(srcnode)
1897 successors.remove(srcnode)
1894 succrevs = {nodemap[s] for s in successors if s in nodemap}
1898 succrevs = {nodemap[s] for s in successors if s in nodemap}
1895 if succrevs.issubset(extinctrevs):
1899 if succrevs.issubset(extinctrevs):
1896 # all successors are extinct
1900 # all successors are extinct
1897 obsoleteextinctsuccessors.add(srcrev)
1901 obsoleteextinctsuccessors.add(srcrev)
1898 if not successors:
1902 if not successors:
1899 # no successor
1903 # no successor
1900 obsoletenotrebased[srcrev] = None
1904 obsoletenotrebased[srcrev] = None
1901 else:
1905 else:
1902 dstrev = destmap[srcrev]
1906 dstrev = destmap[srcrev]
1903 for succrev in succrevs:
1907 for succrev in succrevs:
1904 if cl.isancestorrev(succrev, dstrev):
1908 if cl.isancestorrev(succrev, dstrev):
1905 obsoletenotrebased[srcrev] = succrev
1909 obsoletenotrebased[srcrev] = succrev
1906 break
1910 break
1907 else:
1911 else:
1908 # If 'srcrev' has a successor in rebase set but none in
1912 # If 'srcrev' has a successor in rebase set but none in
1909 # destination (which would be caught above), we shall skip it
1913 # destination (which would be caught above), we shall skip it
1910 # and its descendants to avoid divergence.
1914 # and its descendants to avoid divergence.
1911 if srcrev in extinctrevs or any(s in destmap for s in succrevs):
1915 if srcrev in extinctrevs or any(s in destmap for s in succrevs):
1912 obsoletewithoutsuccessorindestination.add(srcrev)
1916 obsoletewithoutsuccessorindestination.add(srcrev)
1913
1917
1914 return (
1918 return (
1915 obsoletenotrebased,
1919 obsoletenotrebased,
1916 obsoletewithoutsuccessorindestination,
1920 obsoletewithoutsuccessorindestination,
1917 obsoleteextinctsuccessors,
1921 obsoleteextinctsuccessors,
1918 )
1922 )
1919
1923
1920 def summaryhook(ui, repo):
1924 def summaryhook(ui, repo):
1921 if not repo.vfs.exists('rebasestate'):
1925 if not repo.vfs.exists('rebasestate'):
1922 return
1926 return
1923 try:
1927 try:
1924 rbsrt = rebaseruntime(repo, ui, {})
1928 rbsrt = rebaseruntime(repo, ui, {})
1925 rbsrt.restorestatus()
1929 rbsrt.restorestatus()
1926 state = rbsrt.state
1930 state = rbsrt.state
1927 except error.RepoLookupError:
1931 except error.RepoLookupError:
1928 # i18n: column positioning for "hg summary"
1932 # i18n: column positioning for "hg summary"
1929 msg = _('rebase: (use "hg rebase --abort" to clear broken state)\n')
1933 msg = _('rebase: (use "hg rebase --abort" to clear broken state)\n')
1930 ui.write(msg)
1934 ui.write(msg)
1931 return
1935 return
1932 numrebased = len([i for i in state.itervalues() if i >= 0])
1936 numrebased = len([i for i in state.itervalues() if i >= 0])
1933 # i18n: column positioning for "hg summary"
1937 # i18n: column positioning for "hg summary"
1934 ui.write(_('rebase: %s, %s (rebase --continue)\n') %
1938 ui.write(_('rebase: %s, %s (rebase --continue)\n') %
1935 (ui.label(_('%d rebased'), 'rebase.rebased') % numrebased,
1939 (ui.label(_('%d rebased'), 'rebase.rebased') % numrebased,
1936 ui.label(_('%d remaining'), 'rebase.remaining') %
1940 ui.label(_('%d remaining'), 'rebase.remaining') %
1937 (len(state) - numrebased)))
1941 (len(state) - numrebased)))
1938
1942
1939 def uisetup(ui):
1943 def uisetup(ui):
1940 # Replace pull with a decorator to provide the --rebase option
1944 # Replace pull with a decorator to provide the --rebase option
1941 entry = extensions.wrapcommand(commands.table, 'pull', pullrebase)
1945 entry = extensions.wrapcommand(commands.table, 'pull', pullrebase)
1942 entry[1].append(('', 'rebase', None,
1946 entry[1].append(('', 'rebase', None,
1943 _("rebase working directory to branch head")))
1947 _("rebase working directory to branch head")))
1944 entry[1].append(('t', 'tool', '',
1948 entry[1].append(('t', 'tool', '',
1945 _("specify merge tool for rebase")))
1949 _("specify merge tool for rebase")))
1946 cmdutil.summaryhooks.add('rebase', summaryhook)
1950 cmdutil.summaryhooks.add('rebase', summaryhook)
1947 cmdutil.unfinishedstates.append(
1951 cmdutil.unfinishedstates.append(
1948 ['rebasestate', False, False, _('rebase in progress'),
1952 ['rebasestate', False, False, _('rebase in progress'),
1949 _("use 'hg rebase --continue' or 'hg rebase --abort'")])
1953 _("use 'hg rebase --continue' or 'hg rebase --abort'")])
1950 cmdutil.afterresolvedstates.append(
1954 cmdutil.afterresolvedstates.append(
1951 ['rebasestate', _('hg rebase --continue')])
1955 ['rebasestate', _('hg rebase --continue')])
@@ -1,2857 +1,2874 b''
1 /*
1 /*
2 parsers.c - efficient content parsing
2 parsers.c - efficient content parsing
3
3
4 Copyright 2008 Matt Mackall <mpm@selenic.com> and others
4 Copyright 2008 Matt Mackall <mpm@selenic.com> and others
5
5
6 This software may be used and distributed according to the terms of
6 This software may be used and distributed according to the terms of
7 the GNU General Public License, incorporated herein by reference.
7 the GNU General Public License, incorporated herein by reference.
8 */
8 */
9
9
10 #include <Python.h>
10 #include <Python.h>
11 #include <assert.h>
11 #include <assert.h>
12 #include <ctype.h>
12 #include <ctype.h>
13 #include <limits.h>
13 #include <limits.h>
14 #include <stddef.h>
14 #include <stddef.h>
15 #include <stdlib.h>
15 #include <stdlib.h>
16 #include <string.h>
16 #include <string.h>
17
17
18 #include "bitmanipulation.h"
18 #include "bitmanipulation.h"
19 #include "charencode.h"
19 #include "charencode.h"
20 #include "util.h"
20 #include "util.h"
21
21
22 #ifdef IS_PY3K
22 #ifdef IS_PY3K
23 /* The mapping of Python types is meant to be temporary to get Python
23 /* The mapping of Python types is meant to be temporary to get Python
24 * 3 to compile. We should remove this once Python 3 is fully
24 * 3 to compile. We should remove this once Python 3 is fully
25 * supported and proper types are used in the extensions themselves. */
25 * supported and proper types are used in the extensions themselves. */
26 #define PyInt_Check PyLong_Check
26 #define PyInt_Check PyLong_Check
27 #define PyInt_FromLong PyLong_FromLong
27 #define PyInt_FromLong PyLong_FromLong
28 #define PyInt_FromSsize_t PyLong_FromSsize_t
28 #define PyInt_FromSsize_t PyLong_FromSsize_t
29 #define PyInt_AsLong PyLong_AsLong
29 #define PyInt_AsLong PyLong_AsLong
30 #endif
30 #endif
31
31
32 typedef struct indexObjectStruct indexObject;
32 typedef struct indexObjectStruct indexObject;
33
33
34 typedef struct {
34 typedef struct {
35 int children[16];
35 int children[16];
36 } nodetreenode;
36 } nodetreenode;
37
37
38 /*
38 /*
39 * A base-16 trie for fast node->rev mapping.
39 * A base-16 trie for fast node->rev mapping.
40 *
40 *
41 * Positive value is index of the next node in the trie
41 * Positive value is index of the next node in the trie
42 * Negative value is a leaf: -(rev + 2)
42 * Negative value is a leaf: -(rev + 2)
43 * Zero is empty
43 * Zero is empty
44 */
44 */
45 typedef struct {
45 typedef struct {
46 indexObject *index;
46 indexObject *index;
47 nodetreenode *nodes;
47 nodetreenode *nodes;
48 unsigned length; /* # nodes in use */
48 unsigned length; /* # nodes in use */
49 unsigned capacity; /* # nodes allocated */
49 unsigned capacity; /* # nodes allocated */
50 int depth; /* maximum depth of tree */
50 int depth; /* maximum depth of tree */
51 int splits; /* # splits performed */
51 int splits; /* # splits performed */
52 } nodetree;
52 } nodetree;
53
53
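The leaf encoding described in the comment above is compact but easy to misread; a small Python sketch of the convention (independent of the C code, names invented for illustration):

def encode_leaf(rev):
    # a leaf stores -(rev + 2), keeping rev 0 distinct from the "empty" value 0
    return -(rev + 2)

def decode_slot(value):
    if value == 0:
        return 'empty'
    if value > 0:
        return ('internal', value)     # index of the next nodetreenode
    return ('rev', -value - 2)         # leaf: recover the revision number

print(encode_leaf(0))      # -2
print(decode_slot(-2))     # ('rev', 0)
print(decode_slot(5))      # ('internal', 5)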
54 typedef struct {
54 typedef struct {
55 PyObject_HEAD /* ; */
55 PyObject_HEAD /* ; */
56 nodetree nt;
56 nodetree nt;
57 } nodetreeObject;
57 } nodetreeObject;
58
58
59 /*
59 /*
60 * This class has two behaviors.
60 * This class has two behaviors.
61 *
61 *
62 * When used in a list-like way (with integer keys), we decode an
62 * When used in a list-like way (with integer keys), we decode an
63 * entry in a RevlogNG index file on demand. Our last entry is a
63 * entry in a RevlogNG index file on demand. Our last entry is a
64 * sentinel, always a nullid. We have limited support for
64 * sentinel, always a nullid. We have limited support for
65 * integer-keyed insert and delete, only at elements right before the
65 * integer-keyed insert and delete, only at elements right before the
66 * sentinel.
66 * sentinel.
67 *
67 *
68 * With string keys, we lazily perform a reverse mapping from node to
68 * With string keys, we lazily perform a reverse mapping from node to
69 * rev, using a base-16 trie.
69 * rev, using a base-16 trie.
70 */
70 */
71 struct indexObjectStruct {
71 struct indexObjectStruct {
72 PyObject_HEAD
72 PyObject_HEAD
73 /* Type-specific fields go here. */
73 /* Type-specific fields go here. */
74 PyObject *data; /* raw bytes of index */
74 PyObject *data; /* raw bytes of index */
75 Py_buffer buf; /* buffer of data */
75 Py_buffer buf; /* buffer of data */
76 PyObject **cache; /* cached tuples */
76 PyObject **cache; /* cached tuples */
77 const char **offsets; /* populated on demand */
77 const char **offsets; /* populated on demand */
78 Py_ssize_t raw_length; /* original number of elements */
78 Py_ssize_t raw_length; /* original number of elements */
79 Py_ssize_t length; /* current number of elements */
79 Py_ssize_t length; /* current number of elements */
80 PyObject *added; /* populated on demand */
80 PyObject *added; /* populated on demand */
81 PyObject *headrevs; /* cache, invalidated on changes */
81 PyObject *headrevs; /* cache, invalidated on changes */
82 PyObject *filteredrevs; /* filtered revs set */
82 PyObject *filteredrevs; /* filtered revs set */
83 nodetree nt; /* base-16 trie */
83 nodetree nt; /* base-16 trie */
84 int ntinitialized; /* 0 or 1 */
84 int ntinitialized; /* 0 or 1 */
85 int ntrev; /* last rev scanned */
85 int ntrev; /* last rev scanned */
86 int ntlookups; /* # lookups */
86 int ntlookups; /* # lookups */
87 int ntmisses; /* # lookups that miss the cache */
87 int ntmisses; /* # lookups that miss the cache */
88 int inlined;
88 int inlined;
89 };
89 };
90
90
91 static Py_ssize_t index_length(const indexObject *self)
91 static Py_ssize_t index_length(const indexObject *self)
92 {
92 {
93 if (self->added == NULL)
93 if (self->added == NULL)
94 return self->length;
94 return self->length;
95 return self->length + PyList_GET_SIZE(self->added);
95 return self->length + PyList_GET_SIZE(self->added);
96 }
96 }
97
97
98 static PyObject *nullentry = NULL;
98 static PyObject *nullentry = NULL;
99 static const char nullid[20] = {0};
99 static const char nullid[20] = {0};
100
100
101 static Py_ssize_t inline_scan(indexObject *self, const char **offsets);
101 static Py_ssize_t inline_scan(indexObject *self, const char **offsets);
102
102
103 #if LONG_MAX == 0x7fffffffL
103 #if LONG_MAX == 0x7fffffffL
104 static const char *const tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#");
104 static const char *const tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#");
105 #else
105 #else
106 static const char *const tuple_format = PY23("kiiiiiis#", "kiiiiiiy#");
106 static const char *const tuple_format = PY23("kiiiiiis#", "kiiiiiiy#");
107 #endif
107 #endif
108
108
109 /* A RevlogNG v1 index entry is 64 bytes long. */
109 /* A RevlogNG v1 index entry is 64 bytes long. */
110 static const long v1_hdrsize = 64;
110 static const long v1_hdrsize = 64;
111
111
112 static void raise_revlog_error(void)
112 static void raise_revlog_error(void)
113 {
113 {
114 PyObject *mod = NULL, *dict = NULL, *errclass = NULL;
114 PyObject *mod = NULL, *dict = NULL, *errclass = NULL;
115
115
116 mod = PyImport_ImportModule("mercurial.error");
116 mod = PyImport_ImportModule("mercurial.error");
117 if (mod == NULL) {
117 if (mod == NULL) {
118 goto cleanup;
118 goto cleanup;
119 }
119 }
120
120
121 dict = PyModule_GetDict(mod);
121 dict = PyModule_GetDict(mod);
122 if (dict == NULL) {
122 if (dict == NULL) {
123 goto cleanup;
123 goto cleanup;
124 }
124 }
125 Py_INCREF(dict);
125 Py_INCREF(dict);
126
126
127 errclass = PyDict_GetItemString(dict, "RevlogError");
127 errclass = PyDict_GetItemString(dict, "RevlogError");
128 if (errclass == NULL) {
128 if (errclass == NULL) {
129 PyErr_SetString(PyExc_SystemError,
129 PyErr_SetString(PyExc_SystemError,
130 "could not find RevlogError");
130 "could not find RevlogError");
131 goto cleanup;
131 goto cleanup;
132 }
132 }
133
133
134 /* value of exception is ignored by callers */
134 /* value of exception is ignored by callers */
135 PyErr_SetString(errclass, "RevlogError");
135 PyErr_SetString(errclass, "RevlogError");
136
136
137 cleanup:
137 cleanup:
138 Py_XDECREF(dict);
138 Py_XDECREF(dict);
139 Py_XDECREF(mod);
139 Py_XDECREF(mod);
140 }
140 }
141
141
142 /*
142 /*
143 * Return a pointer to the beginning of a RevlogNG record.
143 * Return a pointer to the beginning of a RevlogNG record.
144 */
144 */
145 static const char *index_deref(indexObject *self, Py_ssize_t pos)
145 static const char *index_deref(indexObject *self, Py_ssize_t pos)
146 {
146 {
147 if (self->inlined && pos > 0) {
147 if (self->inlined && pos > 0) {
148 if (self->offsets == NULL) {
148 if (self->offsets == NULL) {
149 self->offsets = PyMem_Malloc(self->raw_length *
149 self->offsets = PyMem_Malloc(self->raw_length *
150 sizeof(*self->offsets));
150 sizeof(*self->offsets));
151 if (self->offsets == NULL)
151 if (self->offsets == NULL)
152 return (const char *)PyErr_NoMemory();
152 return (const char *)PyErr_NoMemory();
153 inline_scan(self, self->offsets);
153 inline_scan(self, self->offsets);
154 }
154 }
155 return self->offsets[pos];
155 return self->offsets[pos];
156 }
156 }
157
157
158 return (const char *)(self->buf.buf) + pos * v1_hdrsize;
158 return (const char *)(self->buf.buf) + pos * v1_hdrsize;
159 }
159 }
160
160
161 /*
162 * Get parents of the given rev.
163 *
164 * The specified rev must be valid and must not be nullrev. A returned
165 * parent revision may be nullrev, but is guaranteed to be in valid range.
166 */
161 static inline int index_get_parents(indexObject *self, Py_ssize_t rev, int *ps,
167 static inline int index_get_parents(indexObject *self, Py_ssize_t rev, int *ps,
162 int maxrev)
168 int maxrev)
163 {
169 {
164 if (rev >= self->length) {
170 if (rev >= self->length) {
165 long tmp;
171 long tmp;
166 PyObject *tuple =
172 PyObject *tuple =
167 PyList_GET_ITEM(self->added, rev - self->length);
173 PyList_GET_ITEM(self->added, rev - self->length);
168 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 5), &tmp)) {
174 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 5), &tmp)) {
169 return -1;
175 return -1;
170 }
176 }
171 ps[0] = (int)tmp;
177 ps[0] = (int)tmp;
172 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 6), &tmp)) {
178 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 6), &tmp)) {
173 return -1;
179 return -1;
174 }
180 }
175 ps[1] = (int)tmp;
181 ps[1] = (int)tmp;
176 } else {
182 } else {
177 const char *data = index_deref(self, rev);
183 const char *data = index_deref(self, rev);
178 ps[0] = getbe32(data + 24);
184 ps[0] = getbe32(data + 24);
179 ps[1] = getbe32(data + 28);
185 ps[1] = getbe32(data + 28);
180 }
186 }
181 /* If index file is corrupted, ps[] may point to invalid revisions. So
187 /* If index file is corrupted, ps[] may point to invalid revisions. So
182 * it would risk a buffer overflow to trust them unconditionally. */
188 * it would risk a buffer overflow to trust them unconditionally. */
183 if (ps[0] > maxrev || ps[1] > maxrev) {
189 if (ps[0] < -1 || ps[0] > maxrev || ps[1] < -1 || ps[1] > maxrev) {
184 PyErr_SetString(PyExc_ValueError, "parent out of range");
190 PyErr_SetString(PyExc_ValueError, "parent out of range");
185 return -1;
191 return -1;
186 }
192 }
187 return 0;
193 return 0;
188 }
194 }
189
195
190 static inline int64_t index_get_start(indexObject *self, Py_ssize_t rev)
196 static inline int64_t index_get_start(indexObject *self, Py_ssize_t rev)
191 {
197 {
192 uint64_t offset;
198 uint64_t offset;
193 if (rev >= self->length) {
199 if (rev >= self->length) {
194 PyObject *tuple;
200 PyObject *tuple;
195 PyObject *pylong;
201 PyObject *pylong;
196 PY_LONG_LONG tmp;
202 PY_LONG_LONG tmp;
197 tuple = PyList_GET_ITEM(self->added, rev - self->length);
203 tuple = PyList_GET_ITEM(self->added, rev - self->length);
198 pylong = PyTuple_GET_ITEM(tuple, 0);
204 pylong = PyTuple_GET_ITEM(tuple, 0);
199 tmp = PyLong_AsLongLong(pylong);
205 tmp = PyLong_AsLongLong(pylong);
200 if (tmp == -1 && PyErr_Occurred()) {
206 if (tmp == -1 && PyErr_Occurred()) {
201 return -1;
207 return -1;
202 }
208 }
203 if (tmp < 0) {
209 if (tmp < 0) {
204 PyErr_Format(PyExc_OverflowError,
210 PyErr_Format(PyExc_OverflowError,
205 "revlog entry size out of bound (%lld)",
211 "revlog entry size out of bound (%lld)",
206 (long long)tmp);
212 (long long)tmp);
207 return -1;
213 return -1;
208 }
214 }
209 offset = (uint64_t)tmp;
215 offset = (uint64_t)tmp;
210 } else {
216 } else {
211 const char *data = index_deref(self, rev);
217 const char *data = index_deref(self, rev);
212 offset = getbe32(data + 4);
218 offset = getbe32(data + 4);
213 if (rev == 0) {
219 if (rev == 0) {
214 /* mask out version number for the first entry */
220 /* mask out version number for the first entry */
215 offset &= 0xFFFF;
221 offset &= 0xFFFF;
216 } else {
222 } else {
217 uint32_t offset_high = getbe32(data);
223 uint32_t offset_high = getbe32(data);
218 offset |= ((uint64_t)offset_high) << 32;
224 offset |= ((uint64_t)offset_high) << 32;
219 }
225 }
220 }
226 }
221 return (int64_t)(offset >> 16);
227 return (int64_t)(offset >> 16);
222 }
228 }
223
229
224 static inline int index_get_length(indexObject *self, Py_ssize_t rev)
230 static inline int index_get_length(indexObject *self, Py_ssize_t rev)
225 {
231 {
226 if (rev >= self->length) {
232 if (rev >= self->length) {
227 PyObject *tuple;
233 PyObject *tuple;
228 PyObject *pylong;
234 PyObject *pylong;
229 long ret;
235 long ret;
230 tuple = PyList_GET_ITEM(self->added, rev - self->length);
236 tuple = PyList_GET_ITEM(self->added, rev - self->length);
231 pylong = PyTuple_GET_ITEM(tuple, 1);
237 pylong = PyTuple_GET_ITEM(tuple, 1);
232 ret = PyInt_AsLong(pylong);
238 ret = PyInt_AsLong(pylong);
233 if (ret == -1 && PyErr_Occurred()) {
239 if (ret == -1 && PyErr_Occurred()) {
234 return -1;
240 return -1;
235 }
241 }
236 if (ret < 0 || ret > (long)INT_MAX) {
242 if (ret < 0 || ret > (long)INT_MAX) {
237 PyErr_Format(PyExc_OverflowError,
243 PyErr_Format(PyExc_OverflowError,
238 "revlog entry size out of bound (%ld)",
244 "revlog entry size out of bound (%ld)",
239 ret);
245 ret);
240 return -1;
246 return -1;
241 }
247 }
242 return (int)ret;
248 return (int)ret;
243 } else {
249 } else {
244 const char *data = index_deref(self, rev);
250 const char *data = index_deref(self, rev);
245 int tmp = (int)getbe32(data + 8);
251 int tmp = (int)getbe32(data + 8);
246 if (tmp < 0) {
252 if (tmp < 0) {
247 PyErr_Format(PyExc_OverflowError,
253 PyErr_Format(PyExc_OverflowError,
248 "revlog entry size out of bound (%d)",
254 "revlog entry size out of bound (%d)",
249 tmp);
255 tmp);
250 return -1;
256 return -1;
251 }
257 }
252 return tmp;
258 return tmp;
253 }
259 }
254 }
260 }
255
261
256 /*
262 /*
257 * RevlogNG format (all in big endian, data may be inlined):
263 * RevlogNG format (all in big endian, data may be inlined):
258 * 6 bytes: offset
264 * 6 bytes: offset
259 * 2 bytes: flags
265 * 2 bytes: flags
260 * 4 bytes: compressed length
266 * 4 bytes: compressed length
261 * 4 bytes: uncompressed length
267 * 4 bytes: uncompressed length
262 * 4 bytes: base revision
268 * 4 bytes: base revision
263 * 4 bytes: link revision
269 * 4 bytes: link revision
264 * 4 bytes: parent 1 revision
270 * 4 bytes: parent 1 revision
265 * 4 bytes: parent 2 revision
271 * 4 bytes: parent 2 revision
266 * 32 bytes: nodeid (only 20 bytes used)
272 * 32 bytes: nodeid (only 20 bytes used)
267 */
273 */
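To make the entry layout above concrete, here is a hedged Python sketch that unpacks one 64-byte v1 entry with struct. The format string is reconstructed from the comment (the 6-byte offset plus 2-byte flags read as one big-endian 8-byte word, and a 32-byte node field of which only 20 bytes are used); the function and field names are illustrative only.

import struct

V1_ENTRY = struct.Struct('>Qiiiiii20s12x')   # 8 + 6*4 + 20 + 12 = 64 bytes
assert V1_ENTRY.size == 64

def parse_entry(data, is_first):
    (offset_flags, comp_len, uncomp_len, base_rev,
     link_rev, p1, p2, node) = V1_ENTRY.unpack(data)
    if is_first:
        offset_flags &= 0xFFFF       # the first entry stores the revlog version here
    offset = offset_flags >> 16      # upper 48 bits: start of the chunk in the data file
    flags = offset_flags & 0xFFFF    # lower 16 bits: per-revision flags
    return offset, flags, comp_len, uncomp_len, base_rev, link_rev, p1, p2, node

sample = V1_ENTRY.pack((100 << 16) | 0, 11, 25, 0, 0, -1, -1, b'\x42' * 20)
print(parse_entry(sample, is_first=False)[:4])   # (100, 0, 11, 25)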
268 static PyObject *index_get(indexObject *self, Py_ssize_t pos)
274 static PyObject *index_get(indexObject *self, Py_ssize_t pos)
269 {
275 {
270 uint64_t offset_flags;
276 uint64_t offset_flags;
271 int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
277 int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
272 const char *c_node_id;
278 const char *c_node_id;
273 const char *data;
279 const char *data;
274 Py_ssize_t length = index_length(self);
280 Py_ssize_t length = index_length(self);
275 PyObject *entry;
281 PyObject *entry;
276
282
277 if (pos == -1) {
283 if (pos == -1) {
278 Py_INCREF(nullentry);
284 Py_INCREF(nullentry);
279 return nullentry;
285 return nullentry;
280 }
286 }
281
287
282 if (pos < 0 || pos >= length) {
288 if (pos < 0 || pos >= length) {
283 PyErr_SetString(PyExc_IndexError, "revlog index out of range");
289 PyErr_SetString(PyExc_IndexError, "revlog index out of range");
284 return NULL;
290 return NULL;
285 }
291 }
286
292
287 if (pos >= self->length) {
293 if (pos >= self->length) {
288 PyObject *obj;
294 PyObject *obj;
289 obj = PyList_GET_ITEM(self->added, pos - self->length);
295 obj = PyList_GET_ITEM(self->added, pos - self->length);
290 Py_INCREF(obj);
296 Py_INCREF(obj);
291 return obj;
297 return obj;
292 }
298 }
293
299
294 if (self->cache) {
300 if (self->cache) {
295 if (self->cache[pos]) {
301 if (self->cache[pos]) {
296 Py_INCREF(self->cache[pos]);
302 Py_INCREF(self->cache[pos]);
297 return self->cache[pos];
303 return self->cache[pos];
298 }
304 }
299 } else {
305 } else {
300 self->cache = calloc(self->raw_length, sizeof(PyObject *));
306 self->cache = calloc(self->raw_length, sizeof(PyObject *));
301 if (self->cache == NULL)
307 if (self->cache == NULL)
302 return PyErr_NoMemory();
308 return PyErr_NoMemory();
303 }
309 }
304
310
305 data = index_deref(self, pos);
311 data = index_deref(self, pos);
306 if (data == NULL)
312 if (data == NULL)
307 return NULL;
313 return NULL;
308
314
309 offset_flags = getbe32(data + 4);
315 offset_flags = getbe32(data + 4);
310 if (pos == 0) /* mask out version number for the first entry */
316 if (pos == 0) /* mask out version number for the first entry */
311 offset_flags &= 0xFFFF;
317 offset_flags &= 0xFFFF;
312 else {
318 else {
313 uint32_t offset_high = getbe32(data);
319 uint32_t offset_high = getbe32(data);
314 offset_flags |= ((uint64_t)offset_high) << 32;
320 offset_flags |= ((uint64_t)offset_high) << 32;
315 }
321 }
316
322
317 comp_len = getbe32(data + 8);
323 comp_len = getbe32(data + 8);
318 uncomp_len = getbe32(data + 12);
324 uncomp_len = getbe32(data + 12);
319 base_rev = getbe32(data + 16);
325 base_rev = getbe32(data + 16);
320 link_rev = getbe32(data + 20);
326 link_rev = getbe32(data + 20);
321 parent_1 = getbe32(data + 24);
327 parent_1 = getbe32(data + 24);
322 parent_2 = getbe32(data + 28);
328 parent_2 = getbe32(data + 28);
323 c_node_id = data + 32;
329 c_node_id = data + 32;
324
330
325 entry = Py_BuildValue(tuple_format, offset_flags, comp_len, uncomp_len,
331 entry = Py_BuildValue(tuple_format, offset_flags, comp_len, uncomp_len,
326 base_rev, link_rev, parent_1, parent_2, c_node_id,
332 base_rev, link_rev, parent_1, parent_2, c_node_id,
327 20);
333 20);
328
334
329 if (entry) {
335 if (entry) {
330 PyObject_GC_UnTrack(entry);
336 PyObject_GC_UnTrack(entry);
331 Py_INCREF(entry);
337 Py_INCREF(entry);
332 }
338 }
333
339
334 self->cache[pos] = entry;
340 self->cache[pos] = entry;
335
341
336 return entry;
342 return entry;
337 }
343 }
338
344
339 /*
345 /*
340 * Return the 20-byte SHA of the node corresponding to the given rev.
346 * Return the 20-byte SHA of the node corresponding to the given rev.
341 */
347 */
342 static const char *index_node(indexObject *self, Py_ssize_t pos)
348 static const char *index_node(indexObject *self, Py_ssize_t pos)
343 {
349 {
344 Py_ssize_t length = index_length(self);
350 Py_ssize_t length = index_length(self);
345 const char *data;
351 const char *data;
346
352
347 if (pos == -1)
353 if (pos == -1)
348 return nullid;
354 return nullid;
349
355
350 if (pos >= length)
356 if (pos >= length)
351 return NULL;
357 return NULL;
352
358
353 if (pos >= self->length) {
359 if (pos >= self->length) {
354 PyObject *tuple, *str;
360 PyObject *tuple, *str;
355 tuple = PyList_GET_ITEM(self->added, pos - self->length);
361 tuple = PyList_GET_ITEM(self->added, pos - self->length);
356 str = PyTuple_GetItem(tuple, 7);
362 str = PyTuple_GetItem(tuple, 7);
357 return str ? PyBytes_AS_STRING(str) : NULL;
363 return str ? PyBytes_AS_STRING(str) : NULL;
358 }
364 }
359
365
360 data = index_deref(self, pos);
366 data = index_deref(self, pos);
361 return data ? data + 32 : NULL;
367 return data ? data + 32 : NULL;
362 }
368 }
363
369
364 /*
370 /*
365 * Return the 20-byte SHA of the node corresponding to the given rev. The
371 * Return the 20-byte SHA of the node corresponding to the given rev. The
366 * rev is assumed to exist. If it does not, an exception is set.
372 * rev is assumed to exist. If it does not, an exception is set.
367 */
373 */
368 static const char *index_node_existing(indexObject *self, Py_ssize_t pos)
374 static const char *index_node_existing(indexObject *self, Py_ssize_t pos)
369 {
375 {
370 const char *node = index_node(self, pos);
376 const char *node = index_node(self, pos);
371 if (node == NULL) {
377 if (node == NULL) {
372 PyErr_Format(PyExc_IndexError, "could not access rev %d",
378 PyErr_Format(PyExc_IndexError, "could not access rev %d",
373 (int)pos);
379 (int)pos);
374 }
380 }
375 return node;
381 return node;
376 }
382 }
377
383
378 static int nt_insert(nodetree *self, const char *node, int rev);
384 static int nt_insert(nodetree *self, const char *node, int rev);
379
385
380 static int node_check(PyObject *obj, char **node)
386 static int node_check(PyObject *obj, char **node)
381 {
387 {
382 Py_ssize_t nodelen;
388 Py_ssize_t nodelen;
383 if (PyBytes_AsStringAndSize(obj, node, &nodelen) == -1)
389 if (PyBytes_AsStringAndSize(obj, node, &nodelen) == -1)
384 return -1;
390 return -1;
385 if (nodelen == 20)
391 if (nodelen == 20)
386 return 0;
392 return 0;
387 PyErr_SetString(PyExc_ValueError, "20-byte hash required");
393 PyErr_SetString(PyExc_ValueError, "20-byte hash required");
388 return -1;
394 return -1;
389 }
395 }
390
396
391 static PyObject *index_append(indexObject *self, PyObject *obj)
397 static PyObject *index_append(indexObject *self, PyObject *obj)
392 {
398 {
393 char *node;
399 char *node;
394 Py_ssize_t len;
400 Py_ssize_t len;
395
401
396 if (!PyTuple_Check(obj) || PyTuple_GET_SIZE(obj) != 8) {
402 if (!PyTuple_Check(obj) || PyTuple_GET_SIZE(obj) != 8) {
397 PyErr_SetString(PyExc_TypeError, "8-tuple required");
403 PyErr_SetString(PyExc_TypeError, "8-tuple required");
398 return NULL;
404 return NULL;
399 }
405 }
400
406
401 if (node_check(PyTuple_GET_ITEM(obj, 7), &node) == -1)
407 if (node_check(PyTuple_GET_ITEM(obj, 7), &node) == -1)
402 return NULL;
408 return NULL;
403
409
404 len = index_length(self);
410 len = index_length(self);
405
411
406 if (self->added == NULL) {
412 if (self->added == NULL) {
407 self->added = PyList_New(0);
413 self->added = PyList_New(0);
408 if (self->added == NULL)
414 if (self->added == NULL)
409 return NULL;
415 return NULL;
410 }
416 }
411
417
412 if (PyList_Append(self->added, obj) == -1)
418 if (PyList_Append(self->added, obj) == -1)
413 return NULL;
419 return NULL;
414
420
415 if (self->ntinitialized)
421 if (self->ntinitialized)
416 nt_insert(&self->nt, node, (int)len);
422 nt_insert(&self->nt, node, (int)len);
417
423
418 Py_CLEAR(self->headrevs);
424 Py_CLEAR(self->headrevs);
419 Py_RETURN_NONE;
425 Py_RETURN_NONE;
420 }
426 }
421
427
422 static PyObject *index_stats(indexObject *self)
428 static PyObject *index_stats(indexObject *self)
423 {
429 {
424 PyObject *obj = PyDict_New();
430 PyObject *obj = PyDict_New();
425 PyObject *s = NULL;
431 PyObject *s = NULL;
426 PyObject *t = NULL;
432 PyObject *t = NULL;
427
433
428 if (obj == NULL)
434 if (obj == NULL)
429 return NULL;
435 return NULL;
430
436
431 #define istat(__n, __d) \
437 #define istat(__n, __d) \
432 do { \
438 do { \
433 s = PyBytes_FromString(__d); \
439 s = PyBytes_FromString(__d); \
434 t = PyInt_FromSsize_t(self->__n); \
440 t = PyInt_FromSsize_t(self->__n); \
435 if (!s || !t) \
441 if (!s || !t) \
436 goto bail; \
442 goto bail; \
437 if (PyDict_SetItem(obj, s, t) == -1) \
443 if (PyDict_SetItem(obj, s, t) == -1) \
438 goto bail; \
444 goto bail; \
439 Py_CLEAR(s); \
445 Py_CLEAR(s); \
440 Py_CLEAR(t); \
446 Py_CLEAR(t); \
441 } while (0)
447 } while (0)
442
448
443 if (self->added) {
449 if (self->added) {
444 Py_ssize_t len = PyList_GET_SIZE(self->added);
450 Py_ssize_t len = PyList_GET_SIZE(self->added);
445 s = PyBytes_FromString("index entries added");
451 s = PyBytes_FromString("index entries added");
446 t = PyInt_FromSsize_t(len);
452 t = PyInt_FromSsize_t(len);
447 if (!s || !t)
453 if (!s || !t)
448 goto bail;
454 goto bail;
449 if (PyDict_SetItem(obj, s, t) == -1)
455 if (PyDict_SetItem(obj, s, t) == -1)
450 goto bail;
456 goto bail;
451 Py_CLEAR(s);
457 Py_CLEAR(s);
452 Py_CLEAR(t);
458 Py_CLEAR(t);
453 }
459 }
454
460
455 if (self->raw_length != self->length)
461 if (self->raw_length != self->length)
456 istat(raw_length, "revs on disk");
462 istat(raw_length, "revs on disk");
457 istat(length, "revs in memory");
463 istat(length, "revs in memory");
458 istat(ntlookups, "node trie lookups");
464 istat(ntlookups, "node trie lookups");
459 istat(ntmisses, "node trie misses");
465 istat(ntmisses, "node trie misses");
460 istat(ntrev, "node trie last rev scanned");
466 istat(ntrev, "node trie last rev scanned");
461 if (self->ntinitialized) {
467 if (self->ntinitialized) {
462 istat(nt.capacity, "node trie capacity");
468 istat(nt.capacity, "node trie capacity");
463 istat(nt.depth, "node trie depth");
469 istat(nt.depth, "node trie depth");
464 istat(nt.length, "node trie count");
470 istat(nt.length, "node trie count");
465 istat(nt.splits, "node trie splits");
471 istat(nt.splits, "node trie splits");
466 }
472 }
467
473
468 #undef istat
474 #undef istat
469
475
470 return obj;
476 return obj;
471
477
472 bail:
478 bail:
473 Py_XDECREF(obj);
479 Py_XDECREF(obj);
474 Py_XDECREF(s);
480 Py_XDECREF(s);
475 Py_XDECREF(t);
481 Py_XDECREF(t);
476 return NULL;
482 return NULL;
477 }
483 }
478
484
479 /*
485 /*
480 * When we cache a list, we want to be sure the caller can't mutate
486 * When we cache a list, we want to be sure the caller can't mutate
481 * the cached copy.
487 * the cached copy.
482 */
488 */
483 static PyObject *list_copy(PyObject *list)
489 static PyObject *list_copy(PyObject *list)
484 {
490 {
485 Py_ssize_t len = PyList_GET_SIZE(list);
491 Py_ssize_t len = PyList_GET_SIZE(list);
486 PyObject *newlist = PyList_New(len);
492 PyObject *newlist = PyList_New(len);
487 Py_ssize_t i;
493 Py_ssize_t i;
488
494
489 if (newlist == NULL)
495 if (newlist == NULL)
490 return NULL;
496 return NULL;
491
497
492 for (i = 0; i < len; i++) {
498 for (i = 0; i < len; i++) {
493 PyObject *obj = PyList_GET_ITEM(list, i);
499 PyObject *obj = PyList_GET_ITEM(list, i);
494 Py_INCREF(obj);
500 Py_INCREF(obj);
495 PyList_SET_ITEM(newlist, i, obj);
501 PyList_SET_ITEM(newlist, i, obj);
496 }
502 }
497
503
498 return newlist;
504 return newlist;
499 }
505 }
500
506
501 static int check_filter(PyObject *filter, Py_ssize_t arg)
507 static int check_filter(PyObject *filter, Py_ssize_t arg)
502 {
508 {
503 if (filter) {
509 if (filter) {
504 PyObject *arglist, *result;
510 PyObject *arglist, *result;
505 int isfiltered;
511 int isfiltered;
506
512
507 arglist = Py_BuildValue("(n)", arg);
513 arglist = Py_BuildValue("(n)", arg);
508 if (!arglist) {
514 if (!arglist) {
509 return -1;
515 return -1;
510 }
516 }
511
517
512 result = PyEval_CallObject(filter, arglist);
518 result = PyEval_CallObject(filter, arglist);
513 Py_DECREF(arglist);
519 Py_DECREF(arglist);
514 if (!result) {
520 if (!result) {
515 return -1;
521 return -1;
516 }
522 }
517
523
518 /* PyObject_IsTrue returns 1 if true, 0 if false, -1 if error,
524 /* PyObject_IsTrue returns 1 if true, 0 if false, -1 if error,
519 * same as this function, so we can just return it directly.*/
525 * same as this function, so we can just return it directly.*/
520 isfiltered = PyObject_IsTrue(result);
526 isfiltered = PyObject_IsTrue(result);
521 Py_DECREF(result);
527 Py_DECREF(result);
522 return isfiltered;
528 return isfiltered;
523 } else {
529 } else {
524 return 0;
530 return 0;
525 }
531 }
526 }
532 }
527
533
528 static Py_ssize_t add_roots_get_min(indexObject *self, PyObject *list,
534 static Py_ssize_t add_roots_get_min(indexObject *self, PyObject *list,
529 Py_ssize_t marker, char *phases)
535 Py_ssize_t marker, char *phases)
530 {
536 {
531 PyObject *iter = NULL;
537 PyObject *iter = NULL;
532 PyObject *iter_item = NULL;
538 PyObject *iter_item = NULL;
533 Py_ssize_t min_idx = index_length(self) + 2;
539 Py_ssize_t min_idx = index_length(self) + 2;
534 long iter_item_long;
540 long iter_item_long;
535
541
536 if (PyList_GET_SIZE(list) != 0) {
542 if (PyList_GET_SIZE(list) != 0) {
537 iter = PyObject_GetIter(list);
543 iter = PyObject_GetIter(list);
538 if (iter == NULL)
544 if (iter == NULL)
539 return -2;
545 return -2;
540 while ((iter_item = PyIter_Next(iter))) {
546 while ((iter_item = PyIter_Next(iter))) {
541 if (!pylong_to_long(iter_item, &iter_item_long)) {
547 if (!pylong_to_long(iter_item, &iter_item_long)) {
542 Py_DECREF(iter_item);
548 Py_DECREF(iter_item);
543 return -2;
549 return -2;
544 }
550 }
545 Py_DECREF(iter_item);
551 Py_DECREF(iter_item);
546 if (iter_item_long < min_idx)
552 if (iter_item_long < min_idx)
547 min_idx = iter_item_long;
553 min_idx = iter_item_long;
548 phases[iter_item_long] = (char)marker;
554 phases[iter_item_long] = (char)marker;
549 }
555 }
550 Py_DECREF(iter);
556 Py_DECREF(iter);
551 }
557 }
552
558
553 return min_idx;
559 return min_idx;
554 }
560 }
555
561
556 static inline void set_phase_from_parents(char *phases, int parent_1,
562 static inline void set_phase_from_parents(char *phases, int parent_1,
557 int parent_2, Py_ssize_t i)
563 int parent_2, Py_ssize_t i)
558 {
564 {
559 if (parent_1 >= 0 && phases[parent_1] > phases[i])
565 if (parent_1 >= 0 && phases[parent_1] > phases[i])
560 phases[i] = phases[parent_1];
566 phases[i] = phases[parent_1];
561 if (parent_2 >= 0 && phases[parent_2] > phases[i])
567 if (parent_2 >= 0 && phases[parent_2] > phases[i])
562 phases[i] = phases[parent_2];
568 phases[i] = phases[parent_2];
563 }
569 }
564
570
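add_roots_get_min and set_phase_from_parents together implement a simple forward propagation: seed the phase of each root, then walk revisions in increasing order and raise every revision to the maximum phase of its parents. A standalone sketch with invented names and revision numbers:

PUBLIC, DRAFT, SECRET = 0, 1, 2

def compute_phases_sketch(parents, nrevs, rootsbyphase):
    phases = [PUBLIC] * nrevs
    minrev = nrevs
    for phase, roots in rootsbyphase.items():
        for r in roots:                    # seed the roots of each non-public phase
            phases[r] = phase
            minrev = min(minrev, r)
    for rev in range(minrev, nrevs):       # a child is at least as secret as its parents
        for p in parents.get(rev, ()):
            if p >= 0 and phases[p] > phases[rev]:
                phases[rev] = phases[p]
    return phases

parents = {1: [0], 2: [1], 3: [1]}
print(compute_phases_sketch(parents, 4, {DRAFT: [1], SECRET: [3]}))   # [0, 1, 1, 2]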
565 static PyObject *reachableroots2(indexObject *self, PyObject *args)
571 static PyObject *reachableroots2(indexObject *self, PyObject *args)
566 {
572 {
567
573
568 /* Input */
574 /* Input */
569 long minroot;
575 long minroot;
570 PyObject *includepatharg = NULL;
576 PyObject *includepatharg = NULL;
571 int includepath = 0;
577 int includepath = 0;
572 /* heads and roots are lists */
578 /* heads and roots are lists */
573 PyObject *heads = NULL;
579 PyObject *heads = NULL;
574 PyObject *roots = NULL;
580 PyObject *roots = NULL;
575 PyObject *reachable = NULL;
581 PyObject *reachable = NULL;
576
582
577 PyObject *val;
583 PyObject *val;
578 Py_ssize_t len = index_length(self);
584 Py_ssize_t len = index_length(self);
579 long revnum;
585 long revnum;
580 Py_ssize_t k;
586 Py_ssize_t k;
581 Py_ssize_t i;
587 Py_ssize_t i;
582 Py_ssize_t l;
588 Py_ssize_t l;
583 int r;
589 int r;
584 int parents[2];
590 int parents[2];
585
591
586 /* Internal data structure:
592 /* Internal data structure:
587 * tovisit: array of length len+1 (all revs + nullrev), filled up to
593 * tovisit: array of length len+1 (all revs + nullrev), filled up to
588 * lentovisit
594 * lentovisit
589 *
595 *
590 * revstates: array of length len+1 (all revs + nullrev) */
596 * revstates: array of length len+1 (all revs + nullrev) */
591 int *tovisit = NULL;
597 int *tovisit = NULL;
592 long lentovisit = 0;
598 long lentovisit = 0;
593 enum { RS_SEEN = 1, RS_ROOT = 2, RS_REACHABLE = 4 };
599 enum { RS_SEEN = 1, RS_ROOT = 2, RS_REACHABLE = 4 };
594 char *revstates = NULL;
600 char *revstates = NULL;
595
601
596 /* Get arguments */
602 /* Get arguments */
597 if (!PyArg_ParseTuple(args, "lO!O!O!", &minroot, &PyList_Type, &heads,
603 if (!PyArg_ParseTuple(args, "lO!O!O!", &minroot, &PyList_Type, &heads,
598 &PyList_Type, &roots, &PyBool_Type,
604 &PyList_Type, &roots, &PyBool_Type,
599 &includepatharg))
605 &includepatharg))
600 goto bail;
606 goto bail;
601
607
602 if (includepatharg == Py_True)
608 if (includepatharg == Py_True)
603 includepath = 1;
609 includepath = 1;
604
610
605 /* Initialize return set */
611 /* Initialize return set */
606 reachable = PyList_New(0);
612 reachable = PyList_New(0);
607 if (reachable == NULL)
613 if (reachable == NULL)
608 goto bail;
614 goto bail;
609
615
610 /* Initialize internal data structures */
616 /* Initialize internal data structures */
611 tovisit = (int *)malloc((len + 1) * sizeof(int));
617 tovisit = (int *)malloc((len + 1) * sizeof(int));
612 if (tovisit == NULL) {
618 if (tovisit == NULL) {
613 PyErr_NoMemory();
619 PyErr_NoMemory();
614 goto bail;
620 goto bail;
615 }
621 }
616
622
617 revstates = (char *)calloc(len + 1, 1);
623 revstates = (char *)calloc(len + 1, 1);
618 if (revstates == NULL) {
624 if (revstates == NULL) {
619 PyErr_NoMemory();
625 PyErr_NoMemory();
620 goto bail;
626 goto bail;
621 }
627 }
622
628
623 l = PyList_GET_SIZE(roots);
629 l = PyList_GET_SIZE(roots);
624 for (i = 0; i < l; i++) {
630 for (i = 0; i < l; i++) {
625 revnum = PyInt_AsLong(PyList_GET_ITEM(roots, i));
631 revnum = PyInt_AsLong(PyList_GET_ITEM(roots, i));
626 if (revnum == -1 && PyErr_Occurred())
632 if (revnum == -1 && PyErr_Occurred())
627 goto bail;
633 goto bail;
628 /* If root is out of range, e.g. wdir(), it must be unreachable
634 /* If root is out of range, e.g. wdir(), it must be unreachable
629 * from heads. So we can just ignore it. */
635 * from heads. So we can just ignore it. */
630 if (revnum + 1 < 0 || revnum + 1 >= len + 1)
636 if (revnum + 1 < 0 || revnum + 1 >= len + 1)
631 continue;
637 continue;
632 revstates[revnum + 1] |= RS_ROOT;
638 revstates[revnum + 1] |= RS_ROOT;
633 }
639 }
634
640
635 /* Populate tovisit with all the heads */
641 /* Populate tovisit with all the heads */
636 l = PyList_GET_SIZE(heads);
642 l = PyList_GET_SIZE(heads);
637 for (i = 0; i < l; i++) {
643 for (i = 0; i < l; i++) {
638 revnum = PyInt_AsLong(PyList_GET_ITEM(heads, i));
644 revnum = PyInt_AsLong(PyList_GET_ITEM(heads, i));
639 if (revnum == -1 && PyErr_Occurred())
645 if (revnum == -1 && PyErr_Occurred())
640 goto bail;
646 goto bail;
641 if (revnum + 1 < 0 || revnum + 1 >= len + 1) {
647 if (revnum + 1 < 0 || revnum + 1 >= len + 1) {
642 PyErr_SetString(PyExc_IndexError, "head out of range");
648 PyErr_SetString(PyExc_IndexError, "head out of range");
643 goto bail;
649 goto bail;
644 }
650 }
645 if (!(revstates[revnum + 1] & RS_SEEN)) {
651 if (!(revstates[revnum + 1] & RS_SEEN)) {
646 tovisit[lentovisit++] = (int)revnum;
652 tovisit[lentovisit++] = (int)revnum;
647 revstates[revnum + 1] |= RS_SEEN;
653 revstates[revnum + 1] |= RS_SEEN;
648 }
654 }
649 }
655 }
650
656
651 /* Visit the tovisit list and find the reachable roots */
657 /* Visit the tovisit list and find the reachable roots */
652 k = 0;
658 k = 0;
653 while (k < lentovisit) {
659 while (k < lentovisit) {
654 /* Add the node to reachable if it is a root */
660 /* Add the node to reachable if it is a root */
655 revnum = tovisit[k++];
661 revnum = tovisit[k++];
656 if (revstates[revnum + 1] & RS_ROOT) {
662 if (revstates[revnum + 1] & RS_ROOT) {
657 revstates[revnum + 1] |= RS_REACHABLE;
663 revstates[revnum + 1] |= RS_REACHABLE;
658 val = PyInt_FromLong(revnum);
664 val = PyInt_FromLong(revnum);
659 if (val == NULL)
665 if (val == NULL)
660 goto bail;
666 goto bail;
661 r = PyList_Append(reachable, val);
667 r = PyList_Append(reachable, val);
662 Py_DECREF(val);
668 Py_DECREF(val);
663 if (r < 0)
669 if (r < 0)
664 goto bail;
670 goto bail;
665 if (includepath == 0)
671 if (includepath == 0)
666 continue;
672 continue;
667 }
673 }
668
674
669 /* Add its parents to the list of nodes to visit */
675 /* Add its parents to the list of nodes to visit */
670 if (revnum == -1)
676 if (revnum == -1)
671 continue;
677 continue;
672 r = index_get_parents(self, revnum, parents, (int)len - 1);
678 r = index_get_parents(self, revnum, parents, (int)len - 1);
673 if (r < 0)
679 if (r < 0)
674 goto bail;
680 goto bail;
675 for (i = 0; i < 2; i++) {
681 for (i = 0; i < 2; i++) {
676 if (!(revstates[parents[i] + 1] & RS_SEEN) &&
682 if (!(revstates[parents[i] + 1] & RS_SEEN) &&
677 parents[i] >= minroot) {
683 parents[i] >= minroot) {
678 tovisit[lentovisit++] = parents[i];
684 tovisit[lentovisit++] = parents[i];
679 revstates[parents[i] + 1] |= RS_SEEN;
685 revstates[parents[i] + 1] |= RS_SEEN;
680 }
686 }
681 }
687 }
682 }
688 }
683
689
684 /* Find all the nodes in between the roots we found and the heads
690 /* Find all the nodes in between the roots we found and the heads
685 * and add them to the reachable set */
691 * and add them to the reachable set */
686 if (includepath == 1) {
692 if (includepath == 1) {
687 long minidx = minroot;
693 long minidx = minroot;
688 if (minidx < 0)
694 if (minidx < 0)
689 minidx = 0;
695 minidx = 0;
690 for (i = minidx; i < len; i++) {
696 for (i = minidx; i < len; i++) {
691 if (!(revstates[i + 1] & RS_SEEN))
697 if (!(revstates[i + 1] & RS_SEEN))
692 continue;
698 continue;
693 r = index_get_parents(self, i, parents, (int)len - 1);
699 r = index_get_parents(self, i, parents, (int)len - 1);
694 /* Corrupted index file, error is set from
700 /* Corrupted index file, error is set from
695 * index_get_parents */
701 * index_get_parents */
696 if (r < 0)
702 if (r < 0)
697 goto bail;
703 goto bail;
698 if (((revstates[parents[0] + 1] |
704 if (((revstates[parents[0] + 1] |
699 revstates[parents[1] + 1]) &
705 revstates[parents[1] + 1]) &
700 RS_REACHABLE) &&
706 RS_REACHABLE) &&
701 !(revstates[i + 1] & RS_REACHABLE)) {
707 !(revstates[i + 1] & RS_REACHABLE)) {
702 revstates[i + 1] |= RS_REACHABLE;
708 revstates[i + 1] |= RS_REACHABLE;
703 val = PyInt_FromSsize_t(i);
709 val = PyInt_FromSsize_t(i);
704 if (val == NULL)
710 if (val == NULL)
705 goto bail;
711 goto bail;
706 r = PyList_Append(reachable, val);
712 r = PyList_Append(reachable, val);
707 Py_DECREF(val);
713 Py_DECREF(val);
708 if (r < 0)
714 if (r < 0)
709 goto bail;
715 goto bail;
710 }
716 }
711 }
717 }
712 }
718 }
713
719
714 free(revstates);
720 free(revstates);
715 free(tovisit);
721 free(tovisit);
716 return reachable;
722 return reachable;
717 bail:
723 bail:
718 Py_XDECREF(reachable);
724 Py_XDECREF(reachable);
719 free(revstates);
725 free(revstates);
720 free(tovisit);
726 free(tovisit);
721 return NULL;
727 return NULL;
722 }
728 }
723
729
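The revstates[x + 1] indexing used throughout the function above shifts every revision by one slot so that the null revision (-1) gets a valid entry at index 0. A minimal, self-contained sketch of that convention (the SKETCH_RS_* flag values here are placeholders of ours, not the RS_* constants defined earlier in this file):

#include <assert.h>

#define SKETCH_RS_SEEN (1u << 0)
#define SKETCH_RS_ROOT (1u << 1)

int main(void)
{
	char revstates[5 + 1] = {0}; /* len == 5 revisions, plus slot 0 for rev -1 */
	int revnum = -1;             /* the null revision */
	revstates[revnum + 1] |= SKETCH_RS_SEEN; /* lands safely in slot 0 */
	assert(revstates[0] & SKETCH_RS_SEEN);
	return 0;
}
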
static PyObject *compute_phases_map_sets(indexObject *self, PyObject *args)
{
	PyObject *roots = Py_None;
	PyObject *ret = NULL;
	PyObject *phasessize = NULL;
	PyObject *phaseroots = NULL;
	PyObject *phaseset = NULL;
	PyObject *phasessetlist = NULL;
	PyObject *rev = NULL;
	Py_ssize_t len = index_length(self);
	Py_ssize_t numphase = 0;
	Py_ssize_t minrevallphases = 0;
	Py_ssize_t minrevphase = 0;
	Py_ssize_t i = 0;
	char *phases = NULL;
	long phase;

	if (!PyArg_ParseTuple(args, "O", &roots))
		goto done;
	if (roots == NULL || !PyList_Check(roots)) {
		PyErr_SetString(PyExc_TypeError, "roots must be a list");
		goto done;
	}

	phases = calloc(
	    len, 1); /* phase per rev: {0: public, 1: draft, 2: secret} */
	if (phases == NULL) {
		PyErr_NoMemory();
		goto done;
	}
	/* Put the phase information of all the roots in phases */
	numphase = PyList_GET_SIZE(roots) + 1;
	minrevallphases = len + 1;
	phasessetlist = PyList_New(numphase);
	if (phasessetlist == NULL)
		goto done;

	PyList_SET_ITEM(phasessetlist, 0, Py_None);
	Py_INCREF(Py_None);

	for (i = 0; i < numphase - 1; i++) {
		phaseroots = PyList_GET_ITEM(roots, i);
		phaseset = PySet_New(NULL);
		if (phaseset == NULL)
			goto release;
		PyList_SET_ITEM(phasessetlist, i + 1, phaseset);
		if (!PyList_Check(phaseroots)) {
			PyErr_SetString(PyExc_TypeError,
			                "roots item must be a list");
			goto release;
		}
		minrevphase =
		    add_roots_get_min(self, phaseroots, i + 1, phases);
		if (minrevphase == -2) /* Error from add_roots_get_min */
			goto release;
		minrevallphases = MIN(minrevallphases, minrevphase);
	}
	/* Propagate the phase information from the roots to the revs */
	if (minrevallphases != -1) {
		int parents[2];
		for (i = minrevallphases; i < len; i++) {
			if (index_get_parents(self, i, parents, (int)len - 1) <
			    0)
				goto release;
			set_phase_from_parents(phases, parents[0], parents[1],
			                       i);
		}
	}
	/* Transform phase list to a python list */
	phasessize = PyInt_FromSsize_t(len);
	if (phasessize == NULL)
		goto release;
	for (i = 0; i < len; i++) {
		phase = phases[i];
		/* We only store the sets of revisions for non-public phases;
		 * the public phase is computed as a difference */
		if (phase != 0) {
			phaseset = PyList_GET_ITEM(phasessetlist, phase);
			rev = PyInt_FromSsize_t(i);
			if (rev == NULL)
				goto release;
			PySet_Add(phaseset, rev);
			Py_XDECREF(rev);
		}
	}
	ret = PyTuple_Pack(2, phasessize, phasessetlist);

release:
	Py_XDECREF(phasessize);
	Py_XDECREF(phasessetlist);
done:
	free(phases);
	return ret;
}

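The propagation loop above leans on set_phase_from_parents, which is defined earlier in this file. A sketch of its assumed behaviour (our reading, not the original definition): a child revision is pulled up to the highest, i.e. most secret, phase of its parents.

/* Sketch only: assumed semantics of set_phase_from_parents as used above.
 * phases[] holds 0 (public), 1 (draft) or 2 (secret) per revision. */
static void sketch_set_phase_from_parents(char *phases, int parent_1,
                                          int parent_2, long i)
{
	if (parent_1 >= 0 && phases[parent_1] > phases[i])
		phases[i] = phases[parent_1]; /* inherit draft/secret from p1 */
	if (parent_2 >= 0 && phases[parent_2] > phases[i])
		phases[i] = phases[parent_2]; /* inherit draft/secret from p2 */
}
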
static PyObject *index_headrevs(indexObject *self, PyObject *args)
{
	Py_ssize_t i, j, len;
	char *nothead = NULL;
	PyObject *heads = NULL;
	PyObject *filter = NULL;
	PyObject *filteredrevs = Py_None;

	if (!PyArg_ParseTuple(args, "|O", &filteredrevs)) {
		return NULL;
	}

	if (self->headrevs && filteredrevs == self->filteredrevs)
		return list_copy(self->headrevs);

	Py_DECREF(self->filteredrevs);
	self->filteredrevs = filteredrevs;
	Py_INCREF(filteredrevs);

	if (filteredrevs != Py_None) {
		filter = PyObject_GetAttrString(filteredrevs, "__contains__");
		if (!filter) {
			PyErr_SetString(
			    PyExc_TypeError,
			    "filteredrevs has no attribute __contains__");
			goto bail;
		}
	}

	len = index_length(self);
	heads = PyList_New(0);
	if (heads == NULL)
		goto bail;
	if (len == 0) {
		PyObject *nullid = PyInt_FromLong(-1);
		if (nullid == NULL || PyList_Append(heads, nullid) == -1) {
			Py_XDECREF(nullid);
			goto bail;
		}
		goto done;
	}

	nothead = calloc(len, 1);
	if (nothead == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	for (i = len - 1; i >= 0; i--) {
		int isfiltered;
		int parents[2];

		/* If nothead[i] == 1, it means we've seen an unfiltered child
		 * of this node already, and therefore this node is not
		 * filtered. So we can skip the expensive check_filter step.
		 */
		if (nothead[i] != 1) {
			isfiltered = check_filter(filter, i);
			if (isfiltered == -1) {
				PyErr_SetString(PyExc_TypeError,
				                "unable to check filter");
				goto bail;
			}

			if (isfiltered) {
				nothead[i] = 1;
				continue;
			}
		}

		if (index_get_parents(self, i, parents, (int)len - 1) < 0)
			goto bail;
		for (j = 0; j < 2; j++) {
			if (parents[j] >= 0)
				nothead[parents[j]] = 1;
		}
	}

	for (i = 0; i < len; i++) {
		PyObject *head;

		if (nothead[i])
			continue;
		head = PyInt_FromSsize_t(i);
		if (head == NULL || PyList_Append(heads, head) == -1) {
			Py_XDECREF(head);
			goto bail;
		}
	}

done:
	self->headrevs = heads;
	Py_XDECREF(filter);
	free(nothead);
	return list_copy(self->headrevs);
bail:
	Py_XDECREF(filter);
	Py_XDECREF(heads);
	free(nothead);
	return NULL;
}

/**
 * Obtain the base revision index entry.
 *
 * Callers must ensure that rev >= 0 or illegal memory access may occur.
 */
static inline int index_baserev(indexObject *self, int rev)
{
	const char *data;

	if (rev >= self->length) {
		PyObject *tuple =
		    PyList_GET_ITEM(self->added, rev - self->length);
		long ret;
		if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 3), &ret)) {
			return -2;
		}
		return (int)ret;
	} else {
		data = index_deref(self, rev);
		if (data == NULL) {
			return -2;
		}

		return getbe32(data + 16);
	}
}

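The on-disk branch above reads the base revision straight out of the raw index entry: getbe32 (provided elsewhere in this codebase) is assumed to decode a 32-bit big-endian integer, and the base-revision field is assumed to sit 16 bytes into a v1 index entry. A minimal sketch of that decoding, for illustration only:

/* Sketch of the assumed getbe32 behaviour: 32-bit big-endian decode. */
static unsigned int sketch_getbe32(const char *c)
{
	const unsigned char *d = (const unsigned char *)c;
	return ((unsigned int)d[0] << 24) | ((unsigned int)d[1] << 16) |
	       ((unsigned int)d[2] << 8) | (unsigned int)d[3];
}
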
static PyObject *index_deltachain(indexObject *self, PyObject *args)
{
	int rev, generaldelta;
	PyObject *stoparg;
	int stoprev, iterrev, baserev = -1;
	int stopped;
	PyObject *chain = NULL, *result = NULL;
	const Py_ssize_t length = index_length(self);

	if (!PyArg_ParseTuple(args, "iOi", &rev, &stoparg, &generaldelta)) {
		return NULL;
	}

	if (PyInt_Check(stoparg)) {
		stoprev = (int)PyInt_AsLong(stoparg);
		if (stoprev == -1 && PyErr_Occurred()) {
			return NULL;
		}
	} else if (stoparg == Py_None) {
		stoprev = -2;
	} else {
		PyErr_SetString(PyExc_ValueError,
		                "stoprev must be integer or None");
		return NULL;
	}

	if (rev < 0 || rev >= length) {
		PyErr_SetString(PyExc_ValueError, "revlog index out of range");
		return NULL;
	}

	chain = PyList_New(0);
	if (chain == NULL) {
		return NULL;
	}

	baserev = index_baserev(self, rev);

	/* This should never happen. */
	if (baserev <= -2) {
		/* Error should be set by index_deref() */
		assert(PyErr_Occurred());
		goto bail;
	}

	iterrev = rev;

	while (iterrev != baserev && iterrev != stoprev) {
		PyObject *value = PyInt_FromLong(iterrev);
		if (value == NULL) {
			goto bail;
		}
		if (PyList_Append(chain, value)) {
			Py_DECREF(value);
			goto bail;
		}
		Py_DECREF(value);

		if (generaldelta) {
			iterrev = baserev;
		} else {
			iterrev--;
		}

		if (iterrev < 0) {
			break;
		}

		if (iterrev >= length) {
			PyErr_SetString(PyExc_IndexError,
			                "revision outside index");
			return NULL;
		}

		baserev = index_baserev(self, iterrev);

		/* This should never happen. */
		if (baserev <= -2) {
			/* Error should be set by index_deref() */
			assert(PyErr_Occurred());
			goto bail;
		}
	}

	if (iterrev == stoprev) {
		stopped = 1;
	} else {
		PyObject *value = PyInt_FromLong(iterrev);
		if (value == NULL) {
			goto bail;
		}
		if (PyList_Append(chain, value)) {
			Py_DECREF(value);
			goto bail;
		}
		Py_DECREF(value);

		stopped = 0;
	}

	if (PyList_Reverse(chain)) {
		goto bail;
	}

	result = Py_BuildValue("OO", chain, stopped ? Py_True : Py_False);
	Py_DECREF(chain);
	return result;

bail:
	Py_DECREF(chain);
	return NULL;
}

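The walk above behaves differently depending on how deltas were stored: with generaldelta each step jumps to whatever base index_baserev reports, while without it each step simply moves to the previous revision. As an illustrative reading of the code (numbers invented): if revision 7 reports base 4, stoparg is None, and revisions 6 and 5 also report 4 as their base, the non-generaldelta walk appends 7, 6 and 5, then appends the base 4 itself once iterrev reaches it, and returns the reversed chain [4, 5, 6, 7] together with stopped == False.
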
static inline int64_t
index_segment_span(indexObject *self, Py_ssize_t start_rev, Py_ssize_t end_rev)
{
	int64_t start_offset;
	int64_t end_offset;
	int end_size;
	start_offset = index_get_start(self, start_rev);
	if (start_offset < 0) {
		return -1;
	}
	end_offset = index_get_start(self, end_rev);
	if (end_offset < 0) {
		return -1;
	}
	end_size = index_get_length(self, end_rev);
	if (end_size < 0) {
		return -1;
	}
	if (end_offset < start_offset) {
		PyErr_Format(PyExc_ValueError,
		             "corrupted revlog index: inconsistent offset "
		             "between revisions (%zd) and (%zd)",
		             start_rev, end_rev);
		return -1;
	}
	return (end_offset - start_offset) + (int64_t)end_size;
}

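Worked example with illustrative numbers only: if start_rev begins at byte offset 100 and end_rev begins at offset 160 with a 40-byte stored delta, the span is (160 - 100) + 40 = 100 bytes, i.e. everything that would have to be read to cover both revisions and whatever lies between them.
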
/* returns endidx so that revs[startidx:endidx] has no empty trailing revs */
static Py_ssize_t trim_endidx(indexObject *self, const Py_ssize_t *revs,
                              Py_ssize_t startidx, Py_ssize_t endidx)
{
	int length;
	while (endidx > 1 && endidx > startidx) {
		length = index_get_length(self, revs[endidx - 1]);
		if (length < 0) {
			return -1;
		}
		if (length != 0) {
			break;
		}
		endidx -= 1;
	}
	return endidx;
}

struct Gap {
	int64_t size;
	Py_ssize_t idx;
};

static int gap_compare(const void *left, const void *right)
{
	const struct Gap *l_left = ((const struct Gap *)left);
	const struct Gap *l_right = ((const struct Gap *)right);
	if (l_left->size < l_right->size) {
		return -1;
	} else if (l_left->size > l_right->size) {
		return 1;
	}
	return 0;
}
static int Py_ssize_t_compare(const void *left, const void *right)
{
	const Py_ssize_t l_left = *(const Py_ssize_t *)left;
	const Py_ssize_t l_right = *(const Py_ssize_t *)right;
	if (l_left < l_right) {
		return -1;
	} else if (l_left > l_right) {
		return 1;
	}
	return 0;
}

static PyObject *index_slicechunktodensity(indexObject *self, PyObject *args)
{
	/* method arguments */
	PyObject *list_revs = NULL; /* revisions in the chain */
	double targetdensity = 0;   /* min density to achieve */
	Py_ssize_t mingapsize = 0;  /* threshold to ignore gaps */

	/* other core variables */
	Py_ssize_t idxlen = index_length(self);
	Py_ssize_t i;            /* used for various iteration */
	PyObject *result = NULL; /* the final return of the function */

	/* generic information about the delta chain being sliced */
	Py_ssize_t num_revs = 0;    /* size of the full delta chain */
	Py_ssize_t *revs = NULL;    /* native array of revisions in the chain */
	int64_t chainpayload = 0;   /* sum of all deltas in the chain */
	int64_t deltachainspan = 0; /* distance from first byte to last byte */

	/* variables used for slicing the delta chain */
	int64_t readdata = 0; /* amount of data currently planned to be read */
	double density = 0;   /* ratio of payload data to data read */
	int64_t previous_end;
	struct Gap *gaps = NULL; /* array of notable gaps in the chain */
	Py_ssize_t num_gaps =
	    0; /* total number of notable gaps recorded so far */
	Py_ssize_t *selected_indices = NULL; /* indices of gaps skipped over */
	Py_ssize_t num_selected = 0;         /* number of gaps skipped */
	PyObject *chunk = NULL;              /* individual slice */
	PyObject *allchunks = NULL;          /* all slices */
	Py_ssize_t previdx;

	/* parsing arguments */
	if (!PyArg_ParseTuple(args, "O!dn", &PyList_Type, &list_revs,
	                      &targetdensity, &mingapsize)) {
		goto bail;
	}

	/* If the delta chain contains a single element, we do not need slicing
	 */
	num_revs = PyList_GET_SIZE(list_revs);
	if (num_revs <= 1) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}

	/* Turn the python list into a native integer array (for efficiency) */
	revs = (Py_ssize_t *)calloc(num_revs, sizeof(Py_ssize_t));
	if (revs == NULL) {
		PyErr_NoMemory();
		goto bail;
	}
	for (i = 0; i < num_revs; i++) {
		Py_ssize_t revnum = PyInt_AsLong(PyList_GET_ITEM(list_revs, i));
		if (revnum == -1 && PyErr_Occurred()) {
			goto bail;
		}
		if (revnum < 0 || revnum >= idxlen) {
			PyErr_Format(PyExc_IndexError,
			             "index out of range: %zd", revnum);
			goto bail;
		}
		revs[i] = revnum;
	}

	/* Compute and check various properties of the unsliced delta chain */
	deltachainspan = index_segment_span(self, revs[0], revs[num_revs - 1]);
	if (deltachainspan < 0) {
		goto bail;
	}

	if (deltachainspan <= mingapsize) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}
	chainpayload = 0;
	for (i = 0; i < num_revs; i++) {
		int tmp = index_get_length(self, revs[i]);
		if (tmp < 0) {
			goto bail;
		}
		chainpayload += tmp;
	}

	readdata = deltachainspan;
	density = 1.0;

	if (0 < deltachainspan) {
		density = (double)chainpayload / (double)deltachainspan;
	}

	if (density >= targetdensity) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}

	/* if chain is too sparse, look for relevant gaps */
	gaps = (struct Gap *)calloc(num_revs, sizeof(struct Gap));
	if (gaps == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	previous_end = -1;
	for (i = 0; i < num_revs; i++) {
		int64_t revstart;
		int revsize;
		revstart = index_get_start(self, revs[i]);
		if (revstart < 0) {
			goto bail;
		};
		revsize = index_get_length(self, revs[i]);
		if (revsize < 0) {
			goto bail;
		};
		if (revsize == 0) {
			continue;
		}
		if (previous_end >= 0) {
			int64_t gapsize = revstart - previous_end;
			if (gapsize > mingapsize) {
				gaps[num_gaps].size = gapsize;
				gaps[num_gaps].idx = i;
				num_gaps += 1;
			}
		}
		previous_end = revstart + revsize;
	}
	if (num_gaps == 0) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}
	qsort(gaps, num_gaps, sizeof(struct Gap), &gap_compare);

	/* Slice the largest gaps first; they improve the density the most */
	selected_indices =
	    (Py_ssize_t *)malloc((num_gaps + 1) * sizeof(Py_ssize_t));
	if (selected_indices == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	for (i = num_gaps - 1; i >= 0; i--) {
		selected_indices[num_selected] = gaps[i].idx;
		readdata -= gaps[i].size;
		num_selected += 1;
		if (readdata <= 0) {
			density = 1.0;
		} else {
			density = (double)chainpayload / (double)readdata;
		}
		if (density >= targetdensity) {
			break;
		}
	}
	qsort(selected_indices, num_selected, sizeof(Py_ssize_t),
	      &Py_ssize_t_compare);

	/* create the resulting slices */
	allchunks = PyList_New(0);
	if (allchunks == NULL) {
		goto bail;
	}
	previdx = 0;
	selected_indices[num_selected] = num_revs;
	for (i = 0; i <= num_selected; i++) {
		Py_ssize_t idx = selected_indices[i];
		Py_ssize_t endidx = trim_endidx(self, revs, previdx, idx);
		if (endidx < 0) {
			goto bail;
		}
		if (previdx < endidx) {
			chunk = PyList_GetSlice(list_revs, previdx, endidx);
			if (chunk == NULL) {
				goto bail;
			}
			if (PyList_Append(allchunks, chunk) == -1) {
				goto bail;
			}
			Py_DECREF(chunk);
			chunk = NULL;
		}
		previdx = idx;
	}
	result = allchunks;
	goto done;

bail:
	Py_XDECREF(allchunks);
	Py_XDECREF(chunk);
done:
	free(revs);
	free(gaps);
	free(selected_indices);
	return result;
}

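As a worked example of the density logic above (illustrative numbers, not taken from a real revlog): a chain whose deltas total 300 bytes but which spans 1000 bytes on disk has a density of 0.3. Skipping a single 600-byte gap drops readdata to 400 bytes and lifts the density to 300 / 400 = 0.75, so with a target density of, say, 0.5 the gap-selection loop stops after that first gap and the chain is cut into two slices around it.
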
static inline int nt_level(const char *node, Py_ssize_t level)
{
	int v = node[level >> 1];
	if (!(level & 1))
		v >>= 4;
	return v & 0xf;
}

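Each level of the node tree therefore consumes one hex nybble of the 20-byte binary node: for a node whose first byte is 0xab, nt_level(node, 0) yields 0xa (the high nybble) and nt_level(node, 1) yields 0xb (the low nybble).
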
/*
 * Return values:
 *
 * -4: match is ambiguous (multiple candidates)
 * -2: not found
 * rest: valid rev
 */
static int nt_find(nodetree *self, const char *node, Py_ssize_t nodelen,
                   int hex)
{
	int (*getnybble)(const char *, Py_ssize_t) = hex ? hexdigit : nt_level;
	int level, maxlevel, off;

	if (nodelen == 20 && node[0] == '\0' && memcmp(node, nullid, 20) == 0)
		return -1;

	if (hex)
		maxlevel = nodelen > 40 ? 40 : (int)nodelen;
	else
		maxlevel = nodelen > 20 ? 40 : ((int)nodelen * 2);

	for (level = off = 0; level < maxlevel; level++) {
		int k = getnybble(node, level);
		nodetreenode *n = &self->nodes[off];
		int v = n->children[k];

		if (v < 0) {
			const char *n;
			Py_ssize_t i;

			v = -(v + 2);
			n = index_node(self->index, v);
			if (n == NULL)
				return -2;
			for (i = level; i < maxlevel; i++)
				if (getnybble(node, i) != nt_level(n, i))
					return -2;
			return v;
		}
		if (v == 0)
			return -2;
		off = v;
	}
	/* multiple matches against an ambiguous prefix */
	return -4;
}

static int nt_new(nodetree *self)
{
	if (self->length == self->capacity) {
		unsigned newcapacity;
		nodetreenode *newnodes;
		newcapacity = self->capacity * 2;
		if (newcapacity >= INT_MAX / sizeof(nodetreenode)) {
			PyErr_SetString(PyExc_MemoryError,
			                "overflow in nt_new");
			return -1;
		}
		newnodes =
		    realloc(self->nodes, newcapacity * sizeof(nodetreenode));
		if (newnodes == NULL) {
			PyErr_SetString(PyExc_MemoryError, "out of memory");
			return -1;
		}
		self->capacity = newcapacity;
		self->nodes = newnodes;
		memset(&self->nodes[self->length], 0,
		       sizeof(nodetreenode) * (self->capacity - self->length));
	}
	return self->length++;
}

static int nt_insert(nodetree *self, const char *node, int rev)
{
	int level = 0;
	int off = 0;

	while (level < 40) {
		int k = nt_level(node, level);
		nodetreenode *n;
		int v;

		n = &self->nodes[off];
		v = n->children[k];

		if (v == 0) {
			n->children[k] = -rev - 2;
			return 0;
		}
		if (v < 0) {
			const char *oldnode =
			    index_node_existing(self->index, -(v + 2));
			int noff;

			if (oldnode == NULL)
				return -1;
			if (!memcmp(oldnode, node, 20)) {
				n->children[k] = -rev - 2;
				return 0;
			}
			noff = nt_new(self);
			if (noff == -1)
				return -1;
			/* self->nodes may have been changed by realloc */
			self->nodes[off].children[k] = noff;
			off = noff;
			n = &self->nodes[off];
			n->children[nt_level(oldnode, ++level)] = v;
			if (level > self->depth)
				self->depth = level;
			self->splits += 1;
		} else {
			level += 1;
			off = v;
		}
	}

	return -1;
}

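A child slot in the tree therefore serves three roles: a positive value is the index of another internal node, zero means "empty", and a negative value encodes a revision. A minimal, self-contained sketch of that encoding as used above (the program itself is ours, nothing here comes from the original file):

#include <assert.h>

int main(void)
{
	int rev = 5;
	int v = -rev - 2;         /* how a leaf stores revision 5: -7 */
	assert(-(v + 2) == rev);  /* decoding, as done in nt_find() */
	assert(-(-2) - 2 == 0);   /* rev -2 encodes as 0, i.e. "empty"; see nt_delete_node() */
	return 0;
}
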
static PyObject *ntobj_insert(nodetreeObject *self, PyObject *args)
{
	Py_ssize_t rev;
	const char *node;
	Py_ssize_t length;
	if (!PyArg_ParseTuple(args, "n", &rev))
		return NULL;
	length = index_length(self->nt.index);
	if (rev < 0 || rev >= length) {
		PyErr_SetString(PyExc_ValueError, "revlog index out of range");
		return NULL;
	}
	node = index_node_existing(self->nt.index, rev);
	if (nt_insert(&self->nt, node, (int)rev) == -1)
		return NULL;
	Py_RETURN_NONE;
}

static int nt_delete_node(nodetree *self, const char *node)
{
	/* rev==-2 happens to get encoded as 0, which is interpreted as not set
	 */
	return nt_insert(self, node, -2);
}

static int nt_init(nodetree *self, indexObject *index, unsigned capacity)
{
	/* Initialize before overflow-checking to avoid nt_dealloc() crash. */
	self->nodes = NULL;

	self->index = index;
	/* The input capacity is in terms of revisions, while the field is in
	 * terms of nodetree nodes. */
	self->capacity = (capacity < 4 ? 4 : capacity / 2);
	self->depth = 0;
	self->splits = 0;
	if ((size_t)self->capacity > INT_MAX / sizeof(nodetreenode)) {
		PyErr_SetString(PyExc_ValueError, "overflow in init_nt");
		return -1;
	}
	self->nodes = calloc(self->capacity, sizeof(nodetreenode));
	if (self->nodes == NULL) {
		PyErr_NoMemory();
		return -1;
	}
	self->length = 1;
	return 0;
}

static PyTypeObject indexType;

static int ntobj_init(nodetreeObject *self, PyObject *args)
{
	PyObject *index;
	unsigned capacity;
	if (!PyArg_ParseTuple(args, "O!I", &indexType, &index, &capacity))
		return -1;
	Py_INCREF(index);
	return nt_init(&self->nt, (indexObject *)index, capacity);
}

static int nt_partialmatch(nodetree *self, const char *node, Py_ssize_t nodelen)
{
	return nt_find(self, node, nodelen, 1);
}

/*
 * Find the length of the shortest unique prefix of node.
 *
 * Return values:
 *
 * -3: error (exception set)
 * -2: not found (no exception set)
 * rest: length of shortest prefix
 */
static int nt_shortest(nodetree *self, const char *node)
{
	int level, off;

	for (level = off = 0; level < 40; level++) {
		int k, v;
		nodetreenode *n = &self->nodes[off];
		k = nt_level(node, level);
		v = n->children[k];
		if (v < 0) {
			const char *n;
			v = -(v + 2);
			n = index_node_existing(self->index, v);
			if (n == NULL)
				return -3;
			if (memcmp(node, n, 20) != 0)
				/*
				 * Found a unique prefix, but it wasn't for the
				 * requested node (i.e. the requested node does
				 * not exist).
				 */
				return -2;
			return level + 1;
		}
		if (v == 0)
			return -2;
		off = v;
	}
	/*
	 * The node was still not unique after 40 hex digits, so this won't
	 * happen. Also, if we get here, then there's a programming error in
	 * this file that made us insert a node longer than 40 hex digits.
	 */
	PyErr_SetString(PyExc_Exception, "broken node tree");
	return -3;
}

static PyObject *ntobj_shortest(nodetreeObject *self, PyObject *args)
{
	PyObject *val;
	char *node;
	int length;

	if (!PyArg_ParseTuple(args, "O", &val))
		return NULL;
	if (node_check(val, &node) == -1)
		return NULL;

	length = nt_shortest(&self->nt, node);
	if (length == -3)
		return NULL;
	if (length == -2) {
		raise_revlog_error();
		return NULL;
	}
	return PyInt_FromLong(length);
}

static void nt_dealloc(nodetree *self)
{
	free(self->nodes);
	self->nodes = NULL;
}

static void ntobj_dealloc(nodetreeObject *self)
{
	Py_XDECREF(self->nt.index);
	nt_dealloc(&self->nt);
	PyObject_Del(self);
}

static PyMethodDef ntobj_methods[] = {
    {"insert", (PyCFunction)ntobj_insert, METH_VARARGS,
     "insert an index entry"},
    {"shortest", (PyCFunction)ntobj_shortest, METH_VARARGS,
     "find length of shortest hex nodeid of a binary ID"},
    {NULL} /* Sentinel */
};

static PyTypeObject nodetreeType = {
    PyVarObject_HEAD_INIT(NULL, 0) /* header */
    "parsers.nodetree",            /* tp_name */
    sizeof(nodetreeObject),        /* tp_basicsize */
    0,                             /* tp_itemsize */
    (destructor)ntobj_dealloc,     /* tp_dealloc */
    0,                             /* tp_print */
    0,                             /* tp_getattr */
    0,                             /* tp_setattr */
    0,                             /* tp_compare */
    0,                             /* tp_repr */
    0,                             /* tp_as_number */
    0,                             /* tp_as_sequence */
    0,                             /* tp_as_mapping */
    0,                             /* tp_hash */
    0,                             /* tp_call */
    0,                             /* tp_str */
    0,                             /* tp_getattro */
    0,                             /* tp_setattro */
    0,                             /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,            /* tp_flags */
    "nodetree",                    /* tp_doc */
    0,                             /* tp_traverse */
    0,                             /* tp_clear */
    0,                             /* tp_richcompare */
    0,                             /* tp_weaklistoffset */
    0,                             /* tp_iter */
    0,                             /* tp_iternext */
    ntobj_methods,                 /* tp_methods */
    0,                             /* tp_members */
    0,                             /* tp_getset */
    0,                             /* tp_base */
    0,                             /* tp_dict */
    0,                             /* tp_descr_get */
    0,                             /* tp_descr_set */
    0,                             /* tp_dictoffset */
    (initproc)ntobj_init,          /* tp_init */
    0,                             /* tp_alloc */
};

static int index_init_nt(indexObject *self)
{
	if (!self->ntinitialized) {
		if (nt_init(&self->nt, self, (int)self->raw_length) == -1) {
			nt_dealloc(&self->nt);
			return -1;
		}
		if (nt_insert(&self->nt, nullid, -1) == -1) {
			nt_dealloc(&self->nt);
			return -1;
		}
		self->ntinitialized = 1;
		self->ntrev = (int)index_length(self);
		self->ntlookups = 1;
		self->ntmisses = 0;
	}
	return 0;
}

/*
 * Return values:
 *
 * -3: error (exception set)
 * -2: not found (no exception set)
 * rest: valid rev
 */
static int index_find_node(indexObject *self, const char *node,
                           Py_ssize_t nodelen)
{
	int rev;

	if (index_init_nt(self) == -1)
		return -3;

	self->ntlookups++;
	rev = nt_find(&self->nt, node, nodelen, 0);
	if (rev >= -1)
		return rev;

	/*
	 * For the first handful of lookups, we scan the entire index,
	 * and cache only the matching nodes. This optimizes for cases
	 * like "hg tip", where only a few nodes are accessed.
	 *
	 * After that, we cache every node we visit, using a single
	 * scan amortized over multiple lookups. This gives the best
	 * bulk performance, e.g. for "hg log".
	 */
	if (self->ntmisses++ < 4) {
		for (rev = self->ntrev - 1; rev >= 0; rev--) {
			const char *n = index_node_existing(self, rev);
			if (n == NULL)
				return -3;
			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
				if (nt_insert(&self->nt, n, rev) == -1)
					return -3;
				break;
			}
		}
	} else {
		for (rev = self->ntrev - 1; rev >= 0; rev--) {
			const char *n = index_node_existing(self, rev);
			if (n == NULL)
				return -3;
			if (nt_insert(&self->nt, n, rev) == -1) {
				self->ntrev = rev + 1;
				return -3;
			}
			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
				break;
			}
		}
		self->ntrev = rev;
	}

	if (rev >= 0)
		return rev;
	return -2;
}

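One detail worth spelling out beyond the comment above: the two branches also differ in their bookkeeping. The first few misses insert only the node that matched, leaving ntrev untouched, while later misses insert every node they walk past and record in ntrev how far this backwards backfill has progressed, so repeated lookups eventually amortize to a single full scan of the index.
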
1733 static PyObject *index_getitem(indexObject *self, PyObject *value)
1739 static PyObject *index_getitem(indexObject *self, PyObject *value)
1734 {
1740 {
1735 char *node;
1741 char *node;
1736 int rev;
1742 int rev;
1737
1743
1738 if (PyInt_Check(value)) {
1744 if (PyInt_Check(value)) {
1739 long idx;
1745 long idx;
1740 if (!pylong_to_long(value, &idx)) {
1746 if (!pylong_to_long(value, &idx)) {
1741 return NULL;
1747 return NULL;
1742 }
1748 }
1743 return index_get(self, idx);
1749 return index_get(self, idx);
1744 }
1750 }
1745
1751
1746 if (node_check(value, &node) == -1)
1752 if (node_check(value, &node) == -1)
1747 return NULL;
1753 return NULL;
1748 rev = index_find_node(self, node, 20);
1754 rev = index_find_node(self, node, 20);
1749 if (rev >= -1)
1755 if (rev >= -1)
1750 return PyInt_FromLong(rev);
1756 return PyInt_FromLong(rev);
1751 if (rev == -2)
1757 if (rev == -2)
1752 raise_revlog_error();
1758 raise_revlog_error();
1753 return NULL;
1759 return NULL;
1754 }
1760 }
1755
1761
1756 /*
1762 /*
1757 * Fully populate the radix tree.
1763 * Fully populate the radix tree.
1758 */
1764 */
1759 static int index_populate_nt(indexObject *self)
1765 static int index_populate_nt(indexObject *self)
1760 {
1766 {
1761 int rev;
1767 int rev;
1762 if (self->ntrev > 0) {
1768 if (self->ntrev > 0) {
1763 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1769 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1764 const char *n = index_node_existing(self, rev);
1770 const char *n = index_node_existing(self, rev);
1765 if (n == NULL)
1771 if (n == NULL)
1766 return -1;
1772 return -1;
1767 if (nt_insert(&self->nt, n, rev) == -1)
1773 if (nt_insert(&self->nt, n, rev) == -1)
1768 return -1;
1774 return -1;
1769 }
1775 }
1770 self->ntrev = -1;
1776 self->ntrev = -1;
1771 }
1777 }
1772 return 0;
1778 return 0;
1773 }
1779 }
1774
1780
1775 static PyObject *index_partialmatch(indexObject *self, PyObject *args)
1781 static PyObject *index_partialmatch(indexObject *self, PyObject *args)
1776 {
1782 {
1777 const char *fullnode;
1783 const char *fullnode;
1778 int nodelen;
1784 int nodelen;
1779 char *node;
1785 char *node;
1780 int rev, i;
1786 int rev, i;
1781
1787
1782 if (!PyArg_ParseTuple(args, PY23("s#", "y#"), &node, &nodelen))
1788 if (!PyArg_ParseTuple(args, PY23("s#", "y#"), &node, &nodelen))
1783 return NULL;
1789 return NULL;
1784
1790
1785 if (nodelen < 1) {
1791 if (nodelen < 1) {
1786 PyErr_SetString(PyExc_ValueError, "key too short");
1792 PyErr_SetString(PyExc_ValueError, "key too short");
1787 return NULL;
1793 return NULL;
1788 }
1794 }
1789
1795
1790 if (nodelen > 40) {
1796 if (nodelen > 40) {
1791 PyErr_SetString(PyExc_ValueError, "key too long");
1797 PyErr_SetString(PyExc_ValueError, "key too long");
1792 return NULL;
1798 return NULL;
1793 }
1799 }
1794
1800
1795 for (i = 0; i < nodelen; i++)
1801 for (i = 0; i < nodelen; i++)
1796 hexdigit(node, i);
1802 hexdigit(node, i);
1797 if (PyErr_Occurred()) {
1803 if (PyErr_Occurred()) {
1798 /* input contains non-hex characters */
1804 /* input contains non-hex characters */
1799 PyErr_Clear();
1805 PyErr_Clear();
1800 Py_RETURN_NONE;
1806 Py_RETURN_NONE;
1801 }
1807 }
1802
1808
1803 if (index_init_nt(self) == -1)
1809 if (index_init_nt(self) == -1)
1804 return NULL;
1810 return NULL;
1805 if (index_populate_nt(self) == -1)
1811 if (index_populate_nt(self) == -1)
1806 return NULL;
1812 return NULL;
1807 rev = nt_partialmatch(&self->nt, node, nodelen);
1813 rev = nt_partialmatch(&self->nt, node, nodelen);
1808
1814
1809 switch (rev) {
1815 switch (rev) {
1810 case -4:
1816 case -4:
1811 raise_revlog_error();
1817 raise_revlog_error();
1812 return NULL;
1818 return NULL;
1813 case -2:
1819 case -2:
1814 Py_RETURN_NONE;
1820 Py_RETURN_NONE;
1815 case -1:
1821 case -1:
1816 return PyBytes_FromStringAndSize(nullid, 20);
1822 return PyBytes_FromStringAndSize(nullid, 20);
1817 }
1823 }
1818
1824
1819 fullnode = index_node_existing(self, rev);
1825 fullnode = index_node_existing(self, rev);
1820 if (fullnode == NULL) {
1826 if (fullnode == NULL) {
1821 return NULL;
1827 return NULL;
1822 }
1828 }
1823 return PyBytes_FromStringAndSize(fullnode, 20);
1829 return PyBytes_FromStringAndSize(fullnode, 20);
1824 }
1830 }
1825
1831
1826 static PyObject *index_shortest(indexObject *self, PyObject *args)
1832 static PyObject *index_shortest(indexObject *self, PyObject *args)
1827 {
1833 {
1828 PyObject *val;
1834 PyObject *val;
1829 char *node;
1835 char *node;
1830 int length;
1836 int length;
1831
1837
1832 if (!PyArg_ParseTuple(args, "O", &val))
1838 if (!PyArg_ParseTuple(args, "O", &val))
1833 return NULL;
1839 return NULL;
1834 if (node_check(val, &node) == -1)
1840 if (node_check(val, &node) == -1)
1835 return NULL;
1841 return NULL;
1836
1842
1837 self->ntlookups++;
1843 self->ntlookups++;
1838 if (index_init_nt(self) == -1)
1844 if (index_init_nt(self) == -1)
1839 return NULL;
1845 return NULL;
1840 if (index_populate_nt(self) == -1)
1846 if (index_populate_nt(self) == -1)
1841 return NULL;
1847 return NULL;
1842 length = nt_shortest(&self->nt, node);
1848 length = nt_shortest(&self->nt, node);
1843 if (length == -3)
1849 if (length == -3)
1844 return NULL;
1850 return NULL;
1845 if (length == -2) {
1851 if (length == -2) {
1846 raise_revlog_error();
1852 raise_revlog_error();
1847 return NULL;
1853 return NULL;
1848 }
1854 }
1849 return PyInt_FromLong(length);
1855 return PyInt_FromLong(length);
1850 }
1856 }
1851
1857
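partialmatch() and shortest() are exposed as methods on the index object (see the method table further down). A hedged usage sketch against a repository's changelog index; the module paths are those of a standard Mercurial install, and the local repository, its revision 0, and the availability of the C extension are assumptions made for the example:

from mercurial import hg, ui as uimod
from mercurial.node import hex

repo = hg.repository(uimod.ui.load(), b'.')   # any local repository
index = repo.changelog.index                  # a parsers.index when the C extension is in use
node = repo.changelog.node(0)                 # full 20-byte binary node of revision 0
prefix = hex(node)[:6]                        # a hex prefix, assumed unambiguous here
print(index.partialmatch(prefix))             # full binary node, or None if nothing matches
print(index.shortest(node))                   # length of the shortest unambiguous hex prefix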
1852 static PyObject *index_m_get(indexObject *self, PyObject *args)
1858 static PyObject *index_m_get(indexObject *self, PyObject *args)
1853 {
1859 {
1854 PyObject *val;
1860 PyObject *val;
1855 char *node;
1861 char *node;
1856 int rev;
1862 int rev;
1857
1863
1858 if (!PyArg_ParseTuple(args, "O", &val))
1864 if (!PyArg_ParseTuple(args, "O", &val))
1859 return NULL;
1865 return NULL;
1860 if (node_check(val, &node) == -1)
1866 if (node_check(val, &node) == -1)
1861 return NULL;
1867 return NULL;
1862 rev = index_find_node(self, node, 20);
1868 rev = index_find_node(self, node, 20);
1863 if (rev == -3)
1869 if (rev == -3)
1864 return NULL;
1870 return NULL;
1865 if (rev == -2)
1871 if (rev == -2)
1866 Py_RETURN_NONE;
1872 Py_RETURN_NONE;
1867 return PyInt_FromLong(rev);
1873 return PyInt_FromLong(rev);
1868 }
1874 }
1869
1875
1870 static int index_contains(indexObject *self, PyObject *value)
1876 static int index_contains(indexObject *self, PyObject *value)
1871 {
1877 {
1872 char *node;
1878 char *node;
1873
1879
1874 if (PyInt_Check(value)) {
1880 if (PyInt_Check(value)) {
1875 long rev;
1881 long rev;
1876 if (!pylong_to_long(value, &rev)) {
1882 if (!pylong_to_long(value, &rev)) {
1877 return -1;
1883 return -1;
1878 }
1884 }
1879 return rev >= -1 && rev < index_length(self);
1885 return rev >= -1 && rev < index_length(self);
1880 }
1886 }
1881
1887
1882 if (node_check(value, &node) == -1)
1888 if (node_check(value, &node) == -1)
1883 return -1;
1889 return -1;
1884
1890
1885 switch (index_find_node(self, node, 20)) {
1891 switch (index_find_node(self, node, 20)) {
1886 case -3:
1892 case -3:
1887 return -1;
1893 return -1;
1888 case -2:
1894 case -2:
1889 return 0;
1895 return 0;
1890 default:
1896 default:
1891 return 1;
1897 return 1;
1892 }
1898 }
1893 }
1899 }
1894
1900
1895 typedef uint64_t bitmask;
1901 typedef uint64_t bitmask;
1896
1902
1897 /*
1903 /*
1898 * Given a disjoint set of revs, return all candidates for the
1904 * Given a disjoint set of revs, return all candidates for the
1899 * greatest common ancestor. In revset notation, this is the set
1905 * greatest common ancestor. In revset notation, this is the set
1900 * "heads(::a and ::b and ...)"
1906 * "heads(::a and ::b and ...)"
1901 */
1907 */
1902 static PyObject *find_gca_candidates(indexObject *self, const int *revs,
1908 static PyObject *find_gca_candidates(indexObject *self, const int *revs,
1903 int revcount)
1909 int revcount)
1904 {
1910 {
1905 const bitmask allseen = (1ull << revcount) - 1;
1911 const bitmask allseen = (1ull << revcount) - 1;
1906 const bitmask poison = 1ull << revcount;
1912 const bitmask poison = 1ull << revcount;
1907 PyObject *gca = PyList_New(0);
1913 PyObject *gca = PyList_New(0);
1908 int i, v, interesting;
1914 int i, v, interesting;
1909 int maxrev = -1;
1915 int maxrev = -1;
1910 bitmask sp;
1916 bitmask sp;
1911 bitmask *seen;
1917 bitmask *seen;
1912
1918
1913 if (gca == NULL)
1919 if (gca == NULL)
1914 return PyErr_NoMemory();
1920 return PyErr_NoMemory();
1915
1921
1916 for (i = 0; i < revcount; i++) {
1922 for (i = 0; i < revcount; i++) {
1917 if (revs[i] > maxrev)
1923 if (revs[i] > maxrev)
1918 maxrev = revs[i];
1924 maxrev = revs[i];
1919 }
1925 }
1920
1926
1921 seen = calloc(sizeof(*seen), maxrev + 1);
1927 seen = calloc(sizeof(*seen), maxrev + 1);
1922 if (seen == NULL) {
1928 if (seen == NULL) {
1923 Py_DECREF(gca);
1929 Py_DECREF(gca);
1924 return PyErr_NoMemory();
1930 return PyErr_NoMemory();
1925 }
1931 }
1926
1932
1927 for (i = 0; i < revcount; i++)
1933 for (i = 0; i < revcount; i++)
1928 seen[revs[i]] = 1ull << i;
1934 seen[revs[i]] = 1ull << i;
1929
1935
1930 interesting = revcount;
1936 interesting = revcount;
1931
1937
1932 for (v = maxrev; v >= 0 && interesting; v--) {
1938 for (v = maxrev; v >= 0 && interesting; v--) {
1933 bitmask sv = seen[v];
1939 bitmask sv = seen[v];
1934 int parents[2];
1940 int parents[2];
1935
1941
1936 if (!sv)
1942 if (!sv)
1937 continue;
1943 continue;
1938
1944
1939 if (sv < poison) {
1945 if (sv < poison) {
1940 interesting -= 1;
1946 interesting -= 1;
1941 if (sv == allseen) {
1947 if (sv == allseen) {
1942 PyObject *obj = PyInt_FromLong(v);
1948 PyObject *obj = PyInt_FromLong(v);
1943 if (obj == NULL)
1949 if (obj == NULL)
1944 goto bail;
1950 goto bail;
1945 if (PyList_Append(gca, obj) == -1) {
1951 if (PyList_Append(gca, obj) == -1) {
1946 Py_DECREF(obj);
1952 Py_DECREF(obj);
1947 goto bail;
1953 goto bail;
1948 }
1954 }
1949 sv |= poison;
1955 sv |= poison;
1950 for (i = 0; i < revcount; i++) {
1956 for (i = 0; i < revcount; i++) {
1951 if (revs[i] == v)
1957 if (revs[i] == v)
1952 goto done;
1958 goto done;
1953 }
1959 }
1954 }
1960 }
1955 }
1961 }
1956 if (index_get_parents(self, v, parents, maxrev) < 0)
1962 if (index_get_parents(self, v, parents, maxrev) < 0)
1957 goto bail;
1963 goto bail;
1958
1964
1959 for (i = 0; i < 2; i++) {
1965 for (i = 0; i < 2; i++) {
1960 int p = parents[i];
1966 int p = parents[i];
1961 if (p == -1)
1967 if (p == -1)
1962 continue;
1968 continue;
1963 sp = seen[p];
1969 sp = seen[p];
1964 if (sv < poison) {
1970 if (sv < poison) {
1965 if (sp == 0) {
1971 if (sp == 0) {
1966 seen[p] = sv;
1972 seen[p] = sv;
1967 interesting++;
1973 interesting++;
1968 } else if (sp != sv)
1974 } else if (sp != sv)
1969 seen[p] |= sv;
1975 seen[p] |= sv;
1970 } else {
1976 } else {
1971 if (sp && sp < poison)
1977 if (sp && sp < poison)
1972 interesting--;
1978 interesting--;
1973 seen[p] = sv;
1979 seen[p] = sv;
1974 }
1980 }
1975 }
1981 }
1976 }
1982 }
1977
1983
1978 done:
1984 done:
1979 free(seen);
1985 free(seen);
1980 return gca;
1986 return gca;
1981 bail:
1987 bail:
1982 free(seen);
1988 free(seen);
1983 Py_XDECREF(gca);
1989 Py_XDECREF(gca);
1984 return NULL;
1990 return NULL;
1985 }
1991 }
1986
1992
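To make the bitmask walk above concrete, here is an illustrative pure-Python re-implementation (a sketch, not part of the extension): each queried rev owns one bit, bits propagate from child to parents, a rev whose mask equals allseen becomes a candidate, and the extra poison bit then marks its ancestors so they cannot also become candidates. The gca_candidates name and the toy parent table are made up for the example.

def gca_candidates(parents, revs):
    # parents[r] is a pair of parent revs, -1 meaning "no parent";
    # revs is the (disjoint) query set.
    allseen = (1 << len(revs)) - 1
    poison = 1 << len(revs)
    maxrev = max(revs)
    seen = [0] * (maxrev + 1)
    for i, r in enumerate(revs):
        seen[r] = 1 << i
    gca = []
    interesting = len(revs)
    for v in range(maxrev, -1, -1):
        if not interesting:
            break
        sv = seen[v]
        if not sv:
            continue
        if sv < poison:
            interesting -= 1
            if sv == allseen:
                gca.append(v)        # reachable from every queried rev
                sv |= poison         # poison v's own ancestors
                if v in revs:
                    break
        for p in parents[v]:
            if p == -1:
                continue
            sp = seen[p]
            if sv < poison:
                if sp == 0:
                    interesting += 1
                seen[p] = sp | sv
            else:
                if sp and sp < poison:
                    interesting -= 1
                seen[p] = sv
    return gca

# 0 is the root, 1 and 2 are its children, 3 merges 1 and 2.
parents = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (1, 2)}
print(gca_candidates(parents, [1, 2]))   # -> [0]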
1987 /*
1993 /*
1988 * Given a disjoint set of revs, return the subset with the longest
1994 * Given a disjoint set of revs, return the subset with the longest
1989 * path to the root.
1995 * path to the root.
1990 */
1996 */
1991 static PyObject *find_deepest(indexObject *self, PyObject *revs)
1997 static PyObject *find_deepest(indexObject *self, PyObject *revs)
1992 {
1998 {
1993 const Py_ssize_t revcount = PyList_GET_SIZE(revs);
1999 const Py_ssize_t revcount = PyList_GET_SIZE(revs);
1994 static const Py_ssize_t capacity = 24;
2000 static const Py_ssize_t capacity = 24;
1995 int *depth, *interesting = NULL;
2001 int *depth, *interesting = NULL;
1996 int i, j, v, ninteresting;
2002 int i, j, v, ninteresting;
1997 PyObject *dict = NULL, *keys = NULL;
2003 PyObject *dict = NULL, *keys = NULL;
1998 long *seen = NULL;
2004 long *seen = NULL;
1999 int maxrev = -1;
2005 int maxrev = -1;
2000 long final;
2006 long final;
2001
2007
2002 if (revcount > capacity) {
2008 if (revcount > capacity) {
2003 PyErr_Format(PyExc_OverflowError,
2009 PyErr_Format(PyExc_OverflowError,
2004 "bitset size (%ld) > capacity (%ld)",
2010 "bitset size (%ld) > capacity (%ld)",
2005 (long)revcount, (long)capacity);
2011 (long)revcount, (long)capacity);
2006 return NULL;
2012 return NULL;
2007 }
2013 }
2008
2014
2009 for (i = 0; i < revcount; i++) {
2015 for (i = 0; i < revcount; i++) {
2010 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
2016 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
2011 if (n > maxrev)
2017 if (n > maxrev)
2012 maxrev = n;
2018 maxrev = n;
2013 }
2019 }
2014
2020
2015 depth = calloc(sizeof(*depth), maxrev + 1);
2021 depth = calloc(sizeof(*depth), maxrev + 1);
2016 if (depth == NULL)
2022 if (depth == NULL)
2017 return PyErr_NoMemory();
2023 return PyErr_NoMemory();
2018
2024
2019 seen = calloc(sizeof(*seen), maxrev + 1);
2025 seen = calloc(sizeof(*seen), maxrev + 1);
2020 if (seen == NULL) {
2026 if (seen == NULL) {
2021 PyErr_NoMemory();
2027 PyErr_NoMemory();
2022 goto bail;
2028 goto bail;
2023 }
2029 }
2024
2030
2025 interesting = calloc(sizeof(*interesting), ((size_t)1) << revcount);
2031 interesting = calloc(sizeof(*interesting), ((size_t)1) << revcount);
2026 if (interesting == NULL) {
2032 if (interesting == NULL) {
2027 PyErr_NoMemory();
2033 PyErr_NoMemory();
2028 goto bail;
2034 goto bail;
2029 }
2035 }
2030
2036
2031 if (PyList_Sort(revs) == -1)
2037 if (PyList_Sort(revs) == -1)
2032 goto bail;
2038 goto bail;
2033
2039
2034 for (i = 0; i < revcount; i++) {
2040 for (i = 0; i < revcount; i++) {
2035 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
2041 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
2036 long b = 1l << i;
2042 long b = 1l << i;
2037 depth[n] = 1;
2043 depth[n] = 1;
2038 seen[n] = b;
2044 seen[n] = b;
2039 interesting[b] = 1;
2045 interesting[b] = 1;
2040 }
2046 }
2041
2047
2042 /* invariant: ninteresting is the number of non-zero entries in
2048 /* invariant: ninteresting is the number of non-zero entries in
2043 * interesting. */
2049 * interesting. */
2044 ninteresting = (int)revcount;
2050 ninteresting = (int)revcount;
2045
2051
2046 for (v = maxrev; v >= 0 && ninteresting > 1; v--) {
2052 for (v = maxrev; v >= 0 && ninteresting > 1; v--) {
2047 int dv = depth[v];
2053 int dv = depth[v];
2048 int parents[2];
2054 int parents[2];
2049 long sv;
2055 long sv;
2050
2056
2051 if (dv == 0)
2057 if (dv == 0)
2052 continue;
2058 continue;
2053
2059
2054 sv = seen[v];
2060 sv = seen[v];
2055 if (index_get_parents(self, v, parents, maxrev) < 0)
2061 if (index_get_parents(self, v, parents, maxrev) < 0)
2056 goto bail;
2062 goto bail;
2057
2063
2058 for (i = 0; i < 2; i++) {
2064 for (i = 0; i < 2; i++) {
2059 int p = parents[i];
2065 int p = parents[i];
2060 long sp;
2066 long sp;
2061 int dp;
2067 int dp;
2062
2068
2063 if (p == -1)
2069 if (p == -1)
2064 continue;
2070 continue;
2065
2071
2066 dp = depth[p];
2072 dp = depth[p];
2067 sp = seen[p];
2073 sp = seen[p];
2068 if (dp <= dv) {
2074 if (dp <= dv) {
2069 depth[p] = dv + 1;
2075 depth[p] = dv + 1;
2070 if (sp != sv) {
2076 if (sp != sv) {
2071 interesting[sv] += 1;
2077 interesting[sv] += 1;
2072 seen[p] = sv;
2078 seen[p] = sv;
2073 if (sp) {
2079 if (sp) {
2074 interesting[sp] -= 1;
2080 interesting[sp] -= 1;
2075 if (interesting[sp] == 0)
2081 if (interesting[sp] == 0)
2076 ninteresting -= 1;
2082 ninteresting -= 1;
2077 }
2083 }
2078 }
2084 }
2079 } else if (dv == dp - 1) {
2085 } else if (dv == dp - 1) {
2080 long nsp = sp | sv;
2086 long nsp = sp | sv;
2081 if (nsp == sp)
2087 if (nsp == sp)
2082 continue;
2088 continue;
2083 seen[p] = nsp;
2089 seen[p] = nsp;
2084 interesting[sp] -= 1;
2090 interesting[sp] -= 1;
2085 if (interesting[sp] == 0)
2091 if (interesting[sp] == 0)
2086 ninteresting -= 1;
2092 ninteresting -= 1;
2087 if (interesting[nsp] == 0)
2093 if (interesting[nsp] == 0)
2088 ninteresting += 1;
2094 ninteresting += 1;
2089 interesting[nsp] += 1;
2095 interesting[nsp] += 1;
2090 }
2096 }
2091 }
2097 }
2092 interesting[sv] -= 1;
2098 interesting[sv] -= 1;
2093 if (interesting[sv] == 0)
2099 if (interesting[sv] == 0)
2094 ninteresting -= 1;
2100 ninteresting -= 1;
2095 }
2101 }
2096
2102
2097 final = 0;
2103 final = 0;
2098 j = ninteresting;
2104 j = ninteresting;
2099 for (i = 0; i < (int)(2 << revcount) && j > 0; i++) {
2105 for (i = 0; i < (int)(2 << revcount) && j > 0; i++) {
2100 if (interesting[i] == 0)
2106 if (interesting[i] == 0)
2101 continue;
2107 continue;
2102 final |= i;
2108 final |= i;
2103 j -= 1;
2109 j -= 1;
2104 }
2110 }
2105 if (final == 0) {
2111 if (final == 0) {
2106 keys = PyList_New(0);
2112 keys = PyList_New(0);
2107 goto bail;
2113 goto bail;
2108 }
2114 }
2109
2115
2110 dict = PyDict_New();
2116 dict = PyDict_New();
2111 if (dict == NULL)
2117 if (dict == NULL)
2112 goto bail;
2118 goto bail;
2113
2119
2114 for (i = 0; i < revcount; i++) {
2120 for (i = 0; i < revcount; i++) {
2115 PyObject *key;
2121 PyObject *key;
2116
2122
2117 if ((final & (1 << i)) == 0)
2123 if ((final & (1 << i)) == 0)
2118 continue;
2124 continue;
2119
2125
2120 key = PyList_GET_ITEM(revs, i);
2126 key = PyList_GET_ITEM(revs, i);
2121 Py_INCREF(key);
2127 Py_INCREF(key);
2122 Py_INCREF(Py_None);
2128 Py_INCREF(Py_None);
2123 if (PyDict_SetItem(dict, key, Py_None) == -1) {
2129 if (PyDict_SetItem(dict, key, Py_None) == -1) {
2124 Py_DECREF(key);
2130 Py_DECREF(key);
2125 Py_DECREF(Py_None);
2131 Py_DECREF(Py_None);
2126 goto bail;
2132 goto bail;
2127 }
2133 }
2128 }
2134 }
2129
2135
2130 keys = PyDict_Keys(dict);
2136 keys = PyDict_Keys(dict);
2131
2137
2132 bail:
2138 bail:
2133 free(depth);
2139 free(depth);
2134 free(seen);
2140 free(seen);
2135 free(interesting);
2141 free(interesting);
2136 Py_XDECREF(dict);
2142 Py_XDECREF(dict);
2137
2143
2138 return keys;
2144 return keys;
2139 }
2145 }
2140
2146
2141 /*
2147 /*
2142 * Given a (possibly overlapping) set of revs, return all the
2148 * Given a (possibly overlapping) set of revs, return all the
2143 * common ancestor heads: heads(::args[0] and ::args[1] and ...)
2149 * common ancestor heads: heads(::args[0] and ::args[1] and ...)
2144 */
2150 */
2145 static PyObject *index_commonancestorsheads(indexObject *self, PyObject *args)
2151 static PyObject *index_commonancestorsheads(indexObject *self, PyObject *args)
2146 {
2152 {
2147 PyObject *ret = NULL;
2153 PyObject *ret = NULL;
2148 Py_ssize_t argcount, i, len;
2154 Py_ssize_t argcount, i, len;
2149 bitmask repeat = 0;
2155 bitmask repeat = 0;
2150 int revcount = 0;
2156 int revcount = 0;
2151 int *revs;
2157 int *revs;
2152
2158
2153 argcount = PySequence_Length(args);
2159 argcount = PySequence_Length(args);
2154 revs = PyMem_Malloc(argcount * sizeof(*revs));
2160 revs = PyMem_Malloc(argcount * sizeof(*revs));
2155 if (argcount > 0 && revs == NULL)
2161 if (argcount > 0 && revs == NULL)
2156 return PyErr_NoMemory();
2162 return PyErr_NoMemory();
2157 len = index_length(self);
2163 len = index_length(self);
2158
2164
2159 for (i = 0; i < argcount; i++) {
2165 for (i = 0; i < argcount; i++) {
2160 static const int capacity = 24;
2166 static const int capacity = 24;
2161 PyObject *obj = PySequence_GetItem(args, i);
2167 PyObject *obj = PySequence_GetItem(args, i);
2162 bitmask x;
2168 bitmask x;
2163 long val;
2169 long val;
2164
2170
2165 if (!PyInt_Check(obj)) {
2171 if (!PyInt_Check(obj)) {
2166 PyErr_SetString(PyExc_TypeError,
2172 PyErr_SetString(PyExc_TypeError,
2167 "arguments must all be ints");
2173 "arguments must all be ints");
2168 Py_DECREF(obj);
2174 Py_DECREF(obj);
2169 goto bail;
2175 goto bail;
2170 }
2176 }
2171 val = PyInt_AsLong(obj);
2177 val = PyInt_AsLong(obj);
2172 Py_DECREF(obj);
2178 Py_DECREF(obj);
2173 if (val == -1) {
2179 if (val == -1) {
2174 ret = PyList_New(0);
2180 ret = PyList_New(0);
2175 goto done;
2181 goto done;
2176 }
2182 }
2177 if (val < 0 || val >= len) {
2183 if (val < 0 || val >= len) {
2178 PyErr_SetString(PyExc_IndexError, "index out of range");
2184 PyErr_SetString(PyExc_IndexError, "index out of range");
2179 goto bail;
2185 goto bail;
2180 }
2186 }
2181 /* this cheesy bloom filter lets us avoid some more
2187 /* this cheesy bloom filter lets us avoid some more
2182 * expensive duplicate checks in the common set-is-disjoint
2188 * expensive duplicate checks in the common set-is-disjoint
2183 * case */
2189 * case */
2184 x = 1ull << (val & 0x3f);
2190 x = 1ull << (val & 0x3f);
2185 if (repeat & x) {
2191 if (repeat & x) {
2186 int k;
2192 int k;
2187 for (k = 0; k < revcount; k++) {
2193 for (k = 0; k < revcount; k++) {
2188 if (val == revs[k])
2194 if (val == revs[k])
2189 goto duplicate;
2195 goto duplicate;
2190 }
2196 }
2191 } else
2197 } else
2192 repeat |= x;
2198 repeat |= x;
2193 if (revcount >= capacity) {
2199 if (revcount >= capacity) {
2194 PyErr_Format(PyExc_OverflowError,
2200 PyErr_Format(PyExc_OverflowError,
2195 "bitset size (%d) > capacity (%d)",
2201 "bitset size (%d) > capacity (%d)",
2196 revcount, capacity);
2202 revcount, capacity);
2197 goto bail;
2203 goto bail;
2198 }
2204 }
2199 revs[revcount++] = (int)val;
2205 revs[revcount++] = (int)val;
2200 duplicate:;
2206 duplicate:;
2201 }
2207 }
2202
2208
2203 if (revcount == 0) {
2209 if (revcount == 0) {
2204 ret = PyList_New(0);
2210 ret = PyList_New(0);
2205 goto done;
2211 goto done;
2206 }
2212 }
2207 if (revcount == 1) {
2213 if (revcount == 1) {
2208 PyObject *obj;
2214 PyObject *obj;
2209 ret = PyList_New(1);
2215 ret = PyList_New(1);
2210 if (ret == NULL)
2216 if (ret == NULL)
2211 goto bail;
2217 goto bail;
2212 obj = PyInt_FromLong(revs[0]);
2218 obj = PyInt_FromLong(revs[0]);
2213 if (obj == NULL)
2219 if (obj == NULL)
2214 goto bail;
2220 goto bail;
2215 PyList_SET_ITEM(ret, 0, obj);
2221 PyList_SET_ITEM(ret, 0, obj);
2216 goto done;
2222 goto done;
2217 }
2223 }
2218
2224
2219 ret = find_gca_candidates(self, revs, revcount);
2225 ret = find_gca_candidates(self, revs, revcount);
2220 if (ret == NULL)
2226 if (ret == NULL)
2221 goto bail;
2227 goto bail;
2222
2228
2223 done:
2229 done:
2224 PyMem_Free(revs);
2230 PyMem_Free(revs);
2225 return ret;
2231 return ret;
2226
2232
2227 bail:
2233 bail:
2228 PyMem_Free(revs);
2234 PyMem_Free(revs);
2229 Py_XDECREF(ret);
2235 Py_XDECREF(ret);
2230 return NULL;
2236 return NULL;
2231 }
2237 }
2232
2238
2233 /*
2239 /*
2234 * Given a (possibly overlapping) set of revs, return the greatest
2240 * Given a (possibly overlapping) set of revs, return the greatest
2235 * common ancestors: those with the longest path to the root.
2241 * common ancestors: those with the longest path to the root.
2236 */
2242 */
2237 static PyObject *index_ancestors(indexObject *self, PyObject *args)
2243 static PyObject *index_ancestors(indexObject *self, PyObject *args)
2238 {
2244 {
2239 PyObject *ret;
2245 PyObject *ret;
2240 PyObject *gca = index_commonancestorsheads(self, args);
2246 PyObject *gca = index_commonancestorsheads(self, args);
2241 if (gca == NULL)
2247 if (gca == NULL)
2242 return NULL;
2248 return NULL;
2243
2249
2244 if (PyList_GET_SIZE(gca) <= 1) {
2250 if (PyList_GET_SIZE(gca) <= 1) {
2245 return gca;
2251 return gca;
2246 }
2252 }
2247
2253
2248 ret = find_deepest(self, gca);
2254 ret = find_deepest(self, gca);
2249 Py_DECREF(gca);
2255 Py_DECREF(gca);
2250 return ret;
2256 return ret;
2251 }
2257 }
2252
2258
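A short hedged continuation of the changelog sketch shown earlier; revisions 1 and 2 are assumed to exist in that repository:

print(index.commonancestorsheads(1, 2))   # heads(::1 and ::2)
print(index.ancestors(1, 2))              # the deepest of those heads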
2253 /*
2259 /*
2254 * Invalidate any trie entries introduced by added revs.
2260 * Invalidate any trie entries introduced by added revs.
2255 */
2261 */
2256 static void index_invalidate_added(indexObject *self, Py_ssize_t start)
2262 static void index_invalidate_added(indexObject *self, Py_ssize_t start)
2257 {
2263 {
2258 Py_ssize_t i, len = PyList_GET_SIZE(self->added);
2264 Py_ssize_t i, len = PyList_GET_SIZE(self->added);
2259
2265
2260 for (i = start; i < len; i++) {
2266 for (i = start; i < len; i++) {
2261 PyObject *tuple = PyList_GET_ITEM(self->added, i);
2267 PyObject *tuple = PyList_GET_ITEM(self->added, i);
2262 PyObject *node = PyTuple_GET_ITEM(tuple, 7);
2268 PyObject *node = PyTuple_GET_ITEM(tuple, 7);
2263
2269
2264 nt_delete_node(&self->nt, PyBytes_AS_STRING(node));
2270 nt_delete_node(&self->nt, PyBytes_AS_STRING(node));
2265 }
2271 }
2266
2272
2267 if (start == 0)
2273 if (start == 0)
2268 Py_CLEAR(self->added);
2274 Py_CLEAR(self->added);
2269 }
2275 }
2270
2276
2271 /*
2277 /*
2272 * Delete a numeric range of revs, which must be at the end of the
2278 * Delete a numeric range of revs, which must be at the end of the
2273 * index, but exclude the sentinel nullid entry.
2279 * index, but exclude the sentinel nullid entry.
2274 */
2280 */
2275 static int index_slice_del(indexObject *self, PyObject *item)
2281 static int index_slice_del(indexObject *self, PyObject *item)
2276 {
2282 {
2277 Py_ssize_t start, stop, step, slicelength;
2283 Py_ssize_t start, stop, step, slicelength;
2278 Py_ssize_t length = index_length(self) + 1;
2284 Py_ssize_t length = index_length(self) + 1;
2279 int ret = 0;
2285 int ret = 0;
2280
2286
2281 /* Argument changed from PySliceObject* to PyObject* in Python 3. */
2287 /* Argument changed from PySliceObject* to PyObject* in Python 3. */
2282 #ifdef IS_PY3K
2288 #ifdef IS_PY3K
2283 if (PySlice_GetIndicesEx(item, length, &start, &stop, &step,
2289 if (PySlice_GetIndicesEx(item, length, &start, &stop, &step,
2284 &slicelength) < 0)
2290 &slicelength) < 0)
2285 #else
2291 #else
2286 if (PySlice_GetIndicesEx((PySliceObject *)item, length, &start, &stop,
2292 if (PySlice_GetIndicesEx((PySliceObject *)item, length, &start, &stop,
2287 &step, &slicelength) < 0)
2293 &step, &slicelength) < 0)
2288 #endif
2294 #endif
2289 return -1;
2295 return -1;
2290
2296
2291 if (slicelength <= 0)
2297 if (slicelength <= 0)
2292 return 0;
2298 return 0;
2293
2299
2294 if ((step < 0 && start < stop) || (step > 0 && start > stop))
2300 if ((step < 0 && start < stop) || (step > 0 && start > stop))
2295 stop = start;
2301 stop = start;
2296
2302
2297 if (step < 0) {
2303 if (step < 0) {
2298 stop = start + 1;
2304 stop = start + 1;
2299 start = stop + step * (slicelength - 1) - 1;
2305 start = stop + step * (slicelength - 1) - 1;
2300 step = -step;
2306 step = -step;
2301 }
2307 }
2302
2308
2303 if (step != 1) {
2309 if (step != 1) {
2304 PyErr_SetString(PyExc_ValueError,
2310 PyErr_SetString(PyExc_ValueError,
2305 "revlog index delete requires step size of 1");
2311 "revlog index delete requires step size of 1");
2306 return -1;
2312 return -1;
2307 }
2313 }
2308
2314
2309 if (stop != length - 1) {
2315 if (stop != length - 1) {
2310 PyErr_SetString(PyExc_IndexError,
2316 PyErr_SetString(PyExc_IndexError,
2311 "revlog index deletion indices are invalid");
2317 "revlog index deletion indices are invalid");
2312 return -1;
2318 return -1;
2313 }
2319 }
2314
2320
2315 if (start < self->length) {
2321 if (start < self->length) {
2316 if (self->ntinitialized) {
2322 if (self->ntinitialized) {
2317 Py_ssize_t i;
2323 Py_ssize_t i;
2318
2324
2319 for (i = start + 1; i < self->length; i++) {
2325 for (i = start + 1; i < self->length; i++) {
2320 const char *node = index_node_existing(self, i);
2326 const char *node = index_node_existing(self, i);
2321 if (node == NULL)
2327 if (node == NULL)
2322 return -1;
2328 return -1;
2323
2329
2324 nt_delete_node(&self->nt, node);
2330 nt_delete_node(&self->nt, node);
2325 }
2331 }
2326 if (self->added)
2332 if (self->added)
2327 index_invalidate_added(self, 0);
2333 index_invalidate_added(self, 0);
2328 if (self->ntrev > start)
2334 if (self->ntrev > start)
2329 self->ntrev = (int)start;
2335 self->ntrev = (int)start;
2330 }
2336 }
2331 self->length = start;
2337 self->length = start;
2332 if (start < self->raw_length) {
2338 if (start < self->raw_length) {
2333 if (self->cache) {
2339 if (self->cache) {
2334 Py_ssize_t i;
2340 Py_ssize_t i;
2335 for (i = start; i < self->raw_length; i++)
2341 for (i = start; i < self->raw_length; i++)
2336 Py_CLEAR(self->cache[i]);
2342 Py_CLEAR(self->cache[i]);
2337 }
2343 }
2338 self->raw_length = start;
2344 self->raw_length = start;
2339 }
2345 }
2340 goto done;
2346 goto done;
2341 }
2347 }
2342
2348
2343 if (self->ntinitialized) {
2349 if (self->ntinitialized) {
2344 index_invalidate_added(self, start - self->length);
2350 index_invalidate_added(self, start - self->length);
2345 if (self->ntrev > start)
2351 if (self->ntrev > start)
2346 self->ntrev = (int)start;
2352 self->ntrev = (int)start;
2347 }
2353 }
2348 if (self->added)
2354 if (self->added)
2349 ret = PyList_SetSlice(self->added, start - self->length,
2355 ret = PyList_SetSlice(self->added, start - self->length,
2350 PyList_GET_SIZE(self->added), NULL);
2356 PyList_GET_SIZE(self->added), NULL);
2351 done:
2357 done:
2352 Py_CLEAR(self->headrevs);
2358 Py_CLEAR(self->headrevs);
2353 return ret;
2359 return ret;
2354 }
2360 }
2355
2361
2356 /*
2362 /*
2357 * Supported ops:
2363 * Supported ops:
2358 *
2364 *
2359 * slice deletion
2365 * slice deletion
2360 * string assignment (extend node->rev mapping)
2366 * string assignment (extend node->rev mapping)
2361 * string deletion (shrink node->rev mapping)
2367 * string deletion (shrink node->rev mapping)
2362 */
2368 */
2363 static int index_assign_subscript(indexObject *self, PyObject *item,
2369 static int index_assign_subscript(indexObject *self, PyObject *item,
2364 PyObject *value)
2370 PyObject *value)
2365 {
2371 {
2366 char *node;
2372 char *node;
2367 long rev;
2373 long rev;
2368
2374
2369 if (PySlice_Check(item) && value == NULL)
2375 if (PySlice_Check(item) && value == NULL)
2370 return index_slice_del(self, item);
2376 return index_slice_del(self, item);
2371
2377
2372 if (node_check(item, &node) == -1)
2378 if (node_check(item, &node) == -1)
2373 return -1;
2379 return -1;
2374
2380
2375 if (value == NULL)
2381 if (value == NULL)
2376 return self->ntinitialized ? nt_delete_node(&self->nt, node)
2382 return self->ntinitialized ? nt_delete_node(&self->nt, node)
2377 : 0;
2383 : 0;
2378 rev = PyInt_AsLong(value);
2384 rev = PyInt_AsLong(value);
2379 if (rev > INT_MAX || rev < 0) {
2385 if (rev > INT_MAX || rev < 0) {
2380 if (!PyErr_Occurred())
2386 if (!PyErr_Occurred())
2381 PyErr_SetString(PyExc_ValueError, "rev out of range");
2387 PyErr_SetString(PyExc_ValueError, "rev out of range");
2382 return -1;
2388 return -1;
2383 }
2389 }
2384
2390
2385 if (index_init_nt(self) == -1)
2391 if (index_init_nt(self) == -1)
2386 return -1;
2392 return -1;
2387 return nt_insert(&self->nt, node, (int)rev);
2393 return nt_insert(&self->nt, node, (int)rev);
2388 }
2394 }
2389
2395
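A hedged sketch of the three supported operations listed above; index is assumed to be a parsers.index and node a 20-byte binary node id, for instance from the changelog sketch earlier. The value assigned to a node key must be a non-negative revision number.

index[node] = 0      # string assignment: extend the node->rev mapping
del index[node]      # string deletion: shrink it again
del index[0:-1]      # slice deletion: drop every revision, keeping the null sentinel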
2390 /*
2396 /*
2391 * Find all RevlogNG entries in an index that has inline data. Update
2397 * Find all RevlogNG entries in an index that has inline data. Update
2392 * the optional "offsets" table with those entries.
2398 * the optional "offsets" table with those entries.
2393 */
2399 */
2394 static Py_ssize_t inline_scan(indexObject *self, const char **offsets)
2400 static Py_ssize_t inline_scan(indexObject *self, const char **offsets)
2395 {
2401 {
2396 const char *data = (const char *)self->buf.buf;
2402 const char *data = (const char *)self->buf.buf;
2397 Py_ssize_t pos = 0;
2403 Py_ssize_t pos = 0;
2398 Py_ssize_t end = self->buf.len;
2404 Py_ssize_t end = self->buf.len;
2399 long incr = v1_hdrsize;
2405 long incr = v1_hdrsize;
2400 Py_ssize_t len = 0;
2406 Py_ssize_t len = 0;
2401
2407
2402 while (pos + v1_hdrsize <= end && pos >= 0) {
2408 while (pos + v1_hdrsize <= end && pos >= 0) {
2403 uint32_t comp_len;
2409 uint32_t comp_len;
2404 /* 3rd element of header is length of compressed inline data */
2410 /* 3rd element of header is length of compressed inline data */
2405 comp_len = getbe32(data + pos + 8);
2411 comp_len = getbe32(data + pos + 8);
2406 incr = v1_hdrsize + comp_len;
2412 incr = v1_hdrsize + comp_len;
2407 if (offsets)
2413 if (offsets)
2408 offsets[len] = data + pos;
2414 offsets[len] = data + pos;
2409 len++;
2415 len++;
2410 pos += incr;
2416 pos += incr;
2411 }
2417 }
2412
2418
2413 if (pos != end) {
2419 if (pos != end) {
2414 if (!PyErr_Occurred())
2420 if (!PyErr_Occurred())
2415 PyErr_SetString(PyExc_ValueError, "corrupt index file");
2421 PyErr_SetString(PyExc_ValueError, "corrupt index file");
2416 return -1;
2422 return -1;
2417 }
2423 }
2418
2424
2419 return len;
2425 return len;
2420 }
2426 }
2421
2427
2422 static int index_init(indexObject *self, PyObject *args)
2428 static int index_init(indexObject *self, PyObject *args)
2423 {
2429 {
2424 PyObject *data_obj, *inlined_obj;
2430 PyObject *data_obj, *inlined_obj;
2425 Py_ssize_t size;
2431 Py_ssize_t size;
2426
2432
2427 /* Initialize before argument-checking to avoid index_dealloc() crash.
2433 /* Initialize before argument-checking to avoid index_dealloc() crash.
2428 */
2434 */
2429 self->raw_length = 0;
2435 self->raw_length = 0;
2430 self->added = NULL;
2436 self->added = NULL;
2431 self->cache = NULL;
2437 self->cache = NULL;
2432 self->data = NULL;
2438 self->data = NULL;
2433 memset(&self->buf, 0, sizeof(self->buf));
2439 memset(&self->buf, 0, sizeof(self->buf));
2434 self->headrevs = NULL;
2440 self->headrevs = NULL;
2435 self->filteredrevs = Py_None;
2441 self->filteredrevs = Py_None;
2436 Py_INCREF(Py_None);
2442 Py_INCREF(Py_None);
2437 self->ntinitialized = 0;
2443 self->ntinitialized = 0;
2438 self->offsets = NULL;
2444 self->offsets = NULL;
2439
2445
2440 if (!PyArg_ParseTuple(args, "OO", &data_obj, &inlined_obj))
2446 if (!PyArg_ParseTuple(args, "OO", &data_obj, &inlined_obj))
2441 return -1;
2447 return -1;
2442 if (!PyObject_CheckBuffer(data_obj)) {
2448 if (!PyObject_CheckBuffer(data_obj)) {
2443 PyErr_SetString(PyExc_TypeError,
2449 PyErr_SetString(PyExc_TypeError,
2444 "data does not support buffer interface");
2450 "data does not support buffer interface");
2445 return -1;
2451 return -1;
2446 }
2452 }
2447
2453
2448 if (PyObject_GetBuffer(data_obj, &self->buf, PyBUF_SIMPLE) == -1)
2454 if (PyObject_GetBuffer(data_obj, &self->buf, PyBUF_SIMPLE) == -1)
2449 return -1;
2455 return -1;
2450 size = self->buf.len;
2456 size = self->buf.len;
2451
2457
2452 self->inlined = inlined_obj && PyObject_IsTrue(inlined_obj);
2458 self->inlined = inlined_obj && PyObject_IsTrue(inlined_obj);
2453 self->data = data_obj;
2459 self->data = data_obj;
2454
2460
2455 self->ntlookups = self->ntmisses = 0;
2461 self->ntlookups = self->ntmisses = 0;
2456 self->ntrev = -1;
2462 self->ntrev = -1;
2457 Py_INCREF(self->data);
2463 Py_INCREF(self->data);
2458
2464
2459 if (self->inlined) {
2465 if (self->inlined) {
2460 Py_ssize_t len = inline_scan(self, NULL);
2466 Py_ssize_t len = inline_scan(self, NULL);
2461 if (len == -1)
2467 if (len == -1)
2462 goto bail;
2468 goto bail;
2463 self->raw_length = len;
2469 self->raw_length = len;
2464 self->length = len;
2470 self->length = len;
2465 } else {
2471 } else {
2466 if (size % v1_hdrsize) {
2472 if (size % v1_hdrsize) {
2467 PyErr_SetString(PyExc_ValueError, "corrupt index file");
2473 PyErr_SetString(PyExc_ValueError, "corrupt index file");
2468 goto bail;
2474 goto bail;
2469 }
2475 }
2470 self->raw_length = size / v1_hdrsize;
2476 self->raw_length = size / v1_hdrsize;
2471 self->length = self->raw_length;
2477 self->length = self->raw_length;
2472 }
2478 }
2473
2479
2474 return 0;
2480 return 0;
2475 bail:
2481 bail:
2476 return -1;
2482 return -1;
2477 }
2483 }
2478
2484
2479 static PyObject *index_nodemap(indexObject *self)
2485 static PyObject *index_nodemap(indexObject *self)
2480 {
2486 {
2481 Py_INCREF(self);
2487 Py_INCREF(self);
2482 return (PyObject *)self;
2488 return (PyObject *)self;
2483 }
2489 }
2484
2490
2485 static void _index_clearcaches(indexObject *self)
2491 static void _index_clearcaches(indexObject *self)
2486 {
2492 {
2487 if (self->cache) {
2493 if (self->cache) {
2488 Py_ssize_t i;
2494 Py_ssize_t i;
2489
2495
2490 for (i = 0; i < self->raw_length; i++)
2496 for (i = 0; i < self->raw_length; i++)
2491 Py_CLEAR(self->cache[i]);
2497 Py_CLEAR(self->cache[i]);
2492 free(self->cache);
2498 free(self->cache);
2493 self->cache = NULL;
2499 self->cache = NULL;
2494 }
2500 }
2495 if (self->offsets) {
2501 if (self->offsets) {
2496 PyMem_Free((void *)self->offsets);
2502 PyMem_Free((void *)self->offsets);
2497 self->offsets = NULL;
2503 self->offsets = NULL;
2498 }
2504 }
2499 if (self->ntinitialized) {
2505 if (self->ntinitialized) {
2500 nt_dealloc(&self->nt);
2506 nt_dealloc(&self->nt);
2501 }
2507 }
2502 self->ntinitialized = 0;
2508 self->ntinitialized = 0;
2503 Py_CLEAR(self->headrevs);
2509 Py_CLEAR(self->headrevs);
2504 }
2510 }
2505
2511
2506 static PyObject *index_clearcaches(indexObject *self)
2512 static PyObject *index_clearcaches(indexObject *self)
2507 {
2513 {
2508 _index_clearcaches(self);
2514 _index_clearcaches(self);
2509 self->ntrev = -1;
2515 self->ntrev = -1;
2510 self->ntlookups = self->ntmisses = 0;
2516 self->ntlookups = self->ntmisses = 0;
2511 Py_RETURN_NONE;
2517 Py_RETURN_NONE;
2512 }
2518 }
2513
2519
2514 static void index_dealloc(indexObject *self)
2520 static void index_dealloc(indexObject *self)
2515 {
2521 {
2516 _index_clearcaches(self);
2522 _index_clearcaches(self);
2517 Py_XDECREF(self->filteredrevs);
2523 Py_XDECREF(self->filteredrevs);
2518 if (self->buf.buf) {
2524 if (self->buf.buf) {
2519 PyBuffer_Release(&self->buf);
2525 PyBuffer_Release(&self->buf);
2520 memset(&self->buf, 0, sizeof(self->buf));
2526 memset(&self->buf, 0, sizeof(self->buf));
2521 }
2527 }
2522 Py_XDECREF(self->data);
2528 Py_XDECREF(self->data);
2523 Py_XDECREF(self->added);
2529 Py_XDECREF(self->added);
2524 PyObject_Del(self);
2530 PyObject_Del(self);
2525 }
2531 }
2526
2532
2527 static PySequenceMethods index_sequence_methods = {
2533 static PySequenceMethods index_sequence_methods = {
2528 (lenfunc)index_length, /* sq_length */
2534 (lenfunc)index_length, /* sq_length */
2529 0, /* sq_concat */
2535 0, /* sq_concat */
2530 0, /* sq_repeat */
2536 0, /* sq_repeat */
2531 (ssizeargfunc)index_get, /* sq_item */
2537 (ssizeargfunc)index_get, /* sq_item */
2532 0, /* sq_slice */
2538 0, /* sq_slice */
2533 0, /* sq_ass_item */
2539 0, /* sq_ass_item */
2534 0, /* sq_ass_slice */
2540 0, /* sq_ass_slice */
2535 (objobjproc)index_contains, /* sq_contains */
2541 (objobjproc)index_contains, /* sq_contains */
2536 };
2542 };
2537
2543
2538 static PyMappingMethods index_mapping_methods = {
2544 static PyMappingMethods index_mapping_methods = {
2539 (lenfunc)index_length, /* mp_length */
2545 (lenfunc)index_length, /* mp_length */
2540 (binaryfunc)index_getitem, /* mp_subscript */
2546 (binaryfunc)index_getitem, /* mp_subscript */
2541 (objobjargproc)index_assign_subscript, /* mp_ass_subscript */
2547 (objobjargproc)index_assign_subscript, /* mp_ass_subscript */
2542 };
2548 };
2543
2549
2544 static PyMethodDef index_methods[] = {
2550 static PyMethodDef index_methods[] = {
2545 {"ancestors", (PyCFunction)index_ancestors, METH_VARARGS,
2551 {"ancestors", (PyCFunction)index_ancestors, METH_VARARGS,
2546 "return the gca set of the given revs"},
2552 "return the gca set of the given revs"},
2547 {"commonancestorsheads", (PyCFunction)index_commonancestorsheads,
2553 {"commonancestorsheads", (PyCFunction)index_commonancestorsheads,
2548 METH_VARARGS,
2554 METH_VARARGS,
2549 "return the heads of the common ancestors of the given revs"},
2555 "return the heads of the common ancestors of the given revs"},
2550 {"clearcaches", (PyCFunction)index_clearcaches, METH_NOARGS,
2556 {"clearcaches", (PyCFunction)index_clearcaches, METH_NOARGS,
2551 "clear the index caches"},
2557 "clear the index caches"},
2552 {"get", (PyCFunction)index_m_get, METH_VARARGS, "get an index entry"},
2558 {"get", (PyCFunction)index_m_get, METH_VARARGS, "get an index entry"},
2553 {"computephasesmapsets", (PyCFunction)compute_phases_map_sets, METH_VARARGS,
2559 {"computephasesmapsets", (PyCFunction)compute_phases_map_sets, METH_VARARGS,
2554 "compute phases"},
2560 "compute phases"},
2555 {"reachableroots2", (PyCFunction)reachableroots2, METH_VARARGS,
2561 {"reachableroots2", (PyCFunction)reachableroots2, METH_VARARGS,
2556 "reachableroots"},
2562 "reachableroots"},
2557 {"headrevs", (PyCFunction)index_headrevs, METH_VARARGS,
2563 {"headrevs", (PyCFunction)index_headrevs, METH_VARARGS,
2558 "get head revisions"}, /* Can do filtering since 3.2 */
2564 "get head revisions"}, /* Can do filtering since 3.2 */
2559 {"headrevsfiltered", (PyCFunction)index_headrevs, METH_VARARGS,
2565 {"headrevsfiltered", (PyCFunction)index_headrevs, METH_VARARGS,
2560 "get filtered head revisions"}, /* Can always do filtering */
2566 "get filtered head revisions"}, /* Can always do filtering */
2561 {"deltachain", (PyCFunction)index_deltachain, METH_VARARGS,
2567 {"deltachain", (PyCFunction)index_deltachain, METH_VARARGS,
2562 "determine revisions with deltas to reconstruct fulltext"},
2568 "determine revisions with deltas to reconstruct fulltext"},
2563 {"slicechunktodensity", (PyCFunction)index_slicechunktodensity,
2569 {"slicechunktodensity", (PyCFunction)index_slicechunktodensity,
2564 METH_VARARGS, "slice a chunk of revisions into subchunks matching a density threshold"},
2570 METH_VARARGS, "slice a chunk of revisions into subchunks matching a density threshold"},
2565 {"append", (PyCFunction)index_append, METH_O, "append an index entry"},
2571 {"append", (PyCFunction)index_append, METH_O, "append an index entry"},
2566 {"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS,
2572 {"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS,
2567 "match a potentially ambiguous node ID"},
2573 "match a potentially ambiguous node ID"},
2568 {"shortest", (PyCFunction)index_shortest, METH_VARARGS,
2574 {"shortest", (PyCFunction)index_shortest, METH_VARARGS,
2569 "find length of shortest hex nodeid of a binary ID"},
2575 "find length of shortest hex nodeid of a binary ID"},
2570 {"stats", (PyCFunction)index_stats, METH_NOARGS, "stats for the index"},
2576 {"stats", (PyCFunction)index_stats, METH_NOARGS, "stats for the index"},
2571 {NULL} /* Sentinel */
2577 {NULL} /* Sentinel */
2572 };
2578 };
2573
2579
2574 static PyGetSetDef index_getset[] = {
2580 static PyGetSetDef index_getset[] = {
2575 {"nodemap", (getter)index_nodemap, NULL, "nodemap", NULL},
2581 {"nodemap", (getter)index_nodemap, NULL, "nodemap", NULL},
2576 {NULL} /* Sentinel */
2582 {NULL} /* Sentinel */
2577 };
2583 };
2578
2584
2579 static PyTypeObject indexType = {
2585 static PyTypeObject indexType = {
2580 PyVarObject_HEAD_INIT(NULL, 0) /* header */
2586 PyVarObject_HEAD_INIT(NULL, 0) /* header */
2581 "parsers.index", /* tp_name */
2587 "parsers.index", /* tp_name */
2582 sizeof(indexObject), /* tp_basicsize */
2588 sizeof(indexObject), /* tp_basicsize */
2583 0, /* tp_itemsize */
2589 0, /* tp_itemsize */
2584 (destructor)index_dealloc, /* tp_dealloc */
2590 (destructor)index_dealloc, /* tp_dealloc */
2585 0, /* tp_print */
2591 0, /* tp_print */
2586 0, /* tp_getattr */
2592 0, /* tp_getattr */
2587 0, /* tp_setattr */
2593 0, /* tp_setattr */
2588 0, /* tp_compare */
2594 0, /* tp_compare */
2589 0, /* tp_repr */
2595 0, /* tp_repr */
2590 0, /* tp_as_number */
2596 0, /* tp_as_number */
2591 &index_sequence_methods, /* tp_as_sequence */
2597 &index_sequence_methods, /* tp_as_sequence */
2592 &index_mapping_methods, /* tp_as_mapping */
2598 &index_mapping_methods, /* tp_as_mapping */
2593 0, /* tp_hash */
2599 0, /* tp_hash */
2594 0, /* tp_call */
2600 0, /* tp_call */
2595 0, /* tp_str */
2601 0, /* tp_str */
2596 0, /* tp_getattro */
2602 0, /* tp_getattro */
2597 0, /* tp_setattro */
2603 0, /* tp_setattro */
2598 0, /* tp_as_buffer */
2604 0, /* tp_as_buffer */
2599 Py_TPFLAGS_DEFAULT, /* tp_flags */
2605 Py_TPFLAGS_DEFAULT, /* tp_flags */
2600 "revlog index", /* tp_doc */
2606 "revlog index", /* tp_doc */
2601 0, /* tp_traverse */
2607 0, /* tp_traverse */
2602 0, /* tp_clear */
2608 0, /* tp_clear */
2603 0, /* tp_richcompare */
2609 0, /* tp_richcompare */
2604 0, /* tp_weaklistoffset */
2610 0, /* tp_weaklistoffset */
2605 0, /* tp_iter */
2611 0, /* tp_iter */
2606 0, /* tp_iternext */
2612 0, /* tp_iternext */
2607 index_methods, /* tp_methods */
2613 index_methods, /* tp_methods */
2608 0, /* tp_members */
2614 0, /* tp_members */
2609 index_getset, /* tp_getset */
2615 index_getset, /* tp_getset */
2610 0, /* tp_base */
2616 0, /* tp_base */
2611 0, /* tp_dict */
2617 0, /* tp_dict */
2612 0, /* tp_descr_get */
2618 0, /* tp_descr_get */
2613 0, /* tp_descr_set */
2619 0, /* tp_descr_set */
2614 0, /* tp_dictoffset */
2620 0, /* tp_dictoffset */
2615 (initproc)index_init, /* tp_init */
2621 (initproc)index_init, /* tp_init */
2616 0, /* tp_alloc */
2622 0, /* tp_alloc */
2617 };
2623 };
2618
2624
2619 /*
2625 /*
2620 * returns a tuple of the form (index, cache) with elements as
2626 * returns a tuple of the form (index, cache) with elements as
2621 * follows:
2627 * follows:
2622 *
2628 *
2623 * index: an index object that lazily parses RevlogNG records
2629 * index: an index object that lazily parses RevlogNG records
2624 * cache: if data is inlined, a tuple (0, index_file_content), else None
2630 * cache: if data is inlined, a tuple (0, index_file_content), else None
2625 * index_file_content could be a string, or a buffer
2631 * index_file_content could be a string, or a buffer
2626 *
2632 *
2627 * added complications are for backwards compatibility
2633 * added complications are for backwards compatibility
2628 */
2634 */
2629 PyObject *parse_index2(PyObject *self, PyObject *args)
2635 PyObject *parse_index2(PyObject *self, PyObject *args)
2630 {
2636 {
2631 PyObject *tuple = NULL, *cache = NULL;
2637 PyObject *tuple = NULL, *cache = NULL;
2632 indexObject *idx;
2638 indexObject *idx;
2633 int ret;
2639 int ret;
2634
2640
2635 idx = PyObject_New(indexObject, &indexType);
2641 idx = PyObject_New(indexObject, &indexType);
2636 if (idx == NULL)
2642 if (idx == NULL)
2637 goto bail;
2643 goto bail;
2638
2644
2639 ret = index_init(idx, args);
2645 ret = index_init(idx, args);
2640 if (ret == -1)
2646 if (ret == -1)
2641 goto bail;
2647 goto bail;
2642
2648
2643 if (idx->inlined) {
2649 if (idx->inlined) {
2644 cache = Py_BuildValue("iO", 0, idx->data);
2650 cache = Py_BuildValue("iO", 0, idx->data);
2645 if (cache == NULL)
2651 if (cache == NULL)
2646 goto bail;
2652 goto bail;
2647 } else {
2653 } else {
2648 cache = Py_None;
2654 cache = Py_None;
2649 Py_INCREF(cache);
2655 Py_INCREF(cache);
2650 }
2656 }
2651
2657
2652 tuple = Py_BuildValue("NN", idx, cache);
2658 tuple = Py_BuildValue("NN", idx, cache);
2653 if (!tuple)
2659 if (!tuple)
2654 goto bail;
2660 goto bail;
2655 return tuple;
2661 return tuple;
2656
2662
2657 bail:
2663 bail:
2658 Py_XDECREF(idx);
2664 Py_XDECREF(idx);
2659 Py_XDECREF(cache);
2665 Py_XDECREF(cache);
2660 Py_XDECREF(tuple);
2666 Py_XDECREF(tuple);
2661 return NULL;
2667 return NULL;
2662 }
2668 }
2663
2669
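A hedged sketch of the contract described above, built around a single hand-packed RevlogNG entry (64 bytes: 6-byte offset plus 2-byte flags, compressed and uncompressed lengths, delta base, link rev, two parents, and a 32-byte node field of which 20 bytes are used). The field values of the entry are made up for the example.

import struct
from mercurial.cext import parsers   # the module this file implements

node = b'\x12' * 20
entry = struct.pack('>Qiiiiii20s12x', 0, 0, 0, 0, 0, -1, -1, node)
index, cache = parsers.parse_index2(entry, False)
print(len(index))                  # 1: one revision, nullid not counted
print(cache)                       # None: the data was not inlined
print(index[0][7] == node)         # the parsed entry carries the node back
print(index.get(node))             # 0: node->rev lookup through the nodetree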
2664 #ifdef WITH_RUST
2670 #ifdef WITH_RUST
2665
2671
2666 /* rustlazyancestors: iteration over ancestors implemented in Rust
2672 /* rustlazyancestors: iteration over ancestors implemented in Rust
2667 *
2673 *
2668 * This class holds a reference to an index and to the Rust iterator.
2674 * This class holds a reference to an index and to the Rust iterator.
2669 */
2675 */
2670 typedef struct rustlazyancestorsObjectStruct rustlazyancestorsObject;
2676 typedef struct rustlazyancestorsObjectStruct rustlazyancestorsObject;
2671
2677
2672 struct rustlazyancestorsObjectStruct {
2678 struct rustlazyancestorsObjectStruct {
2673 PyObject_HEAD
2679 PyObject_HEAD
2674 /* Type-specific fields go here. */
2680 /* Type-specific fields go here. */
2675 indexObject *index; /* Ref kept to avoid GC'ing the index */
2681 indexObject *index; /* Ref kept to avoid GC'ing the index */
2676 void *iter; /* Rust iterator */
2682 void *iter; /* Rust iterator */
2677 };
2683 };
2678
2684
2679 /* FFI exposed from Rust code */
2685 /* FFI exposed from Rust code */
2680 rustlazyancestorsObject *
2686 rustlazyancestorsObject *
2681 rustlazyancestors_init(indexObject *index,
2687 rustlazyancestors_init(indexObject *index,
2682 /* to pass index_get_parents() */
2688 /* to pass index_get_parents() */
2683 int (*)(indexObject *, Py_ssize_t, int *, int),
2689 int (*)(indexObject *, Py_ssize_t, int *, int),
2684 /* initrevs vector */
2690 /* initrevs vector */
2685 Py_ssize_t initrevslen, long *initrevs, long stoprev,
2691 Py_ssize_t initrevslen, long *initrevs, long stoprev,
2686 int inclusive);
2692 int inclusive);
2687 void rustlazyancestors_drop(rustlazyancestorsObject *self);
2693 void rustlazyancestors_drop(rustlazyancestorsObject *self);
2688 int rustlazyancestors_next(rustlazyancestorsObject *self);
2694 int rustlazyancestors_next(rustlazyancestorsObject *self);
2689 int rustlazyancestors_contains(rustlazyancestorsObject *self, long rev);
2695 int rustlazyancestors_contains(rustlazyancestorsObject *self, long rev);
2690
2696
2697 static int index_get_parents_checked(indexObject *self, Py_ssize_t rev, int *ps,
2698 int maxrev)
2699 {
2700 if (rev < 0 || rev >= index_length(self)) {
2701 PyErr_SetString(PyExc_ValueError, "rev out of range");
2702 return -1;
2703 }
2704 return index_get_parents(self, rev, ps, maxrev);
2705 }
2706
2691 /* CPython instance methods */
2707 /* CPython instance methods */
2692 static int rustla_init(rustlazyancestorsObject *self, PyObject *args)
2708 static int rustla_init(rustlazyancestorsObject *self, PyObject *args)
2693 {
2709 {
2694 PyObject *initrevsarg = NULL;
2710 PyObject *initrevsarg = NULL;
2695 PyObject *inclusivearg = NULL;
2711 PyObject *inclusivearg = NULL;
2696 long stoprev = 0;
2712 long stoprev = 0;
2697 long *initrevs = NULL;
2713 long *initrevs = NULL;
2698 int inclusive = 0;
2714 int inclusive = 0;
2699 Py_ssize_t i;
2715 Py_ssize_t i;
2700
2716
2701 indexObject *index;
2717 indexObject *index;
2702 if (!PyArg_ParseTuple(args, "O!O!lO!", &indexType, &index, &PyList_Type,
2718 if (!PyArg_ParseTuple(args, "O!O!lO!", &indexType, &index, &PyList_Type,
2703 &initrevsarg, &stoprev, &PyBool_Type,
2719 &initrevsarg, &stoprev, &PyBool_Type,
2704 &inclusivearg))
2720 &inclusivearg))
2705 return -1;
2721 return -1;
2706
2722
2707 Py_INCREF(index);
2723 Py_INCREF(index);
2708 self->index = index;
2724 self->index = index;
2709
2725
2710 if (inclusivearg == Py_True)
2726 if (inclusivearg == Py_True)
2711 inclusive = 1;
2727 inclusive = 1;
2712
2728
2713 Py_ssize_t linit = PyList_GET_SIZE(initrevsarg);
2729 Py_ssize_t linit = PyList_GET_SIZE(initrevsarg);
2714
2730
2715 initrevs = (long *)calloc(linit, sizeof(long));
2731 initrevs = (long *)calloc(linit, sizeof(long));
2716
2732
2717 if (initrevs == NULL) {
2733 if (initrevs == NULL) {
2718 PyErr_NoMemory();
2734 PyErr_NoMemory();
2719 goto bail;
2735 goto bail;
2720 }
2736 }
2721
2737
2722 for (i = 0; i < linit; i++) {
2738 for (i = 0; i < linit; i++) {
2723 initrevs[i] = PyInt_AsLong(PyList_GET_ITEM(initrevsarg, i));
2739 initrevs[i] = PyInt_AsLong(PyList_GET_ITEM(initrevsarg, i));
2724 }
2740 }
2725 if (PyErr_Occurred())
2741 if (PyErr_Occurred())
2726 goto bail;
2742 goto bail;
2727
2743
2728 self->iter = rustlazyancestors_init(index, index_get_parents, linit,
2744 self->iter = rustlazyancestors_init(index, index_get_parents_checked, linit,
2729 initrevs, stoprev, inclusive);
2745 initrevs, stoprev, inclusive);
2730 if (self->iter == NULL) {
2746 if (self->iter == NULL) {
2731 /* if this is because of GraphError::ParentOutOfRange
2747 /* if this is because of GraphError::ParentOutOfRange
2732 * index_get_parents() has already set the proper ValueError */
2748 * index_get_parents_checked() has already set the proper
2749 * ValueError */
2733 goto bail;
2750 goto bail;
2734 }
2751 }
2735
2752
2736 free(initrevs);
2753 free(initrevs);
2737 return 0;
2754 return 0;
2738
2755
2739 bail:
2756 bail:
2740 free(initrevs);
2757 free(initrevs);
2741 return -1;
2758 return -1;
2742 };
2759 };
2743
2760
2744 static void rustla_dealloc(rustlazyancestorsObject *self)
2761 static void rustla_dealloc(rustlazyancestorsObject *self)
2745 {
2762 {
2746 Py_XDECREF(self->index);
2763 Py_XDECREF(self->index);
2747 if (self->iter != NULL) { /* can happen if rustla_init failed */
2764 if (self->iter != NULL) { /* can happen if rustla_init failed */
2748 rustlazyancestors_drop(self->iter);
2765 rustlazyancestors_drop(self->iter);
2749 }
2766 }
2750 PyObject_Del(self);
2767 PyObject_Del(self);
2751 }
2768 }
2752
2769
2753 static PyObject *rustla_next(rustlazyancestorsObject *self)
2770 static PyObject *rustla_next(rustlazyancestorsObject *self)
2754 {
2771 {
2755 int res = rustlazyancestors_next(self->iter);
2772 int res = rustlazyancestors_next(self->iter);
2756 if (res == -1) {
2773 if (res == -1) {
2757 /* Setting an explicit exception seems unnecessary
2774 /* Setting an explicit exception seems unnecessary
2758 * as examples from Python source code (Objects/rangeobject.c
2775 * as examples from Python source code (Objects/rangeobject.c
2759 * and Modules/_io/stringio.c) seem to demonstrate.
2776 * and Modules/_io/stringio.c) seem to demonstrate.
2760 */
2777 */
2761 return NULL;
2778 return NULL;
2762 }
2779 }
2763 return PyInt_FromLong(res);
2780 return PyInt_FromLong(res);
2764 }
2781 }
2765
2782
2766 static int rustla_contains(rustlazyancestorsObject *self, PyObject *rev)
2783 static int rustla_contains(rustlazyancestorsObject *self, PyObject *rev)
2767 {
2784 {
2768 long lrev;
2785 long lrev;
2769 if (!pylong_to_long(rev, &lrev)) {
2786 if (!pylong_to_long(rev, &lrev)) {
2770 PyErr_Clear();
2787 PyErr_Clear();
2771 return 0;
2788 return 0;
2772 }
2789 }
2773 return rustlazyancestors_contains(self->iter, lrev);
2790 return rustlazyancestors_contains(self->iter, lrev);
2774 }
2791 }
2775
2792
2776 static PySequenceMethods rustla_sequence_methods = {
2793 static PySequenceMethods rustla_sequence_methods = {
2777 0, /* sq_length */
2794 0, /* sq_length */
2778 0, /* sq_concat */
2795 0, /* sq_concat */
2779 0, /* sq_repeat */
2796 0, /* sq_repeat */
2780 0, /* sq_item */
2797 0, /* sq_item */
2781 0, /* sq_slice */
2798 0, /* sq_slice */
2782 0, /* sq_ass_item */
2799 0, /* sq_ass_item */
2783 0, /* sq_ass_slice */
2800 0, /* sq_ass_slice */
2784 (objobjproc)rustla_contains, /* sq_contains */
2801 (objobjproc)rustla_contains, /* sq_contains */
2785 };
2802 };
2786
2803
2787 static PyTypeObject rustlazyancestorsType = {
2804 static PyTypeObject rustlazyancestorsType = {
2788 PyVarObject_HEAD_INIT(NULL, 0) /* header */
2805 PyVarObject_HEAD_INIT(NULL, 0) /* header */
2789 "parsers.rustlazyancestors", /* tp_name */
2806 "parsers.rustlazyancestors", /* tp_name */
2790 sizeof(rustlazyancestorsObject), /* tp_basicsize */
2807 sizeof(rustlazyancestorsObject), /* tp_basicsize */
2791 0, /* tp_itemsize */
2808 0, /* tp_itemsize */
2792 (destructor)rustla_dealloc, /* tp_dealloc */
2809 (destructor)rustla_dealloc, /* tp_dealloc */
2793 0, /* tp_print */
2810 0, /* tp_print */
2794 0, /* tp_getattr */
2811 0, /* tp_getattr */
2795 0, /* tp_setattr */
2812 0, /* tp_setattr */
2796 0, /* tp_compare */
2813 0, /* tp_compare */
2797 0, /* tp_repr */
2814 0, /* tp_repr */
2798 0, /* tp_as_number */
2815 0, /* tp_as_number */
2799 &rustla_sequence_methods, /* tp_as_sequence */
2816 &rustla_sequence_methods, /* tp_as_sequence */
2800 0, /* tp_as_mapping */
2817 0, /* tp_as_mapping */
2801 0, /* tp_hash */
2818 0, /* tp_hash */
2802 0, /* tp_call */
2819 0, /* tp_call */
2803 0, /* tp_str */
2820 0, /* tp_str */
2804 0, /* tp_getattro */
2821 0, /* tp_getattro */
2805 0, /* tp_setattro */
2822 0, /* tp_setattro */
2806 0, /* tp_as_buffer */
2823 0, /* tp_as_buffer */
2807 Py_TPFLAGS_DEFAULT, /* tp_flags */
2824 Py_TPFLAGS_DEFAULT, /* tp_flags */
2808 "Iterator over ancestors, implemented in Rust", /* tp_doc */
2825 "Iterator over ancestors, implemented in Rust", /* tp_doc */
2809 0, /* tp_traverse */
2826 0, /* tp_traverse */
2810 0, /* tp_clear */
2827 0, /* tp_clear */
2811 0, /* tp_richcompare */
2828 0, /* tp_richcompare */
2812 0, /* tp_weaklistoffset */
2829 0, /* tp_weaklistoffset */
2813 0, /* tp_iter */
2830 0, /* tp_iter */
2814 (iternextfunc)rustla_next, /* tp_iternext */
2831 (iternextfunc)rustla_next, /* tp_iternext */
2815 0, /* tp_methods */
2832 0, /* tp_methods */
2816 0, /* tp_members */
2833 0, /* tp_members */
2817 0, /* tp_getset */
2834 0, /* tp_getset */
2818 0, /* tp_base */
2835 0, /* tp_base */
2819 0, /* tp_dict */
2836 0, /* tp_dict */
2820 0, /* tp_descr_get */
2837 0, /* tp_descr_get */
2821 0, /* tp_descr_set */
2838 0, /* tp_descr_set */
2822 0, /* tp_dictoffset */
2839 0, /* tp_dictoffset */
2823 (initproc)rustla_init, /* tp_init */
2840 (initproc)rustla_init, /* tp_init */
2824 0, /* tp_alloc */
2841 0, /* tp_alloc */
2825 };
2842 };
2826 #endif /* WITH_RUST */
2843 #endif /* WITH_RUST */
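A hedged usage sketch for builds that define WITH_RUST; the hasattr probe guards against builds without the Rust extension, index is a parsers.index as in the earlier sketches, and revisions 3 and 5 are assumed to exist in it:

from mercurial.cext import parsers

if hasattr(parsers, 'rustlazyancestors'):
    # arguments: index, list of starting revs, stoprev, inclusive flag
    it = parsers.rustlazyancestors(index, [3, 5], 0, True)
    print(3 in it)   # membership goes through rustlazyancestors_contains()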
2827
2844
2828 void revlog_module_init(PyObject *mod)
2845 void revlog_module_init(PyObject *mod)
2829 {
2846 {
2830 indexType.tp_new = PyType_GenericNew;
2847 indexType.tp_new = PyType_GenericNew;
2831 if (PyType_Ready(&indexType) < 0)
2848 if (PyType_Ready(&indexType) < 0)
2832 return;
2849 return;
2833 Py_INCREF(&indexType);
2850 Py_INCREF(&indexType);
2834 PyModule_AddObject(mod, "index", (PyObject *)&indexType);
2851 PyModule_AddObject(mod, "index", (PyObject *)&indexType);
2835
2852
2836 nodetreeType.tp_new = PyType_GenericNew;
2853 nodetreeType.tp_new = PyType_GenericNew;
2837 if (PyType_Ready(&nodetreeType) < 0)
2854 if (PyType_Ready(&nodetreeType) < 0)
2838 return;
2855 return;
2839 Py_INCREF(&nodetreeType);
2856 Py_INCREF(&nodetreeType);
2840 PyModule_AddObject(mod, "nodetree", (PyObject *)&nodetreeType);
2857 PyModule_AddObject(mod, "nodetree", (PyObject *)&nodetreeType);
2841
2858
2842 if (!nullentry) {
2859 if (!nullentry) {
2843 nullentry = Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0,
2860 nullentry = Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0,
2844 0, -1, -1, -1, -1, nullid, 20);
2861 0, -1, -1, -1, -1, nullid, 20);
2845 }
2862 }
2846 if (nullentry)
2863 if (nullentry)
2847 PyObject_GC_UnTrack(nullentry);
2864 PyObject_GC_UnTrack(nullentry);
2848
2865
2849 #ifdef WITH_RUST
2866 #ifdef WITH_RUST
2850 rustlazyancestorsType.tp_new = PyType_GenericNew;
2867 rustlazyancestorsType.tp_new = PyType_GenericNew;
2851 if (PyType_Ready(&rustlazyancestorsType) < 0)
2868 if (PyType_Ready(&rustlazyancestorsType) < 0)
2852 return;
2869 return;
2853 Py_INCREF(&rustlazyancestorsType);
2870 Py_INCREF(&rustlazyancestorsType);
2854 PyModule_AddObject(mod, "rustlazyancestors",
2871 PyModule_AddObject(mod, "rustlazyancestors",
2855 (PyObject *)&rustlazyancestorsType);
2872 (PyObject *)&rustlazyancestorsType);
2856 #endif
2873 #endif
2857 }
2874 }
@@ -1,592 +1,600 b''
1 # commandserver.py - communicate with Mercurial's API over a pipe
1 # commandserver.py - communicate with Mercurial's API over a pipe
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import gc
11 import gc
12 import os
12 import os
13 import random
13 import random
14 import signal
14 import signal
15 import socket
15 import socket
16 import struct
16 import struct
17 import traceback
17 import traceback
18
18
19 try:
19 try:
20 import selectors
20 import selectors
21 selectors.BaseSelector
21 selectors.BaseSelector
22 except ImportError:
22 except ImportError:
23 from .thirdparty import selectors2 as selectors
23 from .thirdparty import selectors2 as selectors
24
24
25 from .i18n import _
25 from .i18n import _
26 from . import (
26 from . import (
27 encoding,
27 encoding,
28 error,
28 error,
29 pycompat,
29 pycompat,
30 util,
30 util,
31 )
31 )
32 from .utils import (
32 from .utils import (
33 cborutil,
33 cborutil,
34 procutil,
34 procutil,
35 )
35 )
36
36
37 logfile = None
37 logfile = None
38
38
39 def log(*args):
39 def log(*args):
40 if not logfile:
40 if not logfile:
41 return
41 return
42
42
43 for a in args:
43 for a in args:
44 logfile.write(str(a))
44 logfile.write(str(a))
45
45
46 logfile.flush()
46 logfile.flush()
47
47
48 class channeledoutput(object):
48 class channeledoutput(object):
49 """
49 """
50 Write data to out in the following format:
50 Write data to out in the following format:
51
51
52 channel identifier (1 byte), data length (unsigned int),
52 channel identifier (1 byte), data length (unsigned int),
53 data
53 data
54 """
54 """
55 def __init__(self, out, channel):
55 def __init__(self, out, channel):
56 self.out = out
56 self.out = out
57 self.channel = channel
57 self.channel = channel
58
58
59 @property
59 @property
60 def name(self):
60 def name(self):
61 return '<%c-channel>' % self.channel
61 return '<%c-channel>' % self.channel
62
62
63 def write(self, data):
63 def write(self, data):
64 if not data:
64 if not data:
65 return
65 return
66 # single write() to guarantee the same atomicity as the underlying file
66 # single write() to guarantee the same atomicity as the underlying file
67 self.out.write(struct.pack('>cI', self.channel, len(data)) + data)
67 self.out.write(struct.pack('>cI', self.channel, len(data)) + data)
68 self.out.flush()
68 self.out.flush()
69
69
70 def __getattr__(self, attr):
70 def __getattr__(self, attr):
71 if attr in (r'isatty', r'fileno', r'tell', r'seek'):
71 if attr in (r'isatty', r'fileno', r'tell', r'seek'):
72 raise AttributeError(attr)
72 raise AttributeError(attr)
73 return getattr(self.out, attr)
73 return getattr(self.out, attr)
74
74
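As a rough client-side illustration of the framing used by channeledoutput above -- one frame is a 1-byte channel identifier, a big-endian unsigned 32-bit length, then the payload -- here is a minimal decoding sketch (not part of Mercurial; `pipe` is an assumed binary stream carrying the server's output):

import struct

def read_frame(pipe):
    # one frame: 1-byte channel identifier + big-endian uint32 length + payload
    header = pipe.read(5)
    if len(header) < 5:
        return None, b''              # stream closed
    channel, length = struct.unpack('>cI', header)
    return channel, pipe.read(length)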
75 class channeledmessage(object):
75 class channeledmessage(object):
76 """
76 """
77 Write encoded message and metadata to out in the following format:
77 Write encoded message and metadata to out in the following format:
78
78
79 data length (unsigned int),
79 data length (unsigned int),
80 encoded message and metadata, as a flat key-value dict.
80 encoded message and metadata, as a flat key-value dict.
81
81
82 Each message should have a 'type' attribute. Messages of unknown type
82 Each message should have a 'type' attribute. Messages of unknown type
83 should be ignored.
83 should be ignored.
84 """
84 """
85
85
86 # teach ui that write() can take **opts
86 # teach ui that write() can take **opts
87 structured = True
87 structured = True
88
88
89 def __init__(self, out, channel, encodename, encodefn):
89 def __init__(self, out, channel, encodename, encodefn):
90 self._cout = channeledoutput(out, channel)
90 self._cout = channeledoutput(out, channel)
91 self.encoding = encodename
91 self.encoding = encodename
92 self._encodefn = encodefn
92 self._encodefn = encodefn
93
93
94 def write(self, data, **opts):
94 def write(self, data, **opts):
95 opts = pycompat.byteskwargs(opts)
95 opts = pycompat.byteskwargs(opts)
96 if data is not None:
96 if data is not None:
97 opts[b'data'] = data
97 opts[b'data'] = data
98 self._cout.write(self._encodefn(opts))
98 self._cout.write(self._encodefn(opts))
99
99
100 def __getattr__(self, attr):
100 def __getattr__(self, attr):
101 return getattr(self._cout, attr)
101 return getattr(self._cout, attr)
102
102
103 class channeledinput(object):
103 class channeledinput(object):
104 """
104 """
105 Read data from in_.
105 Read data from in_.
106
106
107 Requests for input are written to out in the following format:
107 Requests for input are written to out in the following format:
108 channel identifier - 'I' for plain input, 'L' for line-based input (1 byte)
108 channel identifier - 'I' for plain input, 'L' for line-based input (1 byte)
109 how many bytes to send at most (unsigned int),
109 how many bytes to send at most (unsigned int),
110
110
111 The client replies with:
111 The client replies with:
112 data length (unsigned int), 0 meaning EOF
112 data length (unsigned int), 0 meaning EOF
113 data
113 data
114 """
114 """
115
115
116 maxchunksize = 4 * 1024
116 maxchunksize = 4 * 1024
117
117
118 def __init__(self, in_, out, channel):
118 def __init__(self, in_, out, channel):
119 self.in_ = in_
119 self.in_ = in_
120 self.out = out
120 self.out = out
121 self.channel = channel
121 self.channel = channel
122
122
123 @property
123 @property
124 def name(self):
124 def name(self):
125 return '<%c-channel>' % self.channel
125 return '<%c-channel>' % self.channel
126
126
127 def read(self, size=-1):
127 def read(self, size=-1):
128 if size < 0:
128 if size < 0:
129 # if we need to consume all the client's input, ask for 4k chunks
129 # if we need to consume all the client's input, ask for 4k chunks
130 # so the pipe doesn't fill up, risking a deadlock
130 # so the pipe doesn't fill up, risking a deadlock
131 size = self.maxchunksize
131 size = self.maxchunksize
132 s = self._read(size, self.channel)
132 s = self._read(size, self.channel)
133 buf = s
133 buf = s
134 while s:
134 while s:
135 s = self._read(size, self.channel)
135 s = self._read(size, self.channel)
136 buf += s
136 buf += s
137
137
138 return buf
138 return buf
139 else:
139 else:
140 return self._read(size, self.channel)
140 return self._read(size, self.channel)
141
141
142 def _read(self, size, channel):
142 def _read(self, size, channel):
143 if not size:
143 if not size:
144 return ''
144 return ''
145 assert size > 0
145 assert size > 0
146
146
147 # tell the client we need at most size bytes
147 # tell the client we need at most size bytes
148 self.out.write(struct.pack('>cI', channel, size))
148 self.out.write(struct.pack('>cI', channel, size))
149 self.out.flush()
149 self.out.flush()
150
150
151 length = self.in_.read(4)
151 length = self.in_.read(4)
152 length = struct.unpack('>I', length)[0]
152 length = struct.unpack('>I', length)[0]
153 if not length:
153 if not length:
154 return ''
154 return ''
155 else:
155 else:
156 return self.in_.read(length)
156 return self.in_.read(length)
157
157
158 def readline(self, size=-1):
158 def readline(self, size=-1):
159 if size < 0:
159 if size < 0:
160 size = self.maxchunksize
160 size = self.maxchunksize
161 s = self._read(size, 'L')
161 s = self._read(size, 'L')
162 buf = s
162 buf = s
163 # keep asking for more until there's either no more or
163 # keep asking for more until there's either no more or
164 # we got a full line
164 # we got a full line
165 while s and s[-1] != '\n':
165 while s and s[-1] != '\n':
166 s = self._read(size, 'L')
166 s = self._read(size, 'L')
167 buf += s
167 buf += s
168
168
169 return buf
169 return buf
170 else:
170 else:
171 return self._read(size, 'L')
171 return self._read(size, 'L')
172
172
173 def __iter__(self):
173 def __iter__(self):
174 return self
174 return self
175
175
176 def next(self):
176 def next(self):
177 l = self.readline()
177 l = self.readline()
178 if not l:
178 if not l:
179 raise StopIteration
179 raise StopIteration
180 return l
180 return l
181
181
182 __next__ = next
182 __next__ = next
183
183
184 def __getattr__(self, attr):
184 def __getattr__(self, attr):
185 if attr in (r'isatty', r'fileno', r'tell', r'seek'):
185 if attr in (r'isatty', r'fileno', r'tell', r'seek'):
186 raise AttributeError(attr)
186 raise AttributeError(attr)
187 return getattr(self.in_, attr)
187 return getattr(self.in_, attr)
188
188
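The channeledinput docstring above describes both directions of the input protocol; the client's reply side might look roughly like this (an illustrative sketch only; `to_server` is an assumed binary pipe feeding the server's fin):

import struct

def answer_input_request(to_server, data):
    # reply: big-endian uint32 length (0 signals EOF), then the bytes themselves
    to_server.write(struct.pack('>I', len(data)))
    if data:
        to_server.write(data)
    to_server.flush()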
189 _messageencoders = {
189 _messageencoders = {
190 b'cbor': lambda v: b''.join(cborutil.streamencode(v)),
190 b'cbor': lambda v: b''.join(cborutil.streamencode(v)),
191 }
191 }
192
192
193 def _selectmessageencoder(ui):
193 def _selectmessageencoder(ui):
194 # experimental config: cmdserver.message-encodings
194 # experimental config: cmdserver.message-encodings
195 encnames = ui.configlist(b'cmdserver', b'message-encodings')
195 encnames = ui.configlist(b'cmdserver', b'message-encodings')
196 for n in encnames:
196 for n in encnames:
197 f = _messageencoders.get(n)
197 f = _messageencoders.get(n)
198 if f:
198 if f:
199 return n, f
199 return n, f
200 raise error.Abort(b'no supported message encodings: %s'
200 raise error.Abort(b'no supported message encodings: %s'
201 % b' '.join(encnames))
201 % b' '.join(encnames))
202
202
203 class server(object):
203 class server(object):
204 """
204 """
205 Listens for commands on fin, runs them and writes the output on a channel
205 Listens for commands on fin, runs them and writes the output on a channel
206 based stream to fout.
206 based stream to fout.
207 """
207 """
208 def __init__(self, ui, repo, fin, fout):
208 def __init__(self, ui, repo, fin, fout):
209 self.cwd = encoding.getcwd()
209 self.cwd = encoding.getcwd()
210
210
211 # developer config: cmdserver.log
211 # developer config: cmdserver.log
212 logpath = ui.config("cmdserver", "log")
212 logpath = ui.config("cmdserver", "log")
213 if logpath:
213 if logpath:
214 global logfile
214 global logfile
215 if logpath == '-':
215 if logpath == '-':
216 # write log on a special 'd' (debug) channel
216 # write log on a special 'd' (debug) channel
217 logfile = channeledoutput(fout, 'd')
217 logfile = channeledoutput(fout, 'd')
218 else:
218 else:
219 logfile = open(logpath, 'a')
219 logfile = open(logpath, 'a')
220
220
221 if repo:
221 if repo:
222 # the ui here is really the repo ui, so take its baseui so we don't
222 # the ui here is really the repo ui, so take its baseui so we don't
223 # end up with its local configuration
223 # end up with its local configuration
224 self.ui = repo.baseui
224 self.ui = repo.baseui
225 self.repo = repo
225 self.repo = repo
226 self.repoui = repo.ui
226 self.repoui = repo.ui
227 else:
227 else:
228 self.ui = ui
228 self.ui = ui
229 self.repo = self.repoui = None
229 self.repo = self.repoui = None
230
230
231 self.cerr = channeledoutput(fout, 'e')
231 self.cerr = channeledoutput(fout, 'e')
232 self.cout = channeledoutput(fout, 'o')
232 self.cout = channeledoutput(fout, 'o')
233 self.cin = channeledinput(fin, fout, 'I')
233 self.cin = channeledinput(fin, fout, 'I')
234 self.cresult = channeledoutput(fout, 'r')
234 self.cresult = channeledoutput(fout, 'r')
235
235
236 # TODO: add this to help/config.txt when stabilized
236 # TODO: add this to help/config.txt when stabilized
237 # ``channel``
237 # ``channel``
238 # Use separate channel for structured output. (Command-server only)
238 # Use separate channel for structured output. (Command-server only)
239 self.cmsg = None
239 self.cmsg = None
240 if ui.config(b'ui', b'message-output') == b'channel':
240 if ui.config(b'ui', b'message-output') == b'channel':
241 encname, encfn = _selectmessageencoder(ui)
241 encname, encfn = _selectmessageencoder(ui)
242 self.cmsg = channeledmessage(fout, b'm', encname, encfn)
242 self.cmsg = channeledmessage(fout, b'm', encname, encfn)
243
243
244 self.client = fin
244 self.client = fin
245
245
246 def cleanup(self):
246 def cleanup(self):
247 """release and restore resources taken during server session"""
247 """release and restore resources taken during server session"""
248
248
249 def _read(self, size):
249 def _read(self, size):
250 if not size:
250 if not size:
251 return ''
251 return ''
252
252
253 data = self.client.read(size)
253 data = self.client.read(size)
254
254
255 # is the other end closed?
255 # is the other end closed?
256 if not data:
256 if not data:
257 raise EOFError
257 raise EOFError
258
258
259 return data
259 return data
260
260
261 def _readstr(self):
261 def _readstr(self):
262 """read a string from the channel
262 """read a string from the channel
263
263
264 format:
264 format:
265 data length (uint32), data
265 data length (uint32), data
266 """
266 """
267 length = struct.unpack('>I', self._read(4))[0]
267 length = struct.unpack('>I', self._read(4))[0]
268 if not length:
268 if not length:
269 return ''
269 return ''
270 return self._read(length)
270 return self._read(length)
271
271
272 def _readlist(self):
272 def _readlist(self):
273 """read a list of NULL separated strings from the channel"""
273 """read a list of NULL separated strings from the channel"""
274 s = self._readstr()
274 s = self._readstr()
275 if s:
275 if s:
276 return s.split('\0')
276 return s.split('\0')
277 else:
277 else:
278 return []
278 return []
279
279
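Combining the _readstr()/_readlist() framing above with the runcommand handler that follows, a client request could be encoded along these lines (a sketch, not Mercurial's official client bindings; `to_server` is an assumed binary pipe to the server's fin):

import struct

def send_runcommand(to_server, args):
    # command name as a plain line, then a uint32-length-prefixed,
    # NUL-separated argument list for _readlist() to split on '\0'
    payload = b'\0'.join(args)                 # e.g. [b'log', b'-l', b'5']
    to_server.write(b'runcommand\n')
    to_server.write(struct.pack('>I', len(payload)) + payload)
    to_server.flush()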
280 def runcommand(self):
280 def runcommand(self):
281 """ reads a list of \0 terminated arguments, executes
281 """ reads a list of \0 terminated arguments, executes
282 and writes the return code to the result channel """
282 and writes the return code to the result channel """
283 from . import dispatch # avoid cycle
283 from . import dispatch # avoid cycle
284
284
285 args = self._readlist()
285 args = self._readlist()
286
286
287 # copy the uis so changes (e.g. --config or --verbose) don't
287 # copy the uis so changes (e.g. --config or --verbose) don't
288 # persist between requests
288 # persist between requests
289 copiedui = self.ui.copy()
289 copiedui = self.ui.copy()
290 uis = [copiedui]
290 uis = [copiedui]
291 if self.repo:
291 if self.repo:
292 self.repo.baseui = copiedui
292 self.repo.baseui = copiedui
293 # clone ui without using ui.copy because this is protected
293 # clone ui without using ui.copy because this is protected
294 repoui = self.repoui.__class__(self.repoui)
294 repoui = self.repoui.__class__(self.repoui)
295 repoui.copy = copiedui.copy # redo copy protection
295 repoui.copy = copiedui.copy # redo copy protection
296 uis.append(repoui)
296 uis.append(repoui)
297 self.repo.ui = self.repo.dirstate._ui = repoui
297 self.repo.ui = self.repo.dirstate._ui = repoui
298 self.repo.invalidateall()
298 self.repo.invalidateall()
299
299
300 for ui in uis:
300 for ui in uis:
301 ui.resetstate()
301 ui.resetstate()
302 # any kind of interaction must use server channels, but chg may
302 # any kind of interaction must use server channels, but chg may
303 # replace channels with fully functional tty files. so nontty is
303 # replace channels with fully functional tty files. so nontty is
304 # enforced only if cin is a channel.
304 # enforced only if cin is a channel.
305 if not util.safehasattr(self.cin, 'fileno'):
305 if not util.safehasattr(self.cin, 'fileno'):
306 ui.setconfig('ui', 'nontty', 'true', 'commandserver')
306 ui.setconfig('ui', 'nontty', 'true', 'commandserver')
307
307
308 req = dispatch.request(args[:], copiedui, self.repo, self.cin,
308 req = dispatch.request(args[:], copiedui, self.repo, self.cin,
309 self.cout, self.cerr, self.cmsg)
309 self.cout, self.cerr, self.cmsg)
310
310
311 try:
311 try:
312 ret = dispatch.dispatch(req) & 255
312 ret = dispatch.dispatch(req) & 255
313 self.cresult.write(struct.pack('>i', int(ret)))
313 self.cresult.write(struct.pack('>i', int(ret)))
314 finally:
314 finally:
315 # restore old cwd
315 # restore old cwd
316 if '--cwd' in args:
316 if '--cwd' in args:
317 os.chdir(self.cwd)
317 os.chdir(self.cwd)
318
318
319 def getencoding(self):
319 def getencoding(self):
320 """ writes the current encoding to the result channel """
320 """ writes the current encoding to the result channel """
321 self.cresult.write(encoding.encoding)
321 self.cresult.write(encoding.encoding)
322
322
323 def serveone(self):
323 def serveone(self):
324 cmd = self.client.readline()[:-1]
324 cmd = self.client.readline()[:-1]
325 if cmd:
325 if cmd:
326 handler = self.capabilities.get(cmd)
326 handler = self.capabilities.get(cmd)
327 if handler:
327 if handler:
328 handler(self)
328 handler(self)
329 else:
329 else:
330 # clients are expected to check what commands are supported by
330 # clients are expected to check what commands are supported by
331 # looking at the server's capabilities
331 # looking at the server's capabilities
332 raise error.Abort(_('unknown command %s') % cmd)
332 raise error.Abort(_('unknown command %s') % cmd)
333
333
334 return cmd != ''
334 return cmd != ''
335
335
336 capabilities = {'runcommand': runcommand,
336 capabilities = {'runcommand': runcommand,
337 'getencoding': getencoding}
337 'getencoding': getencoding}
338
338
339 def serve(self):
339 def serve(self):
340 hellomsg = 'capabilities: ' + ' '.join(sorted(self.capabilities))
340 hellomsg = 'capabilities: ' + ' '.join(sorted(self.capabilities))
341 hellomsg += '\n'
341 hellomsg += '\n'
342 hellomsg += 'encoding: ' + encoding.encoding
342 hellomsg += 'encoding: ' + encoding.encoding
343 hellomsg += '\n'
343 hellomsg += '\n'
344 if self.cmsg:
344 if self.cmsg:
345 hellomsg += 'message-encoding: %s\n' % self.cmsg.encoding
345 hellomsg += 'message-encoding: %s\n' % self.cmsg.encoding
346 hellomsg += 'pid: %d' % procutil.getpid()
346 hellomsg += 'pid: %d' % procutil.getpid()
347 if util.safehasattr(os, 'getpgid'):
347 if util.safehasattr(os, 'getpgid'):
348 hellomsg += '\n'
348 hellomsg += '\n'
349 hellomsg += 'pgid: %d' % os.getpgid(0)
349 hellomsg += 'pgid: %d' % os.getpgid(0)
350
350
351 # write the hello msg in -one- chunk
351 # write the hello msg in -one- chunk
352 self.cout.write(hellomsg)
352 self.cout.write(hellomsg)
353
353
354 try:
354 try:
355 while self.serveone():
355 while self.serveone():
356 pass
356 pass
357 except EOFError:
357 except EOFError:
358 # we'll get here if the client disconnected while we were reading
358 # we'll get here if the client disconnected while we were reading
359 # its request
359 # its request
360 return 1
360 return 1
361
361
362 return 0
362 return 0
363
363
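Because serve() above writes the hello message as a single 'o'-channel chunk of "field: value" lines, a client can parse it with a few lines of code (illustrative only; the field names are exactly those written above):

def parse_hello(payload):
    fields = {}
    for line in payload.split(b'\n'):
        key, _sep, value = line.partition(b': ')
        fields[key] = value
    return fields

# e.g. parse_hello(b'capabilities: getencoding runcommand\nencoding: UTF-8\npid: 42')
# -> {b'capabilities': b'getencoding runcommand', b'encoding': b'UTF-8', b'pid': b'42'}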
364 class pipeservice(object):
364 class pipeservice(object):
365 def __init__(self, ui, repo, opts):
365 def __init__(self, ui, repo, opts):
366 self.ui = ui
366 self.ui = ui
367 self.repo = repo
367 self.repo = repo
368
368
369 def init(self):
369 def init(self):
370 pass
370 pass
371
371
372 def run(self):
372 def run(self):
373 ui = self.ui
373 ui = self.ui
374 # redirect stdio to null device so that broken extensions or in-process
374 # redirect stdio to null device so that broken extensions or in-process
375 # hooks will never cause corruption of the channel protocol.
375 # hooks will never cause corruption of the channel protocol.
376 with procutil.protectedstdio(ui.fin, ui.fout) as (fin, fout):
376 with procutil.protectedstdio(ui.fin, ui.fout) as (fin, fout):
377 sv = server(ui, self.repo, fin, fout)
377 sv = server(ui, self.repo, fin, fout)
378 try:
378 try:
379 return sv.serve()
379 return sv.serve()
380 finally:
380 finally:
381 sv.cleanup()
381 sv.cleanup()
382
382
383 def _initworkerprocess():
383 def _initworkerprocess():
384 # use a different process group from the master process, in order to:
384 # use a different process group from the master process, in order to:
385 # 1. make the current process group no longer "orphaned" (because the
385 # 1. make the current process group no longer "orphaned" (because the
386 # parent of this process is in a different process group while
386 # parent of this process is in a different process group while
387 # remaining in the same session)
387 # remaining in the same session)
388 # according to POSIX 2.2.2.52, an orphaned process group will ignore
388 # according to POSIX 2.2.2.52, an orphaned process group will ignore
389 # terminal-generated stop signals like SIGTSTP (Ctrl+Z), which will
389 # terminal-generated stop signals like SIGTSTP (Ctrl+Z), which will
390 # cause trouble for things like ncurses.
390 # cause trouble for things like ncurses.
391 # 2. the client can use kill(-pgid, sig) to simulate terminal-generated
391 # 2. the client can use kill(-pgid, sig) to simulate terminal-generated
392 # SIGINT (Ctrl+C) and process-exit-generated SIGHUP. our child
392 # SIGINT (Ctrl+C) and process-exit-generated SIGHUP. our child
393 # processes like ssh will be killed properly, without affecting
393 # processes like ssh will be killed properly, without affecting
394 # unrelated processes.
394 # unrelated processes.
395 os.setpgid(0, 0)
395 os.setpgid(0, 0)
396 # change the random state; otherwise forked request handlers would have
396 # change the random state; otherwise forked request handlers would have
397 # the same state inherited from the parent.
397 # the same state inherited from the parent.
398 random.seed()
398 random.seed()
399
399
400 def _serverequest(ui, repo, conn, createcmdserver):
400 def _serverequest(ui, repo, conn, createcmdserver):
401 fin = conn.makefile(r'rb')
401 fin = conn.makefile(r'rb')
402 fout = conn.makefile(r'wb')
402 fout = conn.makefile(r'wb')
403 sv = None
403 sv = None
404 try:
404 try:
405 sv = createcmdserver(repo, conn, fin, fout)
405 sv = createcmdserver(repo, conn, fin, fout)
406 try:
406 try:
407 sv.serve()
407 sv.serve()
408 # handle exceptions that may be raised by the command server. most
408 # handle exceptions that may be raised by the command server. most
409 # known exceptions are caught by dispatch.
409 # known exceptions are caught by dispatch.
410 except error.Abort as inst:
410 except error.Abort as inst:
411 ui.error(_('abort: %s\n') % inst)
411 ui.error(_('abort: %s\n') % inst)
412 except IOError as inst:
412 except IOError as inst:
413 if inst.errno != errno.EPIPE:
413 if inst.errno != errno.EPIPE:
414 raise
414 raise
415 except KeyboardInterrupt:
415 except KeyboardInterrupt:
416 pass
416 pass
417 finally:
417 finally:
418 sv.cleanup()
418 sv.cleanup()
419 except: # re-raises
419 except: # re-raises
420 # also write traceback to error channel. otherwise client cannot
420 # also write traceback to error channel. otherwise client cannot
421 # see it because it is written to server's stderr by default.
421 # see it because it is written to server's stderr by default.
422 if sv:
422 if sv:
423 cerr = sv.cerr
423 cerr = sv.cerr
424 else:
424 else:
425 cerr = channeledoutput(fout, 'e')
425 cerr = channeledoutput(fout, 'e')
426 cerr.write(encoding.strtolocal(traceback.format_exc()))
426 cerr.write(encoding.strtolocal(traceback.format_exc()))
427 raise
427 raise
428 finally:
428 finally:
429 fin.close()
429 fin.close()
430 try:
430 try:
431 fout.close() # implicit flush() may cause another EPIPE
431 fout.close() # implicit flush() may cause another EPIPE
432 except IOError as inst:
432 except IOError as inst:
433 if inst.errno != errno.EPIPE:
433 if inst.errno != errno.EPIPE:
434 raise
434 raise
435
435
436 class unixservicehandler(object):
436 class unixservicehandler(object):
437 """Set of pluggable operations for unix-mode services
437 """Set of pluggable operations for unix-mode services
438
438
439 Almost all methods except for createcmdserver() are called in the main
439 Almost all methods except for createcmdserver() are called in the main
440 process. You can't pass a mutable resource back from createcmdserver().
440 process. You can't pass a mutable resource back from createcmdserver().
441 """
441 """
442
442
443 pollinterval = None
443 pollinterval = None
444
444
445 def __init__(self, ui):
445 def __init__(self, ui):
446 self.ui = ui
446 self.ui = ui
447
447
448 def bindsocket(self, sock, address):
448 def bindsocket(self, sock, address):
449 util.bindunixsocket(sock, address)
449 util.bindunixsocket(sock, address)
450 sock.listen(socket.SOMAXCONN)
450 sock.listen(socket.SOMAXCONN)
451 self.ui.status(_('listening at %s\n') % address)
451 self.ui.status(_('listening at %s\n') % address)
452 self.ui.flush() # avoid buffering of status message
452 self.ui.flush() # avoid buffering of status message
453
453
454 def unlinksocket(self, address):
454 def unlinksocket(self, address):
455 os.unlink(address)
455 os.unlink(address)
456
456
457 def shouldexit(self):
457 def shouldexit(self):
458 """True if server should shut down; checked per pollinterval"""
458 """True if server should shut down; checked per pollinterval"""
459 return False
459 return False
460
460
461 def newconnection(self):
461 def newconnection(self):
462 """Called when main process notices new connection"""
462 """Called when main process notices new connection"""
463
463
464 def createcmdserver(self, repo, conn, fin, fout):
464 def createcmdserver(self, repo, conn, fin, fout):
465 """Create new command server instance; called in the process that
465 """Create new command server instance; called in the process that
466 serves for the current connection"""
466 serves for the current connection"""
467 return server(self.ui, repo, fin, fout)
467 return server(self.ui, repo, fin, fout)
468
468
469 class unixforkingservice(object):
469 class unixforkingservice(object):
470 """
470 """
471 Listens on unix domain socket and forks server per connection
471 Listens on unix domain socket and forks server per connection
472 """
472 """
473
473
474 def __init__(self, ui, repo, opts, handler=None):
474 def __init__(self, ui, repo, opts, handler=None):
475 self.ui = ui
475 self.ui = ui
476 self.repo = repo
476 self.repo = repo
477 self.address = opts['address']
477 self.address = opts['address']
478 if not util.safehasattr(socket, 'AF_UNIX'):
478 if not util.safehasattr(socket, 'AF_UNIX'):
479 raise error.Abort(_('unsupported platform'))
479 raise error.Abort(_('unsupported platform'))
480 if not self.address:
480 if not self.address:
481 raise error.Abort(_('no socket path specified with --address'))
481 raise error.Abort(_('no socket path specified with --address'))
482 self._servicehandler = handler or unixservicehandler(ui)
482 self._servicehandler = handler or unixservicehandler(ui)
483 self._sock = None
483 self._sock = None
484 self._oldsigchldhandler = None
484 self._oldsigchldhandler = None
485 self._workerpids = set() # updated by signal handler; do not iterate
485 self._workerpids = set() # updated by signal handler; do not iterate
486 self._socketunlinked = None
486 self._socketunlinked = None
487
487
488 def init(self):
488 def init(self):
489 self._sock = socket.socket(socket.AF_UNIX)
489 self._sock = socket.socket(socket.AF_UNIX)
490 self._servicehandler.bindsocket(self._sock, self.address)
490 self._servicehandler.bindsocket(self._sock, self.address)
491 if util.safehasattr(procutil, 'unblocksignal'):
491 if util.safehasattr(procutil, 'unblocksignal'):
492 procutil.unblocksignal(signal.SIGCHLD)
492 procutil.unblocksignal(signal.SIGCHLD)
493 o = signal.signal(signal.SIGCHLD, self._sigchldhandler)
493 o = signal.signal(signal.SIGCHLD, self._sigchldhandler)
494 self._oldsigchldhandler = o
494 self._oldsigchldhandler = o
495 self._socketunlinked = False
495 self._socketunlinked = False
496
496
497 def _unlinksocket(self):
497 def _unlinksocket(self):
498 if not self._socketunlinked:
498 if not self._socketunlinked:
499 self._servicehandler.unlinksocket(self.address)
499 self._servicehandler.unlinksocket(self.address)
500 self._socketunlinked = True
500 self._socketunlinked = True
501
501
502 def _cleanup(self):
502 def _cleanup(self):
503 signal.signal(signal.SIGCHLD, self._oldsigchldhandler)
503 signal.signal(signal.SIGCHLD, self._oldsigchldhandler)
504 self._sock.close()
504 self._sock.close()
505 self._unlinksocket()
505 self._unlinksocket()
506 # don't kill child processes as they have active clients, just wait
506 # don't kill child processes as they have active clients, just wait
507 self._reapworkers(0)
507 self._reapworkers(0)
508
508
509 def run(self):
509 def run(self):
510 try:
510 try:
511 self._mainloop()
511 self._mainloop()
512 finally:
512 finally:
513 self._cleanup()
513 self._cleanup()
514
514
515 def _mainloop(self):
515 def _mainloop(self):
516 exiting = False
516 exiting = False
517 h = self._servicehandler
517 h = self._servicehandler
518 selector = selectors.DefaultSelector()
518 selector = selectors.DefaultSelector()
519 selector.register(self._sock, selectors.EVENT_READ)
519 selector.register(self._sock, selectors.EVENT_READ)
520 while True:
520 while True:
521 if not exiting and h.shouldexit():
521 if not exiting and h.shouldexit():
522 # clients can no longer connect() to the domain socket, so
522 # clients can no longer connect() to the domain socket, so
523 # we stop queuing new requests.
523 # we stop queuing new requests.
524 # for requests that are queued (connect()-ed, but haven't been
524 # for requests that are queued (connect()-ed, but haven't been
525 # accept()-ed), handle them before exit. otherwise, clients
525 # accept()-ed), handle them before exit. otherwise, clients
526 # waiting for recv() will receive ECONNRESET.
526 # waiting for recv() will receive ECONNRESET.
527 self._unlinksocket()
527 self._unlinksocket()
528 exiting = True
528 exiting = True
529 ready = selector.select(timeout=h.pollinterval)
529 try:
530 ready = selector.select(timeout=h.pollinterval)
531 except OSError as inst:
532 # selectors2 raises ETIMEDOUT if the timeout is exceeded while
533 # handling a signal interrupt. That's probably wrong, but
534 # we can easily get around it.
535 if inst.errno != errno.ETIMEDOUT:
536 raise
537 ready = []
530 if not ready:
538 if not ready:
531 # only exit if we completed all queued requests
539 # only exit if we completed all queued requests
532 if exiting:
540 if exiting:
533 break
541 break
534 continue
542 continue
535 try:
543 try:
536 conn, _addr = self._sock.accept()
544 conn, _addr = self._sock.accept()
537 except socket.error as inst:
545 except socket.error as inst:
538 if inst.args[0] == errno.EINTR:
546 if inst.args[0] == errno.EINTR:
539 continue
547 continue
540 raise
548 raise
541
549
542 pid = os.fork()
550 pid = os.fork()
543 if pid:
551 if pid:
544 try:
552 try:
545 self.ui.debug('forked worker process (pid=%d)\n' % pid)
553 self.ui.debug('forked worker process (pid=%d)\n' % pid)
546 self._workerpids.add(pid)
554 self._workerpids.add(pid)
547 h.newconnection()
555 h.newconnection()
548 finally:
556 finally:
549 conn.close() # release handle in parent process
557 conn.close() # release handle in parent process
550 else:
558 else:
551 try:
559 try:
552 selector.close()
560 selector.close()
553 self._sock.close()
561 self._sock.close()
554 self._runworker(conn)
562 self._runworker(conn)
555 conn.close()
563 conn.close()
556 os._exit(0)
564 os._exit(0)
557 except: # never return, hence no re-raises
565 except: # never return, hence no re-raises
558 try:
566 try:
559 self.ui.traceback(force=True)
567 self.ui.traceback(force=True)
560 finally:
568 finally:
561 os._exit(255)
569 os._exit(255)
562 selector.close()
570 selector.close()
563
571
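The hunk above is the substantive change to this file: _mainloop() now tolerates selectors2 raising OSError(ETIMEDOUT) when a signal interrupts the wait. The same defensive pattern in isolation, as a standalone sketch (assumes a registered `selector` and a `timeout` in seconds, as above):

import errno

def select_tolerating_timeout(selector, timeout):
    try:
        return selector.select(timeout=timeout)
    except OSError as inst:
        if inst.errno != errno.ETIMEDOUT:
            raise
        return []    # treat the spurious ETIMEDOUT as "nothing ready"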
564 def _sigchldhandler(self, signal, frame):
572 def _sigchldhandler(self, signal, frame):
565 self._reapworkers(os.WNOHANG)
573 self._reapworkers(os.WNOHANG)
566
574
567 def _reapworkers(self, options):
575 def _reapworkers(self, options):
568 while self._workerpids:
576 while self._workerpids:
569 try:
577 try:
570 pid, _status = os.waitpid(-1, options)
578 pid, _status = os.waitpid(-1, options)
571 except OSError as inst:
579 except OSError as inst:
572 if inst.errno == errno.EINTR:
580 if inst.errno == errno.EINTR:
573 continue
581 continue
574 if inst.errno != errno.ECHILD:
582 if inst.errno != errno.ECHILD:
575 raise
583 raise
576 # no child processes at all (reaped by other waitpid()?)
584 # no child processes at all (reaped by other waitpid()?)
577 self._workerpids.clear()
585 self._workerpids.clear()
578 return
586 return
579 if pid == 0:
587 if pid == 0:
580 # no waitable child processes
588 # no waitable child processes
581 return
589 return
582 self.ui.debug('worker process exited (pid=%d)\n' % pid)
590 self.ui.debug('worker process exited (pid=%d)\n' % pid)
583 self._workerpids.discard(pid)
591 self._workerpids.discard(pid)
584
592
585 def _runworker(self, conn):
593 def _runworker(self, conn):
586 signal.signal(signal.SIGCHLD, self._oldsigchldhandler)
594 signal.signal(signal.SIGCHLD, self._oldsigchldhandler)
587 _initworkerprocess()
595 _initworkerprocess()
588 h = self._servicehandler
596 h = self._servicehandler
589 try:
597 try:
590 _serverequest(self.ui, self.repo, conn, h.createcmdserver)
598 _serverequest(self.ui, self.repo, conn, h.createcmdserver)
591 finally:
599 finally:
592 gc.collect() # trigger __del__ since worker process uses os._exit
600 gc.collect() # trigger __del__ since worker process uses os._exit
@@ -1,2475 +1,2480 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 hex,
18 hex,
19 modifiednodeid,
19 modifiednodeid,
20 nullid,
20 nullid,
21 nullrev,
21 nullrev,
22 short,
22 short,
23 wdirfilenodeids,
23 wdirfilenodeids,
24 wdirid,
24 wdirid,
25 )
25 )
26 from . import (
26 from . import (
27 dagop,
27 dagop,
28 encoding,
28 encoding,
29 error,
29 error,
30 fileset,
30 fileset,
31 match as matchmod,
31 match as matchmod,
32 obsolete as obsmod,
32 obsolete as obsmod,
33 patch,
33 patch,
34 pathutil,
34 pathutil,
35 phases,
35 phases,
36 pycompat,
36 pycompat,
37 repoview,
37 repoview,
38 scmutil,
38 scmutil,
39 sparse,
39 sparse,
40 subrepo,
40 subrepo,
41 subrepoutil,
41 subrepoutil,
42 util,
42 util,
43 )
43 )
44 from .utils import (
44 from .utils import (
45 dateutil,
45 dateutil,
46 stringutil,
46 stringutil,
47 )
47 )
48
48
49 propertycache = util.propertycache
49 propertycache = util.propertycache
50
50
51 class basectx(object):
51 class basectx(object):
52 """A basectx object represents the common logic for its children:
52 """A basectx object represents the common logic for its children:
53 changectx: read-only context that is already present in the repo,
53 changectx: read-only context that is already present in the repo,
54 workingctx: a context that represents the working directory and can
54 workingctx: a context that represents the working directory and can
55 be committed,
55 be committed,
56 memctx: a context that represents changes in-memory and can also
56 memctx: a context that represents changes in-memory and can also
57 be committed."""
57 be committed."""
58
58
59 def __init__(self, repo):
59 def __init__(self, repo):
60 self._repo = repo
60 self._repo = repo
61
61
62 def __bytes__(self):
62 def __bytes__(self):
63 return short(self.node())
63 return short(self.node())
64
64
65 __str__ = encoding.strmethod(__bytes__)
65 __str__ = encoding.strmethod(__bytes__)
66
66
67 def __repr__(self):
67 def __repr__(self):
68 return r"<%s %s>" % (type(self).__name__, str(self))
68 return r"<%s %s>" % (type(self).__name__, str(self))
69
69
70 def __eq__(self, other):
70 def __eq__(self, other):
71 try:
71 try:
72 return type(self) == type(other) and self._rev == other._rev
72 return type(self) == type(other) and self._rev == other._rev
73 except AttributeError:
73 except AttributeError:
74 return False
74 return False
75
75
76 def __ne__(self, other):
76 def __ne__(self, other):
77 return not (self == other)
77 return not (self == other)
78
78
79 def __contains__(self, key):
79 def __contains__(self, key):
80 return key in self._manifest
80 return key in self._manifest
81
81
82 def __getitem__(self, key):
82 def __getitem__(self, key):
83 return self.filectx(key)
83 return self.filectx(key)
84
84
85 def __iter__(self):
85 def __iter__(self):
86 return iter(self._manifest)
86 return iter(self._manifest)
87
87
88 def _buildstatusmanifest(self, status):
88 def _buildstatusmanifest(self, status):
89 """Builds a manifest that includes the given status results, if this is
89 """Builds a manifest that includes the given status results, if this is
90 a working copy context. For non-working copy contexts, it just returns
90 a working copy context. For non-working copy contexts, it just returns
91 the normal manifest."""
91 the normal manifest."""
92 return self.manifest()
92 return self.manifest()
93
93
94 def _matchstatus(self, other, match):
94 def _matchstatus(self, other, match):
95 """This internal method provides a way for child objects to override the
95 """This internal method provides a way for child objects to override the
96 match operator.
96 match operator.
97 """
97 """
98 return match
98 return match
99
99
100 def _buildstatus(self, other, s, match, listignored, listclean,
100 def _buildstatus(self, other, s, match, listignored, listclean,
101 listunknown):
101 listunknown):
102 """build a status with respect to another context"""
102 """build a status with respect to another context"""
103 # Load earliest manifest first for caching reasons. More specifically,
103 # Load earliest manifest first for caching reasons. More specifically,
104 # if you have revisions 1000 and 1001, 1001 is probably stored as a
104 # if you have revisions 1000 and 1001, 1001 is probably stored as a
105 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
105 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
106 # 1000 and cache it so that when you read 1001, we just need to apply a
106 # 1000 and cache it so that when you read 1001, we just need to apply a
107 # delta to what's in the cache. So that's one full reconstruction + one
107 # delta to what's in the cache. So that's one full reconstruction + one
108 # delta application.
108 # delta application.
109 mf2 = None
109 mf2 = None
110 if self.rev() is not None and self.rev() < other.rev():
110 if self.rev() is not None and self.rev() < other.rev():
111 mf2 = self._buildstatusmanifest(s)
111 mf2 = self._buildstatusmanifest(s)
112 mf1 = other._buildstatusmanifest(s)
112 mf1 = other._buildstatusmanifest(s)
113 if mf2 is None:
113 if mf2 is None:
114 mf2 = self._buildstatusmanifest(s)
114 mf2 = self._buildstatusmanifest(s)
115
115
116 modified, added = [], []
116 modified, added = [], []
117 removed = []
117 removed = []
118 clean = []
118 clean = []
119 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
119 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
120 deletedset = set(deleted)
120 deletedset = set(deleted)
121 d = mf1.diff(mf2, match=match, clean=listclean)
121 d = mf1.diff(mf2, match=match, clean=listclean)
122 for fn, value in d.iteritems():
122 for fn, value in d.iteritems():
123 if fn in deletedset:
123 if fn in deletedset:
124 continue
124 continue
125 if value is None:
125 if value is None:
126 clean.append(fn)
126 clean.append(fn)
127 continue
127 continue
128 (node1, flag1), (node2, flag2) = value
128 (node1, flag1), (node2, flag2) = value
129 if node1 is None:
129 if node1 is None:
130 added.append(fn)
130 added.append(fn)
131 elif node2 is None:
131 elif node2 is None:
132 removed.append(fn)
132 removed.append(fn)
133 elif flag1 != flag2:
133 elif flag1 != flag2:
134 modified.append(fn)
134 modified.append(fn)
135 elif node2 not in wdirfilenodeids:
135 elif node2 not in wdirfilenodeids:
136 # When comparing files between two commits, we save time by
136 # When comparing files between two commits, we save time by
137 # not comparing the file contents when the nodeids differ.
137 # not comparing the file contents when the nodeids differ.
138 # Note that this means we incorrectly report a reverted change
138 # Note that this means we incorrectly report a reverted change
139 # to a file as a modification.
139 # to a file as a modification.
140 modified.append(fn)
140 modified.append(fn)
141 elif self[fn].cmp(other[fn]):
141 elif self[fn].cmp(other[fn]):
142 modified.append(fn)
142 modified.append(fn)
143 else:
143 else:
144 clean.append(fn)
144 clean.append(fn)
145
145
146 if removed:
146 if removed:
147 # need to filter files if they are already reported as removed
147 # need to filter files if they are already reported as removed
148 unknown = [fn for fn in unknown if fn not in mf1 and
148 unknown = [fn for fn in unknown if fn not in mf1 and
149 (not match or match(fn))]
149 (not match or match(fn))]
150 ignored = [fn for fn in ignored if fn not in mf1 and
150 ignored = [fn for fn in ignored if fn not in mf1 and
151 (not match or match(fn))]
151 (not match or match(fn))]
152 # if they're deleted, don't report them as removed
152 # if they're deleted, don't report them as removed
153 removed = [fn for fn in removed if fn not in deletedset]
153 removed = [fn for fn in removed if fn not in deletedset]
154
154
155 return scmutil.status(modified, added, removed, deleted, unknown,
155 return scmutil.status(modified, added, removed, deleted, unknown,
156 ignored, clean)
156 ignored, clean)
157
157
158 @propertycache
158 @propertycache
159 def substate(self):
159 def substate(self):
160 return subrepoutil.state(self, self._repo.ui)
160 return subrepoutil.state(self, self._repo.ui)
161
161
162 def subrev(self, subpath):
162 def subrev(self, subpath):
163 return self.substate[subpath][1]
163 return self.substate[subpath][1]
164
164
165 def rev(self):
165 def rev(self):
166 return self._rev
166 return self._rev
167 def node(self):
167 def node(self):
168 return self._node
168 return self._node
169 def hex(self):
169 def hex(self):
170 return hex(self.node())
170 return hex(self.node())
171 def manifest(self):
171 def manifest(self):
172 return self._manifest
172 return self._manifest
173 def manifestctx(self):
173 def manifestctx(self):
174 return self._manifestctx
174 return self._manifestctx
175 def repo(self):
175 def repo(self):
176 return self._repo
176 return self._repo
177 def phasestr(self):
177 def phasestr(self):
178 return phases.phasenames[self.phase()]
178 return phases.phasenames[self.phase()]
179 def mutable(self):
179 def mutable(self):
180 return self.phase() > phases.public
180 return self.phase() > phases.public
181
181
182 def matchfileset(self, expr, badfn=None):
182 def matchfileset(self, expr, badfn=None):
183 return fileset.match(self, expr, badfn=badfn)
183 return fileset.match(self, expr, badfn=badfn)
184
184
185 def obsolete(self):
185 def obsolete(self):
186 """True if the changeset is obsolete"""
186 """True if the changeset is obsolete"""
187 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
187 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
188
188
189 def extinct(self):
189 def extinct(self):
190 """True if the changeset is extinct"""
190 """True if the changeset is extinct"""
191 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
191 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
192
192
193 def orphan(self):
193 def orphan(self):
194 """True if the changeset is not obsolete, but its ancestor is"""
194 """True if the changeset is not obsolete, but its ancestor is"""
195 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
195 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
196
196
197 def phasedivergent(self):
197 def phasedivergent(self):
198 """True if the changeset tries to be a successor of a public changeset
198 """True if the changeset tries to be a successor of a public changeset
199
199
200 Only non-public and non-obsolete changesets may be phase-divergent.
200 Only non-public and non-obsolete changesets may be phase-divergent.
201 """
201 """
202 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
202 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
203
203
204 def contentdivergent(self):
204 def contentdivergent(self):
205 """Is a successor of a changeset with multiple possible successor sets
205 """Is a successor of a changeset with multiple possible successor sets
206
206
207 Only non-public and non-obsolete changesets may be content-divergent.
207 Only non-public and non-obsolete changesets may be content-divergent.
208 """
208 """
209 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
209 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
210
210
211 def isunstable(self):
211 def isunstable(self):
212 """True if the changeset is either orphan, phase-divergent or
212 """True if the changeset is either orphan, phase-divergent or
213 content-divergent"""
213 content-divergent"""
214 return self.orphan() or self.phasedivergent() or self.contentdivergent()
214 return self.orphan() or self.phasedivergent() or self.contentdivergent()
215
215
216 def instabilities(self):
216 def instabilities(self):
217 """return the list of instabilities affecting this changeset.
217 """return the list of instabilities affecting this changeset.
218
218
219 Instabilities are returned as strings. possible values are:
219 Instabilities are returned as strings. Possible values are:
219 Instabilities are returned as strings. Possible values are:
220 - orphan,
221 - phase-divergent,
221 - phase-divergent,
222 - content-divergent.
222 - content-divergent.
223 """
223 """
224 instabilities = []
224 instabilities = []
225 if self.orphan():
225 if self.orphan():
226 instabilities.append('orphan')
226 instabilities.append('orphan')
227 if self.phasedivergent():
227 if self.phasedivergent():
228 instabilities.append('phase-divergent')
228 instabilities.append('phase-divergent')
229 if self.contentdivergent():
229 if self.contentdivergent():
230 instabilities.append('content-divergent')
230 instabilities.append('content-divergent')
231 return instabilities
231 return instabilities
232
232
233 def parents(self):
233 def parents(self):
234 """return contexts for each parent changeset"""
234 """return contexts for each parent changeset"""
235 return self._parents
235 return self._parents
236
236
237 def p1(self):
237 def p1(self):
238 return self._parents[0]
238 return self._parents[0]
239
239
240 def p2(self):
240 def p2(self):
241 parents = self._parents
241 parents = self._parents
242 if len(parents) == 2:
242 if len(parents) == 2:
243 return parents[1]
243 return parents[1]
244 return self._repo[nullrev]
244 return self._repo[nullrev]
245
245
246 def _fileinfo(self, path):
246 def _fileinfo(self, path):
247 if r'_manifest' in self.__dict__:
247 if r'_manifest' in self.__dict__:
248 try:
248 try:
249 return self._manifest[path], self._manifest.flags(path)
249 return self._manifest[path], self._manifest.flags(path)
250 except KeyError:
250 except KeyError:
251 raise error.ManifestLookupError(self._node, path,
251 raise error.ManifestLookupError(self._node, path,
252 _('not found in manifest'))
252 _('not found in manifest'))
253 if r'_manifestdelta' in self.__dict__ or path in self.files():
253 if r'_manifestdelta' in self.__dict__ or path in self.files():
254 if path in self._manifestdelta:
254 if path in self._manifestdelta:
255 return (self._manifestdelta[path],
255 return (self._manifestdelta[path],
256 self._manifestdelta.flags(path))
256 self._manifestdelta.flags(path))
257 mfl = self._repo.manifestlog
257 mfl = self._repo.manifestlog
258 try:
258 try:
259 node, flag = mfl[self._changeset.manifest].find(path)
259 node, flag = mfl[self._changeset.manifest].find(path)
260 except KeyError:
260 except KeyError:
261 raise error.ManifestLookupError(self._node, path,
261 raise error.ManifestLookupError(self._node, path,
262 _('not found in manifest'))
262 _('not found in manifest'))
263
263
264 return node, flag
264 return node, flag
265
265
266 def filenode(self, path):
266 def filenode(self, path):
267 return self._fileinfo(path)[0]
267 return self._fileinfo(path)[0]
268
268
269 def flags(self, path):
269 def flags(self, path):
270 try:
270 try:
271 return self._fileinfo(path)[1]
271 return self._fileinfo(path)[1]
272 except error.LookupError:
272 except error.LookupError:
273 return ''
273 return ''
274
274
275 def sub(self, path, allowcreate=True):
275 def sub(self, path, allowcreate=True):
276 '''return a subrepo for the stored revision of path, never wdir()'''
276 '''return a subrepo for the stored revision of path, never wdir()'''
277 return subrepo.subrepo(self, path, allowcreate=allowcreate)
277 return subrepo.subrepo(self, path, allowcreate=allowcreate)
278
278
279 def nullsub(self, path, pctx):
279 def nullsub(self, path, pctx):
280 return subrepo.nullsubrepo(self, path, pctx)
280 return subrepo.nullsubrepo(self, path, pctx)
281
281
282 def workingsub(self, path):
282 def workingsub(self, path):
283 '''return a subrepo for the stored revision, or wdir if this is a wdir
283 '''return a subrepo for the stored revision, or wdir if this is a wdir
284 context.
284 context.
285 '''
285 '''
286 return subrepo.subrepo(self, path, allowwdir=True)
286 return subrepo.subrepo(self, path, allowwdir=True)
287
287
288 def match(self, pats=None, include=None, exclude=None, default='glob',
288 def match(self, pats=None, include=None, exclude=None, default='glob',
289 listsubrepos=False, badfn=None):
289 listsubrepos=False, badfn=None):
290 r = self._repo
290 r = self._repo
291 return matchmod.match(r.root, r.getcwd(), pats,
291 return matchmod.match(r.root, r.getcwd(), pats,
292 include, exclude, default,
292 include, exclude, default,
293 auditor=r.nofsauditor, ctx=self,
293 auditor=r.nofsauditor, ctx=self,
294 listsubrepos=listsubrepos, badfn=badfn)
294 listsubrepos=listsubrepos, badfn=badfn)
295
295
296 def diff(self, ctx2=None, match=None, changes=None, opts=None,
296 def diff(self, ctx2=None, match=None, changes=None, opts=None,
297 losedatafn=None, prefix='', relroot='', copy=None,
297 losedatafn=None, prefix='', relroot='', copy=None,
298 hunksfilterfn=None):
298 hunksfilterfn=None):
299 """Returns a diff generator for the given contexts and matcher"""
299 """Returns a diff generator for the given contexts and matcher"""
300 if ctx2 is None:
300 if ctx2 is None:
301 ctx2 = self.p1()
301 ctx2 = self.p1()
302 if ctx2 is not None:
302 if ctx2 is not None:
303 ctx2 = self._repo[ctx2]
303 ctx2 = self._repo[ctx2]
304 return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
304 return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
305 opts=opts, losedatafn=losedatafn, prefix=prefix,
305 opts=opts, losedatafn=losedatafn, prefix=prefix,
306 relroot=relroot, copy=copy,
306 relroot=relroot, copy=copy,
307 hunksfilterfn=hunksfilterfn)
307 hunksfilterfn=hunksfilterfn)
308
308
309 def dirs(self):
309 def dirs(self):
310 return self._manifest.dirs()
310 return self._manifest.dirs()
311
311
312 def hasdir(self, dir):
312 def hasdir(self, dir):
313 return self._manifest.hasdir(dir)
313 return self._manifest.hasdir(dir)
314
314
315 def status(self, other=None, match=None, listignored=False,
315 def status(self, other=None, match=None, listignored=False,
316 listclean=False, listunknown=False, listsubrepos=False):
316 listclean=False, listunknown=False, listsubrepos=False):
317 """return status of files between two nodes or node and working
317 """return status of files between two nodes or node and working
318 directory.
318 directory.
319
319
320 If other is None, compare this node with working directory.
320 If other is None, compare this node with working directory.
321
321
322 returns (modified, added, removed, deleted, unknown, ignored, clean)
322 returns (modified, added, removed, deleted, unknown, ignored, clean)
323 """
323 """
324
324
325 ctx1 = self
325 ctx1 = self
326 ctx2 = self._repo[other]
326 ctx2 = self._repo[other]
327
327
328 # This next code block is, admittedly, fragile logic that tests for
328 # This next code block is, admittedly, fragile logic that tests for
329 # reversing the contexts and wouldn't need to exist if it weren't for
329 # reversing the contexts and wouldn't need to exist if it weren't for
330 # the fast (and common) code path of comparing the working directory
330 # the fast (and common) code path of comparing the working directory
331 # with its first parent.
331 # with its first parent.
332 #
332 #
333 # What we're aiming for here is the ability to call:
333 # What we're aiming for here is the ability to call:
334 #
334 #
335 # workingctx.status(parentctx)
335 # workingctx.status(parentctx)
336 #
336 #
337 # If we always built the manifest for each context and compared those,
337 # If we always built the manifest for each context and compared those,
338 # then we'd be done. But the special case of the above call means we
338 # then we'd be done. But the special case of the above call means we
339 # just copy the manifest of the parent.
339 # just copy the manifest of the parent.
340 reversed = False
340 reversed = False
341 if (not isinstance(ctx1, changectx)
341 if (not isinstance(ctx1, changectx)
342 and isinstance(ctx2, changectx)):
342 and isinstance(ctx2, changectx)):
343 reversed = True
343 reversed = True
344 ctx1, ctx2 = ctx2, ctx1
344 ctx1, ctx2 = ctx2, ctx1
345
345
346 match = self._repo.narrowmatch(match)
346 match = self._repo.narrowmatch(match)
347 match = ctx2._matchstatus(ctx1, match)
347 match = ctx2._matchstatus(ctx1, match)
348 r = scmutil.status([], [], [], [], [], [], [])
348 r = scmutil.status([], [], [], [], [], [], [])
349 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
349 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
350 listunknown)
350 listunknown)
351
351
352 if reversed:
352 if reversed:
353 # Reverse added and removed. Clear deleted, unknown and ignored as
353 # Reverse added and removed. Clear deleted, unknown and ignored as
354 # these make no sense to reverse.
354 # these make no sense to reverse.
355 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
355 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
356 r.clean)
356 r.clean)
357
357
358 if listsubrepos:
358 if listsubrepos:
359 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
359 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
360 try:
360 try:
361 rev2 = ctx2.subrev(subpath)
361 rev2 = ctx2.subrev(subpath)
362 except KeyError:
362 except KeyError:
363 # A subrepo that existed in node1 was deleted between
363 # A subrepo that existed in node1 was deleted between
364 # node1 and node2 (inclusive). Thus, ctx2's substate
364 # node1 and node2 (inclusive). Thus, ctx2's substate
365 # won't contain that subpath. The best we can do ignore it.
365 # won't contain that subpath. The best we can do is ignore it.
365 # won't contain that subpath. The best we can do is ignore it.
366 rev2 = None
367 submatch = matchmod.subdirmatcher(subpath, match)
367 submatch = matchmod.subdirmatcher(subpath, match)
368 s = sub.status(rev2, match=submatch, ignored=listignored,
368 s = sub.status(rev2, match=submatch, ignored=listignored,
369 clean=listclean, unknown=listunknown,
369 clean=listclean, unknown=listunknown,
370 listsubrepos=True)
370 listsubrepos=True)
371 for rfiles, sfiles in zip(r, s):
371 for rfiles, sfiles in zip(r, s):
372 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
372 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
373
373
374 for l in r:
374 for l in r:
375 l.sort()
375 l.sort()
376
376
377 return r
377 return r
378
378
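A hedged usage sketch of the status() API documented above; `repo` is an assumed localrepo handle obtained elsewhere, and the unpacking order follows the docstring:

# tip versus the working directory (other=None), including clean files
modified, added, removed, deleted, unknown, ignored, clean = \
    repo[b'tip'].status(listclean=True)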
379 class changectx(basectx):
379 class changectx(basectx):
380 """A changecontext object makes access to data related to a particular
380 """A changecontext object makes access to data related to a particular
381 changeset convenient. It represents a read-only context already present in
381 changeset convenient. It represents a read-only context already present in
382 the repo."""
382 the repo."""
383 def __init__(self, repo, rev, node):
383 def __init__(self, repo, rev, node):
384 super(changectx, self).__init__(repo)
384 super(changectx, self).__init__(repo)
385 self._rev = rev
385 self._rev = rev
386 self._node = node
386 self._node = node
387
387
    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [repo[p1]]
        return [repo[p1], repo[p2]]

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra
    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [self._repo[x] for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return self._repo[anc]

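    # Illustration: when several common-ancestor heads exist, ancestor()
    # consults the (experimental) merge.preferancestor setting before falling
    # back to the revlog ancestor, mirroring the hint printed above, e.g.
    #
    #   hg merge --config merge.preferancestor=<node> <other>
    #
    # where <node> is one of the candidate ancestors listed in the note.
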
    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)

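# Usage sketch (illustrative only): walk()/matches() expect a matcher object,
# which this module does not construct itself; assuming `m` was built with
# mercurial.match (the exact constructor varies between versions):
#
#   for f in repo[rev].walk(m):
#       ...  # manifest file names selected by the matcher
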
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    """
    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def obsolete(self):
        return self._changectx.obsolete()
    def instabilities(self):
        return self._changectx.instabilities()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False

    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different from fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True

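    # Example of the cmp() contract (a sketch, not part of the original
    # module): cmp() answers "is this content different from fctx?", so a
    # caller treats a True result as "modified":
    #
    #   if fctx1.cmp(fctx2):
    #       ...  # contents differ; record the file as modified
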
    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no introduction
                  of this file content could be found before this floor
                  revision, the function will return "None" and stop its
                  iteration.
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    return None
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if the manifest uses a buggy file revision (not a child of
            # the one it replaces) we could. Such a buggy situation will
            # likely result in a crash somewhere else at some point.
        return lkr

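    # Simplified sketch of the adjustment above (illustrative only): starting
    # from srcrev, walk its ancestors and return the first changeset that both
    # lists the path in its 'files' field and records this exact file node in
    # its manifest:
    #
    #   for rev in ancestors_of(srcrev):        # hypothetical helper
    #       if path in files(rev) and manifest(rev).get(path) == fnode:
    #           return rev
    #
    # which is how a linkrev pointing outside the ancestry gets corrected.
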
    def isintroducedafter(self, changelogrev):
        """True if a filectx has been introduced after a given floor revision
        """
        if self.linkrev() >= changelogrev:
            return True
        introrev = self._introrev(stoprev=changelogrev)
        if introrev is None:
            return False
        return introrev >= changelogrev

    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it takes into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        return self._introrev()

    def _introrev(self, stoprev=None):
        """
        Same as `introrev`, but with an extra argument to limit the changelog
        iteration range for some internal use cases.

        If `stoprev` is set, the `introrev` will not be searched past that
        `stoprev` revision and "None" might be returned. This is useful to
        limit the iteration range.
        """
        toprev = None
        attrs = vars(self)
        if r'_changeid' in attrs:
            # We have a cached value already
            toprev = self._changeid
        elif r'_changectx' in attrs:
            # We know which changelog entry we are coming from
            toprev = self._changectx.rev()

        if toprev is not None:
            return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
        elif r'_descendantrev' in attrs:
            introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
            # be nice and cache the result of the computation
            if introrev is not None:
                self._changeid = introrev
            return introrev
        else:
            return self.linkrev()

    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            return self
        return self.filectx(self.filenode(), changeid=introrev)

    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if r'_changeid' in vars(self) or r'_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif r'_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parents are nullid and pl is
            #   empty.
            # - In case of merge, only one of the parents is nullid and should
            #   be replaced with the rename information. This parent is
            #   -always- the first one.
            #
            # As nullid parents have always been filtered out in the previous
            # list comprehension, inserting at index 0 will always replace the
            # first nullid parent with the rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not r'_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(base, parents, skiprevs=skiprevs,
                              diffopts=diffopts)

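    # Usage sketch (illustrative only), based on the docstring above:
    #
    #   for line in fctx.annotate(follow=True):
    #       print(line.fctx.rev(), line.lineno, line.text)
    #
    # follow=True also traverses renames, since renamed parents are only
    # filtered out when follow is False.
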
    def ancestors(self, followfirst=False):
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c

    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())

class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for a filtered revision. In such a case we fall back
            # to creating `changectx` on the unfiltered version of the
            # repository. This fallback should not be an issue because
            # `changectx` from `filectx` are not used in complex operations
            # that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either, and "incorrect
            # behavior" is seen as better than a crash.
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when a fix for the linkrev issues is on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)

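    # Usage sketch (illustrative only): a filectx is usually reached through a
    # changectx rather than instantiated directly, e.g.
    #
    #   fctx = repo[rev].filectx('path/to/file')
    #   fctx.data()      # file contents at that revision
    #   fctx.size()      # size as recorded in the filelog
    #   fctx.renamed()   # copy source, or None
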
    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If a rename is logged in the file revision, we report a copy for the
        changeset only if the file revision's linkrev points back to the
        changeset in question or if both changeset parents contain different
        file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]

class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

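    # Worked example of the three-way flag resolution above (fl1/fl2 are the
    # parents, fla the ancestor):
    #
    #   'x', 'x', ''  -> 'x'  (parents agree)
    #   '',  'x', ''  -> 'x'  (only p2 changed the flag, keep its value)
    #   'l', '',  ''  -> 'l'  (only p1 changed the flag, keep its value)
    #   'x', 'l', ''  -> ''   (both changed it differently: punt)
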
    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(self._repo.narrowmatch(match),
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        match = self._repo.narrowmatch(match)
        ds = self._repo.dirstate
        return sorted(f for f in ds.matches(match) if ds[f] != 'r')

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False

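# Usage sketch (illustrative only): the working-directory context defined
# below is conventionally obtained as repo[None], as seen elsewhere in this
# module, e.g.
#
#   wctx = repo[None]
#   wctx.dirty()       # any pending changes (including merge/branch state)?
#   wctx.modified()    # files modified in the working directory
#   wctx.branch()      # branch the next commit would land on
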
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        return hex(wdirid)

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        # use unfiltered repo to delay/avoid loading obsmarkers
        unfi = self._repo.unfiltered()
        return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]

    def _fileinfo(self, path):
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        self._manifest
        return super(workingctx, self)._fileinfo(path)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

    def add(self, list, prefix=""):
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                limit = ui.configbytes('ui', 'large-file-limit')
                if limit != 0 and st.st_size > limit:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected

1342 def forget(self, files, prefix=""):
1342 def forget(self, files, prefix=""):
1343 with self._repo.wlock():
1343 with self._repo.wlock():
1344 ds = self._repo.dirstate
1344 ds = self._repo.dirstate
1345 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1345 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1346 rejected = []
1346 rejected = []
1347 for f in files:
1347 for f in files:
1348 if f not in self._repo.dirstate:
1348 if f not in self._repo.dirstate:
1349 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1349 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1350 rejected.append(f)
1350 rejected.append(f)
1351 elif self._repo.dirstate[f] != 'a':
1351 elif self._repo.dirstate[f] != 'a':
1352 self._repo.dirstate.remove(f)
1352 self._repo.dirstate.remove(f)
1353 else:
1353 else:
1354 self._repo.dirstate.drop(f)
1354 self._repo.dirstate.drop(f)
1355 return rejected
1355 return rejected
1356
1356
1357 def undelete(self, list):
1357 def undelete(self, list):
1358 pctxs = self.parents()
1358 pctxs = self.parents()
1359 with self._repo.wlock():
1359 with self._repo.wlock():
1360 ds = self._repo.dirstate
1360 ds = self._repo.dirstate
1361 for f in list:
1361 for f in list:
1362 if self._repo.dirstate[f] != 'r':
1362 if self._repo.dirstate[f] != 'r':
1363 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1363 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1364 else:
1364 else:
1365 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1365 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1366 t = fctx.data()
1366 t = fctx.data()
1367 self._repo.wwrite(f, t, fctx.flags())
1367 self._repo.wwrite(f, t, fctx.flags())
1368 self._repo.dirstate.normal(f)
1368 self._repo.dirstate.normal(f)
1369
1369
1370 def copy(self, source, dest):
1370 def copy(self, source, dest):
1371 try:
1371 try:
1372 st = self._repo.wvfs.lstat(dest)
1372 st = self._repo.wvfs.lstat(dest)
1373 except OSError as err:
1373 except OSError as err:
1374 if err.errno != errno.ENOENT:
1374 if err.errno != errno.ENOENT:
1375 raise
1375 raise
1376 self._repo.ui.warn(_("%s does not exist!\n")
1376 self._repo.ui.warn(_("%s does not exist!\n")
1377 % self._repo.dirstate.pathto(dest))
1377 % self._repo.dirstate.pathto(dest))
1378 return
1378 return
1379 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1379 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1380 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1380 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1381 "symbolic link\n")
1381 "symbolic link\n")
1382 % self._repo.dirstate.pathto(dest))
1382 % self._repo.dirstate.pathto(dest))
1383 else:
1383 else:
1384 with self._repo.wlock():
1384 with self._repo.wlock():
1385 if self._repo.dirstate[dest] in '?':
1385 if self._repo.dirstate[dest] in '?':
1386 self._repo.dirstate.add(dest)
1386 self._repo.dirstate.add(dest)
1387 elif self._repo.dirstate[dest] in 'r':
1387 elif self._repo.dirstate[dest] in 'r':
1388 self._repo.dirstate.normallookup(dest)
1388 self._repo.dirstate.normallookup(dest)
1389 self._repo.dirstate.copy(source, dest)
1389 self._repo.dirstate.copy(source, dest)
1390
1390
1391 def match(self, pats=None, include=None, exclude=None, default='glob',
1391 def match(self, pats=None, include=None, exclude=None, default='glob',
1392 listsubrepos=False, badfn=None):
1392 listsubrepos=False, badfn=None):
1393 r = self._repo
1393 r = self._repo
1394
1394
1395 # Only a case insensitive filesystem needs magic to translate user input
1395 # Only a case insensitive filesystem needs magic to translate user input
1396 # to actual case in the filesystem.
1396 # to actual case in the filesystem.
1397 icasefs = not util.fscasesensitive(r.root)
1397 icasefs = not util.fscasesensitive(r.root)
1398 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1398 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1399 default, auditor=r.auditor, ctx=self,
1399 default, auditor=r.auditor, ctx=self,
1400 listsubrepos=listsubrepos, badfn=badfn,
1400 listsubrepos=listsubrepos, badfn=badfn,
1401 icasefs=icasefs)
1401 icasefs=icasefs)
1402
1402
1403 def _filtersuspectsymlink(self, files):
1403 def _filtersuspectsymlink(self, files):
1404 if not files or self._repo.dirstate._checklink:
1404 if not files or self._repo.dirstate._checklink:
1405 return files
1405 return files
1406
1406
1407 # Symlink placeholders may get non-symlink-like contents
1407 # Symlink placeholders may get non-symlink-like contents
1408 # via user error or dereferencing by NFS or Samba servers,
1408 # via user error or dereferencing by NFS or Samba servers,
1409 # so we filter out any placeholders that don't look like a
1409 # so we filter out any placeholders that don't look like a
1410 # symlink
1410 # symlink
1411 sane = []
1411 sane = []
1412 for f in files:
1412 for f in files:
1413 if self.flags(f) == 'l':
1413 if self.flags(f) == 'l':
1414 d = self[f].data()
1414 d = self[f].data()
1415 if (d == '' or len(d) >= 1024 or '\n' in d
1415 if (d == '' or len(d) >= 1024 or '\n' in d
1416 or stringutil.binary(d)):
1416 or stringutil.binary(d)):
1417 self._repo.ui.debug('ignoring suspect symlink placeholder'
1417 self._repo.ui.debug('ignoring suspect symlink placeholder'
1418 ' "%s"\n' % f)
1418 ' "%s"\n' % f)
1419 continue
1419 continue
1420 sane.append(f)
1420 sane.append(f)
1421 return sane
1421 return sane
1422
1422
1423 def _checklookup(self, files):
1423 def _checklookup(self, files):
1424 # check for any possibly clean files
1424 # check for any possibly clean files
1425 if not files:
1425 if not files:
1426 return [], [], []
1426 return [], [], []
1427
1427
1428 modified = []
1428 modified = []
1429 deleted = []
1429 deleted = []
1430 fixup = []
1430 fixup = []
1431 pctx = self._parents[0]
1431 pctx = self._parents[0]
1432 # do a full compare of any files that might have changed
1432 # do a full compare of any files that might have changed
1433 for f in sorted(files):
1433 for f in sorted(files):
1434 try:
1434 try:
1435 # This will return True for a file that got replaced by a
1435 # This will return True for a file that got replaced by a
1436 # directory in the interim, but fixing that is pretty hard.
1436 # directory in the interim, but fixing that is pretty hard.
1437 if (f not in pctx or self.flags(f) != pctx.flags(f)
1437 if (f not in pctx or self.flags(f) != pctx.flags(f)
1438 or pctx[f].cmp(self[f])):
1438 or pctx[f].cmp(self[f])):
1439 modified.append(f)
1439 modified.append(f)
1440 else:
1440 else:
1441 fixup.append(f)
1441 fixup.append(f)
1442 except (IOError, OSError):
1442 except (IOError, OSError):
1443 # A file became inaccessible in between? Mark it as deleted,
1443 # A file became inaccessible in between? Mark it as deleted,
1444 # matching dirstate behavior (issue5584).
1444 # matching dirstate behavior (issue5584).
1445 # The dirstate has more complex behavior around whether a
1445 # The dirstate has more complex behavior around whether a
1446 # missing file matches a directory, etc, but we don't need to
1446 # missing file matches a directory, etc, but we don't need to
1447 # bother with that: if f has made it to this point, we're sure
1447 # bother with that: if f has made it to this point, we're sure
1448 # it's in the dirstate.
1448 # it's in the dirstate.
1449 deleted.append(f)
1449 deleted.append(f)
1450
1450
1451 return modified, deleted, fixup
1451 return modified, deleted, fixup
1452
1452
1453 def _poststatusfixup(self, status, fixup):
1453 def _poststatusfixup(self, status, fixup):
1454 """update dirstate for files that are actually clean"""
1454 """update dirstate for files that are actually clean"""
1455 poststatus = self._repo.postdsstatus()
1455 poststatus = self._repo.postdsstatus()
1456 if fixup or poststatus:
1456 if fixup or poststatus:
1457 try:
1457 try:
1458 oldid = self._repo.dirstate.identity()
1458 oldid = self._repo.dirstate.identity()
1459
1459
1460 # updating the dirstate is optional
1460 # updating the dirstate is optional
1461 # so we don't wait on the lock
1461 # so we don't wait on the lock
1462 # wlock can invalidate the dirstate, so cache normal _after_
1462 # wlock can invalidate the dirstate, so cache normal _after_
1463 # taking the lock
1463 # taking the lock
1464 with self._repo.wlock(False):
1464 with self._repo.wlock(False):
1465 if self._repo.dirstate.identity() == oldid:
1465 if self._repo.dirstate.identity() == oldid:
1466 if fixup:
1466 if fixup:
1467 normal = self._repo.dirstate.normal
1467 normal = self._repo.dirstate.normal
1468 for f in fixup:
1468 for f in fixup:
1469 normal(f)
1469 normal(f)
1470 # write changes out explicitly, because nesting
1470 # write changes out explicitly, because nesting
1471 # wlock at runtime may prevent 'wlock.release()'
1471 # wlock at runtime may prevent 'wlock.release()'
1472 # after this block from doing so for subsequent
1472 # after this block from doing so for subsequent
1473 # changing files
1473 # changing files
1474 tr = self._repo.currenttransaction()
1474 tr = self._repo.currenttransaction()
1475 self._repo.dirstate.write(tr)
1475 self._repo.dirstate.write(tr)
1476
1476
1477 if poststatus:
1477 if poststatus:
1478 for ps in poststatus:
1478 for ps in poststatus:
1479 ps(self, status)
1479 ps(self, status)
1480 else:
1480 else:
1481 # in this case, writing changes out breaks
1481 # in this case, writing changes out breaks
1482 # consistency, because .hg/dirstate was
1482 # consistency, because .hg/dirstate was
1483 # already changed simultaneously after last
1483 # already changed simultaneously after last
1484 # caching (see also issue5584 for detail)
1484 # caching (see also issue5584 for detail)
1485 self._repo.ui.debug('skip updating dirstate: '
1485 self._repo.ui.debug('skip updating dirstate: '
1486 'identity mismatch\n')
1486 'identity mismatch\n')
1487 except error.LockError:
1487 except error.LockError:
1488 pass
1488 pass
1489 finally:
1489 finally:
1490 # Even if the wlock couldn't be grabbed, clear out the list.
1490 # Even if the wlock couldn't be grabbed, clear out the list.
1491 self._repo.clearpostdsstatus()
1491 self._repo.clearpostdsstatus()
1492
1492
1493 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1493 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1494 '''Gets the status from the dirstate -- internal use only.'''
1494 '''Gets the status from the dirstate -- internal use only.'''
1495 subrepos = []
1495 subrepos = []
1496 if '.hgsub' in self:
1496 if '.hgsub' in self:
1497 subrepos = sorted(self.substate)
1497 subrepos = sorted(self.substate)
1498 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1498 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1499 clean=clean, unknown=unknown)
1499 clean=clean, unknown=unknown)
1500
1500
1501 # check for any possibly clean files
1501 # check for any possibly clean files
1502 fixup = []
1502 fixup = []
1503 if cmp:
1503 if cmp:
1504 modified2, deleted2, fixup = self._checklookup(cmp)
1504 modified2, deleted2, fixup = self._checklookup(cmp)
1505 s.modified.extend(modified2)
1505 s.modified.extend(modified2)
1506 s.deleted.extend(deleted2)
1506 s.deleted.extend(deleted2)
1507
1507
1508 if fixup and clean:
1508 if fixup and clean:
1509 s.clean.extend(fixup)
1509 s.clean.extend(fixup)
1510
1510
1511 self._poststatusfixup(s, fixup)
1511 self._poststatusfixup(s, fixup)
1512
1512
1513 if match.always():
1513 if match.always():
1514 # cache for performance
1514 # cache for performance
1515 if s.unknown or s.ignored or s.clean:
1515 if s.unknown or s.ignored or s.clean:
1516 # "_status" is cached with list*=False in the normal route
1516 # "_status" is cached with list*=False in the normal route
1517 self._status = scmutil.status(s.modified, s.added, s.removed,
1517 self._status = scmutil.status(s.modified, s.added, s.removed,
1518 s.deleted, [], [], [])
1518 s.deleted, [], [], [])
1519 else:
1519 else:
1520 self._status = s
1520 self._status = s
1521
1521
1522 return s
1522 return s
1523
1523
1524 @propertycache
1524 @propertycache
1525 def _manifest(self):
1525 def _manifest(self):
1526 """generate a manifest corresponding to the values in self._status
1526 """generate a manifest corresponding to the values in self._status
1527
1527
1528 This reuses the file nodeid from the parent, but we use special node
1528 This reuses the file nodeid from the parent, but we use special node
1529 identifiers for added and modified files. This is used by manifest
1529 identifiers for added and modified files. This is used by manifest
1530 merge to see that files are different and by update logic to avoid
1530 merge to see that files are different and by update logic to avoid
1531 deleting newly added files.
1531 deleting newly added files.
1532 """
1532 """
1533 return self._buildstatusmanifest(self._status)
1533 return self._buildstatusmanifest(self._status)
1534
1534
1535 def _buildstatusmanifest(self, status):
1535 def _buildstatusmanifest(self, status):
1536 """Builds a manifest that includes the given status results."""
1536 """Builds a manifest that includes the given status results."""
1537 parents = self.parents()
1537 parents = self.parents()
1538
1538
1539 man = parents[0].manifest().copy()
1539 man = parents[0].manifest().copy()
1540
1540
1541 ff = self._flagfunc
1541 ff = self._flagfunc
1542 for i, l in ((addednodeid, status.added),
1542 for i, l in ((addednodeid, status.added),
1543 (modifiednodeid, status.modified)):
1543 (modifiednodeid, status.modified)):
1544 for f in l:
1544 for f in l:
1545 man[f] = i
1545 man[f] = i
1546 try:
1546 try:
1547 man.setflag(f, ff(f))
1547 man.setflag(f, ff(f))
1548 except OSError:
1548 except OSError:
1549 pass
1549 pass
1550
1550
1551 for f in status.deleted + status.removed:
1551 for f in status.deleted + status.removed:
1552 if f in man:
1552 if f in man:
1553 del man[f]
1553 del man[f]
1554
1554
1555 return man
1555 return man
1556
1556
1557 def _buildstatus(self, other, s, match, listignored, listclean,
1557 def _buildstatus(self, other, s, match, listignored, listclean,
1558 listunknown):
1558 listunknown):
1559 """build a status with respect to another context
1559 """build a status with respect to another context
1560
1560
1561 This includes logic for maintaining the fast path of status when
1561 This includes logic for maintaining the fast path of status when
1562 comparing the working directory against its parent, which is to skip
1562 comparing the working directory against its parent, which is to skip
1563 building a new manifest if self (working directory) is not comparing
1563 building a new manifest if self (working directory) is not comparing
1564 against its parent (repo['.']).
1564 against its parent (repo['.']).
1565 """
1565 """
1566 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1566 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1567 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1567 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1568 # might have accidentally ended up with the entire contents of the file
1568 # might have accidentally ended up with the entire contents of the file
1569 # they are supposed to be linking to.
1569 # they are supposed to be linking to.
1570 s.modified[:] = self._filtersuspectsymlink(s.modified)
1570 s.modified[:] = self._filtersuspectsymlink(s.modified)
1571 if other != self._repo['.']:
1571 if other != self._repo['.']:
1572 s = super(workingctx, self)._buildstatus(other, s, match,
1572 s = super(workingctx, self)._buildstatus(other, s, match,
1573 listignored, listclean,
1573 listignored, listclean,
1574 listunknown)
1574 listunknown)
1575 return s
1575 return s
1576
1576
1577 def _matchstatus(self, other, match):
1577 def _matchstatus(self, other, match):
1578 """override the match method with a filter for directory patterns
1578 """override the match method with a filter for directory patterns
1579
1579
1580 We use inheritance to customize the match.bad method only in cases of
1580 We use inheritance to customize the match.bad method only in cases of
1581 workingctx since it belongs only to the working directory when
1581 workingctx since it belongs only to the working directory when
1582 comparing against the parent changeset.
1582 comparing against the parent changeset.
1583
1583
1584 If we aren't comparing against the working directory's parent, then we
1584 If we aren't comparing against the working directory's parent, then we
1585 just use the default match object sent to us.
1585 just use the default match object sent to us.
1586 """
1586 """
1587 if other != self._repo['.']:
1587 if other != self._repo['.']:
1588 def bad(f, msg):
1588 def bad(f, msg):
1589 # 'f' may be a directory pattern from 'match.files()',
1589 # 'f' may be a directory pattern from 'match.files()',
1590 # so 'f not in ctx1' is not enough
1590 # so 'f not in ctx1' is not enough
1591 if f not in other and not other.hasdir(f):
1591 if f not in other and not other.hasdir(f):
1592 self._repo.ui.warn('%s: %s\n' %
1592 self._repo.ui.warn('%s: %s\n' %
1593 (self._repo.dirstate.pathto(f), msg))
1593 (self._repo.dirstate.pathto(f), msg))
1594 match.bad = bad
1594 match.bad = bad
1595 return match
1595 return match
1596
1596
1597 def markcommitted(self, node):
1597 def markcommitted(self, node):
1598 super(workingctx, self).markcommitted(node)
1598 super(workingctx, self).markcommitted(node)
1599
1599
1600 sparse.aftercommit(self._repo, node)
1600 sparse.aftercommit(self._repo, node)
1601
1601
1602 class committablefilectx(basefilectx):
1602 class committablefilectx(basefilectx):
1603 """A committablefilectx provides common functionality for a file context
1603 """A committablefilectx provides common functionality for a file context
1604 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1604 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1605 def __init__(self, repo, path, filelog=None, ctx=None):
1605 def __init__(self, repo, path, filelog=None, ctx=None):
1606 self._repo = repo
1606 self._repo = repo
1607 self._path = path
1607 self._path = path
1608 self._changeid = None
1608 self._changeid = None
1609 self._filerev = self._filenode = None
1609 self._filerev = self._filenode = None
1610
1610
1611 if filelog is not None:
1611 if filelog is not None:
1612 self._filelog = filelog
1612 self._filelog = filelog
1613 if ctx:
1613 if ctx:
1614 self._changectx = ctx
1614 self._changectx = ctx
1615
1615
1616 def __nonzero__(self):
1616 def __nonzero__(self):
1617 return True
1617 return True
1618
1618
1619 __bool__ = __nonzero__
1619 __bool__ = __nonzero__
1620
1620
1621 def linkrev(self):
1621 def linkrev(self):
1622 # linked to self._changectx no matter if file is modified or not
1622 # linked to self._changectx no matter if file is modified or not
1623 return self.rev()
1623 return self.rev()
1624
1624
1625 def parents(self):
1625 def parents(self):
1626 '''return parent filectxs, following copies if necessary'''
1626 '''return parent filectxs, following copies if necessary'''
1627 def filenode(ctx, path):
1627 def filenode(ctx, path):
1628 return ctx._manifest.get(path, nullid)
1628 return ctx._manifest.get(path, nullid)
1629
1629
1630 path = self._path
1630 path = self._path
1631 fl = self._filelog
1631 fl = self._filelog
1632 pcl = self._changectx._parents
1632 pcl = self._changectx._parents
1633 renamed = self.renamed()
1633 renamed = self.renamed()
1634
1634
1635 if renamed:
1635 if renamed:
1636 pl = [renamed + (None,)]
1636 pl = [renamed + (None,)]
1637 else:
1637 else:
1638 pl = [(path, filenode(pcl[0], path), fl)]
1638 pl = [(path, filenode(pcl[0], path), fl)]
1639
1639
1640 for pc in pcl[1:]:
1640 for pc in pcl[1:]:
1641 pl.append((path, filenode(pc, path), fl))
1641 pl.append((path, filenode(pc, path), fl))
1642
1642
1643 return [self._parentfilectx(p, fileid=n, filelog=l)
1643 return [self._parentfilectx(p, fileid=n, filelog=l)
1644 for p, n, l in pl if n != nullid]
1644 for p, n, l in pl if n != nullid]
1645
1645
1646 def children(self):
1646 def children(self):
1647 return []
1647 return []
1648
1648
1649 class workingfilectx(committablefilectx):
1649 class workingfilectx(committablefilectx):
1650 """A workingfilectx object makes access to data related to a particular
1650 """A workingfilectx object makes access to data related to a particular
1651 file in the working directory convenient."""
1651 file in the working directory convenient."""
1652 def __init__(self, repo, path, filelog=None, workingctx=None):
1652 def __init__(self, repo, path, filelog=None, workingctx=None):
1653 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1653 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1654
1654
1655 @propertycache
1655 @propertycache
1656 def _changectx(self):
1656 def _changectx(self):
1657 return workingctx(self._repo)
1657 return workingctx(self._repo)
1658
1658
1659 def data(self):
1659 def data(self):
1660 return self._repo.wread(self._path)
1660 return self._repo.wread(self._path)
1661 def renamed(self):
1661 def renamed(self):
1662 rp = self._repo.dirstate.copied(self._path)
1662 rp = self._repo.dirstate.copied(self._path)
1663 if not rp:
1663 if not rp:
1664 return None
1664 return None
1665 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1665 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1666
1666
1667 def size(self):
1667 def size(self):
1668 return self._repo.wvfs.lstat(self._path).st_size
1668 return self._repo.wvfs.lstat(self._path).st_size
1669 def date(self):
1669 def date(self):
1670 t, tz = self._changectx.date()
1670 t, tz = self._changectx.date()
1671 try:
1671 try:
1672 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1672 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1673 except OSError as err:
1673 except OSError as err:
1674 if err.errno != errno.ENOENT:
1674 if err.errno != errno.ENOENT:
1675 raise
1675 raise
1676 return (t, tz)
1676 return (t, tz)
1677
1677
1678 def exists(self):
1678 def exists(self):
1679 return self._repo.wvfs.exists(self._path)
1679 return self._repo.wvfs.exists(self._path)
1680
1680
1681 def lexists(self):
1681 def lexists(self):
1682 return self._repo.wvfs.lexists(self._path)
1682 return self._repo.wvfs.lexists(self._path)
1683
1683
1684 def audit(self):
1684 def audit(self):
1685 return self._repo.wvfs.audit(self._path)
1685 return self._repo.wvfs.audit(self._path)
1686
1686
1687 def cmp(self, fctx):
1687 def cmp(self, fctx):
1688 """compare with other file context
1688 """compare with other file context
1689
1689
1690 returns True if different than fctx.
1690 returns True if different than fctx.
1691 """
1691 """
1692 # fctx should be a filectx (not a workingfilectx)
1692 # fctx should be a filectx (not a workingfilectx)
1693 # invert comparison to reuse the same code path
1693 # invert comparison to reuse the same code path
1694 return fctx.cmp(self)
1694 return fctx.cmp(self)
1695
1695
1696 def remove(self, ignoremissing=False):
1696 def remove(self, ignoremissing=False):
1697 """wraps unlink for a repo's working directory"""
1697 """wraps unlink for a repo's working directory"""
1698 rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
1698 rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
1699 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
1699 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
1700 rmdir=rmdir)
1700 rmdir=rmdir)
1701
1701
1702 def write(self, data, flags, backgroundclose=False, **kwargs):
1702 def write(self, data, flags, backgroundclose=False, **kwargs):
1703 """wraps repo.wwrite"""
1703 """wraps repo.wwrite"""
1704 self._repo.wwrite(self._path, data, flags,
1704 self._repo.wwrite(self._path, data, flags,
1705 backgroundclose=backgroundclose,
1705 backgroundclose=backgroundclose,
1706 **kwargs)
1706 **kwargs)
1707
1707
1708 def markcopied(self, src):
1708 def markcopied(self, src):
1709 """marks this file a copy of `src`"""
1709 """marks this file a copy of `src`"""
1710 if self._repo.dirstate[self._path] in "nma":
1710 if self._repo.dirstate[self._path] in "nma":
1711 self._repo.dirstate.copy(src, self._path)
1711 self._repo.dirstate.copy(src, self._path)
1712
1712
1713 def clearunknown(self):
1713 def clearunknown(self):
1714 """Removes conflicting items in the working directory so that
1714 """Removes conflicting items in the working directory so that
1715 ``write()`` can be called successfully.
1715 ``write()`` can be called successfully.
1716 """
1716 """
1717 wvfs = self._repo.wvfs
1717 wvfs = self._repo.wvfs
1718 f = self._path
1718 f = self._path
1719 wvfs.audit(f)
1719 wvfs.audit(f)
1720 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1720 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1721 # remove files under the directory as they should already have been
1721 # remove files under the directory as they should already have been
1722 # warned about and backed up
1722 # warned about and backed up
1723 if wvfs.isdir(f) and not wvfs.islink(f):
1723 if wvfs.isdir(f) and not wvfs.islink(f):
1724 wvfs.rmtree(f, forcibly=True)
1724 wvfs.rmtree(f, forcibly=True)
1725 for p in reversed(list(util.finddirs(f))):
1725 for p in reversed(list(util.finddirs(f))):
1726 if wvfs.isfileorlink(p):
1726 if wvfs.isfileorlink(p):
1727 wvfs.unlink(p)
1727 wvfs.unlink(p)
1728 break
1728 break
1729 else:
1729 else:
1730 # don't remove files if path conflicts are not processed
1730 # don't remove files if path conflicts are not processed
1731 if wvfs.isdir(f) and not wvfs.islink(f):
1731 if wvfs.isdir(f) and not wvfs.islink(f):
1732 wvfs.removedirs(f)
1732 wvfs.removedirs(f)
1733
1733
1734 def setflags(self, l, x):
1734 def setflags(self, l, x):
1735 self._repo.wvfs.setflags(self._path, l, x)
1735 self._repo.wvfs.setflags(self._path, l, x)
1736
1736
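# Editorial sketch (not part of the original file): how the working-directory
# contexts defined above are typically used together; `repo` and the file name
# are assumed for illustration only.
#
#     wctx = repo[None]                 # workingctx for the working directory
#     fctx = wctx.filectx('README')     # workingfilectx for a single file
#     if fctx.lexists() and fctx.cmp(wctx.p1()['README']):
#         data = fctx.data()            # current on-disk contents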
1737 class overlayworkingctx(committablectx):
1737 class overlayworkingctx(committablectx):
1738 """Wraps another mutable context with a write-back cache that can be
1738 """Wraps another mutable context with a write-back cache that can be
1739 converted into a commit context.
1739 converted into a commit context.
1740
1740
1741 self._cache[path] maps to a dict with keys: {
1741 self._cache[path] maps to a dict with keys: {
1742 'exists': bool?
1742 'exists': bool?
1743 'date': date?
1743 'date': date?
1744 'data': str?
1744 'data': str?
1745 'flags': str?
1745 'flags': str?
1746 'copied': str? (path or None)
1746 'copied': str? (path or None)
1747 }
1747 }
1748 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1748 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1749 is `False`, the file was deleted.
1749 is `False`, the file was deleted.
1750 """
1750 """
1751
1751
1752 def __init__(self, repo):
1752 def __init__(self, repo):
1753 super(overlayworkingctx, self).__init__(repo)
1753 super(overlayworkingctx, self).__init__(repo)
1754 self.clean()
1754 self.clean()
1755
1755
1756 def setbase(self, wrappedctx):
1756 def setbase(self, wrappedctx):
1757 self._wrappedctx = wrappedctx
1757 self._wrappedctx = wrappedctx
1758 self._parents = [wrappedctx]
1758 self._parents = [wrappedctx]
1759 # Drop old manifest cache as it is now out of date.
1759 # Drop old manifest cache as it is now out of date.
1760 # This is necessary when, e.g., rebasing several nodes with one
1760 # This is necessary when, e.g., rebasing several nodes with one
1761 # ``overlayworkingctx`` (e.g. with --collapse).
1761 # ``overlayworkingctx`` (e.g. with --collapse).
1762 util.clearcachedproperty(self, '_manifest')
1762 util.clearcachedproperty(self, '_manifest')
1763
1763
1764 def data(self, path):
1764 def data(self, path):
1765 if self.isdirty(path):
1765 if self.isdirty(path):
1766 if self._cache[path]['exists']:
1766 if self._cache[path]['exists']:
1767 if self._cache[path]['data']:
1767 if self._cache[path]['data']:
1768 return self._cache[path]['data']
1768 return self._cache[path]['data']
1769 else:
1769 else:
1770 # Must fallback here, too, because we only set flags.
1770 # Must fallback here, too, because we only set flags.
1771 return self._wrappedctx[path].data()
1771 return self._wrappedctx[path].data()
1772 else:
1772 else:
1773 raise error.ProgrammingError("No such file or directory: %s" %
1773 raise error.ProgrammingError("No such file or directory: %s" %
1774 path)
1774 path)
1775 else:
1775 else:
1776 return self._wrappedctx[path].data()
1776 return self._wrappedctx[path].data()
1777
1777
1778 @propertycache
1778 @propertycache
1779 def _manifest(self):
1779 def _manifest(self):
1780 parents = self.parents()
1780 parents = self.parents()
1781 man = parents[0].manifest().copy()
1781 man = parents[0].manifest().copy()
1782
1782
1783 flag = self._flagfunc
1783 flag = self._flagfunc
1784 for path in self.added():
1784 for path in self.added():
1785 man[path] = addednodeid
1785 man[path] = addednodeid
1786 man.setflag(path, flag(path))
1786 man.setflag(path, flag(path))
1787 for path in self.modified():
1787 for path in self.modified():
1788 man[path] = modifiednodeid
1788 man[path] = modifiednodeid
1789 man.setflag(path, flag(path))
1789 man.setflag(path, flag(path))
1790 for path in self.removed():
1790 for path in self.removed():
1791 del man[path]
1791 del man[path]
1792 return man
1792 return man
1793
1793
1794 @propertycache
1794 @propertycache
1795 def _flagfunc(self):
1795 def _flagfunc(self):
1796 def f(path):
1796 def f(path):
1797 return self._cache[path]['flags']
1797 return self._cache[path]['flags']
1798 return f
1798 return f
1799
1799
1800 def files(self):
1800 def files(self):
1801 return sorted(self.added() + self.modified() + self.removed())
1801 return sorted(self.added() + self.modified() + self.removed())
1802
1802
1803 def modified(self):
1803 def modified(self):
1804 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1804 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1805 self._existsinparent(f)]
1805 self._existsinparent(f)]
1806
1806
1807 def added(self):
1807 def added(self):
1808 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1808 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1809 not self._existsinparent(f)]
1809 not self._existsinparent(f)]
1810
1810
1811 def removed(self):
1811 def removed(self):
1812 return [f for f in self._cache.keys() if
1812 return [f for f in self._cache.keys() if
1813 not self._cache[f]['exists'] and self._existsinparent(f)]
1813 not self._cache[f]['exists'] and self._existsinparent(f)]
1814
1814
1815 def isinmemory(self):
1815 def isinmemory(self):
1816 return True
1816 return True
1817
1817
1818 def filedate(self, path):
1818 def filedate(self, path):
1819 if self.isdirty(path):
1819 if self.isdirty(path):
1820 return self._cache[path]['date']
1820 return self._cache[path]['date']
1821 else:
1821 else:
1822 return self._wrappedctx[path].date()
1822 return self._wrappedctx[path].date()
1823
1823
1824 def markcopied(self, path, origin):
1824 def markcopied(self, path, origin):
1825 if self.isdirty(path):
1825 if self.isdirty(path):
1826 self._cache[path]['copied'] = origin
1826 self._cache[path]['copied'] = origin
1827 else:
1827 else:
1828 raise error.ProgrammingError('markcopied() called on clean context')
1828 raise error.ProgrammingError('markcopied() called on clean context')
1829
1829
1830 def copydata(self, path):
1830 def copydata(self, path):
1831 if self.isdirty(path):
1831 if self.isdirty(path):
1832 return self._cache[path]['copied']
1832 return self._cache[path]['copied']
1833 else:
1833 else:
1834 raise error.ProgrammingError('copydata() called on clean context')
1834 raise error.ProgrammingError('copydata() called on clean context')
1835
1835
1836 def flags(self, path):
1836 def flags(self, path):
1837 if self.isdirty(path):
1837 if self.isdirty(path):
1838 if self._cache[path]['exists']:
1838 if self._cache[path]['exists']:
1839 return self._cache[path]['flags']
1839 return self._cache[path]['flags']
1840 else:
1840 else:
1841 raise error.ProgrammingError("No such file or directory: %s" %
1841 raise error.ProgrammingError("No such file or directory: %s" %
1842 self._path)
1842 self._path)
1843 else:
1843 else:
1844 return self._wrappedctx[path].flags()
1844 return self._wrappedctx[path].flags()
1845
1845
1846 def __contains__(self, key):
1847 if key in self._cache:
1848 return self._cache[key]['exists']
1849 return key in self.p1()
1850
1846 def _existsinparent(self, path):
1851 def _existsinparent(self, path):
1847 try:
1852 try:
1848 # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
1853 # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
1849 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1854 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1850 # with an ``exists()`` function.
1855 # with an ``exists()`` function.
1851 self._wrappedctx[path]
1856 self._wrappedctx[path]
1852 return True
1857 return True
1853 except error.ManifestLookupError:
1858 except error.ManifestLookupError:
1854 return False
1859 return False
1855
1860
1856 def _auditconflicts(self, path):
1861 def _auditconflicts(self, path):
1857 """Replicates conflict checks done by wvfs.write().
1862 """Replicates conflict checks done by wvfs.write().
1858
1863
1859 Since we never write to the filesystem and never call `applyupdates` in
1864 Since we never write to the filesystem and never call `applyupdates` in
1860 IMM, we'll never check that a path is actually writable -- e.g., because
1865 IMM, we'll never check that a path is actually writable -- e.g., because
1861 it adds `a/foo`, but `a` is actually a file in the other commit.
1866 it adds `a/foo`, but `a` is actually a file in the other commit.
1862 """
1867 """
1863 def fail(path, component):
1868 def fail(path, component):
1864 # p1() is the base and we're receiving "writes" for p2()'s
1869 # p1() is the base and we're receiving "writes" for p2()'s
1865 # files.
1870 # files.
1866 if 'l' in self.p1()[component].flags():
1871 if 'l' in self.p1()[component].flags():
1867 raise error.Abort("error: %s conflicts with symlink %s "
1872 raise error.Abort("error: %s conflicts with symlink %s "
1868 "in %s." % (path, component,
1873 "in %s." % (path, component,
1869 self.p1().rev()))
1874 self.p1().rev()))
1870 else:
1875 else:
1871 raise error.Abort("error: '%s' conflicts with file '%s' in "
1876 raise error.Abort("error: '%s' conflicts with file '%s' in "
1872 "%s." % (path, component,
1877 "%s." % (path, component,
1873 self.p1().rev()))
1878 self.p1().rev()))
1874
1879
1875 # Test that each new directory to be created to write this path from p2
1880 # Test that each new directory to be created to write this path from p2
1876 # is not a file in p1.
1881 # is not a file in p1.
1877 components = path.split('/')
1882 components = path.split('/')
1878 for i in pycompat.xrange(len(components)):
1883 for i in pycompat.xrange(len(components)):
1879 component = "/".join(components[0:i])
1884 component = "/".join(components[0:i])
1880 if component in self.p1() and self._cache[component]['exists']:
1885 if component in self:
1881 fail(path, component)
1886 fail(path, component)
1882
1887
1883 # Test the other direction -- that this path from p2 isn't a directory
1888 # Test the other direction -- that this path from p2 isn't a directory
1884 # in p1 (test that p1 doesn't any paths matching `path/*`).
1889 # in p1 (test that p1 doesn't have any paths matching `path/*`).
1885 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
1890 match = self.match(pats=[path + '/'], default=b'path')
1886 matches = self.p1().manifest().matches(match)
1891 matches = self.p1().manifest().matches(match)
1887 mfiles = matches.keys()
1892 mfiles = matches.keys()
1888 if len(mfiles) > 0:
1893 if len(mfiles) > 0:
1889 if len(mfiles) == 1 and mfiles[0] == path:
1894 if len(mfiles) == 1 and mfiles[0] == path:
1890 return
1895 return
1891 # omit the files which are deleted in current IMM wctx
1896 # omit the files which are deleted in current IMM wctx
1892 mfiles = [m for m in mfiles if self._cache[m]['exists']]
1897 mfiles = [m for m in mfiles if m in self]
1893 if not mfiles:
1898 if not mfiles:
1894 return
1899 return
1895 raise error.Abort("error: file '%s' cannot be written because "
1900 raise error.Abort("error: file '%s' cannot be written because "
1896 " '%s/' is a folder in %s (containing %d "
1901 " '%s/' is a folder in %s (containing %d "
1897 "entries: %s)"
1902 "entries: %s)"
1898 % (path, path, self.p1(), len(mfiles),
1903 % (path, path, self.p1(), len(mfiles),
1899 ', '.join(mfiles)))
1904 ', '.join(mfiles)))
1900
1905
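    # Editorial example (assumed paths, not part of the original file): if p1
    # tracks a file 'a', writing 'a/foo' through this context aborts because
    # 'a' would have to become a directory; conversely, writing 'a' when p1
    # tracks 'a/foo' aborts unless every 'a/*' entry has already been deleted
    # in this in-memory context.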
1901 def write(self, path, data, flags='', **kwargs):
1906 def write(self, path, data, flags='', **kwargs):
1902 if data is None:
1907 if data is None:
1903 raise error.ProgrammingError("data must be non-None")
1908 raise error.ProgrammingError("data must be non-None")
1904 self._auditconflicts(path)
1909 self._auditconflicts(path)
1905 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1910 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1906 flags=flags)
1911 flags=flags)
1907
1912
1908 def setflags(self, path, l, x):
1913 def setflags(self, path, l, x):
1909 flag = ''
1914 flag = ''
1910 if l:
1915 if l:
1911 flag = 'l'
1916 flag = 'l'
1912 elif x:
1917 elif x:
1913 flag = 'x'
1918 flag = 'x'
1914 self._markdirty(path, exists=True, date=dateutil.makedate(),
1919 self._markdirty(path, exists=True, date=dateutil.makedate(),
1915 flags=flag)
1920 flags=flag)
1916
1921
1917 def remove(self, path):
1922 def remove(self, path):
1918 self._markdirty(path, exists=False)
1923 self._markdirty(path, exists=False)
1919
1924
1920 def exists(self, path):
1925 def exists(self, path):
1921 """exists behaves like `lexists`, but needs to follow symlinks and
1926 """exists behaves like `lexists`, but needs to follow symlinks and
1922 return False if they are broken.
1927 return False if they are broken.
1923 """
1928 """
1924 if self.isdirty(path):
1929 if self.isdirty(path):
1925 # If this path exists and is a symlink, "follow" it by calling
1930 # If this path exists and is a symlink, "follow" it by calling
1926 # exists on the destination path.
1931 # exists on the destination path.
1927 if (self._cache[path]['exists'] and
1932 if (self._cache[path]['exists'] and
1928 'l' in self._cache[path]['flags']):
1933 'l' in self._cache[path]['flags']):
1929 return self.exists(self._cache[path]['data'].strip())
1934 return self.exists(self._cache[path]['data'].strip())
1930 else:
1935 else:
1931 return self._cache[path]['exists']
1936 return self._cache[path]['exists']
1932
1937
1933 return self._existsinparent(path)
1938 return self._existsinparent(path)
1934
1939
1935 def lexists(self, path):
1940 def lexists(self, path):
1936 """lexists returns True if the path exists"""
1941 """lexists returns True if the path exists"""
1937 if self.isdirty(path):
1942 if self.isdirty(path):
1938 return self._cache[path]['exists']
1943 return self._cache[path]['exists']
1939
1944
1940 return self._existsinparent(path)
1945 return self._existsinparent(path)
1941
1946
1942 def size(self, path):
1947 def size(self, path):
1943 if self.isdirty(path):
1948 if self.isdirty(path):
1944 if self._cache[path]['exists']:
1949 if self._cache[path]['exists']:
1945 return len(self._cache[path]['data'])
1950 return len(self._cache[path]['data'])
1946 else:
1951 else:
1947 raise error.ProgrammingError("No such file or directory: %s" %
1952 raise error.ProgrammingError("No such file or directory: %s" %
1948 self._path)
1953 self._path)
1949 return self._wrappedctx[path].size()
1954 return self._wrappedctx[path].size()
1950
1955
1951 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
1956 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
1952 user=None, editor=None):
1957 user=None, editor=None):
1953 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
1958 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
1954 committed.
1959 committed.
1955
1960
1956 ``text`` is the commit message.
1961 ``text`` is the commit message.
1957 ``parents`` (optional) are rev numbers.
1962 ``parents`` (optional) are rev numbers.
1958 """
1963 """
1959 # Default parents to the wrapped context's if not passed.
1964 # Default parents to the wrapped context's if not passed.
1960 if parents is None:
1965 if parents is None:
1961 parents = self._wrappedctx.parents()
1966 parents = self._wrappedctx.parents()
1962 if len(parents) == 1:
1967 if len(parents) == 1:
1963 parents = (parents[0], None)
1968 parents = (parents[0], None)
1964
1969
1965 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
1970 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
1966 if parents[1] is None:
1971 if parents[1] is None:
1967 parents = (self._repo[parents[0]], None)
1972 parents = (self._repo[parents[0]], None)
1968 else:
1973 else:
1969 parents = (self._repo[parents[0]], self._repo[parents[1]])
1974 parents = (self._repo[parents[0]], self._repo[parents[1]])
1970
1975
1971 files = self._cache.keys()
1976 files = self._cache.keys()
1972 def getfile(repo, memctx, path):
1977 def getfile(repo, memctx, path):
1973 if self._cache[path]['exists']:
1978 if self._cache[path]['exists']:
1974 return memfilectx(repo, memctx, path,
1979 return memfilectx(repo, memctx, path,
1975 self._cache[path]['data'],
1980 self._cache[path]['data'],
1976 'l' in self._cache[path]['flags'],
1981 'l' in self._cache[path]['flags'],
1977 'x' in self._cache[path]['flags'],
1982 'x' in self._cache[path]['flags'],
1978 self._cache[path]['copied'])
1983 self._cache[path]['copied'])
1979 else:
1984 else:
1980 # Returning None, but including the path in `files`, is
1985 # Returning None, but including the path in `files`, is
1981 # necessary for memctx to register a deletion.
1986 # necessary for memctx to register a deletion.
1982 return None
1987 return None
1983 return memctx(self._repo, parents, text, files, getfile, date=date,
1988 return memctx(self._repo, parents, text, files, getfile, date=date,
1984 extra=extra, user=user, branch=branch, editor=editor)
1989 extra=extra, user=user, branch=branch, editor=editor)
1985
1990
1986 def isdirty(self, path):
1991 def isdirty(self, path):
1987 return path in self._cache
1992 return path in self._cache
1988
1993
1989 def isempty(self):
1994 def isempty(self):
1990 # We need to discard any keys that are actually clean before the empty
1995 # We need to discard any keys that are actually clean before the empty
1991 # commit check.
1996 # commit check.
1992 self._compact()
1997 self._compact()
1993 return len(self._cache) == 0
1998 return len(self._cache) == 0
1994
1999
1995 def clean(self):
2000 def clean(self):
1996 self._cache = {}
2001 self._cache = {}
1997
2002
1998 def _compact(self):
2003 def _compact(self):
1999 """Removes keys from the cache that are actually clean, by comparing
2004 """Removes keys from the cache that are actually clean, by comparing
2000 them with the underlying context.
2005 them with the underlying context.
2001
2006
2002 This can occur during the merge process, e.g. by passing --tool :local
2007 This can occur during the merge process, e.g. by passing --tool :local
2003 to resolve a conflict.
2008 to resolve a conflict.
2004 """
2009 """
2005 keys = []
2010 keys = []
2006 for path in self._cache.keys():
2011 for path in self._cache.keys():
2007 cache = self._cache[path]
2012 cache = self._cache[path]
2008 try:
2013 try:
2009 underlying = self._wrappedctx[path]
2014 underlying = self._wrappedctx[path]
2010 if (underlying.data() == cache['data'] and
2015 if (underlying.data() == cache['data'] and
2011 underlying.flags() == cache['flags']):
2016 underlying.flags() == cache['flags']):
2012 keys.append(path)
2017 keys.append(path)
2013 except error.ManifestLookupError:
2018 except error.ManifestLookupError:
2014 # Path not in the underlying manifest (created).
2019 # Path not in the underlying manifest (created).
2015 continue
2020 continue
2016
2021
2017 for path in keys:
2022 for path in keys:
2018 del self._cache[path]
2023 del self._cache[path]
2019 return keys
2024 return keys
2020
2025
2021 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2026 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2022 # data not provided, let's see if we already have some; if not, let's
2027 # data not provided, let's see if we already have some; if not, let's
2023 # grab it from our underlying context, so that we always have data if
2028 # grab it from our underlying context, so that we always have data if
2024 # the file is marked as existing.
2029 # the file is marked as existing.
2025 if exists and data is None:
2030 if exists and data is None:
2026 oldentry = self._cache.get(path) or {}
2031 oldentry = self._cache.get(path) or {}
2027 data = oldentry.get('data') or self._wrappedctx[path].data()
2032 data = oldentry.get('data') or self._wrappedctx[path].data()
2028
2033
2029 self._cache[path] = {
2034 self._cache[path] = {
2030 'exists': exists,
2035 'exists': exists,
2031 'data': data,
2036 'data': data,
2032 'date': date,
2037 'date': date,
2033 'flags': flags,
2038 'flags': flags,
2034 'copied': None,
2039 'copied': None,
2035 }
2040 }
2036
2041
2037 def filectx(self, path, filelog=None):
2042 def filectx(self, path, filelog=None):
2038 return overlayworkingfilectx(self._repo, path, parent=self,
2043 return overlayworkingfilectx(self._repo, path, parent=self,
2039 filelog=filelog)
2044 filelog=filelog)
2040
2045
2041 class overlayworkingfilectx(committablefilectx):
2046 class overlayworkingfilectx(committablefilectx):
2042 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2047 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2043 cache, which can be flushed through later by calling ``flush()``."""
2048 cache, which can be flushed through later by calling ``flush()``."""
2044
2049
2045 def __init__(self, repo, path, filelog=None, parent=None):
2050 def __init__(self, repo, path, filelog=None, parent=None):
2046 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2051 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2047 parent)
2052 parent)
2048 self._repo = repo
2053 self._repo = repo
2049 self._parent = parent
2054 self._parent = parent
2050 self._path = path
2055 self._path = path
2051
2056
2052 def cmp(self, fctx):
2057 def cmp(self, fctx):
2053 return self.data() != fctx.data()
2058 return self.data() != fctx.data()
2054
2059
2055 def changectx(self):
2060 def changectx(self):
2056 return self._parent
2061 return self._parent
2057
2062
2058 def data(self):
2063 def data(self):
2059 return self._parent.data(self._path)
2064 return self._parent.data(self._path)
2060
2065
2061 def date(self):
2066 def date(self):
2062 return self._parent.filedate(self._path)
2067 return self._parent.filedate(self._path)
2063
2068
2064 def exists(self):
2069 def exists(self):
2065 return self.lexists()
2070 return self.lexists()
2066
2071
2067 def lexists(self):
2072 def lexists(self):
2068 return self._parent.exists(self._path)
2073 return self._parent.exists(self._path)
2069
2074
2070 def renamed(self):
2075 def renamed(self):
2071 path = self._parent.copydata(self._path)
2076 path = self._parent.copydata(self._path)
2072 if not path:
2077 if not path:
2073 return None
2078 return None
2074 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2079 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2075
2080
2076 def size(self):
2081 def size(self):
2077 return self._parent.size(self._path)
2082 return self._parent.size(self._path)
2078
2083
2079 def markcopied(self, origin):
2084 def markcopied(self, origin):
2080 self._parent.markcopied(self._path, origin)
2085 self._parent.markcopied(self._path, origin)
2081
2086
2082 def audit(self):
2087 def audit(self):
2083 pass
2088 pass
2084
2089
2085 def flags(self):
2090 def flags(self):
2086 return self._parent.flags(self._path)
2091 return self._parent.flags(self._path)
2087
2092
2088 def setflags(self, islink, isexec):
2093 def setflags(self, islink, isexec):
2089 return self._parent.setflags(self._path, islink, isexec)
2094 return self._parent.setflags(self._path, islink, isexec)
2090
2095
2091 def write(self, data, flags, backgroundclose=False, **kwargs):
2096 def write(self, data, flags, backgroundclose=False, **kwargs):
2092 return self._parent.write(self._path, data, flags, **kwargs)
2097 return self._parent.write(self._path, data, flags, **kwargs)
2093
2098
2094 def remove(self, ignoremissing=False):
2099 def remove(self, ignoremissing=False):
2095 return self._parent.remove(self._path)
2100 return self._parent.remove(self._path)
2096
2101
2097 def clearunknown(self):
2102 def clearunknown(self):
2098 pass
2103 pass
2099
2104
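# Editorial sketch (not part of the original file): the buffered, in-memory
# flow the two overlay classes above support, roughly as used by in-memory
# rebase; `repo` and all paths/messages are assumed for illustration.
#
#     wctx = overlayworkingctx(repo)
#     wctx.setbase(repo['.'])                  # wrap the current parent
#     wctx.write('a/file.txt', b'contents\n')  # buffered, never touches disk
#     wctx.remove('old.txt')
#     if not wctx.isempty():
#         mctx = wctx.tomemctx(b'commit message')
#         node = repo.commitctx(mctx)          # persist via localrepo.commitctx()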
2100 class workingcommitctx(workingctx):
2105 class workingcommitctx(workingctx):
2101 """A workingcommitctx object makes access to data related to
2106 """A workingcommitctx object makes access to data related to
2102 the revision being committed convenient.
2107 the revision being committed convenient.
2103
2108
2104 This hides changes in the working directory, if they aren't
2109 This hides changes in the working directory, if they aren't
2105 committed in this context.
2110 committed in this context.
2106 """
2111 """
2107 def __init__(self, repo, changes,
2112 def __init__(self, repo, changes,
2108 text="", user=None, date=None, extra=None):
2113 text="", user=None, date=None, extra=None):
2109 super(workingcommitctx, self).__init__(repo, text, user, date, extra,
2114 super(workingcommitctx, self).__init__(repo, text, user, date, extra,
2110 changes)
2115 changes)
2111
2116
2112 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2117 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2113 """Return matched files only in ``self._status``
2118 """Return matched files only in ``self._status``
2114
2119
2115 Uncommitted files appear "clean" via this context, even if
2120 Uncommitted files appear "clean" via this context, even if
2116 they aren't actually so in the working directory.
2121 they aren't actually so in the working directory.
2117 """
2122 """
2118 if clean:
2123 if clean:
2119 clean = [f for f in self._manifest if f not in self._changedset]
2124 clean = [f for f in self._manifest if f not in self._changedset]
2120 else:
2125 else:
2121 clean = []
2126 clean = []
2122 return scmutil.status([f for f in self._status.modified if match(f)],
2127 return scmutil.status([f for f in self._status.modified if match(f)],
2123 [f for f in self._status.added if match(f)],
2128 [f for f in self._status.added if match(f)],
2124 [f for f in self._status.removed if match(f)],
2129 [f for f in self._status.removed if match(f)],
2125 [], [], [], clean)
2130 [], [], [], clean)
2126
2131
2127 @propertycache
2132 @propertycache
2128 def _changedset(self):
2133 def _changedset(self):
2129 """Return the set of files changed in this context
2134 """Return the set of files changed in this context
2130 """
2135 """
2131 changed = set(self._status.modified)
2136 changed = set(self._status.modified)
2132 changed.update(self._status.added)
2137 changed.update(self._status.added)
2133 changed.update(self._status.removed)
2138 changed.update(self._status.removed)
2134 return changed
2139 return changed
2135
2140
2136 def makecachingfilectxfn(func):
2141 def makecachingfilectxfn(func):
2137 """Create a filectxfn that caches based on the path.
2142 """Create a filectxfn that caches based on the path.
2138
2143
2139 We can't use util.cachefunc because it uses all arguments as the cache
2144 We can't use util.cachefunc because it uses all arguments as the cache
2140 key and this creates a cycle since the arguments include the repo and
2145 key and this creates a cycle since the arguments include the repo and
2141 memctx.
2146 memctx.
2142 """
2147 """
2143 cache = {}
2148 cache = {}
2144
2149
2145 def getfilectx(repo, memctx, path):
2150 def getfilectx(repo, memctx, path):
2146 if path not in cache:
2151 if path not in cache:
2147 cache[path] = func(repo, memctx, path)
2152 cache[path] = func(repo, memctx, path)
2148 return cache[path]
2153 return cache[path]
2149
2154
2150 return getfilectx
2155 return getfilectx
2151
2156
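# Editorial usage note (not part of the original file): any filectxfn with the
# (repo, memctx, path) signature can be wrapped this way, e.g. combined with
# the helpers defined below:
#
#     getfilectx = makecachingfilectxfn(memfilefromctx(ctx))
#
# so repeated lookups of the same path during a single commit reuse the first
# result instead of re-reading it from `ctx`.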
2152 def memfilefromctx(ctx):
2157 def memfilefromctx(ctx):
2153 """Given a context return a memfilectx for ctx[path]
2158 """Given a context return a memfilectx for ctx[path]
2154
2159
2155 This is a convenience method for building a memctx based on another
2160 This is a convenience method for building a memctx based on another
2156 context.
2161 context.
2157 """
2162 """
2158 def getfilectx(repo, memctx, path):
2163 def getfilectx(repo, memctx, path):
2159 fctx = ctx[path]
2164 fctx = ctx[path]
2160 # this is weird but apparently we only keep track of one parent
2165 # this is weird but apparently we only keep track of one parent
2161 # (why not only store that instead of a tuple?)
2166 # (why not only store that instead of a tuple?)
2162 copied = fctx.renamed()
2167 copied = fctx.renamed()
2163 if copied:
2168 if copied:
2164 copied = copied[0]
2169 copied = copied[0]
2165 return memfilectx(repo, memctx, path, fctx.data(),
2170 return memfilectx(repo, memctx, path, fctx.data(),
2166 islink=fctx.islink(), isexec=fctx.isexec(),
2171 islink=fctx.islink(), isexec=fctx.isexec(),
2167 copied=copied)
2172 copied=copied)
2168
2173
2169 return getfilectx
2174 return getfilectx
2170
2175
2171 def memfilefrompatch(patchstore):
2176 def memfilefrompatch(patchstore):
2172 """Given a patch (e.g. patchstore object) return a memfilectx
2177 """Given a patch (e.g. patchstore object) return a memfilectx
2173
2178
2174 This is a convenience method for building a memctx based on a patchstore.
2179 This is a convenience method for building a memctx based on a patchstore.
2175 """
2180 """
2176 def getfilectx(repo, memctx, path):
2181 def getfilectx(repo, memctx, path):
2177 data, mode, copied = patchstore.getfile(path)
2182 data, mode, copied = patchstore.getfile(path)
2178 if data is None:
2183 if data is None:
2179 return None
2184 return None
2180 islink, isexec = mode
2185 islink, isexec = mode
2181 return memfilectx(repo, memctx, path, data, islink=islink,
2186 return memfilectx(repo, memctx, path, data, islink=islink,
2182 isexec=isexec, copied=copied)
2187 isexec=isexec, copied=copied)
2183
2188
2184 return getfilectx
2189 return getfilectx
2185
2190
2186 class memctx(committablectx):
2191 class memctx(committablectx):
2187 """Use memctx to perform in-memory commits via localrepo.commitctx().
2192 """Use memctx to perform in-memory commits via localrepo.commitctx().
2188
2193
2189 Revision information is supplied at initialization time, while
2194 Revision information is supplied at initialization time, while
2190 related file data is made available through a callback
2195 related file data is made available through a callback
2191 mechanism. 'repo' is the current localrepo, 'parents' is a
2196 mechanism. 'repo' is the current localrepo, 'parents' is a
2192 sequence of two parent revision identifiers (pass None for every
2197 sequence of two parent revision identifiers (pass None for every
2193 missing parent), 'text' is the commit message and 'files' lists
2198 missing parent), 'text' is the commit message and 'files' lists
2194 names of files touched by the revision (normalized and relative to
2199 names of files touched by the revision (normalized and relative to
2195 repository root).
2200 repository root).
2196
2201
2197 filectxfn(repo, memctx, path) is a callable receiving the
2202 filectxfn(repo, memctx, path) is a callable receiving the
2198 repository, the current memctx object and the normalized path of
2203 repository, the current memctx object and the normalized path of
2199 the requested file, relative to the repository root. It is fired by the
2204 the requested file, relative to the repository root. It is fired by the
2200 commit function for every file in 'files', but the call order is
2205 commit function for every file in 'files', but the call order is
2201 undefined. If the file is available in the revision being
2206 undefined. If the file is available in the revision being
2202 committed (updated or added), filectxfn returns a memfilectx
2207 committed (updated or added), filectxfn returns a memfilectx
2203 object. If the file was removed, filectxfn returns None for recent
2208 object. If the file was removed, filectxfn returns None for recent
2204 versions of Mercurial. Moved files are represented by marking the source file
2209 versions of Mercurial. Moved files are represented by marking the source file
2205 removed and the new file added with copy information (see
2210 removed and the new file added with copy information (see
2206 memfilectx).
2211 memfilectx).
2207
2212
2208 user receives the committer name and defaults to the current
2213 user receives the committer name and defaults to the current
2209 repository username; date is the commit date in any format
2214 repository username; date is the commit date in any format
2210 supported by dateutil.parsedate() and defaults to the current date; extra
2215 supported by dateutil.parsedate() and defaults to the current date; extra
2211 is a dictionary of metadata and defaults to empty.
2216 is a dictionary of metadata and defaults to empty.
2212 """
2217 """
2213
2218
2214 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2219 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2215 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2220 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2216 # this field to determine what to do in filectxfn.
2221 # this field to determine what to do in filectxfn.
2217 _returnnoneformissingfiles = True
2222 _returnnoneformissingfiles = True
2218
2223
2219 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2224 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2220 date=None, extra=None, branch=None, editor=False):
2225 date=None, extra=None, branch=None, editor=False):
2221 super(memctx, self).__init__(repo, text, user, date, extra)
2226 super(memctx, self).__init__(repo, text, user, date, extra)
2222 self._rev = None
2227 self._rev = None
2223 self._node = None
2228 self._node = None
2224 parents = [(p or nullid) for p in parents]
2229 parents = [(p or nullid) for p in parents]
2225 p1, p2 = parents
2230 p1, p2 = parents
2226 self._parents = [self._repo[p] for p in (p1, p2)]
2231 self._parents = [self._repo[p] for p in (p1, p2)]
2227 files = sorted(set(files))
2232 files = sorted(set(files))
2228 self._files = files
2233 self._files = files
2229 if branch is not None:
2234 if branch is not None:
2230 self._extra['branch'] = encoding.fromlocal(branch)
2235 self._extra['branch'] = encoding.fromlocal(branch)
2231 self.substate = {}
2236 self.substate = {}
2232
2237
2233 if isinstance(filectxfn, patch.filestore):
2238 if isinstance(filectxfn, patch.filestore):
2234 filectxfn = memfilefrompatch(filectxfn)
2239 filectxfn = memfilefrompatch(filectxfn)
2235 elif not callable(filectxfn):
2240 elif not callable(filectxfn):
2236 # if store is not callable, wrap it in a function
2241 # if store is not callable, wrap it in a function
2237 filectxfn = memfilefromctx(filectxfn)
2242 filectxfn = memfilefromctx(filectxfn)
2238
2243
2239 # memoizing increases performance for e.g. vcs convert scenarios.
2244 # memoizing increases performance for e.g. vcs convert scenarios.
2240 self._filectxfn = makecachingfilectxfn(filectxfn)
2245 self._filectxfn = makecachingfilectxfn(filectxfn)
2241
2246
2242 if editor:
2247 if editor:
2243 self._text = editor(self._repo, self, [])
2248 self._text = editor(self._repo, self, [])
2244 self._repo.savecommitmessage(self._text)
2249 self._repo.savecommitmessage(self._text)
2245
2250
2246 def filectx(self, path, filelog=None):
2251 def filectx(self, path, filelog=None):
2247 """get a file context from the working directory
2252 """get a file context from the working directory
2248
2253
2249 Returns None if the file doesn't exist and should be removed."""
2254 Returns None if the file doesn't exist and should be removed."""
2250 return self._filectxfn(self._repo, self, path)
2255 return self._filectxfn(self._repo, self, path)
2251
2256
2252 def commit(self):
2257 def commit(self):
2253 """commit context to the repo"""
2258 """commit context to the repo"""
2254 return self._repo.commitctx(self)
2259 return self._repo.commitctx(self)
2255
2260
2256 @propertycache
2261 @propertycache
2257 def _manifest(self):
2262 def _manifest(self):
2258 """generate a manifest based on the return values of filectxfn"""
2263 """generate a manifest based on the return values of filectxfn"""
2259
2264
2260 # keep this simple for now; just worry about p1
2265 # keep this simple for now; just worry about p1
2261 pctx = self._parents[0]
2266 pctx = self._parents[0]
2262 man = pctx.manifest().copy()
2267 man = pctx.manifest().copy()
2263
2268
2264 for f in self._status.modified:
2269 for f in self._status.modified:
2265 man[f] = modifiednodeid
2270 man[f] = modifiednodeid
2266
2271
2267 for f in self._status.added:
2272 for f in self._status.added:
2268 man[f] = addednodeid
2273 man[f] = addednodeid
2269
2274
2270 for f in self._status.removed:
2275 for f in self._status.removed:
2271 if f in man:
2276 if f in man:
2272 del man[f]
2277 del man[f]
2273
2278
2274 return man
2279 return man
2275
2280
2276 @propertycache
2281 @propertycache
2277 def _status(self):
2282 def _status(self):
2278 """Calculate exact status from ``files`` specified at construction
2283 """Calculate exact status from ``files`` specified at construction
2279 """
2284 """
2280 man1 = self.p1().manifest()
2285 man1 = self.p1().manifest()
2281 p2 = self._parents[1]
2286 p2 = self._parents[1]
2282 # "1 < len(self._parents)" can't be used for checking
2287 # "1 < len(self._parents)" can't be used for checking
2283 # existence of the 2nd parent, because "memctx._parents" is
2288 # existence of the 2nd parent, because "memctx._parents" is
2284 # explicitly initialized with a list of length 2.
2289 # explicitly initialized with a list of length 2.
2285 if p2.node() != nullid:
2290 if p2.node() != nullid:
2286 man2 = p2.manifest()
2291 man2 = p2.manifest()
2287 managing = lambda f: f in man1 or f in man2
2292 managing = lambda f: f in man1 or f in man2
2288 else:
2293 else:
2289 managing = lambda f: f in man1
2294 managing = lambda f: f in man1
2290
2295
2291 modified, added, removed = [], [], []
2296 modified, added, removed = [], [], []
2292 for f in self._files:
2297 for f in self._files:
2293 if not managing(f):
2298 if not managing(f):
2294 added.append(f)
2299 added.append(f)
2295 elif self[f]:
2300 elif self[f]:
2296 modified.append(f)
2301 modified.append(f)
2297 else:
2302 else:
2298 removed.append(f)
2303 removed.append(f)
2299
2304
2300 return scmutil.status(modified, added, removed, [], [], [], [])
2305 return scmutil.status(modified, added, removed, [], [], [], [])
2301
2306
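# Illustrative sketch (not part of upstream context.py): one way an extension
# might drive memctx to create an in-memory commit. Assumes `repo` is an
# existing localrepo and that b'foo' exists in the working directory parent;
# the helper name and file contents are hypothetical.
def _examplememcommit(repo):
    def filectxfn(repo, memctx, path):
        if path == b'foo':
            return memfilectx(repo, memctx, path, b'new contents\n')
        return None  # every other requested path is treated as removed
    mctx = memctx(repo, parents=(repo[b'.'].node(), None),
                  text=b'rewrite foo in memory', files=[b'foo'],
                  filectxfn=filectxfn)
    return mctx.commit()  # returns the node of the new changeset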
2302 class memfilectx(committablefilectx):
2307 class memfilectx(committablefilectx):
2303 """memfilectx represents an in-memory file to commit.
2308 """memfilectx represents an in-memory file to commit.
2304
2309
2305 See memctx and committablefilectx for more details.
2310 See memctx and committablefilectx for more details.
2306 """
2311 """
2307 def __init__(self, repo, changectx, path, data, islink=False,
2312 def __init__(self, repo, changectx, path, data, islink=False,
2308 isexec=False, copied=None):
2313 isexec=False, copied=None):
2309 """
2314 """
2310 path is the normalized file path relative to repository root.
2315 path is the normalized file path relative to repository root.
2311 data is the file content as a string.
2316 data is the file content as a string.
2312 islink is True if the file is a symbolic link.
2317 islink is True if the file is a symbolic link.
2313 isexec is True if the file is executable.
2318 isexec is True if the file is executable.
2314 copied is the source file path if current file was copied in the
2319 copied is the source file path if current file was copied in the
2315 revision being committed, or None."""
2320 revision being committed, or None."""
2316 super(memfilectx, self).__init__(repo, path, None, changectx)
2321 super(memfilectx, self).__init__(repo, path, None, changectx)
2317 self._data = data
2322 self._data = data
2318 if islink:
2323 if islink:
2319 self._flags = 'l'
2324 self._flags = 'l'
2320 elif isexec:
2325 elif isexec:
2321 self._flags = 'x'
2326 self._flags = 'x'
2322 else:
2327 else:
2323 self._flags = ''
2328 self._flags = ''
2324 self._copied = None
2329 self._copied = None
2325 if copied:
2330 if copied:
2326 self._copied = (copied, nullid)
2331 self._copied = (copied, nullid)
2327
2332
2328 def data(self):
2333 def data(self):
2329 return self._data
2334 return self._data
2330
2335
2331 def remove(self, ignoremissing=False):
2336 def remove(self, ignoremissing=False):
2332 """wraps unlink for a repo's working directory"""
2337 """wraps unlink for a repo's working directory"""
2333 # need to figure out what to do here
2338 # need to figure out what to do here
2334 del self._changectx[self._path]
2339 del self._changectx[self._path]
2335
2340
2336 def write(self, data, flags, **kwargs):
2341 def write(self, data, flags, **kwargs):
2337 """wraps repo.wwrite"""
2342 """wraps repo.wwrite"""
2338 self._data = data
2343 self._data = data
2339
2344
2340
2345
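# Illustrative sketch (not part of upstream context.py): representing a rename
# through filectxfn, as described in the memctx docstring -- the source path
# returns None (removed) and the destination returns a memfilectx carrying
# copy metadata. Both b'old' and b'new' must appear in memctx's 'files'.
def _examplerenamefilectxfn(repo, memctx, path):
    if path == b'old':
        return None  # mark the rename source as removed
    if path == b'new':
        return memfilectx(repo, memctx, path, b'moved contents\n',
                          copied=b'old')
    return None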
2341 class metadataonlyctx(committablectx):
2346 class metadataonlyctx(committablectx):
2342 """Like memctx, but reusing the manifest of a different commit.
2347 """Like memctx, but reusing the manifest of a different commit.
2343 Intended to be used by lightweight operations that are creating
2348 Intended to be used by lightweight operations that are creating
2344 metadata-only changes.
2349 metadata-only changes.
2345
2350
2346 Revision information is supplied at initialization time. 'repo' is the
2351 Revision information is supplied at initialization time. 'repo' is the
2347 current localrepo, 'ctx' is the original revision whose manifest we're reusing,
2352 current localrepo, 'ctx' is the original revision whose manifest we're reusing,
2348 'parents' is a sequence of two parent revision identifiers (pass None for
2353 'parents' is a sequence of two parent revision identifiers (pass None for
2349 every missing parent), 'text' is the commit message.
2354 every missing parent), 'text' is the commit message.
2350
2355
2351 user receives the committer name and defaults to the current repository
2356 user receives the committer name and defaults to the current repository
2352 username; date is the commit date in any format supported by
2357 username; date is the commit date in any format supported by
2353 dateutil.parsedate() and defaults to the current date; extra is a dictionary of
2358 dateutil.parsedate() and defaults to the current date; extra is a dictionary of
2354 metadata and defaults to empty.
2359 metadata and defaults to empty.
2355 """
2360 """
2356 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2361 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2357 date=None, extra=None, editor=False):
2362 date=None, extra=None, editor=False):
2358 if text is None:
2363 if text is None:
2359 text = originalctx.description()
2364 text = originalctx.description()
2360 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2365 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2361 self._rev = None
2366 self._rev = None
2362 self._node = None
2367 self._node = None
2363 self._originalctx = originalctx
2368 self._originalctx = originalctx
2364 self._manifestnode = originalctx.manifestnode()
2369 self._manifestnode = originalctx.manifestnode()
2365 if parents is None:
2370 if parents is None:
2366 parents = originalctx.parents()
2371 parents = originalctx.parents()
2367 else:
2372 else:
2368 parents = [repo[p] for p in parents if p is not None]
2373 parents = [repo[p] for p in parents if p is not None]
2369 parents = parents[:]
2374 parents = parents[:]
2370 while len(parents) < 2:
2375 while len(parents) < 2:
2371 parents.append(repo[nullid])
2376 parents.append(repo[nullid])
2372 p1, p2 = self._parents = parents
2377 p1, p2 = self._parents = parents
2373
2378
2374 # sanity check to ensure that the reused manifest parents are
2379 # sanity check to ensure that the reused manifest parents are
2375 # manifests of our commit parents
2380 # manifests of our commit parents
2376 mp1, mp2 = self.manifestctx().parents
2381 mp1, mp2 = self.manifestctx().parents
2377 if p1 != nullid and p1.manifestnode() != mp1:
2382 if p1 != nullid and p1.manifestnode() != mp1:
2378 raise RuntimeError(r"can't reuse the manifest: its p1 "
2383 raise RuntimeError(r"can't reuse the manifest: its p1 "
2379 r"doesn't match the new ctx p1")
2384 r"doesn't match the new ctx p1")
2380 if p2 != nullid and p2.manifestnode() != mp2:
2385 if p2 != nullid and p2.manifestnode() != mp2:
2381 raise RuntimeError(r"can't reuse the manifest: "
2386 raise RuntimeError(r"can't reuse the manifest: "
2382 r"its p2 doesn't match the new ctx p2")
2387 r"its p2 doesn't match the new ctx p2")
2383
2388
2384 self._files = originalctx.files()
2389 self._files = originalctx.files()
2385 self.substate = {}
2390 self.substate = {}
2386
2391
2387 if editor:
2392 if editor:
2388 self._text = editor(self._repo, self, [])
2393 self._text = editor(self._repo, self, [])
2389 self._repo.savecommitmessage(self._text)
2394 self._repo.savecommitmessage(self._text)
2390
2395
2391 def manifestnode(self):
2396 def manifestnode(self):
2392 return self._manifestnode
2397 return self._manifestnode
2393
2398
2394 @property
2399 @property
2395 def _manifestctx(self):
2400 def _manifestctx(self):
2396 return self._repo.manifestlog[self._manifestnode]
2401 return self._repo.manifestlog[self._manifestnode]
2397
2402
2398 def filectx(self, path, filelog=None):
2403 def filectx(self, path, filelog=None):
2399 return self._originalctx.filectx(path, filelog=filelog)
2404 return self._originalctx.filectx(path, filelog=filelog)
2400
2405
2401 def commit(self):
2406 def commit(self):
2402 """commit context to the repo"""
2407 """commit context to the repo"""
2403 return self._repo.commitctx(self)
2408 return self._repo.commitctx(self)
2404
2409
2405 @property
2410 @property
2406 def _manifest(self):
2411 def _manifest(self):
2407 return self._originalctx.manifest()
2412 return self._originalctx.manifest()
2408
2413
2409 @propertycache
2414 @propertycache
2410 def _status(self):
2415 def _status(self):
2411 """Calculate exact status from ``files`` specified in the ``origctx``
2416 """Calculate exact status from ``files`` specified in the ``origctx``
2412 and parents manifests.
2417 and parents manifests.
2413 """
2418 """
2414 man1 = self.p1().manifest()
2419 man1 = self.p1().manifest()
2415 p2 = self._parents[1]
2420 p2 = self._parents[1]
2416 # "1 < len(self._parents)" can't be used for checking
2421 # "1 < len(self._parents)" can't be used for checking
2417 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2422 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2418 # explicitly initialized with a list of length 2.
2423 # explicitly initialized with a list of length 2.
2419 if p2.node() != nullid:
2424 if p2.node() != nullid:
2420 man2 = p2.manifest()
2425 man2 = p2.manifest()
2421 managing = lambda f: f in man1 or f in man2
2426 managing = lambda f: f in man1 or f in man2
2422 else:
2427 else:
2423 managing = lambda f: f in man1
2428 managing = lambda f: f in man1
2424
2429
2425 modified, added, removed = [], [], []
2430 modified, added, removed = [], [], []
2426 for f in self._files:
2431 for f in self._files:
2427 if not managing(f):
2432 if not managing(f):
2428 added.append(f)
2433 added.append(f)
2429 elif f in self:
2434 elif f in self:
2430 modified.append(f)
2435 modified.append(f)
2431 else:
2436 else:
2432 removed.append(f)
2437 removed.append(f)
2433
2438
2434 return scmutil.status(modified, added, removed, [], [], [], [])
2439 return scmutil.status(modified, added, removed, [], [], [], [])
2435
2440
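# Illustrative sketch (not part of upstream context.py): rewriting only the
# metadata of an existing revision with metadataonlyctx, reusing its manifest.
# Assumes `repo` is a localrepo and `rev` identifies the revision to rewrite;
# the replacement message is a hypothetical example.
def _examplereword(repo, rev):
    origctx = repo[rev]
    mctx = metadataonlyctx(repo, origctx, text=b'reworded commit message',
                           user=origctx.user(), date=origctx.date())
    return mctx.commit()  # commits a new node that reuses the old manifest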
2436 class arbitraryfilectx(object):
2441 class arbitraryfilectx(object):
2437 """Allows you to use filectx-like functions on a file in an arbitrary
2442 """Allows you to use filectx-like functions on a file in an arbitrary
2438 location on disk, possibly not in the working directory.
2443 location on disk, possibly not in the working directory.
2439 """
2444 """
2440 def __init__(self, path, repo=None):
2445 def __init__(self, path, repo=None):
2441 # Repo is optional because contrib/simplemerge uses this class.
2446 # Repo is optional because contrib/simplemerge uses this class.
2442 self._repo = repo
2447 self._repo = repo
2443 self._path = path
2448 self._path = path
2444
2449
2445 def cmp(self, fctx):
2450 def cmp(self, fctx):
2446 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2451 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2447 # path if either side is a symlink.
2452 # path if either side is a symlink.
2448 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2453 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2449 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2454 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2450 # Add a fast-path for merge if both sides are disk-backed.
2455 # Add a fast-path for merge if both sides are disk-backed.
2451 # Note that filecmp uses the opposite return values (True if same)
2456 # Note that filecmp uses the opposite return values (True if same)
2452 # from our cmp functions (True if different).
2457 # from our cmp functions (True if different).
2453 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2458 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2454 return self.data() != fctx.data()
2459 return self.data() != fctx.data()
2455
2460
2456 def path(self):
2461 def path(self):
2457 return self._path
2462 return self._path
2458
2463
2459 def flags(self):
2464 def flags(self):
2460 return ''
2465 return ''
2461
2466
2462 def data(self):
2467 def data(self):
2463 return util.readfile(self._path)
2468 return util.readfile(self._path)
2464
2469
2465 def decodeddata(self):
2470 def decodeddata(self):
2466 with open(self._path, "rb") as f:
2471 with open(self._path, "rb") as f:
2467 return f.read()
2472 return f.read()
2468
2473
2469 def remove(self):
2474 def remove(self):
2470 util.unlink(self._path)
2475 util.unlink(self._path)
2471
2476
2472 def write(self, data, flags, **kwargs):
2477 def write(self, data, flags, **kwargs):
2473 assert not flags
2478 assert not flags
2474 with open(self._path, "wb") as f:
2479 with open(self._path, "wb") as f:
2475 f.write(data)
2480 f.write(data)
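# Illustrative sketch (not part of upstream context.py): arbitraryfilectx lets
# merge helpers (e.g. contrib/simplemerge) treat a plain on-disk file like a
# filectx. `path` and `fctx` are hypothetical inputs.
def _examplecomparedisk(path, fctx):
    afctx = arbitraryfilectx(path)
    if afctx.cmp(fctx):       # cmp() returns True when the contents differ
        return afctx.data()   # raw bytes read from disk
    return fctx.data()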
@@ -1,743 +1,743 b''
1 """ Back-ported, durable, and portable selectors """
1 """ Back-ported, durable, and portable selectors """
2
2
3 # MIT License
3 # MIT License
4 #
4 #
5 # Copyright (c) 2017 Seth Michael Larson
5 # Copyright (c) 2017 Seth Michael Larson
6 #
6 #
7 # Permission is hereby granted, free of charge, to any person obtaining a copy
7 # Permission is hereby granted, free of charge, to any person obtaining a copy
8 # of this software and associated documentation files (the "Software"), to deal
8 # of this software and associated documentation files (the "Software"), to deal
9 # in the Software without restriction, including without limitation the rights
9 # in the Software without restriction, including without limitation the rights
10 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 # copies of the Software, and to permit persons to whom the Software is
11 # copies of the Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
12 # furnished to do so, subject to the following conditions:
13 #
13 #
14 # The above copyright notice and this permission notice shall be included in all
14 # The above copyright notice and this permission notice shall be included in all
15 # copies or substantial portions of the Software.
15 # copies or substantial portions of the Software.
16 #
16 #
17 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 # SOFTWARE.
23 # SOFTWARE.
24
24
25 from __future__ import absolute_import
25 from __future__ import absolute_import
26
26
27 import collections
27 import collections
28 import errno
28 import errno
29 import math
29 import math
30 import select
30 import select
31 import socket
31 import socket
32 import sys
32 import sys
33 import time
33 import time
34
34
35 from .. import pycompat
35 from .. import pycompat
36
36
37 namedtuple = collections.namedtuple
37 namedtuple = collections.namedtuple
38 Mapping = collections.Mapping
38 Mapping = collections.Mapping
39
39
40 try:
40 try:
41 monotonic = time.monotonic
41 monotonic = time.monotonic
42 except AttributeError:
42 except AttributeError:
43 monotonic = time.time
43 monotonic = time.time
44
44
45 __author__ = 'Seth Michael Larson'
45 __author__ = 'Seth Michael Larson'
46 __email__ = 'sethmichaellarson@protonmail.com'
46 __email__ = 'sethmichaellarson@protonmail.com'
47 __version__ = '2.0.0'
47 __version__ = '2.0.0'
48 __license__ = 'MIT'
48 __license__ = 'MIT'
49 __url__ = 'https://www.github.com/SethMichaelLarson/selectors2'
49 __url__ = 'https://www.github.com/SethMichaelLarson/selectors2'
50
50
51 __all__ = ['EVENT_READ',
51 __all__ = ['EVENT_READ',
52 'EVENT_WRITE',
52 'EVENT_WRITE',
53 'SelectorKey',
53 'SelectorKey',
54 'DefaultSelector',
54 'DefaultSelector',
55 'BaseSelector']
55 'BaseSelector']
56
56
57 EVENT_READ = (1 << 0)
57 EVENT_READ = (1 << 0)
58 EVENT_WRITE = (1 << 1)
58 EVENT_WRITE = (1 << 1)
59 _DEFAULT_SELECTOR = None
59 _DEFAULT_SELECTOR = None
60 _SYSCALL_SENTINEL = object() # Sentinel in case a system call returns None.
60 _SYSCALL_SENTINEL = object() # Sentinel in case a system call returns None.
61 _ERROR_TYPES = (OSError, IOError, socket.error)
61 _ERROR_TYPES = (OSError, IOError, socket.error)
62
62
63
63
64 SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
64 SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
65
65
66
66
67 class _SelectorMapping(Mapping):
67 class _SelectorMapping(Mapping):
68 """ Mapping of file objects to selector keys """
68 """ Mapping of file objects to selector keys """
69
69
70 def __init__(self, selector):
70 def __init__(self, selector):
71 self._selector = selector
71 self._selector = selector
72
72
73 def __len__(self):
73 def __len__(self):
74 return len(self._selector._fd_to_key)
74 return len(self._selector._fd_to_key)
75
75
76 def __getitem__(self, fileobj):
76 def __getitem__(self, fileobj):
77 try:
77 try:
78 fd = self._selector._fileobj_lookup(fileobj)
78 fd = self._selector._fileobj_lookup(fileobj)
79 return self._selector._fd_to_key[fd]
79 return self._selector._fd_to_key[fd]
80 except KeyError:
80 except KeyError:
81 raise KeyError("{0!r} is not registered.".format(fileobj))
81 raise KeyError("{0!r} is not registered.".format(fileobj))
82
82
83 def __iter__(self):
83 def __iter__(self):
84 return iter(self._selector._fd_to_key)
84 return iter(self._selector._fd_to_key)
85
85
86
86
87 def _fileobj_to_fd(fileobj):
87 def _fileobj_to_fd(fileobj):
88 """ Return a file descriptor from a file object. If
88 """ Return a file descriptor from a file object. If
89 given an integer will simply return that integer back. """
89 given an integer will simply return that integer back. """
90 if isinstance(fileobj, int):
90 if isinstance(fileobj, int):
91 fd = fileobj
91 fd = fileobj
92 else:
92 else:
93 try:
93 try:
94 fd = int(fileobj.fileno())
94 fd = int(fileobj.fileno())
95 except (AttributeError, TypeError, ValueError):
95 except (AttributeError, TypeError, ValueError):
96 raise ValueError("Invalid file object: {0!r}".format(fileobj))
96 raise ValueError("Invalid file object: {0!r}".format(fileobj))
97 if fd < 0:
97 if fd < 0:
98 raise ValueError("Invalid file descriptor: {0}".format(fd))
98 raise ValueError("Invalid file descriptor: {0}".format(fd))
99 return fd
99 return fd
100
100
101
101
102 class BaseSelector(object):
102 class BaseSelector(object):
103 """ Abstract Selector class
103 """ Abstract Selector class
104
104
105 A selector supports registering file objects to be monitored
105 A selector supports registering file objects to be monitored
106 for specific I/O events.
106 for specific I/O events.
107
107
108 A file object is a file descriptor or any object with a
108 A file object is a file descriptor or any object with a
109 `fileno()` method. An arbitrary object can be attached to the
109 `fileno()` method. An arbitrary object can be attached to the
110 file object, which can be used, for example, to store context info,
110 file object, which can be used, for example, to store context info,
111 a callback, etc.
111 a callback, etc.
112
112
113 A selector can use various implementations (select(), poll(), epoll(),
113 A selector can use various implementations (select(), poll(), epoll(),
114 and kqueue()) depending on the platform. The 'DefaultSelector' class uses
114 and kqueue()) depending on the platform. The 'DefaultSelector' class uses
115 the most efficient implementation for the current platform.
115 the most efficient implementation for the current platform.
116 """
116 """
117 def __init__(self):
117 def __init__(self):
118 # Maps file descriptors to keys.
118 # Maps file descriptors to keys.
119 self._fd_to_key = {}
119 self._fd_to_key = {}
120
120
121 # Read-only mapping returned by get_map()
121 # Read-only mapping returned by get_map()
122 self._map = _SelectorMapping(self)
122 self._map = _SelectorMapping(self)
123
123
124 def _fileobj_lookup(self, fileobj):
124 def _fileobj_lookup(self, fileobj):
125 """ Return a file descriptor from a file object.
125 """ Return a file descriptor from a file object.
126 This wraps _fileobj_to_fd() to do an exhaustive
126 This wraps _fileobj_to_fd() to do an exhaustive
127 search in case the object is invalid but we still
127 search in case the object is invalid but we still
128 have it in our map. Used by unregister() so we can
128 have it in our map. Used by unregister() so we can
129 unregister an object that was previously registered
129 unregister an object that was previously registered
130 even if it is closed. It is also used by _SelectorMapping.
130 even if it is closed. It is also used by _SelectorMapping.
131 """
131 """
132 try:
132 try:
133 return _fileobj_to_fd(fileobj)
133 return _fileobj_to_fd(fileobj)
134 except ValueError:
134 except ValueError:
135
135
136 # Search through all our mapped keys.
136 # Search through all our mapped keys.
137 for key in self._fd_to_key.values():
137 for key in self._fd_to_key.values():
138 if key.fileobj is fileobj:
138 if key.fileobj is fileobj:
139 return key.fd
139 return key.fd
140
140
141 # Raise ValueError after all.
141 # Raise ValueError after all.
142 raise
142 raise
143
143
144 def register(self, fileobj, events, data=None):
144 def register(self, fileobj, events, data=None):
145 """ Register a file object for a set of events to monitor. """
145 """ Register a file object for a set of events to monitor. """
146 if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
146 if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
147 raise ValueError("Invalid events: {0!r}".format(events))
147 raise ValueError("Invalid events: {0!r}".format(events))
148
148
149 key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
149 key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
150
150
151 if key.fd in self._fd_to_key:
151 if key.fd in self._fd_to_key:
152 raise KeyError("{0!r} (FD {1}) is already registered"
152 raise KeyError("{0!r} (FD {1}) is already registered"
153 .format(fileobj, key.fd))
153 .format(fileobj, key.fd))
154
154
155 self._fd_to_key[key.fd] = key
155 self._fd_to_key[key.fd] = key
156 return key
156 return key
157
157
158 def unregister(self, fileobj):
158 def unregister(self, fileobj):
159 """ Unregister a file object from being monitored. """
159 """ Unregister a file object from being monitored. """
160 try:
160 try:
161 key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
161 key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
162 except KeyError:
162 except KeyError:
163 raise KeyError("{0!r} is not registered".format(fileobj))
163 raise KeyError("{0!r} is not registered".format(fileobj))
164
164
165 # Getting the fileno of a closed socket on Windows errors with EBADF.
165 # Getting the fileno of a closed socket on Windows errors with EBADF.
166 except socket.error as err:
166 except socket.error as err:
167 if err.errno != errno.EBADF:
167 if err.errno != errno.EBADF:
168 raise
168 raise
169 else:
169 else:
170 for key in self._fd_to_key.values():
170 for key in self._fd_to_key.values():
171 if key.fileobj is fileobj:
171 if key.fileobj is fileobj:
172 self._fd_to_key.pop(key.fd)
172 self._fd_to_key.pop(key.fd)
173 break
173 break
174 else:
174 else:
175 raise KeyError("{0!r} is not registered".format(fileobj))
175 raise KeyError("{0!r} is not registered".format(fileobj))
176 return key
176 return key
177
177
178 def modify(self, fileobj, events, data=None):
178 def modify(self, fileobj, events, data=None):
179 """ Change a registered file object's monitored events and data. """
179 """ Change a registered file object's monitored events and data. """
180 # NOTE: Some subclasses optimize this operation even further.
180 # NOTE: Some subclasses optimize this operation even further.
181 try:
181 try:
182 key = self._fd_to_key[self._fileobj_lookup(fileobj)]
182 key = self._fd_to_key[self._fileobj_lookup(fileobj)]
183 except KeyError:
183 except KeyError:
184 raise KeyError("{0!r} is not registered".format(fileobj))
184 raise KeyError("{0!r} is not registered".format(fileobj))
185
185
186 if events != key.events:
186 if events != key.events:
187 self.unregister(fileobj)
187 self.unregister(fileobj)
188 key = self.register(fileobj, events, data)
188 key = self.register(fileobj, events, data)
189
189
190 elif data != key.data:
190 elif data != key.data:
191 # Use a shortcut to update the data.
191 # Use a shortcut to update the data.
192 key = key._replace(data=data)
192 key = key._replace(data=data)
193 self._fd_to_key[key.fd] = key
193 self._fd_to_key[key.fd] = key
194
194
195 return key
195 return key
196
196
197 def select(self, timeout=None):
197 def select(self, timeout=None):
198 """ Perform the actual selection until some monitored file objects
198 """ Perform the actual selection until some monitored file objects
199 are ready or the timeout expires. """
199 are ready or the timeout expires. """
200 raise NotImplementedError()
200 raise NotImplementedError()
201
201
202 def close(self):
202 def close(self):
203 """ Close the selector. This must be called to ensure that all
203 """ Close the selector. This must be called to ensure that all
204 underlying resources are freed. """
204 underlying resources are freed. """
205 self._fd_to_key.clear()
205 self._fd_to_key.clear()
206 self._map = None
206 self._map = None
207
207
208 def get_key(self, fileobj):
208 def get_key(self, fileobj):
209 """ Return the key associated with a registered file object. """
209 """ Return the key associated with a registered file object. """
210 mapping = self.get_map()
210 mapping = self.get_map()
211 if mapping is None:
211 if mapping is None:
212 raise RuntimeError("Selector is closed")
212 raise RuntimeError("Selector is closed")
213 try:
213 try:
214 return mapping[fileobj]
214 return mapping[fileobj]
215 except KeyError:
215 except KeyError:
216 raise KeyError("{0!r} is not registered".format(fileobj))
216 raise KeyError("{0!r} is not registered".format(fileobj))
217
217
218 def get_map(self):
218 def get_map(self):
219 """ Return a mapping of file objects to selector keys """
219 """ Return a mapping of file objects to selector keys """
220 return self._map
220 return self._map
221
221
222 def _key_from_fd(self, fd):
222 def _key_from_fd(self, fd):
223 """ Return the key associated to a given file descriptor
223 """ Return the key associated to a given file descriptor
224 Return None if it is not found. """
224 Return None if it is not found. """
225 try:
225 try:
226 return self._fd_to_key[fd]
226 return self._fd_to_key[fd]
227 except KeyError:
227 except KeyError:
228 return None
228 return None
229
229
230 def __enter__(self):
230 def __enter__(self):
231 return self
231 return self
232
232
233 def __exit__(self, *_):
233 def __exit__(self, *_):
234 self.close()
234 self.close()
235
235
236
236
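# Illustrative sketch (not part of the vendored module): the usual pattern for
# these selectors -- register a socket for EVENT_READ with a callback attached
# as `data`, then dispatch on the (key, events) pairs returned by select().
# `sock` and `handler` are hypothetical; DefaultSelector is assigned further
# down in this module.
def _example_read_loop(sock, handler):
    with DefaultSelector() as selector:
        selector.register(sock, EVENT_READ, data=handler)
        while True:
            for key, events in selector.select(timeout=1.0):
                if events & EVENT_READ:
                    key.data(key.fileobj)  # invoke the attached callback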
237 # Almost all platforms have select.select()
237 # Almost all platforms have select.select()
238 if hasattr(select, "select"):
238 if hasattr(select, "select"):
239 class SelectSelector(BaseSelector):
239 class SelectSelector(BaseSelector):
240 """ Select-based selector. """
240 """ Select-based selector. """
241 def __init__(self):
241 def __init__(self):
242 super(SelectSelector, self).__init__()
242 super(SelectSelector, self).__init__()
243 self._readers = set()
243 self._readers = set()
244 self._writers = set()
244 self._writers = set()
245
245
246 def register(self, fileobj, events, data=None):
246 def register(self, fileobj, events, data=None):
247 key = super(SelectSelector, self).register(fileobj, events, data)
247 key = super(SelectSelector, self).register(fileobj, events, data)
248 if events & EVENT_READ:
248 if events & EVENT_READ:
249 self._readers.add(key.fd)
249 self._readers.add(key.fd)
250 if events & EVENT_WRITE:
250 if events & EVENT_WRITE:
251 self._writers.add(key.fd)
251 self._writers.add(key.fd)
252 return key
252 return key
253
253
254 def unregister(self, fileobj):
254 def unregister(self, fileobj):
255 key = super(SelectSelector, self).unregister(fileobj)
255 key = super(SelectSelector, self).unregister(fileobj)
256 self._readers.discard(key.fd)
256 self._readers.discard(key.fd)
257 self._writers.discard(key.fd)
257 self._writers.discard(key.fd)
258 return key
258 return key
259
259
260 def select(self, timeout=None):
260 def select(self, timeout=None):
261 # Selecting on empty lists on Windows errors out.
261 # Selecting on empty lists on Windows errors out.
262 if not len(self._readers) and not len(self._writers):
262 if not len(self._readers) and not len(self._writers):
263 return []
263 return []
264
264
265 timeout = None if timeout is None else max(timeout, 0.0)
265 timeout = None if timeout is None else max(timeout, 0.0)
266 ready = []
266 ready = []
267 r, w, _ = _syscall_wrapper(self._wrap_select, True, self._readers,
267 r, w, _ = _syscall_wrapper(self._wrap_select, True, self._readers,
268 self._writers, timeout)
268 self._writers, timeout)
269 r = set(r)
269 r = set(r)
270 w = set(w)
270 w = set(w)
271 for fd in r | w:
271 for fd in r | w:
272 events = 0
272 events = 0
273 if fd in r:
273 if fd in r:
274 events |= EVENT_READ
274 events |= EVENT_READ
275 if fd in w:
275 if fd in w:
276 events |= EVENT_WRITE
276 events |= EVENT_WRITE
277
277
278 key = self._key_from_fd(fd)
278 key = self._key_from_fd(fd)
279 if key:
279 if key:
280 ready.append((key, events & key.events))
280 ready.append((key, events & key.events))
281 return ready
281 return ready
282
282
283 def _wrap_select(self, r, w, timeout=None):
283 def _wrap_select(self, r, w, timeout=None):
284 """ Wrapper for select.select because timeout is a positional arg """
284 """ Wrapper for select.select because timeout is a positional arg """
285 return select.select(r, w, [], timeout)
285 return select.select(r, w, [], timeout)
286
286
287 __all__.append('SelectSelector')
287 __all__.append('SelectSelector')
288
288
289 # Jython has a different implementation of .fileno() for socket objects.
289 # Jython has a different implementation of .fileno() for socket objects.
290 if pycompat.isjython:
290 if pycompat.isjython:
291 class _JythonSelectorMapping(object):
291 class _JythonSelectorMapping(object):
292 """ This is an implementation of _SelectorMapping that is built
292 """ This is an implementation of _SelectorMapping that is built
293 for use specifically with Jython, which does not provide a hashable
293 for use specifically with Jython, which does not provide a hashable
294 value from socket.socket.fileno(). """
294 value from socket.socket.fileno(). """
295
295
296 def __init__(self, selector):
296 def __init__(self, selector):
297 assert isinstance(selector, JythonSelectSelector)
297 assert isinstance(selector, JythonSelectSelector)
298 self._selector = selector
298 self._selector = selector
299
299
300 def __len__(self):
300 def __len__(self):
301 return len(self._selector._sockets)
301 return len(self._selector._sockets)
302
302
303 def __getitem__(self, fileobj):
303 def __getitem__(self, fileobj):
304 for sock, key in self._selector._sockets:
304 for sock, key in self._selector._sockets:
305 if sock is fileobj:
305 if sock is fileobj:
306 return key
306 return key
307 else:
307 else:
308 raise KeyError("{0!r} is not registered.".format(fileobj))
308 raise KeyError("{0!r} is not registered.".format(fileobj))
309
309
310 class JythonSelectSelector(SelectSelector):
310 class JythonSelectSelector(SelectSelector):
311 """ This is an implementation of SelectSelector for Jython,
311 """ This is an implementation of SelectSelector for Jython,
312 which works around the fact that Jython's socket.socket.fileno() does not
312 which works around the fact that Jython's socket.socket.fileno() does not
313 return an integer fd value. All SelectorKey.fd will be equal to -1
313 return an integer fd value. All SelectorKey.fd will be equal to -1
314 and should not be used. This instead uses object id to compare fileobj
314 and should not be used. This instead uses object id to compare fileobj
315 and will only use select.select as it's the only selector that allows
315 and will only use select.select as it's the only selector that allows
316 directly passing in socket objects rather than registering fds.
316 directly passing in socket objects rather than registering fds.
317 See: http://bugs.jython.org/issue1678
317 See: http://bugs.jython.org/issue1678
318 https://wiki.python.org/jython/NewSocketModule#socket.fileno.28.29_does_not_return_an_integer
318 https://wiki.python.org/jython/NewSocketModule#socket.fileno.28.29_does_not_return_an_integer
319 """
319 """
320
320
321 def __init__(self):
321 def __init__(self):
322 super(JythonSelectSelector, self).__init__()
322 super(JythonSelectSelector, self).__init__()
323
323
324 self._sockets = [] # Uses a list of tuples instead of dictionary.
324 self._sockets = [] # Uses a list of tuples instead of dictionary.
325 self._map = _JythonSelectorMapping(self)
325 self._map = _JythonSelectorMapping(self)
326 self._readers = []
326 self._readers = []
327 self._writers = []
327 self._writers = []
328
328
329 # Jython has a select.cpython_compatible_select function in older versions.
329 # Jython has a select.cpython_compatible_select function in older versions.
330 self._select_func = getattr(select, 'cpython_compatible_select', select.select)
330 self._select_func = getattr(select, 'cpython_compatible_select', select.select)
331
331
332 def register(self, fileobj, events, data=None):
332 def register(self, fileobj, events, data=None):
333 for sock, _ in self._sockets:
333 for sock, _ in self._sockets:
334 if sock is fileobj:
334 if sock is fileobj:
335 raise KeyError("{0!r} is already registered"
335 raise KeyError("{0!r} is already registered"
336 .format(fileobj, sock))
336 .format(fileobj, sock))
337
337
338 key = SelectorKey(fileobj, -1, events, data)
338 key = SelectorKey(fileobj, -1, events, data)
339 self._sockets.append((fileobj, key))
339 self._sockets.append((fileobj, key))
340
340
341 if events & EVENT_READ:
341 if events & EVENT_READ:
342 self._readers.append(fileobj)
342 self._readers.append(fileobj)
343 if events & EVENT_WRITE:
343 if events & EVENT_WRITE:
344 self._writers.append(fileobj)
344 self._writers.append(fileobj)
345 return key
345 return key
346
346
347 def unregister(self, fileobj):
347 def unregister(self, fileobj):
348 for i, (sock, key) in enumerate(self._sockets):
348 for i, (sock, key) in enumerate(self._sockets):
349 if sock is fileobj:
349 if sock is fileobj:
350 break
350 break
351 else:
351 else:
352 raise KeyError("{0!r} is not registered.".format(fileobj))
352 raise KeyError("{0!r} is not registered.".format(fileobj))
353
353
354 if key.events & EVENT_READ:
354 if key.events & EVENT_READ:
355 self._readers.remove(fileobj)
355 self._readers.remove(fileobj)
356 if key.events & EVENT_WRITE:
356 if key.events & EVENT_WRITE:
357 self._writers.remove(fileobj)
357 self._writers.remove(fileobj)
358
358
359 del self._sockets[i]
359 del self._sockets[i]
360 return key
360 return key
361
361
362 def _wrap_select(self, r, w, timeout=None):
362 def _wrap_select(self, r, w, timeout=None):
363 """ Wrapper for select.select because timeout is a positional arg """
363 """ Wrapper for select.select because timeout is a positional arg """
364 return self._select_func(r, w, [], timeout)
364 return self._select_func(r, w, [], timeout)
365
365
366 __all__.append('JythonSelectSelector')
366 __all__.append('JythonSelectSelector')
367 SelectSelector = JythonSelectSelector # Override so the wrong selector isn't used.
367 SelectSelector = JythonSelectSelector # Override so the wrong selector isn't used.
368
368
369
369
370 if hasattr(select, "poll"):
370 if hasattr(select, "poll"):
371 class PollSelector(BaseSelector):
371 class PollSelector(BaseSelector):
372 """ Poll-based selector """
372 """ Poll-based selector """
373 def __init__(self):
373 def __init__(self):
374 super(PollSelector, self).__init__()
374 super(PollSelector, self).__init__()
375 self._poll = select.poll()
375 self._poll = select.poll()
376
376
377 def register(self, fileobj, events, data=None):
377 def register(self, fileobj, events, data=None):
378 key = super(PollSelector, self).register(fileobj, events, data)
378 key = super(PollSelector, self).register(fileobj, events, data)
379 event_mask = 0
379 event_mask = 0
380 if events & EVENT_READ:
380 if events & EVENT_READ:
381 event_mask |= select.POLLIN
381 event_mask |= select.POLLIN
382 if events & EVENT_WRITE:
382 if events & EVENT_WRITE:
383 event_mask |= select.POLLOUT
383 event_mask |= select.POLLOUT
384 self._poll.register(key.fd, event_mask)
384 self._poll.register(key.fd, event_mask)
385 return key
385 return key
386
386
387 def unregister(self, fileobj):
387 def unregister(self, fileobj):
388 key = super(PollSelector, self).unregister(fileobj)
388 key = super(PollSelector, self).unregister(fileobj)
389 self._poll.unregister(key.fd)
389 self._poll.unregister(key.fd)
390 return key
390 return key
391
391
392 def _wrap_poll(self, timeout=None):
392 def _wrap_poll(self, timeout=None):
393 """ Wrapper function for select.poll.poll() so that
393 """ Wrapper function for select.poll.poll() so that
394 _syscall_wrapper can work with only seconds. """
394 _syscall_wrapper can work with only seconds. """
395 if timeout is not None:
395 if timeout is not None:
396 if timeout <= 0:
396 if timeout <= 0:
397 timeout = 0
397 timeout = 0
398 else:
398 else:
399 # select.poll.poll() has a resolution of 1 millisecond,
399 # select.poll.poll() has a resolution of 1 millisecond,
400 # round away from zero to wait *at least* timeout seconds.
400 # round away from zero to wait *at least* timeout seconds.
401 timeout = math.ceil(timeout * 1000)
401 timeout = math.ceil(timeout * 1000)
402
402
403 result = self._poll.poll(timeout)
403 result = self._poll.poll(timeout)
404 return result
404 return result
405
405
406 def select(self, timeout=None):
406 def select(self, timeout=None):
407 ready = []
407 ready = []
408 fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
408 fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
409 for fd, event_mask in fd_events:
409 for fd, event_mask in fd_events:
410 events = 0
410 events = 0
411 if event_mask & ~select.POLLIN:
411 if event_mask & ~select.POLLIN:
412 events |= EVENT_WRITE
412 events |= EVENT_WRITE
413 if event_mask & ~select.POLLOUT:
413 if event_mask & ~select.POLLOUT:
414 events |= EVENT_READ
414 events |= EVENT_READ
415
415
416 key = self._key_from_fd(fd)
416 key = self._key_from_fd(fd)
417 if key:
417 if key:
418 ready.append((key, events & key.events))
418 ready.append((key, events & key.events))
419
419
420 return ready
420 return ready
421
421
422 __all__.append('PollSelector')
422 __all__.append('PollSelector')
423
423
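# Illustrative sketch (not part of the vendored module): the same
# seconds-to-milliseconds rounding rule used by PollSelector._wrap_poll above,
# shown standalone -- negative timeouts clamp to zero and fractional
# milliseconds round up so the wait is never shorter than requested.
def _example_seconds_to_poll_ms(timeout):
    if timeout is None:
        return None
    if timeout <= 0:
        return 0
    return int(math.ceil(timeout * 1000))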
424 if hasattr(select, "epoll"):
424 if hasattr(select, "epoll"):
425 class EpollSelector(BaseSelector):
425 class EpollSelector(BaseSelector):
426 """ Epoll-based selector """
426 """ Epoll-based selector """
427 def __init__(self):
427 def __init__(self):
428 super(EpollSelector, self).__init__()
428 super(EpollSelector, self).__init__()
429 self._epoll = select.epoll()
429 self._epoll = select.epoll()
430
430
431 def fileno(self):
431 def fileno(self):
432 return self._epoll.fileno()
432 return self._epoll.fileno()
433
433
434 def register(self, fileobj, events, data=None):
434 def register(self, fileobj, events, data=None):
435 key = super(EpollSelector, self).register(fileobj, events, data)
435 key = super(EpollSelector, self).register(fileobj, events, data)
436 events_mask = 0
436 events_mask = 0
437 if events & EVENT_READ:
437 if events & EVENT_READ:
438 events_mask |= select.EPOLLIN
438 events_mask |= select.EPOLLIN
439 if events & EVENT_WRITE:
439 if events & EVENT_WRITE:
440 events_mask |= select.EPOLLOUT
440 events_mask |= select.EPOLLOUT
441 _syscall_wrapper(self._epoll.register, False, key.fd, events_mask)
441 _syscall_wrapper(self._epoll.register, False, key.fd, events_mask)
442 return key
442 return key
443
443
444 def unregister(self, fileobj):
444 def unregister(self, fileobj):
445 key = super(EpollSelector, self).unregister(fileobj)
445 key = super(EpollSelector, self).unregister(fileobj)
446 try:
446 try:
447 _syscall_wrapper(self._epoll.unregister, False, key.fd)
447 _syscall_wrapper(self._epoll.unregister, False, key.fd)
448 except _ERROR_TYPES:
448 except _ERROR_TYPES:
449 # This can occur when the fd was closed after being registered.
449 # This can occur when the fd was closed after being registered.
450 pass
450 pass
451 return key
451 return key
452
452
453 def select(self, timeout=None):
453 def select(self, timeout=None):
454 if timeout is not None:
454 if timeout is not None:
455 if timeout <= 0:
455 if timeout <= 0:
456 timeout = 0.0
456 timeout = 0.0
457 else:
457 else:
458 # select.epoll.poll() has a resolution of 1 millisecond
458 # select.epoll.poll() has a resolution of 1 millisecond
459 # but conveniently takes seconds, so we don't need a wrapper
459 # but conveniently takes seconds, so we don't need a wrapper
460 # like PollSelector; we still round up so we wait at least `timeout`.
460 # like PollSelector; we still round up so we wait at least `timeout`.
461 timeout = math.ceil(timeout * 1000) * 0.001
461 timeout = math.ceil(timeout * 1000) * 0.001
462 timeout = float(timeout)
462 timeout = float(timeout)
463 else:
463 else:
464 timeout = -1.0 # epoll.poll() must have a float.
464 timeout = -1.0 # epoll.poll() must have a float.
465
465
466 # We always want at least 1 to ensure that select can be called
466 # We always want at least 1 to ensure that select can be called
467 # with no file descriptors registered. Otherwise it will fail.
467 # with no file descriptors registered. Otherwise it will fail.
468 max_events = max(len(self._fd_to_key), 1)
468 max_events = max(len(self._fd_to_key), 1)
469
469
470 ready = []
470 ready = []
471 fd_events = _syscall_wrapper(self._epoll.poll, True,
471 fd_events = _syscall_wrapper(self._epoll.poll, True,
472 timeout=timeout,
472 timeout=timeout,
473 maxevents=max_events)
473 maxevents=max_events)
474 for fd, event_mask in fd_events:
474 for fd, event_mask in fd_events:
475 events = 0
475 events = 0
476 if event_mask & ~select.EPOLLIN:
476 if event_mask & ~select.EPOLLIN:
477 events |= EVENT_WRITE
477 events |= EVENT_WRITE
478 if event_mask & ~select.EPOLLOUT:
478 if event_mask & ~select.EPOLLOUT:
479 events |= EVENT_READ
479 events |= EVENT_READ
480
480
481 key = self._key_from_fd(fd)
481 key = self._key_from_fd(fd)
482 if key:
482 if key:
483 ready.append((key, events & key.events))
483 ready.append((key, events & key.events))
484 return ready
484 return ready
485
485
486 def close(self):
486 def close(self):
487 self._epoll.close()
487 self._epoll.close()
488 super(EpollSelector, self).close()
488 super(EpollSelector, self).close()
489
489
490 __all__.append('EpollSelector')
490 __all__.append('EpollSelector')
491
491
492
492
493 if hasattr(select, "devpoll"):
493 if hasattr(select, "devpoll"):
494 class DevpollSelector(BaseSelector):
494 class DevpollSelector(BaseSelector):
495 """Solaris /dev/poll selector."""
495 """Solaris /dev/poll selector."""
496
496
497 def __init__(self):
497 def __init__(self):
498 super(DevpollSelector, self).__init__()
498 super(DevpollSelector, self).__init__()
499 self._devpoll = select.devpoll()
499 self._devpoll = select.devpoll()
500
500
501 def fileno(self):
501 def fileno(self):
502 return self._devpoll.fileno()
502 return self._devpoll.fileno()
503
503
504 def register(self, fileobj, events, data=None):
504 def register(self, fileobj, events, data=None):
505 key = super(DevpollSelector, self).register(fileobj, events, data)
505 key = super(DevpollSelector, self).register(fileobj, events, data)
506 poll_events = 0
506 poll_events = 0
507 if events & EVENT_READ:
507 if events & EVENT_READ:
508 poll_events |= select.POLLIN
508 poll_events |= select.POLLIN
509 if events & EVENT_WRITE:
509 if events & EVENT_WRITE:
510 poll_events |= select.POLLOUT
510 poll_events |= select.POLLOUT
511 self._devpoll.register(key.fd, poll_events)
511 self._devpoll.register(key.fd, poll_events)
512 return key
512 return key
513
513
514 def unregister(self, fileobj):
514 def unregister(self, fileobj):
515 key = super(DevpollSelector, self).unregister(fileobj)
515 key = super(DevpollSelector, self).unregister(fileobj)
516 self._devpoll.unregister(key.fd)
516 self._devpoll.unregister(key.fd)
517 return key
517 return key
518
518
519 def _wrap_poll(self, timeout=None):
519 def _wrap_poll(self, timeout=None):
520 """ Wrapper function for select.devpoll.poll() so that
520 """ Wrapper function for select.devpoll.poll() so that
521 _syscall_wrapper can work with only seconds. """
521 _syscall_wrapper can work with only seconds. """
522 if timeout is not None:
522 if timeout is not None:
523 if timeout <= 0:
523 if timeout <= 0:
524 timeout = 0
524 timeout = 0
525 else:
525 else:
526 # select.devpoll.poll() has a resolution of 1 millisecond,
526 # select.devpoll.poll() has a resolution of 1 millisecond,
527 # round away from zero to wait *at least* timeout seconds.
527 # round away from zero to wait *at least* timeout seconds.
528 timeout = math.ceil(timeout * 1000)
528 timeout = math.ceil(timeout * 1000)
529
529
530 result = self._devpoll.poll(timeout)
530 result = self._devpoll.poll(timeout)
531 return result
531 return result
532
532
533 def select(self, timeout=None):
533 def select(self, timeout=None):
534 ready = []
534 ready = []
535 fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
535 fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
536 for fd, event_mask in fd_events:
536 for fd, event_mask in fd_events:
537 events = 0
537 events = 0
538 if event_mask & ~select.POLLIN:
538 if event_mask & ~select.POLLIN:
539 events |= EVENT_WRITE
539 events |= EVENT_WRITE
540 if event_mask & ~select.POLLOUT:
540 if event_mask & ~select.POLLOUT:
541 events |= EVENT_READ
541 events |= EVENT_READ
542
542
543 key = self._key_from_fd(fd)
543 key = self._key_from_fd(fd)
544 if key:
544 if key:
545 ready.append((key, events & key.events))
545 ready.append((key, events & key.events))
546
546
547 return ready
547 return ready
548
548
549 def close(self):
549 def close(self):
550 self._devpoll.close()
550 self._devpoll.close()
551 super(DevpollSelector, self).close()
551 super(DevpollSelector, self).close()
552
552
553 __all__.append('DevpollSelector')
553 __all__.append('DevpollSelector')
554
554
555
555
556 if hasattr(select, "kqueue"):
556 if hasattr(select, "kqueue"):
557 class KqueueSelector(BaseSelector):
557 class KqueueSelector(BaseSelector):
558 """ Kqueue / Kevent-based selector """
558 """ Kqueue / Kevent-based selector """
559 def __init__(self):
559 def __init__(self):
560 super(KqueueSelector, self).__init__()
560 super(KqueueSelector, self).__init__()
561 self._kqueue = select.kqueue()
561 self._kqueue = select.kqueue()
562
562
563 def fileno(self):
563 def fileno(self):
564 return self._kqueue.fileno()
564 return self._kqueue.fileno()
565
565
566 def register(self, fileobj, events, data=None):
566 def register(self, fileobj, events, data=None):
567 key = super(KqueueSelector, self).register(fileobj, events, data)
567 key = super(KqueueSelector, self).register(fileobj, events, data)
568 if events & EVENT_READ:
568 if events & EVENT_READ:
569 kevent = select.kevent(key.fd,
569 kevent = select.kevent(key.fd,
570 select.KQ_FILTER_READ,
570 select.KQ_FILTER_READ,
571 select.KQ_EV_ADD)
571 select.KQ_EV_ADD)
572
572
573 _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
573 _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
574
574
575 if events & EVENT_WRITE:
575 if events & EVENT_WRITE:
576 kevent = select.kevent(key.fd,
576 kevent = select.kevent(key.fd,
577 select.KQ_FILTER_WRITE,
577 select.KQ_FILTER_WRITE,
578 select.KQ_EV_ADD)
578 select.KQ_EV_ADD)
579
579
580 _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
580 _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
581
581
582 return key
582 return key
583
583
584 def unregister(self, fileobj):
584 def unregister(self, fileobj):
585 key = super(KqueueSelector, self).unregister(fileobj)
585 key = super(KqueueSelector, self).unregister(fileobj)
586 if key.events & EVENT_READ:
586 if key.events & EVENT_READ:
587 kevent = select.kevent(key.fd,
587 kevent = select.kevent(key.fd,
588 select.KQ_FILTER_READ,
588 select.KQ_FILTER_READ,
589 select.KQ_EV_DELETE)
589 select.KQ_EV_DELETE)
590 try:
590 try:
591 _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
591 _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
592 except _ERROR_TYPES:
592 except _ERROR_TYPES:
593 pass
593 pass
594 if key.events & EVENT_WRITE:
594 if key.events & EVENT_WRITE:
595 kevent = select.kevent(key.fd,
595 kevent = select.kevent(key.fd,
596 select.KQ_FILTER_WRITE,
596 select.KQ_FILTER_WRITE,
597 select.KQ_EV_DELETE)
597 select.KQ_EV_DELETE)
598 try:
598 try:
599 _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
599 _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
600 except _ERROR_TYPES:
600 except _ERROR_TYPES:
601 pass
601 pass
602
602
603 return key
603 return key
604
604
605 def select(self, timeout=None):
605 def select(self, timeout=None):
606 if timeout is not None:
606 if timeout is not None:
607 timeout = max(timeout, 0)
607 timeout = max(timeout, 0)
608
608
609 max_events = len(self._fd_to_key) * 2
609 max_events = len(self._fd_to_key) * 2
610 ready_fds = {}
610 ready_fds = {}
611
611
612 kevent_list = _syscall_wrapper(self._kqueue.control, True,
612 kevent_list = _syscall_wrapper(self._kqueue.control, True,
613 None, max_events, timeout)
613 None, max_events, timeout)
614
614
615 for kevent in kevent_list:
615 for kevent in kevent_list:
616 fd = kevent.ident
616 fd = kevent.ident
617 event_mask = kevent.filter
617 event_mask = kevent.filter
618 events = 0
618 events = 0
619 if event_mask == select.KQ_FILTER_READ:
619 if event_mask == select.KQ_FILTER_READ:
620 events |= EVENT_READ
620 events |= EVENT_READ
621 if event_mask == select.KQ_FILTER_WRITE:
621 if event_mask == select.KQ_FILTER_WRITE:
622 events |= EVENT_WRITE
622 events |= EVENT_WRITE
623
623
624 key = self._key_from_fd(fd)
624 key = self._key_from_fd(fd)
625 if key:
625 if key:
626 if key.fd not in ready_fds:
626 if key.fd not in ready_fds:
627 ready_fds[key.fd] = (key, events & key.events)
627 ready_fds[key.fd] = (key, events & key.events)
628 else:
628 else:
629 old_events = ready_fds[key.fd][1]
629 old_events = ready_fds[key.fd][1]
630 ready_fds[key.fd] = (key, (events | old_events) & key.events)
630 ready_fds[key.fd] = (key, (events | old_events) & key.events)
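# A fd registered for both EVENT_READ and EVENT_WRITE can produce two
# kevents (one per filter); OR-ing the masks collapses them into a
# single (key, events) entry for that fd.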
631
631
632 return list(ready_fds.values())
632 return list(ready_fds.values())
633
633
634 def close(self):
634 def close(self):
635 self._kqueue.close()
635 self._kqueue.close()
636 super(KqueueSelector, self).close()
636 super(KqueueSelector, self).close()
637
637
638 __all__.append('KqueueSelector')
638 __all__.append('KqueueSelector')
639
639
640
640
641 def _can_allocate(struct):
641 def _can_allocate(struct):
642 """ Checks that select structs can be allocated by the underlying
642 """ Checks that select structs can be allocated by the underlying
643 operating system, not just advertised by the select module. We don't
643 operating system, not just advertised by the select module. We don't
644 check select() because we are hopeful that most platforms that
644 check select() because we are hopeful that most platforms that
645 don't have it available will not advertise it (e.g. GAE). """
645 don't have it available will not advertise it (e.g. GAE). """
646 try:
646 try:
647 # select.poll() objects won't fail until used.
647 # select.poll() objects won't fail until used.
648 if struct == 'poll':
648 if struct == 'poll':
649 p = select.poll()
649 p = select.poll()
650 p.poll(0)
650 p.poll(0)
651
651
652 # All others will fail on allocation.
652 # All others will fail on allocation.
653 else:
653 else:
654 getattr(select, struct)().close()
654 getattr(select, struct)().close()
655 return True
655 return True
656 except (OSError, AttributeError):
656 except (OSError, AttributeError):
657 return False
657 return False
658
658
659
659
660 # Python 3.5 uses a more direct route to wrap system calls to increase speed.
660 # Python 3.5 uses a more direct route to wrap system calls to increase speed.
661 if sys.version_info >= (3, 5):
661 if sys.version_info >= (3, 5):
662 def _syscall_wrapper(func, _, *args, **kwargs):
662 def _syscall_wrapper(func, _, *args, **kwargs):
663 """ This is the short-circuit version of the below logic
663 """ This is the short-circuit version of the below logic
664 because in Python 3.5+ all selectors restart system calls. """
664 because in Python 3.5+ all selectors restart system calls. """
665 return func(*args, **kwargs)
665 return func(*args, **kwargs)
666 else:
666 else:
667 def _syscall_wrapper(func, recalc_timeout, *args, **kwargs):
667 def _syscall_wrapper(func, recalc_timeout, *args, **kwargs):
668 """ Wrapper function for syscalls that could fail due to EINTR.
668 """ Wrapper function for syscalls that could fail due to EINTR.
669 All functions should be retried if there is time left in the timeout
669 All functions should be retried if there is time left in the timeout
670 in accordance with PEP 475. """
670 in accordance with PEP 475. """
671 timeout = kwargs.get("timeout", None)
671 timeout = kwargs.get("timeout", None)
672 if timeout is None:
672 if timeout is None:
673 expires = None
673 expires = None
674 recalc_timeout = False
674 recalc_timeout = False
675 else:
675 else:
676 timeout = float(timeout)
676 timeout = float(timeout)
677 if timeout < 0.0: # Timeout less than 0 treated as no timeout.
677 if timeout < 0.0: # Timeout less than 0 treated as no timeout.
678 expires = None
678 expires = None
679 else:
679 else:
680 expires = monotonic() + timeout
680 expires = monotonic() + timeout
681
681
682 args = list(args)
682 args = list(args)
683 if recalc_timeout and "timeout" not in kwargs:
683 if recalc_timeout and "timeout" not in kwargs:
684 raise ValueError(
684 raise ValueError(
685 "Timeout must be in args or kwargs to be recalculated")
685 "Timeout must be in args or kwargs to be recalculated")
686
686
687 result = _SYSCALL_SENTINEL
687 result = _SYSCALL_SENTINEL
688 while result is _SYSCALL_SENTINEL:
688 while result is _SYSCALL_SENTINEL:
689 try:
689 try:
690 result = func(*args, **kwargs)
690 result = func(*args, **kwargs)
691 # OSError is thrown by select.select
691 # OSError is thrown by select.select
692 # IOError is thrown by select.epoll.poll
692 # IOError is thrown by select.epoll.poll
693 # select.error is thrown by select.poll.poll
693 # select.error is thrown by select.poll.poll
694 # Aren't we thankful for Python 3.x rework for exceptions?
694 # Aren't we thankful for Python 3.x rework for exceptions?
695 except (OSError, IOError, select.error) as e:
695 except (OSError, IOError, select.error) as e:
696 # select.error wasn't a subclass of OSError in the past.
696 # select.error wasn't a subclass of OSError in the past.
697 errcode = None
697 errcode = None
698 if hasattr(e, "errno"):
698 if hasattr(e, "errno"):
699 errcode = e.errno
699 errcode = e.errno
700 elif hasattr(e, "args"):
700 elif hasattr(e, "args"):
701 errcode = e.args[0]
701 errcode = e.args[0]
702
702
703 # Also test for the Windows equivalent of EINTR.
703 # Also test for the Windows equivalent of EINTR.
704 is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and
704 is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and
705 errcode == errno.WSAEINTR))
705 errcode == errno.WSAEINTR))
706
706
707 if is_interrupt:
707 if is_interrupt:
708 if expires is not None:
708 if expires is not None:
709 current_time = monotonic()
709 current_time = monotonic()
710 if current_time > expires:
710 if current_time > expires:
711 raise OSError(errno=errno.ETIMEDOUT)
711 raise OSError(errno.ETIMEDOUT, 'Connection timed out')
712 if recalc_timeout:
712 if recalc_timeout:
713 if "timeout" in kwargs:
713 if "timeout" in kwargs:
714 kwargs["timeout"] = expires - current_time
714 kwargs["timeout"] = expires - current_time
715 continue
715 continue
716 raise
716 raise
717 return result
717 return result
718
718
719
719
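A hedged usage sketch of the wrapper above (the callable and the 2.5 second budget are illustrative assumptions, not part of this module): because recalc_timeout is True and the timeout is passed as a keyword, an EINTR one second in leads to a retry with roughly 1.5 seconds remaining rather than a fresh 2.5 seconds, and ETIMEDOUT is raised once the deadline passes.

    # Sketch only: wrapped_poll stands in for a method such as
    # DevpollSelector._wrap_poll that accepts a `timeout` keyword argument.
    fd_events = _syscall_wrapper(wrapped_poll, True, timeout=2.5)
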
720 # Choose the best implementation, roughly:
720 # Choose the best implementation, roughly:
721 # kqueue == devpoll == epoll > poll > select
721 # kqueue == devpoll == epoll > poll > select
722 # select() also can't accept a FD > FD_SETSIZE (usually around 1024)
722 # select() also can't accept a FD > FD_SETSIZE (usually around 1024)
723 def DefaultSelector():
723 def DefaultSelector():
724 """ This function serves as a first call for DefaultSelector to
724 """ This function serves as a first call for DefaultSelector to
725 detect whether the select module has been monkey-patched incorrectly
725 detect whether the select module has been monkey-patched incorrectly
726 (e.g. by eventlet or greenlet) and to preserve proper behavior. """
726 (e.g. by eventlet or greenlet) and to preserve proper behavior. """
727 global _DEFAULT_SELECTOR
727 global _DEFAULT_SELECTOR
728 if _DEFAULT_SELECTOR is None:
728 if _DEFAULT_SELECTOR is None:
729 if pycompat.isjython:
729 if pycompat.isjython:
730 _DEFAULT_SELECTOR = JythonSelectSelector
730 _DEFAULT_SELECTOR = JythonSelectSelector
731 elif _can_allocate('kqueue'):
731 elif _can_allocate('kqueue'):
732 _DEFAULT_SELECTOR = KqueueSelector
732 _DEFAULT_SELECTOR = KqueueSelector
733 elif _can_allocate('devpoll'):
733 elif _can_allocate('devpoll'):
734 _DEFAULT_SELECTOR = DevpollSelector
734 _DEFAULT_SELECTOR = DevpollSelector
735 elif _can_allocate('epoll'):
735 elif _can_allocate('epoll'):
736 _DEFAULT_SELECTOR = EpollSelector
736 _DEFAULT_SELECTOR = EpollSelector
737 elif _can_allocate('poll'):
737 elif _can_allocate('poll'):
738 _DEFAULT_SELECTOR = PollSelector
738 _DEFAULT_SELECTOR = PollSelector
739 elif hasattr(select, 'select'):
739 elif hasattr(select, 'select'):
740 _DEFAULT_SELECTOR = SelectSelector
740 _DEFAULT_SELECTOR = SelectSelector
741 else: # Platform-specific: AppEngine
741 else: # Platform-specific: AppEngine
742 raise RuntimeError('Platform does not have a selector.')
742 raise RuntimeError('Platform does not have a selector.')
743 return _DEFAULT_SELECTOR()
743 return _DEFAULT_SELECTOR()
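
To make the public surface of this vendored selector concrete, here is a minimal, hedged usage sketch. The socket pair, the payload, and the use of the usual SelectorKey fields (fileobj, fd, events, data) are illustrative assumptions rather than code from this file.

    import socket

    # Sketch: register one end of a socket pair for reads and service it once.
    sel = DefaultSelector()
    a, b = socket.socketpair()
    sel.register(a, EVENT_READ, data=b'demo')
    b.sendall(b'ping')
    for key, events in sel.select(timeout=1.0):
        if events & EVENT_READ:
            print(key.fileobj.recv(16), key.data)
    sel.unregister(a)
    sel.close()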
@@ -1,524 +1,532 b''
1 # wireprotov2peer.py - client side code for wire protocol version 2
1 # wireprotov2peer.py - client side code for wire protocol version 2
2 #
2 #
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import threading
10 import threading
11
11
12 from .i18n import _
12 from .i18n import _
13 from . import (
13 from . import (
14 encoding,
14 encoding,
15 error,
15 error,
16 pycompat,
16 pycompat,
17 sslutil,
17 sslutil,
18 url as urlmod,
18 url as urlmod,
19 util,
19 util,
20 wireprotoframing,
20 wireprotoframing,
21 wireprototypes,
21 wireprototypes,
22 )
22 )
23 from .utils import (
23 from .utils import (
24 cborutil,
24 cborutil,
25 )
25 )
26
26
27 def formatrichmessage(atoms):
27 def formatrichmessage(atoms):
28 """Format an encoded message from the framing protocol."""
28 """Format an encoded message from the framing protocol."""
29
29
30 chunks = []
30 chunks = []
31
31
32 for atom in atoms:
32 for atom in atoms:
33 msg = _(atom[b'msg'])
33 msg = _(atom[b'msg'])
34
34
35 if b'args' in atom:
35 if b'args' in atom:
36 msg = msg % tuple(atom[b'args'])
36 msg = msg % tuple(atom[b'args'])
37
37
38 chunks.append(msg)
38 chunks.append(msg)
39
39
40 return b''.join(chunks)
40 return b''.join(chunks)
41
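A quick hedged illustration of the interpolation above (the atom contents are invented): assuming no translation catalogue rewrites the message, the call below yields b'pulling from server\n'.

    # Illustration only; the atom values are hypothetical.
    atoms = [{b'msg': b'pulling from %s\n', b'args': [b'server']}]
    formatrichmessage(atoms)  # -> b'pulling from server\n'
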
41
42 SUPPORTED_REDIRECT_PROTOCOLS = {
42 SUPPORTED_REDIRECT_PROTOCOLS = {
43 b'http',
43 b'http',
44 b'https',
44 b'https',
45 }
45 }
46
46
47 SUPPORTED_CONTENT_HASHES = {
47 SUPPORTED_CONTENT_HASHES = {
48 b'sha1',
48 b'sha1',
49 b'sha256',
49 b'sha256',
50 }
50 }
51
51
52 def redirecttargetsupported(ui, target):
52 def redirecttargetsupported(ui, target):
53 """Determine whether a redirect target entry is supported.
53 """Determine whether a redirect target entry is supported.
54
54
55 ``target`` should come from the capabilities data structure emitted by
55 ``target`` should come from the capabilities data structure emitted by
56 the server.
56 the server.
57 """
57 """
58 if target.get(b'protocol') not in SUPPORTED_REDIRECT_PROTOCOLS:
58 if target.get(b'protocol') not in SUPPORTED_REDIRECT_PROTOCOLS:
59 ui.note(_('(remote redirect target %s uses unsupported protocol: %s)\n')
59 ui.note(_('(remote redirect target %s uses unsupported protocol: %s)\n')
60 % (target[b'name'], target.get(b'protocol', b'')))
60 % (target[b'name'], target.get(b'protocol', b'')))
61 return False
61 return False
62
62
63 if target.get(b'snirequired') and not sslutil.hassni:
63 if target.get(b'snirequired') and not sslutil.hassni:
64 ui.note(_('(redirect target %s requires SNI, which is unsupported)\n') %
64 ui.note(_('(redirect target %s requires SNI, which is unsupported)\n') %
65 target[b'name'])
65 target[b'name'])
66 return False
66 return False
67
67
68 if b'tlsversions' in target:
68 if b'tlsversions' in target:
69 tlsversions = set(target[b'tlsversions'])
69 tlsversions = set(target[b'tlsversions'])
70 supported = set()
70 supported = set()
71
71
72 for v in sslutil.supportedprotocols:
72 for v in sslutil.supportedprotocols:
73 assert v.startswith(b'tls')
73 assert v.startswith(b'tls')
74 supported.add(v[3:])
74 supported.add(v[3:])
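# e.g. a hypothetically advertised b'tls1.2' is recorded as b'1.2',
# matching the bare version strings the server lists under b'tlsversions'.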
75
75
76 if not tlsversions & supported:
76 if not tlsversions & supported:
77 ui.note(_('(remote redirect target %s requires unsupported TLS '
77 ui.note(_('(remote redirect target %s requires unsupported TLS '
78 'versions: %s)\n') % (
78 'versions: %s)\n') % (
79 target[b'name'], b', '.join(sorted(tlsversions))))
79 target[b'name'], b', '.join(sorted(tlsversions))))
80 return False
80 return False
81
81
82 ui.note(_('(remote redirect target %s is compatible)\n') % target[b'name'])
82 ui.note(_('(remote redirect target %s is compatible)\n') % target[b'name'])
83
83
84 return True
84 return True
85
85
86 def supportedredirects(ui, apidescriptor):
86 def supportedredirects(ui, apidescriptor):
87 """Resolve the "redirect" command request key given an API descriptor.
87 """Resolve the "redirect" command request key given an API descriptor.
88
88
89 Given an API descriptor returned by the server, returns a data structure
89 Given an API descriptor returned by the server, returns a data structure
90 that can be used in the "redirect" field of command requests to advertise
90 that can be used in hte "redirect" field of command requests to advertise
91 support for compatible redirect targets.
91 support for compatible redirect targets.
92
92
93 Returns None if no redirect targets are remotely advertised or if none are
93 Returns None if no redirect targets are remotely advertised or if none are
94 supported.
94 supported.
95 """
95 """
96 if not apidescriptor or b'redirect' not in apidescriptor:
96 if not apidescriptor or b'redirect' not in apidescriptor:
97 return None
97 return None
98
98
99 targets = [t[b'name'] for t in apidescriptor[b'redirect'][b'targets']
99 targets = [t[b'name'] for t in apidescriptor[b'redirect'][b'targets']
100 if redirecttargetsupported(ui, t)]
100 if redirecttargetsupported(ui, t)]
101
101
102 hashes = [h for h in apidescriptor[b'redirect'][b'hashes']
102 hashes = [h for h in apidescriptor[b'redirect'][b'hashes']
103 if h in SUPPORTED_CONTENT_HASHES]
103 if h in SUPPORTED_CONTENT_HASHES]
104
104
105 return {
105 return {
106 b'targets': targets,
106 b'targets': targets,
107 b'hashes': hashes,
107 b'hashes': hashes,
108 }
108 }
109
109
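As a hedged illustration of the value this helper produces (the target name b'cdn' is invented): a server advertising one compatible target and sha256 support would yield something like the dictionary below, ready to be used as the "redirect" field of a command request.

    # Hypothetical result shape.
    redirect = {b'targets': [b'cdn'], b'hashes': [b'sha256']}
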
110 class commandresponse(object):
110 class commandresponse(object):
111 """Represents the response to a command request.
111 """Represents the response to a command request.
112
112
113 Instances track the state of the command and hold its results.
113 Instances track the state of the command and hold its results.
114
114
115 An external entity is required to update the state of the object when
115 An external entity is required to update the state of the object when
116 events occur.
116 events occur.
117 """
117 """
118
118
119 def __init__(self, requestid, command, fromredirect=False):
119 def __init__(self, requestid, command, fromredirect=False):
120 self.requestid = requestid
120 self.requestid = requestid
121 self.command = command
121 self.command = command
122 self.fromredirect = fromredirect
122 self.fromredirect = fromredirect
123
123
124 # Whether all remote input related to this command has been
124 # Whether all remote input related to this command has been
125 # received.
125 # received.
126 self._inputcomplete = False
126 self._inputcomplete = False
127
127
128 # We have a lock that is acquired when important object state is
128 # We have a lock that is acquired when important object state is
129 # mutated. This is to prevent race conditions between one thread
129 # mutated. This is to prevent race conditions between one thread
130 # sending us new data and another consuming it.
130 # sending us new data and another consuming it.
131 self._lock = threading.RLock()
131 self._lock = threading.RLock()
132
132
133 # An event is set when state of the object changes. This event
133 # An event is set when state of the object changes. This event
134 # is waited on by the generator emitting objects.
134 # is waited on by the generator emitting objects.
135 self._serviceable = threading.Event()
135 self._serviceable = threading.Event()
136
136
137 self._pendingevents = []
137 self._pendingevents = []
138 self._pendingerror = None
138 self._pendingerror = None
139 self._decoder = cborutil.bufferingdecoder()
139 self._decoder = cborutil.bufferingdecoder()
140 self._seeninitial = False
140 self._seeninitial = False
141 self._redirect = None
141 self._redirect = None
142
142
143 def _oninputcomplete(self):
143 def _oninputcomplete(self):
144 with self._lock:
144 with self._lock:
145 self._inputcomplete = True
145 self._inputcomplete = True
146 self._serviceable.set()
146 self._serviceable.set()
147
147
148 def _onresponsedata(self, data):
148 def _onresponsedata(self, data):
149 available, readcount, wanted = self._decoder.decode(data)
149 available, readcount, wanted = self._decoder.decode(data)
150
150
151 if not available:
151 if not available:
152 return
152 return
153
153
154 with self._lock:
154 with self._lock:
155 for o in self._decoder.getavailable():
155 for o in self._decoder.getavailable():
156 if not self._seeninitial and not self.fromredirect:
156 if not self._seeninitial and not self.fromredirect:
157 self._handleinitial(o)
157 self._handleinitial(o)
158 continue
158 continue
159
159
160 # We should never see an object after a content redirect,
160 # We should never see an object after a content redirect,
161 # as the spec says the main status object containing the
161 # as the spec says the main status object containing the
162 # content redirect is the only object in the stream. Fail
162 # content redirect is the only object in the stream. Fail
163 # if we see a misbehaving server.
163 # if we see a misbehaving server.
164 if self._redirect:
164 if self._redirect:
165 raise error.Abort(_('received unexpected response data '
165 raise error.Abort(_('received unexpected response data '
166 'after content redirect; the remote is '
166 'after content redirect; the remote is '
167 'buggy'))
167 'buggy'))
168
168
169 self._pendingevents.append(o)
169 self._pendingevents.append(o)
170
170
171 self._serviceable.set()
171 self._serviceable.set()
172
172
173 def _onerror(self, e):
173 def _onerror(self, e):
174 self._pendingerror = e
174 self._pendingerror = e
175
175
176 with self._lock:
176 with self._lock:
177 self._serviceable.set()
177 self._serviceable.set()
178
178
179 def _handleinitial(self, o):
179 def _handleinitial(self, o):
180 self._seeninitial = True
180 self._seeninitial = True
181 if o[b'status'] == b'ok':
181 if o[b'status'] == b'ok':
182 return
182 return
183
183
184 elif o[b'status'] == b'redirect':
184 elif o[b'status'] == b'redirect':
185 l = o[b'location']
185 l = o[b'location']
186 self._redirect = wireprototypes.alternatelocationresponse(
186 self._redirect = wireprototypes.alternatelocationresponse(
187 url=l[b'url'],
187 url=l[b'url'],
188 mediatype=l[b'mediatype'],
188 mediatype=l[b'mediatype'],
189 size=l.get(b'size'),
189 size=l.get(b'size'),
190 fullhashes=l.get(b'fullhashes'),
190 fullhashes=l.get(b'fullhashes'),
191 fullhashseed=l.get(b'fullhashseed'),
191 fullhashseed=l.get(b'fullhashseed'),
192 serverdercerts=l.get(b'serverdercerts'),
192 serverdercerts=l.get(b'serverdercerts'),
193 servercadercerts=l.get(b'servercadercerts'))
193 servercadercerts=l.get(b'servercadercerts'))
194 return
194 return
195
195
196 atoms = [{'msg': o[b'error'][b'message']}]
196 atoms = [{'msg': o[b'error'][b'message']}]
197 if b'args' in o[b'error']:
197 if b'args' in o[b'error']:
198 atoms[0]['args'] = o[b'error'][b'args']
198 atoms[0]['args'] = o[b'error'][b'args']
199
199
200 raise error.RepoError(formatrichmessage(atoms))
200 raise error.RepoError(formatrichmessage(atoms))
201
201
202 def objects(self):
202 def objects(self):
203 """Obtained decoded objects from this response.
203 """Obtained decoded objects from this response.
204
204
205 This is a generator of data structures that were decoded from the
205 This is a generator of data structures that were decoded from the
206 command response.
206 command response.
207
207
208 Obtaining the next member of the generator may block due to waiting
208 Obtaining the next member of the generator may block due to waiting
209 on external data to become available.
209 on external data to become available.
210
210
211 If the server encountered an error in the middle of serving the data
211 If the server encountered an error in the middle of serving the data
212 or if another error occurred, an exception may be raised when
212 or if another error occurred, an exception may be raised when
213 advancing the generator.
213 advancing the generator.
214 """
214 """
215 while True:
215 while True:
216 # TODO this can infinite loop if self._inputcomplete is never
216 # TODO this can infinite loop if self._inputcomplete is never
217 # set. We likely want to tie the lifetime of this object/state
217 # set. We likely want to tie the lifetime of this object/state
218 # to that of the background thread receiving frames and updating
218 # to that of the background thread receiving frames and updating
219 # our state.
219 # our state.
220 self._serviceable.wait(1.0)
220 self._serviceable.wait(1.0)
221
221
222 if self._pendingerror:
222 if self._pendingerror:
223 raise self._pendingerror
223 raise self._pendingerror
224
224
225 with self._lock:
225 with self._lock:
226 self._serviceable.clear()
226 self._serviceable.clear()
227
227
228 # Make copies because objects could be mutated during
228 # Make copies because objects could be mutated during
229 # iteration.
229 # iteration.
230 stop = self._inputcomplete
230 stop = self._inputcomplete
231 pending = list(self._pendingevents)
231 pending = list(self._pendingevents)
232 self._pendingevents[:] = []
232 self._pendingevents[:] = []
233
233
234 for o in pending:
234 for o in pending:
235 yield o
235 yield o
236
236
237 if stop:
237 if stop:
238 break
238 break
239
239
240 class clienthandler(object):
240 class clienthandler(object):
241 """Object to handle higher-level client activities.
241 """Object to handle higher-level client activities.
242
242
243 The ``clientreactor`` is used to hold low-level state about the frame-based
243 The ``clientreactor`` is used to hold low-level state about the frame-based
244 protocol, such as which requests and streams are active. This type is used
244 protocol, such as which requests and streams are active. This type is used
245 for higher-level operations, such as reading frames from a socket, exposing
245 for higher-level operations, such as reading frames from a socket, exposing
246 and managing a higher-level primitive for representing command responses,
246 and managing a higher-level primitive for representing command responses,
247 etc. This class is what peers should probably use to bridge wire activity
247 etc. This class is what peers should probably use to bridge wire activity
248 with the higher-level peer API.
248 with the higher-level peer API.
249 """
249 """
250
250
251 def __init__(self, ui, clientreactor, opener=None,
251 def __init__(self, ui, clientreactor, opener=None,
252 requestbuilder=util.urlreq.request):
252 requestbuilder=util.urlreq.request):
253 self._ui = ui
253 self._ui = ui
254 self._reactor = clientreactor
254 self._reactor = clientreactor
255 self._requests = {}
255 self._requests = {}
256 self._futures = {}
256 self._futures = {}
257 self._responses = {}
257 self._responses = {}
258 self._redirects = []
258 self._redirects = []
259 self._frameseof = False
259 self._frameseof = False
260 self._opener = opener or urlmod.opener(ui)
260 self._opener = opener or urlmod.opener(ui)
261 self._requestbuilder = requestbuilder
261 self._requestbuilder = requestbuilder
262
262
263 def callcommand(self, command, args, f, redirect=None):
263 def callcommand(self, command, args, f, redirect=None):
264 """Register a request to call a command.
264 """Register a request to call a command.
265
265
266 Returns an iterable of frames that should be sent over the wire.
266 Returns an iterable of frames that should be sent over the wire.
267 """
267 """
268 request, action, meta = self._reactor.callcommand(command, args,
268 request, action, meta = self._reactor.callcommand(command, args,
269 redirect=redirect)
269 redirect=redirect)
270
270
271 if action != 'noop':
271 if action != 'noop':
272 raise error.ProgrammingError('%s not yet supported' % action)
272 raise error.ProgrammingError('%s not yet supported' % action)
273
273
274 rid = request.requestid
274 rid = request.requestid
275 self._requests[rid] = request
275 self._requests[rid] = request
276 self._futures[rid] = f
276 self._futures[rid] = f
277 # TODO we need some kind of lifetime on response instances otherwise
277 # TODO we need some kind of lifetime on response instances otherwise
278 # objects() may deadlock.
278 # objects() may deadlock.
279 self._responses[rid] = commandresponse(rid, command)
279 self._responses[rid] = commandresponse(rid, command)
280
280
281 return iter(())
281 return iter(())
282
282
283 def flushcommands(self):
283 def flushcommands(self):
284 """Flush all queued commands.
284 """Flush all queued commands.
285
285
286 Returns an iterable of frames that should be sent over the wire.
286 Returns an iterable of frames that should be sent over the wire.
287 """
287 """
288 action, meta = self._reactor.flushcommands()
288 action, meta = self._reactor.flushcommands()
289
289
290 if action != 'sendframes':
290 if action != 'sendframes':
291 raise error.ProgrammingError('%s not yet supported' % action)
291 raise error.ProgrammingError('%s not yet supported' % action)
292
292
293 return meta['framegen']
293 return meta['framegen']
294
294
295 def readdata(self, framefh):
295 def readdata(self, framefh):
296 """Attempt to read data and do work.
296 """Attempt to read data and do work.
297
297
298 Returns None if no data was read. Presumably this means we're
298 Returns None if no data was read. Presumably this means we're
299 done with all read I/O.
299 done with all read I/O.
300 """
300 """
301 if not self._frameseof:
301 if not self._frameseof:
302 frame = wireprotoframing.readframe(framefh)
302 frame = wireprotoframing.readframe(framefh)
303 if frame is None:
303 if frame is None:
304 # TODO tell reactor?
304 # TODO tell reactor?
305 self._frameseof = True
305 self._frameseof = True
306 else:
306 else:
307 self._ui.note(_('received %r\n') % frame)
307 self._ui.note(_('received %r\n') % frame)
308 self._processframe(frame)
308 self._processframe(frame)
309
309
310 # Also try to read the first redirect.
310 # Also try to read the first redirect.
311 if self._redirects:
311 if self._redirects:
312 if not self._processredirect(*self._redirects[0]):
312 if not self._processredirect(*self._redirects[0]):
313 self._redirects.pop(0)
313 self._redirects.pop(0)
314
314
315 if self._frameseof and not self._redirects:
315 if self._frameseof and not self._redirects:
316 return None
316 return None
317
317
318 return True
318 return True
319
319
320 def _processframe(self, frame):
320 def _processframe(self, frame):
321 """Process a single read frame."""
321 """Process a single read frame."""
322
322
323 action, meta = self._reactor.onframerecv(frame)
323 action, meta = self._reactor.onframerecv(frame)
324
324
325 if action == 'error':
325 if action == 'error':
326 e = error.RepoError(meta['message'])
326 e = error.RepoError(meta['message'])
327
327
328 if frame.requestid in self._responses:
328 if frame.requestid in self._responses:
329 self._responses[frame.requestid]._oninputcomplete()
329 self._responses[frame.requestid]._oninputcomplete()
330
330
331 if frame.requestid in self._futures:
331 if frame.requestid in self._futures:
332 self._futures[frame.requestid].set_exception(e)
332 self._futures[frame.requestid].set_exception(e)
333 del self._futures[frame.requestid]
333 del self._futures[frame.requestid]
334 else:
334 else:
335 raise e
335 raise e
336
336
337 return
337 return
338 elif action == 'noop':
338 elif action == 'noop':
339 return
339 return
340 elif action == 'responsedata':
340 elif action == 'responsedata':
341 # Handled below.
341 # Handled below.
342 pass
342 pass
343 else:
343 else:
344 raise error.ProgrammingError('action not handled: %s' % action)
344 raise error.ProgrammingError('action not handled: %s' % action)
345
345
346 if frame.requestid not in self._requests:
346 if frame.requestid not in self._requests:
347 raise error.ProgrammingError(
347 raise error.ProgrammingError(
348 'received frame for unknown request; this is either a bug in '
348 'received frame for unknown request; this is either a bug in '
349 'the clientreactor not screening for this or this instance was '
349 'the clientreactor not screening for this or this instance was '
350 'never told about this request: %r' % frame)
350 'never told about this request: %r' % frame)
351
351
352 response = self._responses[frame.requestid]
352 response = self._responses[frame.requestid]
353
353
354 if action == 'responsedata':
354 if action == 'responsedata':
355 # Any failures processing this frame should bubble up to the
355 # Any failures processing this frame should bubble up to the
356 # future tracking the request.
356 # future tracking the request.
357 try:
357 try:
358 self._processresponsedata(frame, meta, response)
358 self._processresponsedata(frame, meta, response)
359 except BaseException as e:
359 except BaseException as e:
360 # If an exception occurs before the future is resolved,
360 # If an exception occurs before the future is resolved,
361 # fail the future. Otherwise, we stuff the exception on
361 # fail the future. Otherwise, we stuff the exception on
362 # the response object so it can be raised during objects()
362 # the response object so it can be raised during objects()
363 # iteration. If nothing is consuming objects(), we could
363 # iteration. If nothing is consuming objects(), we could
364 # silently swallow this exception. That's a risk we'll have to
364 # silently swallow this exception. That's a risk we'll have to
365 # take.
365 # take.
366 if frame.requestid in self._futures:
366 if frame.requestid in self._futures:
367 self._futures[frame.requestid].set_exception(e)
367 self._futures[frame.requestid].set_exception(e)
368 del self._futures[frame.requestid]
368 del self._futures[frame.requestid]
369 response._oninputcomplete()
369 response._oninputcomplete()
370 else:
370 else:
371 response._onerror(e)
371 response._onerror(e)
372 else:
372 else:
373 raise error.ProgrammingError(
373 raise error.ProgrammingError(
374 'unhandled action from clientreactor: %s' % action)
374 'unhandled action from clientreactor: %s' % action)
375
375
376 def _processresponsedata(self, frame, meta, response):
376 def _processresponsedata(self, frame, meta, response):
377 # This can raise. The caller can handle it.
377 # This can raise. The caller can handle it.
378 response._onresponsedata(meta['data'])
378 response._onresponsedata(meta['data'])
379
379
380 # If we got a content redirect response, we want to fetch it and
380 # We need to be careful about resolving futures prematurely. If a
381 # expose the data as if we received it inline. But we also want to
381 # response is a redirect response, resolving the future before the
382 # keep our internal request accounting in order. Our strategy is to
382 # redirect is processed would result in the consumer seeing an
383 # basically put meaningful response handling on pause until EOS occurs
383 # empty stream of objects, since they'd be consuming our
384 # and the stream accounting is in a good state. At that point, we follow
384 # response.objects() instead of the redirect's response.objects().
385 # the redirect and replace the response object with its data.
385 #
386 # Our strategy is to not resolve/finish the request until either
387 # EOS occurs or until the initial response object is fully received.
386
388
387 redirect = response._redirect
389 # Always react to eos.
388 handlefuture = False if redirect else True
389
390 if meta['eos']:
390 if meta['eos']:
391 response._oninputcomplete()
391 response._oninputcomplete()
392 del self._requests[frame.requestid]
392 del self._requests[frame.requestid]
393
393
394 if redirect:
394 # Not EOS but we haven't decoded the initial response object yet.
395 self._followredirect(frame.requestid, redirect)
395 # Return and wait for more data.
396 return
396 elif not response._seeninitial:
397 return
397
398
398 if not handlefuture:
399 # The specification says no objects should follow the initial/redirect
400 # object. So it should be safe to handle the redirect object if one is
401 # decoded, without having to wait for EOS.
402 if response._redirect:
403 self._followredirect(frame.requestid, response._redirect)
399 return
404 return
400
405
401 # If the command has a decoder, we wait until all input has been
406 # If the command has a decoder, we wait until all input has been
402 # received before resolving the future. Otherwise we resolve the
407 # received before resolving the future. Otherwise we resolve the
403 # future immediately.
408 # future immediately.
404 if frame.requestid not in self._futures:
409 if frame.requestid not in self._futures:
405 return
410 return
406
411
407 if response.command not in COMMAND_DECODERS:
412 if response.command not in COMMAND_DECODERS:
408 self._futures[frame.requestid].set_result(response.objects())
413 self._futures[frame.requestid].set_result(response.objects())
409 del self._futures[frame.requestid]
414 del self._futures[frame.requestid]
410 elif response._inputcomplete:
415 elif response._inputcomplete:
411 decoded = COMMAND_DECODERS[response.command](response.objects())
416 decoded = COMMAND_DECODERS[response.command](response.objects())
412 self._futures[frame.requestid].set_result(decoded)
417 self._futures[frame.requestid].set_result(decoded)
413 del self._futures[frame.requestid]
418 del self._futures[frame.requestid]
414
419
415 def _followredirect(self, requestid, redirect):
420 def _followredirect(self, requestid, redirect):
416 """Called to initiate redirect following for a request."""
421 """Called to initiate redirect following for a request."""
417 self._ui.note(_('(following redirect to %s)\n') % redirect.url)
422 self._ui.note(_('(following redirect to %s)\n') % redirect.url)
418
423
419 # TODO handle framed responses.
424 # TODO handle framed responses.
420 if redirect.mediatype != b'application/mercurial-cbor':
425 if redirect.mediatype != b'application/mercurial-cbor':
421 raise error.Abort(_('cannot handle redirects for the %s media type')
426 raise error.Abort(_('cannot handle redirects for the %s media type')
422 % redirect.mediatype)
427 % redirect.mediatype)
423
428
424 if redirect.fullhashes:
429 if redirect.fullhashes:
425 self._ui.warn(_('(support for validating hashes on content '
430 self._ui.warn(_('(support for validating hashes on content '
426 'redirects not supported)\n'))
431 'redirects not supported)\n'))
427
432
428 if redirect.serverdercerts or redirect.servercadercerts:
433 if redirect.serverdercerts or redirect.servercadercerts:
429 self._ui.warn(_('(support for pinning server certificates on '
434 self._ui.warn(_('(support for pinning server certificates on '
430 'content redirects not supported)\n'))
435 'content redirects not supported)\n'))
431
436
432 headers = {
437 headers = {
433 r'Accept': redirect.mediatype,
438 r'Accept': redirect.mediatype,
434 }
439 }
435
440
436 req = self._requestbuilder(pycompat.strurl(redirect.url), None, headers)
441 req = self._requestbuilder(pycompat.strurl(redirect.url), None, headers)
437
442
438 try:
443 try:
439 res = self._opener.open(req)
444 res = self._opener.open(req)
440 except util.urlerr.httperror as e:
445 except util.urlerr.httperror as e:
441 if e.code == 401:
446 if e.code == 401:
442 raise error.Abort(_('authorization failed'))
447 raise error.Abort(_('authorization failed'))
443 raise
448 raise
444 except util.httplib.HTTPException as e:
449 except util.httplib.HTTPException as e:
445 self._ui.debug('http error requesting %s\n' % req.get_full_url())
450 self._ui.debug('http error requesting %s\n' % req.get_full_url())
446 self._ui.traceback()
451 self._ui.traceback()
447 raise IOError(None, e)
452 raise IOError(None, e)
448
453
449 urlmod.wrapresponse(res)
454 urlmod.wrapresponse(res)
450
455
451 # The existing response object is associated with frame data. Rather
456 # The existing response object is associated with frame data. Rather
452 # than try to normalize its state, just create a new object.
457 # than try to normalize its state, just create a new object.
453 oldresponse = self._responses[requestid]
458 oldresponse = self._responses[requestid]
454 self._responses[requestid] = commandresponse(requestid,
459 self._responses[requestid] = commandresponse(requestid,
455 oldresponse.command,
460 oldresponse.command,
456 fromredirect=True)
461 fromredirect=True)
457
462
458 self._redirects.append((requestid, res))
463 self._redirects.append((requestid, res))
459
464
460 def _processredirect(self, rid, res):
465 def _processredirect(self, rid, res):
461 """Called to continue processing a response from a redirect."""
466 """Called to continue processing a response from a redirect.
467
468 Returns a bool indicating if the redirect is still serviceable.
469 """
462 response = self._responses[rid]
470 response = self._responses[rid]
463
471
464 try:
472 try:
465 data = res.read(32768)
473 data = res.read(32768)
466 response._onresponsedata(data)
474 response._onresponsedata(data)
467
475
468 # We're at end of stream.
476 # We're at end of stream.
469 if not data:
477 if not data:
470 response._oninputcomplete()
478 response._oninputcomplete()
471
479
472 if rid not in self._futures:
480 if rid not in self._futures:
473 return
481 return bool(data)
474
482
475 if response.command not in COMMAND_DECODERS:
483 if response.command not in COMMAND_DECODERS:
476 self._futures[rid].set_result(response.objects())
484 self._futures[rid].set_result(response.objects())
477 del self._futures[rid]
485 del self._futures[rid]
478 elif response._inputcomplete:
486 elif response._inputcomplete:
479 decoded = COMMAND_DECODERS[response.command](response.objects())
487 decoded = COMMAND_DECODERS[response.command](response.objects())
480 self._futures[rid].set_result(decoded)
488 self._futures[rid].set_result(decoded)
481 del self._futures[rid]
489 del self._futures[rid]
482
490
483 return bool(data)
491 return bool(data)
484
492
485 except BaseException as e:
493 except BaseException as e:
486 self._futures[rid].set_exception(e)
494 self._futures[rid].set_exception(e)
487 del self._futures[rid]
495 del self._futures[rid]
488 response._oninputcomplete()
496 response._oninputcomplete()
489 return False
497 return False
490
498
491 def decodebranchmap(objs):
499 def decodebranchmap(objs):
492 # Response should be a single CBOR map of branch name to array of nodes.
500 # Response should be a single CBOR map of branch name to array of nodes.
493 bm = next(objs)
501 bm = next(objs)
494
502
495 return {encoding.tolocal(k): v for k, v in bm.items()}
503 return {encoding.tolocal(k): v for k, v in bm.items()}
496
504
497 def decodeheads(objs):
505 def decodeheads(objs):
498 # Array of node bytestrings.
506 # Array of node bytestrings.
499 return next(objs)
507 return next(objs)
500
508
501 def decodeknown(objs):
509 def decodeknown(objs):
502 # Bytestring where each byte is a 0 or 1.
510 # Bytestring where each byte is a 0 or 1.
503 raw = next(objs)
511 raw = next(objs)
504
512
505 return [True if c == '1' else False for c in raw]
513 return [True if c == '1' else False for c in raw]
506
514
507 def decodelistkeys(objs):
515 def decodelistkeys(objs):
508 # Map with bytestring keys and values.
516 # Map with bytestring keys and values.
509 return next(objs)
517 return next(objs)
510
518
511 def decodelookup(objs):
519 def decodelookup(objs):
512 return next(objs)
520 return next(objs)
513
521
514 def decodepushkey(objs):
522 def decodepushkey(objs):
515 return next(objs)
523 return next(objs)
516
524
517 COMMAND_DECODERS = {
525 COMMAND_DECODERS = {
518 'branchmap': decodebranchmap,
526 'branchmap': decodebranchmap,
519 'heads': decodeheads,
527 'heads': decodeheads,
520 'known': decodeknown,
528 'known': decodeknown,
521 'listkeys': decodelistkeys,
529 'listkeys': decodelistkeys,
522 'lookup': decodelookup,
530 'lookup': decodelookup,
523 'pushkey': decodepushkey,
531 'pushkey': decodepushkey,
524 }
532 }
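
Before the test changes below, a hedged, end-to-end sketch of how the pieces in this file fit together. The names handler, sendframe, and framefh are assumptions for illustration (an existing clienthandler, a function that writes a frame to the peer, and the file-like object frames are read from); the concurrent.futures spelling is likewise an assumption rather than what Mercurial itself passes in.

    from concurrent import futures

    f = futures.Future()

    # callcommand only queues the request, so it currently yields no frames.
    for frame in handler.callcommand(b'heads', {}, f):
        sendframe(frame)

    # flushcommands emits the buffered request frames.
    for frame in handler.flushcommands():
        sendframe(frame)

    # Pump incoming frames (and any content redirect) until EOS.
    while handler.readdata(framefh) is not None:
        pass

    print(f.result())  # decoded response, e.g. the list of head nodes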
@@ -1,18 +1,43 b''
1 #require test-repo
1 #require test-repo
2
2
3 $ cd $TESTDIR/../contrib/fuzz
3 $ cd $TESTDIR/../contrib/fuzz
4
4
5 which(1) could exit nonzero, but that's fine because we'll still end
6 up without a valid executable, so we don't need to check $? here.
7
8 $ if which gmake >/dev/null 2>&1; then
9 > MAKE=gmake
10 > else
11 > MAKE=make
12 > fi
13
14 $ havefuzz() {
15 > cat > $TESTTMP/dummy.cc <<EOF
16 > #include <stdlib.h>
17 > #include <stdint.h>
18 > int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { return 0; }
19 > int main(int argc, char **argv) {
20 > const char data[] = "asdf";
21 > return LLVMFuzzerTestOneInput((const uint8_t *)data, 4);
22 > }
23 > EOF
24 > $CXX $TESTTMP/dummy.cc -o $TESTTMP/dummy \
25 > -fsanitize=fuzzer-no-link,address || return 1
26 > }
27
5 #if clang-libfuzzer
28 #if clang-libfuzzer
6 $ make -s clean all
29 $ CXX=clang++ havefuzz || exit 80
30 $ $MAKE -s clean all
7 #endif
31 #endif
8 #if no-clang-libfuzzer clang-6.0
32 #if no-clang-libfuzzer clang-6.0
9 $ make -s clean all CC=clang-6.0 CXX=clang++-6.0
33 $ CXX=clang++-6.0 havefuzz || exit 80
34 $ $MAKE -s clean all CC=clang-6.0 CXX=clang++-6.0
10 #endif
35 #endif
11 #if no-clang-libfuzzer no-clang-6.0
36 #if no-clang-libfuzzer no-clang-6.0
12 $ exit 80
37 $ exit 80
13 #endif
38 #endif
14
39
15 Just run the fuzzers for five seconds each to verify they work at all.
40 Just run the fuzzers for five seconds each to verify they work at all.
16 $ ./bdiff -max_total_time 5
41 $ ./bdiff -max_total_time 5
17 $ ./mpatch -max_total_time 5
42 $ ./mpatch -max_total_time 5
18 $ ./xdiff -max_total_time 5
43 $ ./xdiff -max_total_time 5
@@ -1,205 +1,222 b''
1 revlog.parseindex must be able to parse the index file even if
1 revlog.parseindex must be able to parse the index file even if
2 an index entry is split between two 64k blocks. The ideal test
2 an index entry is split between two 64k blocks. The ideal test
3 would be to create an index file with inline data where
3 would be to create an index file with inline data where
4 64k < size < 64k + 64 (64k is the size of the read buffer, 64 is
4 64k < size < 64k + 64 (64k is the size of the read buffer, 64 is
5 the size of an index entry) and with an index entry starting right
5 the size of an index entry) and with an index entry starting right
6 before the 64k block boundary, and try to read it.
6 before the 64k block boundary, and try to read it.
7 We approximate that by reducing the read buffer to 1 byte.
7 We approximate that by reducing the read buffer to 1 byte.
8
8
9 $ hg init a
9 $ hg init a
10 $ cd a
10 $ cd a
11 $ echo abc > foo
11 $ echo abc > foo
12 $ hg add foo
12 $ hg add foo
13 $ hg commit -m 'add foo'
13 $ hg commit -m 'add foo'
14 $ echo >> foo
14 $ echo >> foo
15 $ hg commit -m 'change foo'
15 $ hg commit -m 'change foo'
16 $ hg log -r 0:
16 $ hg log -r 0:
17 changeset: 0:7c31755bf9b5
17 changeset: 0:7c31755bf9b5
18 user: test
18 user: test
19 date: Thu Jan 01 00:00:00 1970 +0000
19 date: Thu Jan 01 00:00:00 1970 +0000
20 summary: add foo
20 summary: add foo
21
21
22 changeset: 1:26333235a41c
22 changeset: 1:26333235a41c
23 tag: tip
23 tag: tip
24 user: test
24 user: test
25 date: Thu Jan 01 00:00:00 1970 +0000
25 date: Thu Jan 01 00:00:00 1970 +0000
26 summary: change foo
26 summary: change foo
27
27
28 $ cat >> test.py << EOF
28 $ cat >> test.py << EOF
29 > from __future__ import print_function
29 > from __future__ import print_function
30 > from mercurial import changelog, node, vfs
30 > from mercurial import changelog, node, vfs
31 >
31 >
32 > class singlebyteread(object):
32 > class singlebyteread(object):
33 > def __init__(self, real):
33 > def __init__(self, real):
34 > self.real = real
34 > self.real = real
35 >
35 >
36 > def read(self, size=-1):
36 > def read(self, size=-1):
37 > if size == 65536:
37 > if size == 65536:
38 > size = 1
38 > size = 1
39 > return self.real.read(size)
39 > return self.real.read(size)
40 >
40 >
41 > def __getattr__(self, key):
41 > def __getattr__(self, key):
42 > return getattr(self.real, key)
42 > return getattr(self.real, key)
43 >
43 >
44 > def __enter__(self):
44 > def __enter__(self):
45 > self.real.__enter__()
45 > self.real.__enter__()
46 > return self
46 > return self
47 >
47 >
48 > def __exit__(self, *args, **kwargs):
48 > def __exit__(self, *args, **kwargs):
49 > return self.real.__exit__(*args, **kwargs)
49 > return self.real.__exit__(*args, **kwargs)
50 >
50 >
51 > def opener(*args):
51 > def opener(*args):
52 > o = vfs.vfs(*args)
52 > o = vfs.vfs(*args)
53 > def wrapper(*a, **kwargs):
53 > def wrapper(*a, **kwargs):
54 > f = o(*a, **kwargs)
54 > f = o(*a, **kwargs)
55 > return singlebyteread(f)
55 > return singlebyteread(f)
56 > return wrapper
56 > return wrapper
57 >
57 >
58 > cl = changelog.changelog(opener('.hg/store'))
58 > cl = changelog.changelog(opener('.hg/store'))
59 > print(len(cl), 'revisions:')
59 > print(len(cl), 'revisions:')
60 > for r in cl:
60 > for r in cl:
61 > print(node.short(cl.node(r)))
61 > print(node.short(cl.node(r)))
62 > EOF
62 > EOF
63 $ "$PYTHON" test.py
63 $ "$PYTHON" test.py
64 2 revisions:
64 2 revisions:
65 7c31755bf9b5
65 7c31755bf9b5
66 26333235a41c
66 26333235a41c
67
67
68 $ cd ..
68 $ cd ..
69
69
70 #if no-pure
70 #if no-pure
71
71
72 Test SEGV caused by bad revision passed to reachableroots() (issue4775):
72 Test SEGV caused by bad revision passed to reachableroots() (issue4775):
73
73
74 $ cd a
74 $ cd a
75
75
76 $ "$PYTHON" <<EOF
76 $ "$PYTHON" <<EOF
77 > from __future__ import print_function
77 > from __future__ import print_function
78 > from mercurial import changelog, vfs
78 > from mercurial import changelog, vfs
79 > cl = changelog.changelog(vfs.vfs('.hg/store'))
79 > cl = changelog.changelog(vfs.vfs('.hg/store'))
80 > print('good heads:')
80 > print('good heads:')
81 > for head in [0, len(cl) - 1, -1]:
81 > for head in [0, len(cl) - 1, -1]:
82 > print('%s: %r' % (head, cl.reachableroots(0, [head], [0])))
82 > print('%s: %r' % (head, cl.reachableroots(0, [head], [0])))
83 > print('bad heads:')
83 > print('bad heads:')
84 > for head in [len(cl), 10000, -2, -10000, None]:
84 > for head in [len(cl), 10000, -2, -10000, None]:
85 > print('%s:' % head, end=' ')
85 > print('%s:' % head, end=' ')
86 > try:
86 > try:
87 > cl.reachableroots(0, [head], [0])
87 > cl.reachableroots(0, [head], [0])
88 > print('uncaught buffer overflow?')
88 > print('uncaught buffer overflow?')
89 > except (IndexError, TypeError) as inst:
89 > except (IndexError, TypeError) as inst:
90 > print(inst)
90 > print(inst)
91 > print('good roots:')
91 > print('good roots:')
92 > for root in [0, len(cl) - 1, -1]:
92 > for root in [0, len(cl) - 1, -1]:
93 > print('%s: %r' % (root, cl.reachableroots(root, [len(cl) - 1], [root])))
93 > print('%s: %r' % (root, cl.reachableroots(root, [len(cl) - 1], [root])))
94 > print('out-of-range roots are ignored:')
94 > print('out-of-range roots are ignored:')
95 > for root in [len(cl), 10000, -2, -10000]:
95 > for root in [len(cl), 10000, -2, -10000]:
96 > print('%s: %r' % (root, cl.reachableroots(root, [len(cl) - 1], [root])))
96 > print('%s: %r' % (root, cl.reachableroots(root, [len(cl) - 1], [root])))
97 > print('bad roots:')
97 > print('bad roots:')
98 > for root in [None]:
98 > for root in [None]:
99 > print('%s:' % root, end=' ')
99 > print('%s:' % root, end=' ')
100 > try:
100 > try:
101 > cl.reachableroots(root, [len(cl) - 1], [root])
101 > cl.reachableroots(root, [len(cl) - 1], [root])
102 > print('uncaught error?')
102 > print('uncaught error?')
103 > except TypeError as inst:
103 > except TypeError as inst:
104 > print(inst)
104 > print(inst)
105 > EOF
105 > EOF
106 good heads:
106 good heads:
107 0: [0]
107 0: [0]
108 1: [0]
108 1: [0]
109 -1: []
109 -1: []
110 bad heads:
110 bad heads:
111 2: head out of range
111 2: head out of range
112 10000: head out of range
112 10000: head out of range
113 -2: head out of range
113 -2: head out of range
114 -10000: head out of range
114 -10000: head out of range
115 None: an integer is required
115 None: an integer is required
116 good roots:
116 good roots:
117 0: [0]
117 0: [0]
118 1: [1]
118 1: [1]
119 -1: [-1]
119 -1: [-1]
120 out-of-range roots are ignored:
120 out-of-range roots are ignored:
121 2: []
121 2: []
122 10000: []
122 10000: []
123 -2: []
123 -2: []
124 -10000: []
124 -10000: []
125 bad roots:
125 bad roots:
126 None: an integer is required
126 None: an integer is required
127
127
128 $ cd ..
128 $ cd ..
129
129
130 Test corrupted p1/p2 fields that could cause SEGV at parsers.c:
130 Test corrupted p1/p2 fields that could cause SEGV at parsers.c:
131
131
132 $ mkdir invalidparent
132 $ mkdir invalidparent
133 $ cd invalidparent
133 $ cd invalidparent
134
134
135 $ hg clone --pull -q --config phases.publish=False ../a limit
135 $ hg clone --pull -q --config phases.publish=False ../a limit
136 $ hg clone --pull -q --config phases.publish=False ../a neglimit
136 $ hg clone --pull -q --config phases.publish=False ../a segv
137 $ hg clone --pull -q --config phases.publish=False ../a segv
137 $ rm -R limit/.hg/cache segv/.hg/cache
138 $ rm -R limit/.hg/cache neglimit/.hg/cache segv/.hg/cache
138
139
139 $ "$PYTHON" <<EOF
140 $ "$PYTHON" <<EOF
140 > data = open("limit/.hg/store/00changelog.i", "rb").read()
141 > data = open("limit/.hg/store/00changelog.i", "rb").read()
141 > for n, p in [(b'limit', b'\0\0\0\x02'), (b'segv', b'\0\x01\0\0')]:
142 > poisons = [
143 > (b'limit', b'\0\0\0\x02'),
144 > (b'neglimit', b'\xff\xff\xff\xfe'),
145 > (b'segv', b'\0\x01\0\0'),
146 > ]
147 > for n, p in poisons:
142 > # corrupt p1 at rev0 and p2 at rev1
148 > # corrupt p1 at rev0 and p2 at rev1
143 > d = data[:24] + p + data[28:127 + 28] + p + data[127 + 32:]
149 > d = data[:24] + p + data[28:127 + 28] + p + data[127 + 32:]
144 > open(n + b"/.hg/store/00changelog.i", "wb").write(d)
150 > open(n + b"/.hg/store/00changelog.i", "wb").write(d)
145 > EOF
151 > EOF
146
152
147 $ hg -R limit debugrevlogindex -f1 -c
153 $ hg -R limit debugrevlogindex -f1 -c
148 rev flag size link p1 p2 nodeid
154 rev flag size link p1 p2 nodeid
149 0 0000 62 0 2 -1 7c31755bf9b5
155 0 0000 62 0 2 -1 7c31755bf9b5
150 1 0000 65 1 0 2 26333235a41c
156 1 0000 65 1 0 2 26333235a41c
151
157
152 $ hg -R limit debugdeltachain -c
158 $ hg -R limit debugdeltachain -c
153 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio
159 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio
154 0 1 1 -1 base 63 62 63 1.01613 63 0 0.00000
160 0 1 1 -1 base 63 62 63 1.01613 63 0 0.00000
155 1 2 1 -1 base 66 65 66 1.01538 66 0 0.00000
161 1 2 1 -1 base 66 65 66 1.01538 66 0 0.00000
156
162
163 $ hg -R neglimit debugrevlogindex -f1 -c
164 rev flag size link p1 p2 nodeid
165 0 0000 62 0 -2 -1 7c31755bf9b5
166 1 0000 65 1 0 -2 26333235a41c
167
157 $ hg -R segv debugrevlogindex -f1 -c
168 $ hg -R segv debugrevlogindex -f1 -c
158 rev flag size link p1 p2 nodeid
169 rev flag size link p1 p2 nodeid
159 0 0000 62 0 65536 -1 7c31755bf9b5
170 0 0000 62 0 65536 -1 7c31755bf9b5
160 1 0000 65 1 0 65536 26333235a41c
171 1 0000 65 1 0 65536 26333235a41c
161
172
162 $ hg -R segv debugdeltachain -c
173 $ hg -R segv debugdeltachain -c
163 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio
174 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio
164 0 1 1 -1 base 63 62 63 1.01613 63 0 0.00000
175 0 1 1 -1 base 63 62 63 1.01613 63 0 0.00000
165 1 2 1 -1 base 66 65 66 1.01538 66 0 0.00000
176 1 2 1 -1 base 66 65 66 1.01538 66 0 0.00000
166
177
167 $ cat <<EOF > test.py
178 $ cat <<EOF > test.py
168 > from __future__ import print_function
179 > from __future__ import print_function
169 > import sys
180 > import sys
170 > from mercurial import changelog, vfs
181 > from mercurial import changelog, vfs
171 > cl = changelog.changelog(vfs.vfs(sys.argv[1]))
182 > cl = changelog.changelog(vfs.vfs(sys.argv[1]))
172 > n0, n1 = cl.node(0), cl.node(1)
183 > n0, n1 = cl.node(0), cl.node(1)
173 > ops = [
184 > ops = [
174 > ('reachableroots',
185 > ('reachableroots',
175 > lambda: cl.index.reachableroots2(0, [1], [0], False)),
186 > lambda: cl.index.reachableroots2(0, [1], [0], False)),
176 > ('compute_phases_map_sets', lambda: cl.computephases([[0], []])),
187 > ('compute_phases_map_sets', lambda: cl.computephases([[0], []])),
177 > ('index_headrevs', lambda: cl.headrevs()),
188 > ('index_headrevs', lambda: cl.headrevs()),
178 > ('find_gca_candidates', lambda: cl.commonancestorsheads(n0, n1)),
189 > ('find_gca_candidates', lambda: cl.commonancestorsheads(n0, n1)),
179 > ('find_deepest', lambda: cl.ancestor(n0, n1)),
190 > ('find_deepest', lambda: cl.ancestor(n0, n1)),
180 > ]
191 > ]
181 > for l, f in ops:
192 > for l, f in ops:
182 > print(l + ':', end=' ')
193 > print(l + ':', end=' ')
183 > try:
194 > try:
184 > f()
195 > f()
185 > print('uncaught buffer overflow?')
196 > print('uncaught buffer overflow?')
186 > except ValueError as inst:
197 > except ValueError as inst:
187 > print(inst)
198 > print(inst)
188 > EOF
199 > EOF
189
200
190 $ "$PYTHON" test.py limit/.hg/store
201 $ "$PYTHON" test.py limit/.hg/store
191 reachableroots: parent out of range
202 reachableroots: parent out of range
192 compute_phases_map_sets: parent out of range
203 compute_phases_map_sets: parent out of range
193 index_headrevs: parent out of range
204 index_headrevs: parent out of range
194 find_gca_candidates: parent out of range
205 find_gca_candidates: parent out of range
195 find_deepest: parent out of range
206 find_deepest: parent out of range
207 $ "$PYTHON" test.py neglimit/.hg/store
208 reachableroots: parent out of range
209 compute_phases_map_sets: parent out of range
210 index_headrevs: parent out of range
211 find_gca_candidates: parent out of range
212 find_deepest: parent out of range
196 $ "$PYTHON" test.py segv/.hg/store
213 $ "$PYTHON" test.py segv/.hg/store
197 reachableroots: parent out of range
214 reachableroots: parent out of range
198 compute_phases_map_sets: parent out of range
215 compute_phases_map_sets: parent out of range
199 index_headrevs: parent out of range
216 index_headrevs: parent out of range
200 find_gca_candidates: parent out of range
217 find_gca_candidates: parent out of range
201 find_deepest: parent out of range
218 find_deepest: parent out of range
202
219
203 $ cd ..
220 $ cd ..
204
221
205 #endif
222 #endif
@@ -1,643 +1,724 b''
1 #require symlink execbit
1 #require symlink execbit
2 $ cat << EOF >> $HGRCPATH
2 $ cat << EOF >> $HGRCPATH
3 > [phases]
4 > publish=False
3 > [extensions]
5 > [extensions]
4 > amend=
6 > amend=
5 > rebase=
7 > rebase=
6 > debugdrawdag=$TESTDIR/drawdag.py
8 > debugdrawdag=$TESTDIR/drawdag.py
7 > strip=
9 > strip=
8 > [rebase]
10 > [rebase]
9 > experimental.inmemory=1
11 > experimental.inmemory=1
10 > [diff]
12 > [diff]
11 > git=1
13 > git=1
12 > [alias]
14 > [alias]
13 > tglog = log -G --template "{rev}: {node|short} '{desc}'\n"
15 > tglog = log -G --template "{rev}: {node|short} '{desc}'\n"
14 > EOF
16 > EOF
15
17
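Setting `experimental.inmemory=1` in the `[rebase]` section makes `hg rebase` build the new changesets in memory instead of repeatedly updating the working directory, which is what the 'rebasing in-memory' debug lines throughout this file confirm. The same behaviour can be requested for a single invocation without touching the hgrc; the source and destination revisions below are placeholders:

  $ hg rebase -s SRCREV -d DESTREV --config rebase.experimental.inmemory=yes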
16 Rebase a simple DAG:
18 Rebase a simple DAG:
17 $ hg init repo1
19 $ hg init repo1
18 $ cd repo1
20 $ cd repo1
19 $ hg debugdrawdag <<'EOS'
21 $ hg debugdrawdag <<'EOS'
20 > c b
22 > c b
21 > |/
23 > |/
22 > d
24 > d
23 > |
25 > |
24 > a
26 > a
25 > EOS
27 > EOS
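`hg debugdrawdag` (provided by the drawdag.py helper enabled in the hgrc above) reads an ASCII graph from stdin and creates one small changeset per letter, with the edges defining the parent relationships; the revisions can then be addressed by name, as `hg up -C a` and `hg rebase -r b -d c` below rely on. A minimal sketch of the input format, with B and C as two heads on a common root A:

  $ hg debugdrawdag <<'EOS'
  > B C
  > |/
  > A
  > EOS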
26 $ hg up -C a
28 $ hg up -C a
27 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
28 $ hg tglog
30 $ hg tglog
29 o 3: 814f6bd05178 'c'
31 o 3: 814f6bd05178 'c'
30 |
32 |
31 | o 2: db0e82a16a62 'b'
33 | o 2: db0e82a16a62 'b'
32 |/
34 |/
33 o 1: 02952614a83d 'd'
35 o 1: 02952614a83d 'd'
34 |
36 |
35 @ 0: b173517d0057 'a'
37 @ 0: b173517d0057 'a'
36
38
37 $ hg cat -r 3 c
39 $ hg cat -r 3 c
38 c (no-eol)
40 c (no-eol)
39 $ hg cat -r 2 b
41 $ hg cat -r 2 b
40 b (no-eol)
42 b (no-eol)
41 $ hg rebase --debug -r b -d c | grep rebasing
43 $ hg rebase --debug -r b -d c | grep rebasing
42 rebasing in-memory
44 rebasing in-memory
43 rebasing 2:db0e82a16a62 "b" (b)
45 rebasing 2:db0e82a16a62 "b" (b)
44 $ hg tglog
46 $ hg tglog
45 o 3: ca58782ad1e4 'b'
47 o 3: ca58782ad1e4 'b'
46 |
48 |
47 o 2: 814f6bd05178 'c'
49 o 2: 814f6bd05178 'c'
48 |
50 |
49 o 1: 02952614a83d 'd'
51 o 1: 02952614a83d 'd'
50 |
52 |
51 @ 0: b173517d0057 'a'
53 @ 0: b173517d0057 'a'
52
54
53 $ hg cat -r 3 b
55 $ hg cat -r 3 b
54 b (no-eol)
56 b (no-eol)
55 $ hg cat -r 2 c
57 $ hg cat -r 2 c
56 c (no-eol)
58 c (no-eol)
59 $ cd ..
57
60
58 Case 2:
61 Case 2:
59 $ hg init repo2
62 $ hg init repo2
60 $ cd repo2
63 $ cd repo2
61 $ hg debugdrawdag <<'EOS'
64 $ hg debugdrawdag <<'EOS'
62 > c b
65 > c b
63 > |/
66 > |/
64 > d
67 > d
65 > |
68 > |
66 > a
69 > a
67 > EOS
70 > EOS
68
71
69 Add a symlink and an executable file:
72 Add a symlink and an executable file:
70 $ hg up -C c
73 $ hg up -C c
71 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
74 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
72 $ ln -s somefile e
75 $ ln -s somefile e
73 $ echo f > f
76 $ echo f > f
74 $ chmod +x f
77 $ chmod +x f
75 $ hg add e f
78 $ hg add e f
76 $ hg amend -q
79 $ hg amend -q
77 $ hg up -Cq a
80 $ hg up -Cq a
78
81
79 Write files to the working copy, and ensure they're still there after the rebase
82 Write files to the working copy, and ensure they're still there after the rebase
80 $ echo "abc" > a
83 $ echo "abc" > a
81 $ ln -s def b
84 $ ln -s def b
82 $ echo "ghi" > c
85 $ echo "ghi" > c
83 $ echo "jkl" > d
86 $ echo "jkl" > d
84 $ echo "mno" > e
87 $ echo "mno" > e
85 $ hg tglog
88 $ hg tglog
86 o 3: f56b71190a8f 'c'
89 o 3: f56b71190a8f 'c'
87 |
90 |
88 | o 2: db0e82a16a62 'b'
91 | o 2: db0e82a16a62 'b'
89 |/
92 |/
90 o 1: 02952614a83d 'd'
93 o 1: 02952614a83d 'd'
91 |
94 |
92 @ 0: b173517d0057 'a'
95 @ 0: b173517d0057 'a'
93
96
94 $ hg cat -r 3 c
97 $ hg cat -r 3 c
95 c (no-eol)
98 c (no-eol)
96 $ hg cat -r 2 b
99 $ hg cat -r 2 b
97 b (no-eol)
100 b (no-eol)
98 $ hg cat -r 3 e
101 $ hg cat -r 3 e
99 somefile (no-eol)
102 somefile (no-eol)
100 $ hg rebase --debug -s b -d a | grep rebasing
103 $ hg rebase --debug -s b -d a | grep rebasing
101 rebasing in-memory
104 rebasing in-memory
102 rebasing 2:db0e82a16a62 "b" (b)
105 rebasing 2:db0e82a16a62 "b" (b)
103 $ hg tglog
106 $ hg tglog
104 o 3: fc055c3b4d33 'b'
107 o 3: fc055c3b4d33 'b'
105 |
108 |
106 | o 2: f56b71190a8f 'c'
109 | o 2: f56b71190a8f 'c'
107 | |
110 | |
108 | o 1: 02952614a83d 'd'
111 | o 1: 02952614a83d 'd'
109 |/
112 |/
110 @ 0: b173517d0057 'a'
113 @ 0: b173517d0057 'a'
111
114
112 $ hg cat -r 2 c
115 $ hg cat -r 2 c
113 c (no-eol)
116 c (no-eol)
114 $ hg cat -r 3 b
117 $ hg cat -r 3 b
115 b (no-eol)
118 b (no-eol)
116 $ hg rebase --debug -s 1 -d 3 | grep rebasing
119 $ hg rebase --debug -s 1 -d 3 | grep rebasing
117 rebasing in-memory
120 rebasing in-memory
118 rebasing 1:02952614a83d "d" (d)
121 rebasing 1:02952614a83d "d" (d)
119 rebasing 2:f56b71190a8f "c"
122 rebasing 2:f56b71190a8f "c"
120 $ hg tglog
123 $ hg tglog
121 o 3: 753feb6fd12a 'c'
124 o 3: 753feb6fd12a 'c'
122 |
125 |
123 o 2: 09c044d2cb43 'd'
126 o 2: 09c044d2cb43 'd'
124 |
127 |
125 o 1: fc055c3b4d33 'b'
128 o 1: fc055c3b4d33 'b'
126 |
129 |
127 @ 0: b173517d0057 'a'
130 @ 0: b173517d0057 'a'
128
131
129 Ensure working copy files are still there:
132 Ensure working copy files are still there:
130 $ cat a
133 $ cat a
131 abc
134 abc
132 $ readlink.py b
135 $ readlink.py b
133 b -> def
136 b -> def
134 $ cat e
137 $ cat e
135 mno
138 mno
136
139
137 Ensure symlink and executable files were rebased properly:
140 Ensure symlink and executable files were rebased properly:
138 $ hg up -Cq 3
141 $ hg up -Cq 3
139 $ readlink.py e
142 $ readlink.py e
140 e -> somefile
143 e -> somefile
141 $ ls -l f | cut -c -10
144 $ ls -l f | cut -c -10
142 -rwxr-xr-x
145 -rwxr-xr-x
143
146
144 Rebase the working copy parent
147 Rebase the working copy parent
145 $ hg up -C 3
148 $ hg up -C 3
146 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
149 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
147 $ hg rebase -r 3 -d 0 --debug | grep rebasing
150 $ hg rebase -r 3 -d 0 --debug | grep rebasing
148 rebasing in-memory
151 rebasing in-memory
149 rebasing 3:753feb6fd12a "c" (tip)
152 rebasing 3:753feb6fd12a "c" (tip)
150 $ hg tglog
153 $ hg tglog
151 @ 3: 844a7de3e617 'c'
154 @ 3: 844a7de3e617 'c'
152 |
155 |
153 | o 2: 09c044d2cb43 'd'
156 | o 2: 09c044d2cb43 'd'
154 | |
157 | |
155 | o 1: fc055c3b4d33 'b'
158 | o 1: fc055c3b4d33 'b'
156 |/
159 |/
157 o 0: b173517d0057 'a'
160 o 0: b173517d0057 'a'
158
161
159
162
160 Test reporting of path conflicts
163 Test reporting of path conflicts
161
164
162 $ hg rm a
165 $ hg rm a
163 $ mkdir a
166 $ mkdir a
164 $ touch a/a
167 $ touch a/a
165 $ hg ci -Am "a/a"
168 $ hg ci -Am "a/a"
166 adding a/a
169 adding a/a
167 $ hg tglog
170 $ hg tglog
168 @ 4: daf7dfc139cb 'a/a'
171 @ 4: daf7dfc139cb 'a/a'
169 |
172 |
170 o 3: 844a7de3e617 'c'
173 o 3: 844a7de3e617 'c'
171 |
174 |
172 | o 2: 09c044d2cb43 'd'
175 | o 2: 09c044d2cb43 'd'
173 | |
176 | |
174 | o 1: fc055c3b4d33 'b'
177 | o 1: fc055c3b4d33 'b'
175 |/
178 |/
176 o 0: b173517d0057 'a'
179 o 0: b173517d0057 'a'
177
180
178 $ hg rebase -r . -d 2
181 $ hg rebase -r . -d 2
179 rebasing 4:daf7dfc139cb "a/a" (tip)
182 rebasing 4:daf7dfc139cb "a/a" (tip)
180 saved backup bundle to $TESTTMP/repo1/repo2/.hg/strip-backup/daf7dfc139cb-fdbfcf4f-rebase.hg
183 saved backup bundle to $TESTTMP/repo2/.hg/strip-backup/daf7dfc139cb-fdbfcf4f-rebase.hg
181
184
182 $ hg tglog
185 $ hg tglog
183 @ 4: c6ad37a4f250 'a/a'
186 @ 4: c6ad37a4f250 'a/a'
184 |
187 |
185 | o 3: 844a7de3e617 'c'
188 | o 3: 844a7de3e617 'c'
186 | |
189 | |
187 o | 2: 09c044d2cb43 'd'
190 o | 2: 09c044d2cb43 'd'
188 | |
191 | |
189 o | 1: fc055c3b4d33 'b'
192 o | 1: fc055c3b4d33 'b'
190 |/
193 |/
191 o 0: b173517d0057 'a'
194 o 0: b173517d0057 'a'
192
195
193 $ echo foo > foo
196 $ echo foo > foo
194 $ hg ci -Aqm "added foo"
197 $ hg ci -Aqm "added foo"
195 $ hg up '.^'
198 $ hg up '.^'
196 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
199 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
197 $ echo bar > bar
200 $ echo bar > bar
198 $ hg ci -Aqm "added bar"
201 $ hg ci -Aqm "added bar"
199 $ hg rm a/a
202 $ hg rm a/a
200 $ echo a > a
203 $ echo a > a
201 $ hg ci -Aqm "added a back!"
204 $ hg ci -Aqm "added a back!"
202 $ hg tglog
205 $ hg tglog
203 @ 7: 855e9797387e 'added a back!'
206 @ 7: 855e9797387e 'added a back!'
204 |
207 |
205 o 6: d14530e5e3e6 'added bar'
208 o 6: d14530e5e3e6 'added bar'
206 |
209 |
207 | o 5: 9b94b9373deb 'added foo'
210 | o 5: 9b94b9373deb 'added foo'
208 |/
211 |/
209 o 4: c6ad37a4f250 'a/a'
212 o 4: c6ad37a4f250 'a/a'
210 |
213 |
211 | o 3: 844a7de3e617 'c'
214 | o 3: 844a7de3e617 'c'
212 | |
215 | |
213 o | 2: 09c044d2cb43 'd'
216 o | 2: 09c044d2cb43 'd'
214 | |
217 | |
215 o | 1: fc055c3b4d33 'b'
218 o | 1: fc055c3b4d33 'b'
216 |/
219 |/
217 o 0: b173517d0057 'a'
220 o 0: b173517d0057 'a'
218
221
219 $ hg rebase -r . -d 5
222 $ hg rebase -r . -d 5
220 rebasing 7:855e9797387e "added a back!" (tip)
223 rebasing 7:855e9797387e "added a back!" (tip)
221 saved backup bundle to $TESTTMP/repo1/repo2/.hg/strip-backup/855e9797387e-81ee4c5d-rebase.hg
224 saved backup bundle to $TESTTMP/repo2/.hg/strip-backup/855e9797387e-81ee4c5d-rebase.hg
222
225
223 $ hg tglog
226 $ hg tglog
224 @ 7: bb3f02be2688 'added a back!'
227 @ 7: bb3f02be2688 'added a back!'
225 |
228 |
226 | o 6: d14530e5e3e6 'added bar'
229 | o 6: d14530e5e3e6 'added bar'
227 | |
230 | |
228 o | 5: 9b94b9373deb 'added foo'
231 o | 5: 9b94b9373deb 'added foo'
229 |/
232 |/
230 o 4: c6ad37a4f250 'a/a'
233 o 4: c6ad37a4f250 'a/a'
231 |
234 |
232 | o 3: 844a7de3e617 'c'
235 | o 3: 844a7de3e617 'c'
233 | |
236 | |
234 o | 2: 09c044d2cb43 'd'
237 o | 2: 09c044d2cb43 'd'
235 | |
238 | |
236 o | 1: fc055c3b4d33 'b'
239 o | 1: fc055c3b4d33 'b'
237 |/
240 |/
238 o 0: b173517d0057 'a'
241 o 0: b173517d0057 'a'
239
242
243 $ mkdir c
244 $ echo c > c/c
245 $ hg add c/c
246 $ hg ci -m 'c/c'
247 $ hg rebase -r . -d 3 -n
248 starting dry-run rebase; repository will not be changed
249 rebasing 8:755f0104af9b "c/c" (tip)
250 abort: error: 'c/c' conflicts with file 'c' in 3.
251 [255]
252 $ hg rebase -r 3 -d . -n
253 starting dry-run rebase; repository will not be changed
254 rebasing 3:844a7de3e617 "c"
255 abort: error: file 'c' cannot be written because 'c/' is a folder in 755f0104af9b (containing 1 entries: c/c)
256 [255]
240
257
241 $ cd ..
258 $ cd ..
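The two dry runs above exercise both directions of a path conflict: a new directory `c/` colliding with the tracked file `c` in the destination, and a file `c` that cannot be written because `c/` is a directory in the revision being rebased onto it. With `-n/--dry-run` both cases abort (exit status 255, as the `[255]` markers show) without modifying the repository:

  $ hg rebase -r . -d DESTREV --dry-run    # DESTREV is a placeholder; aborts on either form of the collision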
242
259
260 Test path auditing (issue5818)
261
262 $ mkdir lib_
263 $ ln -s lib_ lib
264 $ hg init repo
265 $ cd repo
266 $ mkdir -p ".$TESTTMP/lib"
267 $ touch ".$TESTTMP/lib/a"
268 $ hg add ".$TESTTMP/lib/a"
269 $ hg ci -m 'a'
270
271 $ touch ".$TESTTMP/lib/b"
272 $ hg add ".$TESTTMP/lib/b"
273 $ hg ci -m 'b'
274
275 $ hg up -q '.^'
276 $ touch ".$TESTTMP/lib/c"
277 $ hg add ".$TESTTMP/lib/c"
278 $ hg ci -m 'c'
279 created new head
280 $ hg rebase -s 1 -d .
281 rebasing 1:* "b" (glob)
282 saved backup bundle to $TESTTMP/repo/.hg/strip-backup/*-rebase.hg (glob)
283 $ cd ..
284
243 Test dry-run rebasing
285 Test dry-run rebasing
244
286
245 $ hg init repo3
287 $ hg init repo3
246 $ cd repo3
288 $ cd repo3
247 $ echo a>a
289 $ echo a>a
248 $ hg ci -Aqma
290 $ hg ci -Aqma
249 $ echo b>b
291 $ echo b>b
250 $ hg ci -Aqmb
292 $ hg ci -Aqmb
251 $ echo c>c
293 $ echo c>c
252 $ hg ci -Aqmc
294 $ hg ci -Aqmc
253 $ echo d>d
295 $ echo d>d
254 $ hg ci -Aqmd
296 $ hg ci -Aqmd
255 $ echo e>e
297 $ echo e>e
256 $ hg ci -Aqme
298 $ hg ci -Aqme
257
299
258 $ hg up 1 -q
300 $ hg up 1 -q
259 $ echo f>f
301 $ echo f>f
260 $ hg ci -Amf
302 $ hg ci -Amf
261 adding f
303 adding f
262 created new head
304 created new head
263 $ echo g>g
305 $ echo g>g
264 $ hg ci -Aqmg
306 $ hg ci -Aqmg
265 $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
307 $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
266 @ 6:baf10c5166d4 test
308 @ 6:baf10c5166d4 test
267 | g
309 | g
268 |
310 |
269 o 5:6343ca3eff20 test
311 o 5:6343ca3eff20 test
270 | f
312 | f
271 |
313 |
272 | o 4:e860deea161a test
314 | o 4:e860deea161a test
273 | | e
315 | | e
274 | |
316 | |
275 | o 3:055a42cdd887 test
317 | o 3:055a42cdd887 test
276 | | d
318 | | d
277 | |
319 | |
278 | o 2:177f92b77385 test
320 | o 2:177f92b77385 test
279 |/ c
321 |/ c
280 |
322 |
281 o 1:d2ae7f538514 test
323 o 1:d2ae7f538514 test
282 | b
324 | b
283 |
325 |
284 o 0:cb9a9f314b8b test
326 o 0:cb9a9f314b8b test
285 a
327 a
286
328
287 Make sure it throws an error when --continue or --abort is passed with --dry-run
329 Make sure it throws an error when --continue or --abort is passed with --dry-run
288 $ hg rebase -s 2 -d 6 -n --continue
330 $ hg rebase -s 2 -d 6 -n --continue
289 abort: cannot specify both --dry-run and --continue
331 abort: cannot specify both --dry-run and --continue
290 [255]
332 [255]
291 $ hg rebase -s 2 -d 6 -n --abort
333 $ hg rebase -s 2 -d 6 -n --abort
292 abort: cannot specify both --dry-run and --abort
334 abort: cannot specify both --dry-run and --abort
293 [255]
335 [255]
294
336
295 Check that dry-run gives correct results when there is no conflict in the rebase
337 Check that dry-run gives correct results when there is no conflict in the rebase
296 $ hg rebase -s 2 -d 6 -n
338 $ hg rebase -s 2 -d 6 -n
297 starting dry-run rebase; repository will not be changed
339 starting dry-run rebase; repository will not be changed
298 rebasing 2:177f92b77385 "c"
340 rebasing 2:177f92b77385 "c"
299 rebasing 3:055a42cdd887 "d"
341 rebasing 3:055a42cdd887 "d"
300 rebasing 4:e860deea161a "e"
342 rebasing 4:e860deea161a "e"
301 dry-run rebase completed successfully; run without -n/--dry-run to perform this rebase
343 dry-run rebase completed successfully; run without -n/--dry-run to perform this rebase
302
344
303 $ hg diff
345 $ hg diff
304 $ hg status
346 $ hg status
305
347
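The empty `hg diff` and `hg status` output is the point of the check: a dry run performs the whole rebase in memory and then rolls the transaction back, so neither the store nor the working copy may change. The same assertion, sketched as a manual follow-up with the revisions used in this test:

  $ hg rebase -s 2 -d 6 --dry-run
  $ hg status    # prints nothing if the dry run really left the repository untouched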
306 $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
348 $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
307 @ 6:baf10c5166d4 test
349 @ 6:baf10c5166d4 test
308 | g
350 | g
309 |
351 |
310 o 5:6343ca3eff20 test
352 o 5:6343ca3eff20 test
311 | f
353 | f
312 |
354 |
313 | o 4:e860deea161a test
355 | o 4:e860deea161a test
314 | | e
356 | | e
315 | |
357 | |
316 | o 3:055a42cdd887 test
358 | o 3:055a42cdd887 test
317 | | d
359 | | d
318 | |
360 | |
319 | o 2:177f92b77385 test
361 | o 2:177f92b77385 test
320 |/ c
362 |/ c
321 |
363 |
322 o 1:d2ae7f538514 test
364 o 1:d2ae7f538514 test
323 | b
365 | b
324 |
366 |
325 o 0:cb9a9f314b8b test
367 o 0:cb9a9f314b8b test
326 a
368 a
327
369
328 Check that dry-run works with --collapse when there is no conflict
370 Check that dry-run works with --collapse when there is no conflict
329 $ hg rebase -s 2 -d 6 -n --collapse
371 $ hg rebase -s 2 -d 6 -n --collapse
330 starting dry-run rebase; repository will not be changed
372 starting dry-run rebase; repository will not be changed
331 rebasing 2:177f92b77385 "c"
373 rebasing 2:177f92b77385 "c"
332 rebasing 3:055a42cdd887 "d"
374 rebasing 3:055a42cdd887 "d"
333 rebasing 4:e860deea161a "e"
375 rebasing 4:e860deea161a "e"
334 dry-run rebase completed successfully; run without -n/--dry-run to perform this rebase
376 dry-run rebase completed successfully; run without -n/--dry-run to perform this rebase
335
377
336 Check that dry-run gives correct results when there is a conflict in the rebase
378 Check that dry-run gives correct results when there is a conflict in the rebase
337 Make a conflict:
379 Make a conflict:
338 $ hg up 6 -q
380 $ hg up 6 -q
339 $ echo conflict>e
381 $ echo conflict>e
340 $ hg ci -Aqm "conflict with e"
382 $ hg ci -Aqm "conflict with e"
341 $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
383 $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
342 @ 7:d2c195b28050 test
384 @ 7:d2c195b28050 test
343 | conflict with e
385 | conflict with e
344 |
386 |
345 o 6:baf10c5166d4 test
387 o 6:baf10c5166d4 test
346 | g
388 | g
347 |
389 |
348 o 5:6343ca3eff20 test
390 o 5:6343ca3eff20 test
349 | f
391 | f
350 |
392 |
351 | o 4:e860deea161a test
393 | o 4:e860deea161a test
352 | | e
394 | | e
353 | |
395 | |
354 | o 3:055a42cdd887 test
396 | o 3:055a42cdd887 test
355 | | d
397 | | d
356 | |
398 | |
357 | o 2:177f92b77385 test
399 | o 2:177f92b77385 test
358 |/ c
400 |/ c
359 |
401 |
360 o 1:d2ae7f538514 test
402 o 1:d2ae7f538514 test
361 | b
403 | b
362 |
404 |
363 o 0:cb9a9f314b8b test
405 o 0:cb9a9f314b8b test
364 a
406 a
365
407
366 $ hg rebase -s 2 -d 7 -n
408 $ hg rebase -s 2 -d 7 -n
367 starting dry-run rebase; repository will not be changed
409 starting dry-run rebase; repository will not be changed
368 rebasing 2:177f92b77385 "c"
410 rebasing 2:177f92b77385 "c"
369 rebasing 3:055a42cdd887 "d"
411 rebasing 3:055a42cdd887 "d"
370 rebasing 4:e860deea161a "e"
412 rebasing 4:e860deea161a "e"
371 merging e
413 merging e
372 transaction abort!
414 transaction abort!
373 rollback completed
415 rollback completed
374 hit a merge conflict
416 hit a merge conflict
375 [1]
417 [1]
376 $ hg diff
418 $ hg diff
377 $ hg status
419 $ hg status
378 $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
420 $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
379 @ 7:d2c195b28050 test
421 @ 7:d2c195b28050 test
380 | conflict with e
422 | conflict with e
381 |
423 |
382 o 6:baf10c5166d4 test
424 o 6:baf10c5166d4 test
383 | g
425 | g
384 |
426 |
385 o 5:6343ca3eff20 test
427 o 5:6343ca3eff20 test
386 | f
428 | f
387 |
429 |
388 | o 4:e860deea161a test
430 | o 4:e860deea161a test
389 | | e
431 | | e
390 | |
432 | |
391 | o 3:055a42cdd887 test
433 | o 3:055a42cdd887 test
392 | | d
434 | | d
393 | |
435 | |
394 | o 2:177f92b77385 test
436 | o 2:177f92b77385 test
395 |/ c
437 |/ c
396 |
438 |
397 o 1:d2ae7f538514 test
439 o 1:d2ae7f538514 test
398 | b
440 | b
399 |
441 |
400 o 0:cb9a9f314b8b test
442 o 0:cb9a9f314b8b test
401 a
443 a
402
444
403 Check that dry-run works with --collapse when there are conflicts
445 Check that dry-run works with --collapse when there are conflicts
404 $ hg rebase -s 2 -d 7 -n --collapse
446 $ hg rebase -s 2 -d 7 -n --collapse
405 starting dry-run rebase; repository will not be changed
447 starting dry-run rebase; repository will not be changed
406 rebasing 2:177f92b77385 "c"
448 rebasing 2:177f92b77385 "c"
407 rebasing 3:055a42cdd887 "d"
449 rebasing 3:055a42cdd887 "d"
408 rebasing 4:e860deea161a "e"
450 rebasing 4:e860deea161a "e"
409 merging e
451 merging e
410 hit a merge conflict
452 hit a merge conflict
411 [1]
453 [1]
412
454
413 In-memory rebase that fails due to merge conflicts
455 In-memory rebase that fails due to merge conflicts
414
456
415 $ hg rebase -s 2 -d 7
457 $ hg rebase -s 2 -d 7
416 rebasing 2:177f92b77385 "c"
458 rebasing 2:177f92b77385 "c"
417 rebasing 3:055a42cdd887 "d"
459 rebasing 3:055a42cdd887 "d"
418 rebasing 4:e860deea161a "e"
460 rebasing 4:e860deea161a "e"
419 merging e
461 merging e
420 transaction abort!
462 transaction abort!
421 rollback completed
463 rollback completed
422 hit merge conflicts; re-running rebase without in-memory merge
464 hit merge conflicts; re-running rebase without in-memory merge
423 rebase aborted
424 rebasing 2:177f92b77385 "c"
465 rebasing 2:177f92b77385 "c"
425 rebasing 3:055a42cdd887 "d"
466 rebasing 3:055a42cdd887 "d"
426 rebasing 4:e860deea161a "e"
467 rebasing 4:e860deea161a "e"
427 merging e
468 merging e
428 warning: conflicts while merging e! (edit, then use 'hg resolve --mark')
469 warning: conflicts while merging e! (edit, then use 'hg resolve --mark')
429 unresolved conflicts (see hg resolve, then hg rebase --continue)
470 unresolved conflicts (see hg resolve, then hg rebase --continue)
430 [1]
471 [1]
472 $ hg rebase --abort
473 saved backup bundle to $TESTTMP/repo3/.hg/strip-backup/c1e524d4287c-f91f82e1-backup.hg
474 rebase aborted
475
476 Retrying without in-memory merge won't lose working copy changes
477 $ cd ..
478 $ hg clone repo3 repo3-dirty -q
479 $ cd repo3-dirty
480 $ echo dirty > a
481 $ hg rebase -s 2 -d 7
482 rebasing 2:177f92b77385 "c"
483 rebasing 3:055a42cdd887 "d"
484 rebasing 4:e860deea161a "e"
485 merging e
486 transaction abort!
487 rollback completed
488 hit merge conflicts; re-running rebase without in-memory merge
489 abort: uncommitted changes
490 [255]
491 $ cat a
492 dirty
493
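When the in-memory merge hits a conflict, rebase falls back to an on-disk rebase, and that fallback begins by refusing to run over uncommitted changes; that is why the dirty copy of `a` survives and the command stops with 'abort: uncommitted changes'. A hedged way forward is to put the local edit aside first (this sketch assumes the shelve extension is enabled):

  $ hg shelve           # or commit the change; 'hg update --clean .' would discard it instead
  $ hg rebase -s 2 -d 7
  $ hg unshelve         # restore the local edit once the rebase has been dealt with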
494 Retrying without in-memory merge won't lose merge state
495 $ cd ..
496 $ hg clone repo3 repo3-merge-state -q
497 $ cd repo3-merge-state
498 $ hg merge 4
499 merging e
500 warning: conflicts while merging e! (edit, then use 'hg resolve --mark')
501 2 files updated, 0 files merged, 0 files removed, 1 files unresolved
502 use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
503 [1]
504 $ hg resolve -l
505 U e
506 $ hg rebase -s 2 -d 7
507 rebasing 2:177f92b77385 "c"
508 abort: outstanding merge conflicts
509 [255]
510 $ hg resolve -l
511 U e
431
512
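An unresolved merge blocks the fallback in the same way: rebase stops with 'outstanding merge conflicts' and `hg resolve -l` still lists `e` as unresolved, so the pending merge state is preserved rather than clobbered. One hedged way out is to finish or abandon that merge before rebasing:

  $ hg merge --abort    # abandon the half-finished merge (or resolve the files and commit it)
  $ hg rebase -s 2 -d 7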
432 ==========================
513 ==========================
433 Test for --confirm option|
514 Test for --confirm option|
434 ==========================
515 ==========================
435 $ cd ..
516 $ cd ..
436 $ hg clone repo3 repo4 -q
517 $ hg clone repo3 repo4 -q
437 $ cd repo4
518 $ cd repo4
438 $ hg strip 7 -q
519 $ hg strip 7 -q
439 $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
520 $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
440 @ 6:baf10c5166d4 test
521 @ 6:baf10c5166d4 test
441 | g
522 | g
442 |
523 |
443 o 5:6343ca3eff20 test
524 o 5:6343ca3eff20 test
444 | f
525 | f
445 |
526 |
446 | o 4:e860deea161a test
527 | o 4:e860deea161a test
447 | | e
528 | | e
448 | |
529 | |
449 | o 3:055a42cdd887 test
530 | o 3:055a42cdd887 test
450 | | d
531 | | d
451 | |
532 | |
452 | o 2:177f92b77385 test
533 | o 2:177f92b77385 test
453 |/ c
534 |/ c
454 |
535 |
455 o 1:d2ae7f538514 test
536 o 1:d2ae7f538514 test
456 | b
537 | b
457 |
538 |
458 o 0:cb9a9f314b8b test
539 o 0:cb9a9f314b8b test
459 a
540 a
460
541
461 Check that it gives an error when both --dry-run and --confirm are used:
542 Check that it gives an error when both --dry-run and --confirm are used:
462 $ hg rebase -s 2 -d . --confirm --dry-run
543 $ hg rebase -s 2 -d . --confirm --dry-run
463 abort: cannot specify both --confirm and --dry-run
544 abort: cannot specify both --confirm and --dry-run
464 [255]
545 [255]
465 $ hg rebase -s 2 -d . --confirm --abort
546 $ hg rebase -s 2 -d . --confirm --abort
466 abort: cannot specify both --confirm and --abort
547 abort: cannot specify both --confirm and --abort
467 [255]
548 [255]
468 $ hg rebase -s 2 -d . --confirm --continue
549 $ hg rebase -s 2 -d . --confirm --continue
469 abort: cannot specify both --confirm and --continue
550 abort: cannot specify both --confirm and --continue
470 [255]
551 [255]
471
552
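`--confirm` performs the whole rebase in memory first and only then asks whether to apply the result, so answering 'n' leaves the graph untouched while 'y' writes the already-computed changesets; combined with `--keep` the original revisions survive in either case. The interactive pattern used below, sketched without the heredoc that feeds the answer:

  # prompts 'apply changes (yn)?' once the in-memory rebase has finished
  $ hg rebase -s 2 -d . --keep --confirm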
472 Test --confirm option when there are no conflicts:
553 Test --confirm option when there are no conflicts:
473 $ hg rebase -s 2 -d . --keep --config ui.interactive=True --confirm << EOF
554 $ hg rebase -s 2 -d . --keep --config ui.interactive=True --confirm << EOF
474 > n
555 > n
475 > EOF
556 > EOF
476 starting in-memory rebase
557 starting in-memory rebase
477 rebasing 2:177f92b77385 "c"
558 rebasing 2:177f92b77385 "c"
478 rebasing 3:055a42cdd887 "d"
559 rebasing 3:055a42cdd887 "d"
479 rebasing 4:e860deea161a "e"
560 rebasing 4:e860deea161a "e"
480 rebase completed successfully
561 rebase completed successfully
481 apply changes (yn)? n
562 apply changes (yn)? n
482 $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
563 $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
483 @ 6:baf10c5166d4 test
564 @ 6:baf10c5166d4 test
484 | g
565 | g
485 |
566 |
486 o 5:6343ca3eff20 test
567 o 5:6343ca3eff20 test
487 | f
568 | f
488 |
569 |
489 | o 4:e860deea161a test
570 | o 4:e860deea161a test
490 | | e
571 | | e
491 | |
572 | |
492 | o 3:055a42cdd887 test
573 | o 3:055a42cdd887 test
493 | | d
574 | | d
494 | |
575 | |
495 | o 2:177f92b77385 test
576 | o 2:177f92b77385 test
496 |/ c
577 |/ c
497 |
578 |
498 o 1:d2ae7f538514 test
579 o 1:d2ae7f538514 test
499 | b
580 | b
500 |
581 |
501 o 0:cb9a9f314b8b test
582 o 0:cb9a9f314b8b test
502 a
583 a
503
584
504 $ hg rebase -s 2 -d . --keep --config ui.interactive=True --confirm << EOF
585 $ hg rebase -s 2 -d . --keep --config ui.interactive=True --confirm << EOF
505 > y
586 > y
506 > EOF
587 > EOF
507 starting in-memory rebase
588 starting in-memory rebase
508 rebasing 2:177f92b77385 "c"
589 rebasing 2:177f92b77385 "c"
509 rebasing 3:055a42cdd887 "d"
590 rebasing 3:055a42cdd887 "d"
510 rebasing 4:e860deea161a "e"
591 rebasing 4:e860deea161a "e"
511 rebase completed successfully
592 rebase completed successfully
512 apply changes (yn)? y
593 apply changes (yn)? y
513 $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
594 $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
514 o 9:9fd28f55f6dc test
595 o 9:9fd28f55f6dc test
515 | e
596 | e
516 |
597 |
517 o 8:12cbf031f469 test
598 o 8:12cbf031f469 test
518 | d
599 | d
519 |
600 |
520 o 7:c83b1da5b1ae test
601 o 7:c83b1da5b1ae test
521 | c
602 | c
522 |
603 |
523 @ 6:baf10c5166d4 test
604 @ 6:baf10c5166d4 test
524 | g
605 | g
525 |
606 |
526 o 5:6343ca3eff20 test
607 o 5:6343ca3eff20 test
527 | f
608 | f
528 |
609 |
529 | o 4:e860deea161a test
610 | o 4:e860deea161a test
530 | | e
611 | | e
531 | |
612 | |
532 | o 3:055a42cdd887 test
613 | o 3:055a42cdd887 test
533 | | d
614 | | d
534 | |
615 | |
535 | o 2:177f92b77385 test
616 | o 2:177f92b77385 test
536 |/ c
617 |/ c
537 |
618 |
538 o 1:d2ae7f538514 test
619 o 1:d2ae7f538514 test
539 | b
620 | b
540 |
621 |
541 o 0:cb9a9f314b8b test
622 o 0:cb9a9f314b8b test
542 a
623 a
543
624
544 Test --confirm option when there is a conflict
625 Test --confirm option when there is a conflict
545 $ hg up tip -q
626 $ hg up tip -q
546 $ echo ee>e
627 $ echo ee>e
547 $ hg ci --amend -m "conflict with e" -q
628 $ hg ci --amend -m "conflict with e" -q
548 $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
629 $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
549 @ 9:906d72f66a59 test
630 @ 9:906d72f66a59 test
550 | conflict with e
631 | conflict with e
551 |
632 |
552 o 8:12cbf031f469 test
633 o 8:12cbf031f469 test
553 | d
634 | d
554 |
635 |
555 o 7:c83b1da5b1ae test
636 o 7:c83b1da5b1ae test
556 | c
637 | c
557 |
638 |
558 o 6:baf10c5166d4 test
639 o 6:baf10c5166d4 test
559 | g
640 | g
560 |
641 |
561 o 5:6343ca3eff20 test
642 o 5:6343ca3eff20 test
562 | f
643 | f
563 |
644 |
564 | o 4:e860deea161a test
645 | o 4:e860deea161a test
565 | | e
646 | | e
566 | |
647 | |
567 | o 3:055a42cdd887 test
648 | o 3:055a42cdd887 test
568 | | d
649 | | d
569 | |
650 | |
570 | o 2:177f92b77385 test
651 | o 2:177f92b77385 test
571 |/ c
652 |/ c
572 |
653 |
573 o 1:d2ae7f538514 test
654 o 1:d2ae7f538514 test
574 | b
655 | b
575 |
656 |
576 o 0:cb9a9f314b8b test
657 o 0:cb9a9f314b8b test
577 a
658 a
578
659
579 $ hg rebase -s 4 -d . --keep --confirm
660 $ hg rebase -s 4 -d . --keep --confirm
580 starting in-memory rebase
661 starting in-memory rebase
581 rebasing 4:e860deea161a "e"
662 rebasing 4:e860deea161a "e"
582 merging e
663 merging e
583 hit a merge conflict
664 hit a merge conflict
584 [1]
665 [1]
585 $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
666 $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
586 @ 9:906d72f66a59 test
667 @ 9:906d72f66a59 test
587 | conflict with e
668 | conflict with e
588 |
669 |
589 o 8:12cbf031f469 test
670 o 8:12cbf031f469 test
590 | d
671 | d
591 |
672 |
592 o 7:c83b1da5b1ae test
673 o 7:c83b1da5b1ae test
593 | c
674 | c
594 |
675 |
595 o 6:baf10c5166d4 test
676 o 6:baf10c5166d4 test
596 | g
677 | g
597 |
678 |
598 o 5:6343ca3eff20 test
679 o 5:6343ca3eff20 test
599 | f
680 | f
600 |
681 |
601 | o 4:e860deea161a test
682 | o 4:e860deea161a test
602 | | e
683 | | e
603 | |
684 | |
604 | o 3:055a42cdd887 test
685 | o 3:055a42cdd887 test
605 | | d
686 | | d
606 | |
687 | |
607 | o 2:177f92b77385 test
688 | o 2:177f92b77385 test
608 |/ c
689 |/ c
609 |
690 |
610 o 1:d2ae7f538514 test
691 o 1:d2ae7f538514 test
611 | b
692 | b
612 |
693 |
613 o 0:cb9a9f314b8b test
694 o 0:cb9a9f314b8b test
614 a
695 a
615
696
616 #if execbit
697 #if execbit
617
698
618 Test a metadata-only in-memory merge
699 Test a metadata-only in-memory merge
619 $ cd $TESTTMP
700 $ cd $TESTTMP
620 $ hg init no_exception
701 $ hg init no_exception
621 $ cd no_exception
702 $ cd no_exception
622 # Produce the following graph:
703 # Produce the following graph:
623 # o 'add +x to foo.txt'
704 # o 'add +x to foo.txt'
624 # | o r1 (adds bar.txt, just for something to rebase to)
705 # | o r1 (adds bar.txt, just for something to rebase to)
625 # |/
706 # |/
626 # o r0 (adds foo.txt, no +x)
707 # o r0 (adds foo.txt, no +x)
627 $ echo hi > foo.txt
708 $ echo hi > foo.txt
628 $ hg ci -qAm r0
709 $ hg ci -qAm r0
629 $ echo hi > bar.txt
710 $ echo hi > bar.txt
630 $ hg ci -qAm r1
711 $ hg ci -qAm r1
631 $ hg co -qr ".^"
712 $ hg co -qr ".^"
632 $ chmod +x foo.txt
713 $ chmod +x foo.txt
633 $ hg ci -qAm 'add +x to foo.txt'
714 $ hg ci -qAm 'add +x to foo.txt'
634 issue5960: this was raising an AttributeError exception
715 issue5960: this was raising an AttributeError exception
635 $ hg rebase -r . -d 1
716 $ hg rebase -r . -d 1
636 rebasing 2:539b93e77479 "add +x to foo.txt" (tip)
717 rebasing 2:539b93e77479 "add +x to foo.txt" (tip)
637 saved backup bundle to $TESTTMP/no_exception/.hg/strip-backup/*.hg (glob)
718 saved backup bundle to $TESTTMP/no_exception/.hg/strip-backup/*.hg (glob)
638 $ hg diff -c tip
719 $ hg diff -c tip
639 diff --git a/foo.txt b/foo.txt
720 diff --git a/foo.txt b/foo.txt
640 old mode 100644
721 old mode 100644
641 new mode 100755
722 new mode 100755
642
723
643 #endif
724 #endif
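The mode-only diff above is the regression check for issue5960: an in-memory rebase of a changeset that changes nothing but file metadata (here the executable bit) used to raise an AttributeError. The scenario, sketched with placeholder names:

  $ chmod +x some-tracked-file
  $ hg ci -m 'flip exec bit only'
  $ hg rebase -r . -d DESTREV    # DESTREV is a placeholder; exercises the metadata-only in-memory merge path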