##// END OF EJS Templates
merge with stable
Augie Fackler -
r33736:02a745c2 merge default
parent child Browse files
Show More

The requested changes are too big and content was truncated. Show full diff

@@ -1,151 +1,152 b''
1 1 35fb62a3a673d5322f6274a44ba6456e5e4b3b37 0 iD8DBQBEYmO2ywK+sNU5EO8RAnaYAKCO7x15xUn5mnhqWNXqk/ehlhRt2QCfRDfY0LrUq2q4oK/KypuJYPHgq1A=
2 2 2be3001847cb18a23c403439d9e7d0ace30804e9 0 iD8DBQBExUbjywK+sNU5EO8RAhzxAKCtyHAQUzcTSZTqlfJ0by6vhREwWQCghaQFHfkfN0l9/40EowNhuMOKnJk=
3 3 36a957364b1b89c150f2d0e60a99befe0ee08bd3 0 iD8DBQBFfL2QywK+sNU5EO8RAjYFAKCoGlaWRTeMsjdmxAjUYx6diZxOBwCfY6IpBYsKvPTwB3oktnPt5Rmrlys=
4 4 27230c29bfec36d5540fbe1c976810aefecfd1d2 0 iD8DBQBFheweywK+sNU5EO8RAt7VAKCrqJQWT2/uo2RWf0ZI4bLp6v82jACgjrMdsaTbxRsypcmEsdPhlG6/8F4=
5 5 fb4b6d5fe100b0886f8bc3d6731ec0e5ed5c4694 0 iD8DBQBGgHicywK+sNU5EO8RAgNxAJ0VG8ixAaeudx4sZbhngI1syu49HQCeNUJQfWBgA8bkJ2pvsFpNxwYaX3I=
6 6 23889160905a1b09fffe1c07378e9fc1827606eb 0 iD8DBQBHGTzoywK+sNU5EO8RAr/UAJ0Y8s4jQtzgS+G9vM8z6CWBThZ8fwCcCT5XDj2XwxKkz/0s6UELwjsO3LU=
7 7 bae2e9c838e90a393bae3973a7850280413e091a 0 iD8DBQBH6DO5ywK+sNU5EO8RAsfrAJ0e4r9c9GF/MJsM7Xjd3NesLRC3+ACffj6+6HXdZf8cswAoFPO+DY00oD0=
8 8 d5cbbe2c49cee22a9fbeb9ea41daa0ac4e26b846 0 iD8DBQBINdwsywK+sNU5EO8RAjIUAKCPmlFJSpsPAAUKF+iNHAwVnwmzeQCdEXrL27CWclXuUKdbQC8De7LICtE=
9 9 d2375bbee6d47e62ba8e415c86e83a465dc4dce9 0 iD8DBQBIo1wpywK+sNU5EO8RAmRNAJ94x3OFt6blbqu/yBoypm/AJ44fuACfUaldXcV5z9tht97hSp22DVTEPGc=
10 10 2a67430f92f15ea5159c26b09ec4839a0c549a26 0 iEYEABECAAYFAkk1hykACgkQywK+sNU5EO85QACeNJNUanjc2tl4wUoPHNuv+lSj0ZMAoIm93wSTc/feyYnO2YCaQ1iyd9Nu
11 11 3773e510d433969e277b1863c317b674cbee2065 0 iEYEABECAAYFAklNbbAACgkQywK+sNU5EO8o+gCfeb2/lfIJZMvyDA1m+G1CsBAxfFsAoIa6iAMG8SBY7hW1Q85Yf/LXEvaE
12 12 11a4eb81fb4f4742451591489e2797dc47903277 0 iEYEABECAAYFAklcAnsACgkQywK+sNU5EO+uXwCbBVHNNsLy1g7BlAyQJwadYVyHOXoAoKvtAVO71+bv7EbVoukwTzT+P4Sx
13 13 11efa41037e280d08cfb07c09ad485df30fb0ea8 0 iEYEABECAAYFAkmvJRQACgkQywK+sNU5EO9XZwCeLMgDgPSMWMm6vgjL4lDs2pEc5+0AnRxfiFbpbBfuEFTqKz9nbzeyoBlx
14 14 02981000012e3adf40c4849bd7b3d5618f9ce82d 0 iEYEABECAAYFAknEH3wACgkQywK+sNU5EO+uXwCeI+LbLMmhjU1lKSfU3UWJHjjUC7oAoIZLvYDGOL/tNZFUuatc3RnZ2eje
15 15 196d40e7c885fa6e95f89134809b3ec7bdbca34b 0 iEYEABECAAYFAkpL2X4ACgkQywK+sNU5EO9FOwCfXJycjyKJXsvQqKkHrglwOQhEKS4An36GfKzptfN8b1qNc3+ya/5c2WOM
16 16 3ef6c14a1e8e83a31226f5881b7fe6095bbfa6f6 0 iEYEABECAAYFAkpopLIACgkQywK+sNU5EO8QSgCfZ0ztsd071rOa2lhmp9Fyue/WoI0AoLTei80/xrhRlB8L/rZEf2KBl8dA
17 17 31ec469f9b556f11819937cf68ee53f2be927ebf 0 iEYEABECAAYFAksBuxAACgkQywK+sNU5EO+mBwCfagB+A0txzWZ6dRpug3LEoK7Z1QsAoKpbk8vsLjv6/oRDicSk/qBu33+m
18 18 439d7ea6fe3aa4ab9ec274a68846779153789de9 0 iEYEABECAAYFAksVw0kACgkQywK+sNU5EO/oZwCfdfBEkgp38xq6wN2F4nj+SzofrJIAnjmxt04vaJSeOOeHylHvk6lzuQsw
19 19 296a0b14a68621f6990c54fdba0083f6f20935bf 0 iEYEABECAAYFAks+jCoACgkQywK+sNU5EO9J8wCeMUGF9E/gS2UBsqIz56WS4HMPRPUAoI5J95mwEIK8Clrl7qFRidNI6APq
20 20 4aa619c4c2c09907034d9824ebb1dd0e878206eb 0 iEYEABECAAYFAktm9IsACgkQywK+sNU5EO9XGgCgk4HclRQhexEtooPE5GcUCdB6M8EAn2ptOhMVbIoO+JncA+tNACPFXh0O
21 21 ff2704a8ded37fbebd8b6eb5ec733731d725da8a 0 iEYEABECAAYFAkuRoSQACgkQywK+sNU5EO//3QCeJDc5r2uFyFCtAlpSA27DEE5rrxAAn2FSwTy9fhrB3QAdDQlwkEZcQzDh
22 22 2b01dab594167bc0dd33331dbaa6dca3dca1b3aa 0 iEYEABECAAYFAku1IwIACgkQywK+sNU5EO9MjgCdHLVwkTZlNHxhcznZKBL1rjN+J7cAoLLWi9LTL6f/TgBaPSKOy1ublbaW
23 23 39f725929f0c48c5fb3b90c071fc3066012456ca 0 iEYEABECAAYFAkvclvsACgkQywK+sNU5EO9FSwCeL9i5x8ALW/LE5+lCX6MFEAe4MhwAn1ev5o6SX6GrNdDfKweiemfO2VBk
24 24 fdcf80f26604f233dc4d8f0a5ef9d7470e317e8a 0 iEYEABECAAYFAkvsKTkACgkQywK+sNU5EO9qEACgiSiRGvTG2vXGJ65tUSOIYihTuFAAnRzRIqEVSw8M8/RGeUXRps0IzaCO
25 25 24fe2629c6fd0c74c90bd066e77387c2b02e8437 0 iEYEABECAAYFAkwFLRsACgkQywK+sNU5EO+pJACgp13tPI+pbwKZV+LeMjcQ4H6tCZYAoJebzhd6a8yYx6qiwpJxA9BXZNXy
26 26 f786fc4b8764cd2a5526d259cf2f94d8a66924d9 0 iEYEABECAAYFAkwsyxcACgkQywK+sNU5EO+crACfUpNAF57PmClkSri9nJcBjb2goN4AniPCNaKvnki7TnUsi1u2oxltpKKL
27 27 bf1774d95bde614af3956d92b20e2a0c68c5fec7 0 iEYEABECAAYFAkxVwccACgkQywK+sNU5EO+oFQCeJzwZ+we1fIIyBGCddHceOUAN++cAnjvT6A8ZWW0zV21NXIFF1qQmjxJd
28 28 c00f03a4982e467fb6b6bd45908767db6df4771d 0 iEYEABECAAYFAkxXDqsACgkQywK+sNU5EO/GJACfT9Rz4hZOxPQEs91JwtmfjevO84gAmwSmtfo5mmWSm8gtTUebCcdTv0Kf
29 29 ff5cec76b1c5b6be9c3bb923aae8c3c6d079d6b9 0 iD8DBQBMdo+qywK+sNU5EO8RAqQpAJ975BL2CCAiWMz9SXthNQ9xG181IwCgp4O+KViHPkufZVFn2aTKMNvcr1A=
30 30 93d8bff78c96fe7e33237b257558ee97290048a4 0 iD8DBQBMpfvdywK+sNU5EO8RAsxVAJ0UaL1XB51C76JUBhafc9GBefuMxwCdEWkTOzwvE0SarJBe9i008jhbqW4=
31 31 333421b9e0f96c7bc788e5667c146a58a9440a55 0 iD8DBQBMz0HOywK+sNU5EO8RAlsEAJ0USh6yOG7OrWkADGunVt9QimBQnwCbBqeMnKgSbwEw8jZwE3Iz1mdrYlo=
32 32 4438875ec01bd0fc32be92b0872eb6daeed4d44f 0 iD8DBQBM4WYUywK+sNU5EO8RAhCVAJ0dJswachwFAHALmk1x0RJehxzqPQCbBNskP9n/X689jB+btNTZTyKU/fw=
33 33 6aff4f144ad356311318b0011df0bb21f2c97429 0 iD8DBQBM9uxXywK+sNU5EO8RAv+4AKCDj4qKP16GdPaq1tP6BUwpM/M1OACfRyzLPp/qiiN8xJTWoWYSe/XjJug=
34 34 e3bf16703e2601de99e563cdb3a5d50b64e6d320 0 iD8DBQBNH8WqywK+sNU5EO8RAiQTAJ9sBO+TeiGro4si77VVaQaA6jcRUgCfSA28dBbjj0oFoQwvPoZjANiZBH8=
35 35 a6c855c32ea081da3c3b8ff628f1847ff271482f 0 iD8DBQBNSJJ+ywK+sNU5EO8RAoJaAKCweDEF70fu+r1Zn7pYDXdlk5RuSgCeO9gK/eit8Lin/1n3pO7aYguFLok=
36 36 2b2155623ee2559caf288fd333f30475966c4525 0 iD8DBQBNSJeBywK+sNU5EO8RAm1KAJ4hW9Cm9nHaaGJguchBaPLlAr+O3wCgqgmMok8bdAS06N6PL60PSTM//Gg=
37 37 2616325766e3504c8ae7c84bd15ee610901fe91d 0 iD8DBQBNbWy9ywK+sNU5EO8RAlWCAJ4mW8HbzjJj9GpK98muX7k+7EvEHwCfaTLbC/DH3QEsZBhEP+M8tzL6RU4=
38 38 aa1f3be38ab127280761889d2dca906ca465b5f4 0 iD8DBQBNeQq7ywK+sNU5EO8RAlEOAJ4tlEDdetE9lKfjGgjbkcR8PrC3egCfXCfF3qNVvU/2YYjpgvRwevjvDy0=
39 39 b032bec2c0a651ca0ddecb65714bfe6770f67d70 0 iD8DBQBNlg5kywK+sNU5EO8RAnGEAJ9gmEx6MfaR4XcG2m/93vwtfyzs3gCgltzx8/YdHPwqDwRX/WbpYgi33is=
40 40 3cb1e95676ad089596bd81d0937cad37d6e3b7fb 0 iD8DBQBNvTy4ywK+sNU5EO8RAmp8AJ9QnxK4jTJ7G722MyeBxf0UXEdGwACgtlM7BKtNQfbEH/fOW5y+45W88VI=
41 41 733af5d9f6b22387913e1d11350fb8cb7c1487dd 0 iD8DBQBN5q/8ywK+sNU5EO8RArRGAKCNGT94GKIYtSuwZ57z1sQbcw6uLACfffpbMV4NAPMl8womAwg+7ZPKnIU=
42 42 de9eb6b1da4fc522b1cab16d86ca166204c24f25 0 iD8DBQBODhfhywK+sNU5EO8RAr2+AJ4ugbAj8ae8/K0bYZzx3sascIAg1QCeK3b+zbbVVqd3b7CDpwFnaX8kTd4=
43 43 4a43e23b8c55b4566b8200bf69fe2158485a2634 0 iD8DBQBONzIMywK+sNU5EO8RAj5SAJ0aPS3+JHnyI6bHB2Fl0LImbDmagwCdGbDLp1S7TFobxXudOH49bX45Iik=
44 44 d629f1e89021103f1753addcef6b310e4435b184 0 iD8DBQBOWAsBywK+sNU5EO8RAht4AJwJl9oNFopuGkj5m8aKuf7bqPkoAQCeNrEm7UhFsZKYT5iUOjnMV7s2LaM=
45 45 351a9292e430e35766c552066ed3e87c557b803b 0 iD8DBQBOh3zUywK+sNU5EO8RApFMAKCD3Y/u3avDFndznwqfG5UeTHMlvACfUivPIVQZyDZnhZMq0UhC6zhCEQg=
46 46 384082750f2c51dc917d85a7145748330fa6ef4d 0 iD8DBQBOmd+OywK+sNU5EO8RAgDgAJ9V/X+G7VLwhTpHrZNiOHabzSyzYQCdE2kKfIevJUYB9QLAWCWP6DPwrwI=
47 47 41453d55b481ddfcc1dacb445179649e24ca861d 0 iD8DBQBOsFhpywK+sNU5EO8RAqM6AKCyfxUae3/zLuiLdQz+JR78690eMACfQ6JTBQib4AbE+rUDdkeFYg9K/+4=
48 48 195dbd1cef0c2f9f8bcf4ea303238105f716bda3 0 iD8DBQBO1/fWywK+sNU5EO8RAmoPAKCR5lpv1D6JLURHD8KVLSV4GRVEBgCgnd0Sy78ligNfqAMafmACRDvj7vo=
49 49 6344043924497cd06d781d9014c66802285072e4 0 iD8DBQBPALgmywK+sNU5EO8RAlfhAJ9nYOdWnhfVDHYtDTJAyJtXBAQS9wCgnefoSQt7QABkbGxM+Q85UYEBuD0=
50 50 db33555eafeaf9df1e18950e29439eaa706d399b 0 iD8DBQBPGdzxywK+sNU5EO8RAppkAJ9jOXhUVE/97CPgiMA0pMGiIYnesQCfengAszcBiSiKGugiI8Okc9ghU+Y=
51 51 2aa5b51f310fb3befd26bed99c02267f5c12c734 0 iD8DBQBPKZ9bywK+sNU5EO8RAt1TAJ45r1eJ0YqSkInzrrayg4TVCh0SnQCgm0GA/Ua74jnnDwVQ60lAwROuz1Q=
52 52 53e2cd303ecf8ca7c7eeebd785c34e5ed6b0f4a4 0 iD8DBQBPT/fvywK+sNU5EO8RAnfYAKCn7d0vwqIb100YfWm1F7nFD5B+FACeM02YHpQLSNsztrBCObtqcnfod7Q=
53 53 b9bd95e61b49c221c4cca24e6da7c946fc02f992 0 iD8DBQBPeLsIywK+sNU5EO8RAvpNAKCtKe2gitz8dYn52IRF0hFOPCR7AQCfRJL/RWCFweu2T1vH/mUOCf8SXXc=
54 54 d9e2f09d5488c395ae9ddbb320ceacd24757e055 0 iD8DBQBPju/dywK+sNU5EO8RArBYAJ9xtifdbk+hCOJO8OZa4JfHX8OYZQCeKPMBaBWiT8N/WHoOm1XU0q+iono=
55 55 00182b3d087909e3c3ae44761efecdde8f319ef3 0 iD8DBQBPoFhIywK+sNU5EO8RAhzhAKCBj1n2jxPTkZNJJ5pSp3soa+XHIgCgsZZpAQxOpXwCp0eCdNGe0+pmxmg=
56 56 5983de86462c5a9f42a3ad0f5e90ce5b1d221d25 0 iD8DBQBPovNWywK+sNU5EO8RAhgiAJ980T91FdPTRMmVONDhpkMsZwVIMACgg3bKvoWSeuCW28llUhAJtUjrMv0=
57 57 85a358df5bbbe404ca25730c9c459b34263441dc 0 iD8DBQBPyZsWywK+sNU5EO8RAnpLAJ48qrGDJRT+pteS0mSQ11haqHstPwCdG4ccGbk+0JHb7aNy8/NRGAOqn9w=
58 58 b013baa3898e117959984fc64c29d8c784d2f28b 0 iD8DBQBP8QOPywK+sNU5EO8RAqimAKCFRSx0lvG6y8vne2IhNG062Hn0dACeMLI5/zhpWpHBIVeAAquYfx2XFeA=
59 59 7f5094bb3f423fc799e471aac2aee81a7ce57a0b 0 iD8DBQBQGiL8ywK+sNU5EO8RAq5oAJ4rMMCPx6O+OuzNXVOexogedWz/QgCeIiIxLd76I4pXO48tdXhr0hQcBuM=
60 60 072209ae4ddb654eb2d5fd35bff358c738414432 0 iD8DBQBQQkq0ywK+sNU5EO8RArDTAJ9nk5CySnNAjAXYvqvx4uWCw9ThZwCgqmFRehH/l+oTwj3f8nw8u8qTCdc=
61 61 b3f0f9a39c4e1d0250048cd803ab03542d6f140a 0 iD8DBQBQamltywK+sNU5EO8RAlsqAJ4qF/m6aFu4mJCOKTiAP5RvZFK02ACfawYShUZO6OXEFfveU0aAxDR0M1k=
62 62 d118a4f4fd16d9b558ec3f3e87bfee772861d2b7 0 iD8DBQBQgPV5ywK+sNU5EO8RArylAJ0abcx5NlDjyv3ZDWpAfRIHyRsJtQCgn4TMuEayqgxzrvadQZHdTEU2g38=
63 63 195ad823b5d58c68903a6153a25e3fb4ed25239d 0 iD8DBQBQkuT9ywK+sNU5EO8RAhB4AKCeerItoK2Jipm2cVf4euGofAa/WACeJj3TVd4pFILpb+ogj7ebweFLJi0=
64 64 0c10cf8191469e7c3c8844922e17e71a176cb7cb 0 iD8DBQBQvQWoywK+sNU5EO8RAnq3AJoCn98u4geFx5YaQaeh99gFhCd7bQCgjoBwBSUyOvGd0yBy60E3Vv3VZhM=
65 65 a4765077b65e6ae29ba42bab7834717b5072d5ba 0 iD8DBQBQ486sywK+sNU5EO8RAhmJAJ90aLfLKZhmcZN7kqphigQJxiFOQACeJ5IUZxjGKH4xzi3MrgIcx9n+dB0=
66 66 f5fbe15ca7449f2c9a3cf817c86d0ae68b307214 0 iD8DBQBQ+yuYywK+sNU5EO8RAm9JAJoD/UciWvpGeKBcpGtZJBFJVcL/HACghDXSgQ+xQDjB+6uGrdgAQsRR1Lg=
67 67 a6088c05e43a8aee0472ca3a4f6f8d7dd914ebbf 0 iD8DBQBRDDROywK+sNU5EO8RAh75AJ9uJCGoCWnP0Lv/+XuYs4hvUl+sAgCcD36QgAnuw8IQXrvv684BAXAnHcA=
68 68 7511d4df752e61fe7ae4f3682e0a0008573b0402 0 iD8DBQBRFYaoywK+sNU5EO8RAuErAJoDyhXn+lptU3+AevVdwAIeNFyR2gCdHzPHyWd+JDeWCUR+pSOBi8O2ppM=
69 69 5b7175377babacce80a6c1e12366d8032a6d4340 0 iD8DBQBRMCYgywK+sNU5EO8RAq1/AKCWKlt9ysibyQgYwoxxIOZv5J8rpwCcDSHQaaf1fFZUTnQsOePwcM2Y/Sg=
70 70 50c922c1b5145dab8baefefb0437d363b6a6c21c 0 iD8DBQBRWnUnywK+sNU5EO8RAuQRAJwM42cJqJPeqJ0jVNdMqKMDqr4dSACeP0cRVGz1gitMuV0x8f3mrZrqc7I=
71 71 8a7bd2dccd44ed571afe7424cd7f95594f27c092 0 iD8DBQBRXfBvywK+sNU5EO8RAn+LAKCsMmflbuXjYRxlzFwId5ptm8TZcwCdGkyLbZcASBOkzQUm/WW1qfknJHU=
72 72 292cd385856d98bacb2c3086f8897bc660c2beea 0 iD8DBQBRcM0BywK+sNU5EO8RAjp4AKCJBykQbvXhKuvLSMxKx3a2TBiXcACfbr/kLg5GlZTF/XDPmY+PyHgI/GM=
73 73 23f785b38af38d2fca6b8f3db56b8007a84cd73a 0 iD8DBQBRgZwNywK+sNU5EO8RAmO4AJ4u2ILGuimRP6MJgE2t65LZ5dAdkACgiENEstIdrlFC80p+sWKD81kKIYI=
74 74 ddc7a6be20212d18f3e27d9d7e6f079a66d96f21 0 iD8DBQBRkswvywK+sNU5EO8RAiYYAJsHTHyHbJeAgmGvBTmDrfcKu4doUgCeLm7eGBjx7yAPUvEtxef8rAkQmXI=
75 75 cceaf7af4c9e9e6fa2dbfdcfe9856c5da69c4ffd 0 iD8DBQBRqnFLywK+sNU5EO8RAsWNAJ9RR6t+y1DLFc2HeH0eN9VfZAKF9gCeJ8ezvhtKq/LMs0/nvcgKQc/d5jk=
76 76 009794acc6e37a650f0fae37872e733382ac1c0c 0 iD8DBQBR0guxywK+sNU5EO8RArNkAKCq9pMihVzP8Os5kCmgbWpe5C37wgCgqzuPZTHvAsXF5wTyaSTMVa9Ccq4=
77 77 f0d7721d7322dcfb5af33599c2543f27335334bb 0 iD8DBQBR8taaywK+sNU5EO8RAqeEAJ4idDhhDuEsgsUjeQgWNj498matHACfT67gSF5w0ylsrBx1Hb52HkGXDm0=
78 78 f37b5a17e6a0ee17afde2cdde5393dd74715fb58 0 iD8DBQBR+ymFywK+sNU5EO8RAuSdAJkBMcd9DAZ3rWE9WGKPm2YZ8LBoXACfXn/wbEsVy7ZgJoUwiWmHSnQaWCI=
79 79 335a558f81dc73afeab4d7be63617392b130117f 0 iQIVAwUAUiZrIyBXgaxoKi1yAQK2iw//cquNqqSkc8Re5/TZT9I6NH+lh6DbOKjJP0Xl1Wqq0K+KSIUgZG4G32ovaEb2l5X0uY+3unRPiZ0ebl0YSw4Fb2ZiPIADXLBTOYRrY2Wwd3tpJeGI6wEgZt3SfcITV/g7NJrCjT3FlYoSOIayrExM80InSdcEM0Q3Rx6HKzY2acyxzgZeAtAW5ohFvHilSvY6p5Gcm4+QptMxvw45GPdreUmjeXZxNXNXZ8P+MjMz/QJbai/N7PjmK8lqnhkBsT48Ng/KhhmOkGntNJ2/ImBWLFGcWngSvJ7sfWwnyhndvGhe0Hq1NcCf7I8TjNDxU5TR+m+uW7xjXdLoDbUjBdX4sKXnh8ZjbYiODKBOrrDq25cf8nA/tnpKyE/qsVy60kOk6loY4XKiYmn1V49Ta0emmDx0hqo3HgxHHsHX0NDnGdWGol7cPRET0RzVobKq1A0jnrhPooWidvLh9bPzLonrWDo+ib+DuySoRkuYUK4pgZJ2mbg6daFOBEZygkSyRB8bo1UQUP7EgQDrWe4khb/5GHEfDkrQz3qu/sXvc0Ir1mOUWBFPHC2DjjCn/oMJuUkG1SwM8l2Bfv7h67ssES6YQ2+RjOix4yid7EXS/Ogl45PzCIPSI5+BbNs10JhE0w5uErBHlF53EDTe/TSLc+GU6DB6PP6dH912Njdr3jpNSUQ=
80 80 e7fa36d2ad3a7944a52dca126458d6f482db3524 0 iQIVAwUAUktg4yBXgaxoKi1yAQLO0g//du/2ypYYUfmM/yZ4zztNKIvgMSGTDVbCCGB2y2/wk2EcolpjpGTkcgnJT413ksYtw78ZU+mvv0RjgrFCm8DQ8kroJaQZ2qHmtSUb42hPBPvtg6kL9YaA4yvp87uUBpFRavGS5uX4hhEIyvZKzhXUBvqtL3TfwR7ld21bj8j00wudqELyyU9IrojIY9jkJ3XL/4shBGgP7u6OK5g8yJ6zTnWgysUetxHBPrYjG25lziiiZQFvZqK1B3PUqAOaFPltQs0PB8ipOCAHQgJsjaREj8VmC3+rskmSSy66NHm6gAB9+E8oAgOcU7FzWbdYgnz4kR3M7TQvHX9U61NinPXC6Q9d1VPhO3E6sIGvqJ4YeQOn65V9ezYuIpFSlgQzCHMmLVnOV96Uv1R/Z39I4w7D3S5qoZcQT/siQwGbsZoPMGFYmqOK1da5TZWrrJWkYzc9xvzT9m3q3Wds5pmCmo4b/dIqDifWwYEcNAZ0/YLHwCN5SEZWuunkEwtU5o7TZAv3bvDDA6WxUrrHI/y9/qvvhXxsJnY8IueNhshdmWZfXKz+lJi2Dvk7DUlEQ1zZWSsozi1E+3biMPJO47jsxjoT/jmE5+GHLCgcnXXDVBeaVal99IOaTRFukiz2EMsry1s8fnwEE5XKDKRlU/dOPfsje0gc7bgE0QD/u3E4NJ99g9A=
81 81 1596f2d8f2421314b1ddead8f7d0c91009358994 0 iQIVAwUAUmRq+yBXgaxoKi1yAQLolhAAi+l4ZFdQTu9yJDv22YmkmHH4fI3d5VBYgvfJPufpyaj7pX626QNW18UNcGSw2BBpYHIJzWPkk/4XznLVKr4Ciw2N3/yqloEFV0V2SSrTbMWiR9qXI4KJH+Df3KZnKs3FgiYpXkErL4GWkc1jLVR50xQ5RnkMljjtCd0NTeV2PHZ6gP2qbu6CS+5sm3AFhTDGnx8GicbMw76ZNw5M2G+T48yH9jn5KQi2SBThfi4H9Bpr8FDuR7PzQLgw9SbtYxtdQxNkK55k0nG4oLDxduNakU6SH9t8n8tdCfMt58kTzlQVrPFiTFjKu2n2JioDTz2HEivbZ5H757cu7SvpX8gW3paeBc57e+GOLMisMZABXLICq59c3QnrMwFY4FG+5cpiHVXoaZz/0bYCJx+IhU4QLWqZuzb18KSyHUCqQRzXlzS6QV5O7dY5YNQXFC44j/dS5zdgWMYo2mc6mVP2OaPUn7F6aQh5MCDYorPIOkcNjOg7ytajo7DXbzWt5Al8qt6386BJksyR3GAonc09+l8IFeNxk8HZNP4ETQ8aWj0dC9jgBDPK43T2Bju/i84s+U/bRe4tGSQalZUEv06mkIH/VRJp5w2izYTsdIjA4FT9d36OhaxlfoO1X6tHR9AyA3bF/g/ozvBwuo3kTRUUqo+Ggvx/DmcPQdDiZZQIqDBXch0=
82 82 d825e4025e39d1c39db943cdc89818abd0a87c27 0 iQIVAwUAUnQlXiBXgaxoKi1yAQJd3BAAi7LjMSpXmdR7B8K98C3/By4YHsCOAocMl3JXiLd7SXwKmlta1zxtkgWwWJnNYE3lVJvGCl+l4YsGKmFu755MGXlyORh1x4ohckoC1a8cqnbNAgD6CSvjSaZfnINLGZQP1wIP4yWj0FftKVANQBjj/xkkxO530mjBYnUvyA4PeDd5A1AOUUu6qHzX6S5LcprEt7iktLI+Ae1dYTkiCpckDtyYUKIk3RK/4AGWwGCPddVWeV5bDxLs8GHyMbqdBwx+2EAMtyZfXT+z6MDRsL/gEBVOXHb/UR0qpYED+qFnbtTlxqQkRE/wBhwDoRzUgcSuukQ9iPn79WNDSdT5b6Jd393uEO5BNF/DB6rrOiWmlpoooWgTY9kcwGB02v0hhLrH5r1wkv8baaPl+qjCjBxf4CNKm/83KN5/umGbZlORqPSN5JVxK6vDNwFFmHLaZbMT1g27GsGOWm84VH+dgolgk4nmRNSO37eTNM5Y1C3Zf2amiqDSRcAxCgseg0Jh10G7i52SSTcZPI2MqrwT9eIyg8PTIxT1D5bPcCzkg5nTTL6S7bet7OSwynRnHslhvVUBly8aIj4eY/5cQqAucUUa5sq6xLD8N27Tl+sQi+kE6KtWu2c0ZhpouflYp55XNMHgU4KeFcVcDtHfJRF6THT6tFcHFNauCHbhfN2F33ANMP4=
83 83 209e04a06467e2969c0cc6501335be0406d46ef0 0 iQIVAwUAUpv1oCBXgaxoKi1yAQKOFBAAma2wlsr3w/5NvDwq2rmOrgtNDq1DnNqcXloaOdwegX1z3/N++5uVjLjI0VyguexnwK+7E8rypMZ+4glaiZvIiGPnGMYbG9iOoz5XBhtUHzI5ECYfm5QU81by9VmCIvArDFe5Hlnz4XaXpEGnAwPywD+yzV3/+tyoV7MgsVinCMtbX9OF84/ubWKNzq2810FpQRfYoCOrF8sUed/1TcQrSm1eMB/PnuxjFCFySiR6J7Urd9bJoJIDtdZOQeeHaL5Z8Pcsyzjoe/9oTwJ3L3tl/NMZtRxiQUWtfRA0zvEnQ4QEkZSDMd/JnGiWHPVeP4P92+YN15za9yhneEAtustrTNAmVF2Uh92RIlmkG475HFhvwPJ4DfCx0vU1OOKX/U4c1rifW7H7HaipoaMlsDU2VFsAHcc3YF8ulVt27bH2yUaLGJz7eqpt+3DzZTKp4d/brZA2EkbVgsoYP+XYLbzxfwWlaMwiN3iCnlTFbNogH8MxhfHFWBj6ouikqOz8HlNl6BmSQiUCBnz5fquVpXmW2Md+TDekk+uOW9mvk1QMU62br+Z6PEZupkdTrqKaz+8ZMWvTRct8SiOcu7R11LpfERyrwYGGPei0P2YrEGIWGgXvEobXoPTSl7J+mpOA/rp2Q1zA3ihjgzwtGZZF+ThQXZGIMGaA2YPgzuYRqY8l5oc=
84 84 ca387377df7a3a67dbb90b6336b781cdadc3ef41 0 iQIVAwUAUsThISBXgaxoKi1yAQJpvRAAkRkCWLjHBZnWxX9Oe6t2HQgkSsmn9wMHvXXGFkcAmrqJ86yfyrxLq2Ns0X7Qwky37kOwKsywM53FQlsx9j//Y+ncnGZoObFTz9YTuSbOHGVsTbAruXWxBrGOf1nFTlg8afcbH0jPfQXwxf3ptfBhgsFCzORcqc8HNopAW+2sgXGhHnbVtq6LF90PWkbKjCCQLiX3da1uETGAElrl4jA5Y2i64S1Q/2X+UFrNslkIIRCGmAJ6BnE6KLJaUftpfbN7Br7a3z9xxWqxRYDOinxDgfAPAucOJPLgMVQ0bJIallaRu7KTmIWKIuSBgg1/hgfoX8I1w49WrTGp0gGY140kl8RWwczAz/SB03Xtbl2+h6PV7rUV2K/5g61DkwdVbWqXM9wmJZmvjEKK0qQbBT0By4QSEDNcKKqtaFFwhFzx4dkXph0igHOtXhSNzMd8PsFx/NRn9NLFIpirxfqVDwakpDNBZw4Q9hUAlTPxSFL3vD9/Zs7lV4/dAvvl+tixJEi2k/iv248b/AI1PrPIQEqDvjrozzzYvrS4HtbkUn+IiHiepQaYnpqKoXvBu6btK/nv0GTxB5OwVJzMA1RPDcxIFfZA2AazHjrXiPAl5uWYEddEvRjaCiF8xkQkfiXzLOoqhKQHdwPGcfMFEs9lNR8BrB2ZOajBJc8RPsFDswhT5h4=
85 85 8862469e16f9236208581b20de5f96bd13cc039d 0 iQIVAwUAUt7cLSBXgaxoKi1yAQLOkRAAidp501zafqe+JnDwlf7ORcJc+FgCE6mK1gxDfReCbkMsY7AzspogU7orqfSmr6XXdrDwmk3Y5x3mf44OGzNQjvuNWhqnTgJ7sOcU/lICGQUc8WiGNzHEMFGX9S+K4dpUaBf8Tcl8pU3iArhlthDghW6SZeDFB/FDBaUx9dkdFp6eXrmu4OuGRZEvwUvPtCGxIL7nKNnufI1du/MsWQxvC2ORHbMNtRq6tjA0fLZi4SvbySuYifQRS32BfHkFS5Qu4/40+1k7kd0YFyyQUvIsVa17lrix3zDqMavG8x7oOlqM/axDMBT6DhpdBMAdc5qqf8myz8lwjlFjyDUL6u3Z4/yE0nUrmEudXiXwG0xbVoEN8SCNrDmmvFMt6qdCpdDMkHr2TuSh0Hh4FT5CDkzPI8ZRssv/01j/QvIO3c/xlbpGRPWpsPXEVOz3pmjYN4qyQesnBKWCENsQLy/8s2rey8iQgx2GtsrNw8+wGX6XE4v3QtwUrRe12hWoNrEHWl0xnLv2mvAFqdMAMpFY6EpOKLlE4hoCs2CmTJ2dv6e2tiGTXGU6/frI5iuNRK61OXnH5OjEc8DCGH/GC7NXyDOXOB+7BdBvvf50l2C/vxR2TKgTncLtHeLCrR0GHNHsxqRo1UDwOWur0r7fdfCRvb2tIr5LORCqKYVKd60/BAXjHWc=
86 86 3cec5134e9c4bceab6a00c60f52a4f80677a78f2 0 iQIVAwUAUu1lIyBXgaxoKi1yAQIzCBAAizSWvTkWt8+tReM9jUetoSToF+XahLhn381AYdErFCBErX4bNL+vyEj+Jt2DHsAfabkvNBe3k7rtFlXHwpq6POa/ciFGPDhFlplNv6yN1jOKBlMsgdjpn7plZKcLHODOigU7IMlgg70Um8qVrRgQ8FhvbVgR2I5+CD6bucFzqo78wNl9mCIHIQCpGKIUoz56GbwT+rUpEB182Z3u6rf4NWj35RZLGAicVV2A2eAAFh4ZvuC+Z0tXMkp6Gq9cINawZgqfLbzVYJeXBtJC39lHPyp5P3LaEVRhntc9YTwbfkVGjyJZR60iYrieeKpOYRnzgHauPVdgVhkTkBxshmEPY7svKYSQqlj8hLuFa+a3ajbIPrpQAAi1MgtamA991atNqGiSTjdZa9kLQvfdn0k80+gkCxpuO56PhvtdjKsYVRgQMTYmQVQdh3x4WbQOSqTADXXIZUaWxx4RmNSlxY7KD+3lPP09teOD+A3B2cP60bC5NsCfULtQFXQzdC7NvfIyYfYBTZa+Pv6HFkVe10cbnqTt83hBy0D77vdaegPRe56qDNU+GrIG2/rosnlKGFjFoK/pTYkR9uzfkrhEjLwyfkoXlBqY+376W0PC5fP10pJeQBS9DuXpCPlgtyW0Jy1ayCT1YR4QJC4n75vZwTFBFRBhSi0HqFquOgy83+O0Q/k=
87 87 b96cb15ec9e04d8ac5ee08b34fcbbe4200588965 0 iQIVAwUAUxJPlyBXgaxoKi1yAQLIRA//Qh9qzoYthPAWAUNbzybWXC/oMBI2X89NQC7l1ivKhv7cn9L79D8SWXM18q7LTwLdlwOkV/a0NTE3tkQTLvxJpfnRLCBbMOcGiIn/PxsAae8IhMAUbR7qz+XOynHOs60ZhK9X8seQHJRf1YtOI9gYTL/WYk8Cnpmc6xZQ90TNhoPPkpdfe8Y236V11SbYtN14fmrPaWQ3GXwyrvQaqM1F7BxSnC/sbm9+/wprsTa8gRQo7YQL/T5jJQgFiatG3yayrDdJtoRq3TZKtsxw8gtQdfVCrrBibbysjM8++dnwA92apHNUY8LzyptPy7rSDXRrIpPUWGGTQTD+6HQwkcLFtIuUpw4I75SV3z2r6LyOLKzDJUIunKOOYFS/rEIQGxZHxZOBAvbI+73mHAn3pJqm+UAA7R1n7tk3JyQncg50qJlm9zIUPGpNFcdEqak5iXzGYx292VlcE+fbJYeIPWggpilaVUgdmXtMCG0O0uX6C8MDmzVDCjd6FzDJ4GTZwgmWJaamvls85CkZgyN/UqlisfFXub0A1h7qAzBSVpP1+Ti+UbBjlrGX8BMRYHRGYIeIq16elcWwSpLgshjDwNn2r2EdwX8xKU5mucgTzSLprbOYGdQaqnvf6e8IX5WMBgwVW9YdY9yJKSLF7kE1AlM9nfVcXwOK4mHoMvnNgiX3zsw=
88 88 3f83fc5cfe715d292069ee8417c83804f6c6c1e4 0 iQIVAwUAUztENyBXgaxoKi1yAQIpkhAAmJj5JRTSn0Dn/OTAHggalw8KYFbAck1X35Wg9O7ku7sd+cOnNnkYfqAdz2m5ikqWHP7aWMiNkNy7Ree2110NqkQVYG/2AJStXBdIOmewqnjDlNt+rbJQN/JsjeKSCy+ToNvhqX5cTM9DF2pwRjMsTXVff307S6/3pga244i+RFAeG3WCUrzfDu641MGFLjG4atCj8ZFLg9DcW5bsRiOs5ZK5Il+UAb2yyoS2KNQ70VLhYULhGtqq9tuO4nLRGN3DX/eDcYfncPCav1GckW4OZKakcbLtAdW0goSgGWloxcM+j2E6Z1JZ9tOTTkFN77EvX0ZWZLmYM7sUN1meFnKbVxrtGKlMelwKwlT252c65PAKa9zsTaRUKvN7XclyxZAYVCsiCQ/V08NXhNgXJXcoKUAeGNf6wruOyvRU9teia8fAiuHJoY58WC8jC4nYG3iZTnl+zNj2A5xuEUpYHhjUfe3rNJeK7CwUpJKlbxopu5mnW9AE9ITfI490eaapRLTojOBDJNqCORAtbggMD46fLeCOzzB8Gl70U2p5P34F92Sn6mgERFKh/10XwJcj4ZIeexbQK8lqQ2cIanDN9dAmbvavPTY8grbANuq+vXDGxjIjfxapqzsSPqUJ5KnfTQyLq5NWwquR9t38XvHZfktkd140BFKwIUAIlKKaFfYXXtM=
89 89 564f55b251224f16508dd1311452db7780dafe2b 0 iQIVAwUAU1BmFSBXgaxoKi1yAQJ2Aw//bjK++xJuZCIdktg/i5FxBwoxdbipfTkKsN/YjUwrEmroYM8IkqIsO+U54OGCYWr3NPJ3VS8wUQeJ+NF3ffcjmjC297R9J+X0c5G90DdQUYX44jG/tP8Tqpev4Q7DLCXT26aRwEMdJQpq0eGaqv55E5Cxnyt3RrLCqe7RjPresZFg7iYrro5nq8TGYwBhessHXnCix9QI0HtXiLpms+0UGz8Sbi9nEYW+M0OZCyO1TvykCpFzEsLNwqqtFvhOMD/AMiWcTKNUpjmOn3V83xjWl+jnDUt7BxJ7n1efUnlwl4IeWlSUb73q/durtaymb97cSdKFmXHv4pdAShQEuEpVVGO1WELsKoXmbj30ItTW2V3KvNbjFsvIdDo7zLCpXyTq1HC56W7QCIMINX2qT+hrAMWC12tPQ05f89Cv1+jpk6eOPFqIHFdi663AjyrnGll8nwN7HJWwtA5wTXisu3bec51FAq4yJTzPMtOE9spz36E+Go2hZ1cAv9oCSceZcM0wB8KiMfaZJKNZNZk1jvsdiio4CcdASOFQPOspz07GqQxVP7W+F1Oz32LgwcNAEAS/f3juwDj45GYfAWJrTh3dnJy5DTD2LVC7KtkxxUVkWkqxivnDB9anj++FN9eyekxzut5eFED+WrCfZMcSPW0ai7wbslhKUhCwSf/v3DgGwsM=
90 90 2195ac506c6ababe86985b932f4948837c0891b5 0 iQIVAwUAU2LO/CBXgaxoKi1yAQI/3w/7BT/VRPyxey6tYp7i5cONIlEB3gznebGYwm0SGYNE6lsvS2VLh6ztb+j4eqOadr8Ssna6bslBx+dVsm+VuJ+vrNLMucD5Uc+fhn6dAfVqg+YBzUEaedI5yNsJizcJUDI7hUVsxiPiiYd9hchCWJ+z2tVt2jCyG2lMV2rbW36AM89sgz/wn5/AaAFsgoS6up/uzA3Tmw+qZSO6dZChb4Q8midIUWEbNzVhokgYcw7/HmjmvkvV9RJYiG8aBnMdQmxTE69q2dTjnnDL6wu61WU2FpTN09HRFbemUqzAfoJp8MmXq6jWgfLcm0cI3kRo7ZNpnEkmVKsfKQCXXiaR4alt9IQpQ6Jl7LSYsYI+D4ejpYysIsZyAE8qzltYhBKJWqO27A5V4WdJsoTgA/RwKfPRlci4PY8I4N466S7PBXVz/Cc5EpFkecvrgceTmBafb8JEi+gPiD2Po4vtW3bCeV4xldiEXHeJ77byUz7fZU7jL78SjJVOCCQTJfKZVr36kTz3KlaOz3E700RxzEFDYbK7I41mdANeQBmNNbcvRTy5ma6W6I3McEcAH4wqM5fFQ8YS+QWJxk85Si8KtaDPqoEdC/0dQPavuU/jAVjhV8IbmmkOtO7WvOHQDBtrR15yMxGMnUwMrPHaRNKdHNYRG0LL7lpCtdMi1mzLQgHYY9SRYvI=
91 91 269c80ee5b3cb3684fa8edc61501b3506d02eb10 0 iQIVAwUAU4uX5CBXgaxoKi1yAQLpdg/+OxulOKwZN+Nr7xsRhUijYjyAElRf2mGDvMrbAOA2xNf85DOXjOrX5TKETumf1qANA5cHa1twA8wYgxUzhx30H+w5EsLjyeSsOncRnD5WZNqSoIq2XevT0T4c8xdyNftyBqK4h/SC/t2h3vEiSCUaGcfNK8yk4XO45MIk4kk9nlA9jNWdA5ZMLgEFBye2ggz0JjEAPUkVDqlr9sNORDEbnwZxGPV8CK9HaL/I8VWClaFgjKQmjqV3SQsNFe2XPffzXmIipFJ+ODuXVxYpAsvLiGmcfuUfSDHQ4L9QvjBsWe1PgYMr/6CY/lPYmR+xW5mJUE9eIdN4MYcXgicLrmMpdF5pToNccNCMtfa6CDvEasPRqe2bDzL/Q9dQbdOVE/boaYBlgmYLL+/u+dpqip9KkyGgbSo9uJzst1mLTCzJmr5bw+surul28i9HM+4+Lewg4UUdHLz46no1lfTlB5o5EAhiOZBTEVdoBaKfewVpDa/aBRvtWX7UMVRG5qrtA0sXwydN00Jaqkr9m20W0jWjtc1ZC72QCrynVHOyfIb2rN98rnuy2QN4bTvjNpNjHOhhhPTOoVo0YYPdiUupm46vymUTQCmWsglU4Rlaa3vXneP7JenL5TV8WLPs9J28lF0IkOnyBXY7OFcpvYO1euu7iR1VdjfrQukMyaX18usymiA=
92 92 2d8cd3d0e83c7336c0cb45a9f88638363f993848 0 iQIVAwUAU7OLTCBXgaxoKi1yAQJ+pw/+M3yOesgf55eo3PUTZw02QZxDyEg9ElrRc6664/QFXaJuYdz8H3LGG/NYs8uEdYihiGpS1Qc70jwd1IoUlrCELsaSSZpzWQ+VpQFX29aooBoetfL+8WgqV8zJHCtY0E1EBg/Z3ZL3n2OS++fVeWlKtp5mwEq8uLTUmhIS7GseP3bIG/CwF2Zz4bzhmPGK8V2s74aUvELZLCfkBE1ULNs7Nou1iPDGnhYOD53eq1KGIPlIg1rnLbyYw5bhS20wy5IxkWf2eCaXfmQBTG61kO5m3nkzfVgtxmZHLqYggISTJXUovfGsWZcp5a71clCSMVal+Mfviw8L/UPHG0Ie1c36djJiFLxM0f2HlwVMjegQOZSAeMGg1YL1xnIys2zMMsKgEeR+JISTal1pJyLcT9x5mr1HCnUczSGXE5zsixN+PORRnZOqcEZTa2mHJ1h5jJeEm36B/eR57BMJG+i0QgZqTpLzYTFrp2eWokGMjFB1MvgAkL2YoRsw9h6TeIwqzK8mFwLi28bf1c90gX9uMbwY/NOqGzfQKBR9bvCjs2k/gmJ+qd5AbC3DvOxHnN6hRZUqNq76Bo4F+CUVcjQ/NXnfnOIVNbILpl5Un5kl+8wLFM+mNxDxduajaUwLhSHZofKmmCSLbuuaGmQTC7a/4wzhQM9e5dX0X/8sOo8CptW7uw4=
93 93 6c36dc6cd61a0e1b563f1d51e55bdf4dacf12162 0 iQIVAwUAU8n97yBXgaxoKi1yAQKqcA/+MT0VFoP6N8fHnlxj85maoM2HfZbAzX7oEW1B8F1WH6rHESHDexDWIYWJ2XnEeTD4GCXN0/1p+O/I0IMPNzqoSz8BU0SR4+ejhRkGrKG7mcFiF5G8enxaiISn9nmax6DyRfqtOQBzuXYGObXg9PGvMS6zbR0SorJK61xX7fSsUNN6BAvHJfpwcVkOrrFAIpEhs/Gh9wg0oUKCffO/Abs6oS+P6nGLylpIyXqC7rKZ4uPVc6Ljh9DOcpV4NCU6kQbNE7Ty79E0/JWWLsHOEY4F4WBzI7rVh7dOkRMmfNGaqvKkuNkJOEqTR1o1o73Hhbxn4NU7IPbVP/zFKC+/4QVtcPk2IPlpK1MqA1H2hBNYZhJlNhvAa7LwkIxM0916/zQ8dbFAzp6Ay/t/L0tSEcIrudTz2KTrY0WKw+pkzB/nTwaS3XZre6H2B+gszskmf1Y41clkIy/nH9K7zBuzANWyK3+bm40vmMoBbbnsweUAKkyCwqm4KTyQoYQWzu/ZiZcI+Uuk/ajJ9s7EhJbIlSnYG9ttWL/IZ1h+qPU9mqVO9fcaqkeL/NIRh+IsnzaWo0zmHU1bK+/E29PPGGf3v6+IEJmXg7lvNl5pHiMd2tb7RNO/UaNSv1Y2E9naD4FQwSWo38GRBcnRGuKCLdZNHGUR+6dYo6BJCGG8wtZvNXb3TOo=
94 94 3178e49892020336491cdc6945885c4de26ffa8b 0 iQIVAwUAU9whUCBXgaxoKi1yAQJDKxAAoGzdHXV/BvZ598VExEQ8IqkmBVIP1QZDVBr/orMc1eFM4tbGKxumMGbqgJsg+NetI0irkh/YWeJQ13lT4Og72iJ+4UC9eF9pcpUKr/0eBYdU2N/p2MIbVNWh3aF5QkbuQpSri0VbHOWkxqwoqrrwXEjgHaKYP4PKh+Dzukax4yzBUIyzAG38pt4a8hbjnozCl2uAikxk4Ojg+ZufhPoZWgFEuYzSfK5SrwVKOwuxKYFGbbVGTQMIXLvBhOipAmHp4JMEYHfG85kwuyx/DCDbGmXKPQYQfClwjJ4ob/IwG8asyMsPWs+09vrvpVO08HBuph3GjuiWJ1fhEef/ImWmZdQySI9Y4SjwP4dMVfzLCnY+PYPDM9Sq/5Iee13gI2lVM2NtAfQZPXh9l8u6SbCir1UhMNMx0qVMkqMAATmiZ+ETHCO75q4Wdcmnv5fk2PbvaGBVtrHGeiyuz5mK/j4cMbd0R9R0hR1PyC4dOhNqOnbqELNIe0rKNByG1RkpiQYsqZTU6insmnZrv4fVsxfA4JOObPfKNT4oa24MHS73ldLFCfQAuIxVE7RDJJ3bHeh/yO6Smo28FuVRldBl5e+wj2MykS8iVcuSa1smw6gJ14iLBH369nlR3fAAQxI0omVYPDHLr7SsH3vJasTaCD7V3SL4lW6vo/yaAh4ImlTAE+Y=
95 95 5dc91146f35369949ea56b40172308158b59063a 0 iQIVAwUAVAUgJyBXgaxoKi1yAQJkEg/9EXFZvPpuvU7AjII1dlIT8F534AXrO30+H6hweg+h2mUCSb/mZnbo3Jr1tATgBWbIKkYmmsiIKNlJMFNPZTWhImGcVA93t6v85tSFiNJRI2QP9ypl5wTt2KhiS/s7GbUYCtPDm6xyNYoSvDo6vXJ5mfGlgFZY5gYLwEHq/lIRWLWD4EWYWbk5yN+B7rHu6A1n3yro73UR8DudEhYYqC23KbWEqFOiNd1IGj3UJlxIHUE4AcDukxbfiMWrKvv1kuT/vXak3X7cLXlO56aUbMopvaUflA3PSr3XAqynDd69cxACo/T36fuwzCQN4ICpdzGTos0rQALSr7CKF5YP9LMhVhCsOn0pCsAkSiw4HxxbcHQLl+t+0rchNysc4dWGwDt6GAfYcdm3fPtGFtA3qsN8lOpCquFH3TAZ3TrIjLFoTOk6s1xX1x5rjP/DAHc/y3KZU0Ffx3TwdQEEEIFaAXaxQG848rdfzV42+dnFnXh1G/MIrKAmv3ZSUkQ3XJfGc7iu82FsYE1NLHriUQDmMRBzCoQ1Rn1Kji119Cxf5rsMcQ6ZISR1f0jDCUS/qxlHvSqETLp8H63NSUfvuKSC7uC6pGvq9XQm1JRNO5UuJfK6tHzy0jv9bt2IRo2xbmvpDu9L5oHHd3JePsAmFmbrFf/7Qem3JyzEvRcpdcdHtefxcxc=
96 96 f768c888aaa68d12dd7f509dcc7f01c9584357d0 0 iQIVAwUAVCxczSBXgaxoKi1yAQJYiA/9HnqKuU7IsGACgsUGt+YaqZQumg077Anj158kihSytmSts6xDxqVY1UQB38dqAKLJrQc7RbN0YK0NVCKZZrx/4OqgWvjiL5qWUJKqQzsDx4LGTUlbPlZNZawW2urmmYW6c9ZZDs1EVnVeZMDrOdntddtnBgtILDwrZ8o3U7FwSlfnm03vTkqUMj9okA3AsI8+lQIlo4qbqjQJYwvUC1ZezRdQwaT1LyoWUgjmhoZ1XWcWKOs9baikaJr6fMv8vZpwmaOY1+pztxYlROeSPVWt9P6yOf0Hi/2eg8AwSZLaX96xfk9IvXUSItg/wjTWP9BhnNs/ulwTnN8QOgSXpYxH4RXwsYOyU7BvwAekA9xi17wuzPrGEliScplxICIZ7jiiwv/VngMvM9AYw2mNBvZt2ZIGrrLaK6pq/zBm5tbviwqt5/8U5aqO8k1O0e4XYm5WmQ1c2AkXRO+xwvFpondlSF2y0flzf2FRXP82QMfsy7vxIP0KmaQ4ex+J8krZgMjNTwXh2M4tdYNtu5AehJQEP3l6giy2srkMDuFLqoe1yECjVlGdgA86ve3J/84I8KGgsufYMhfQnwHHGXCbONcNsDvO0QOee6CIQVcdKCG7dac3M89SC6Ns2CjuC8BIYDRnxbGQb7Fvn4ZcadyJKKbXQJzMgRV25K6BAwTIdvYAtgU=
97 97 7f8d16af8cae246fa5a48e723d48d58b015aed94 0 iQIVAwUAVEL0XyBXgaxoKi1yAQJLkRAAjZhpUju5nnSYtN9S0/vXS/tjuAtBTUdGwc0mz97VrM6Yhc6BjSCZL59tjeqQaoH7Lqf94pRAtZyIB2Vj/VVMDbM+/eaoSr1JixxppU+a4eqScaj82944u4C5YMSMC22PMvEwqKmy87RinZKJlFwSQ699zZ5g6mnNq8xeAiDlYhoF2QKzUXwnKxzpvjGsYhYGDMmVS1QPmky4WGvuTl6KeGkv8LidKf7r6/2RZeMcq+yjJ7R0RTtyjo1cM5dMcn/jRdwZxuV4cmFweCAeoy5guV+X6du022TpVndjOSDoKiRgdk7pTuaToXIy+9bleHpEo9bwKx58wvOMg7sirAYjrA4Xcx762RHiUuidTTPktm8sNsBQmgwJZ8Pzm+8TyHjFGLnBfeiDbQQEdLCXloz0jVOVRflDfMays1WpAYUV8XNOsgxnD2jDU8L0NLkJiX5Y0OerGq9AZ+XbgJFVBFhaOfsm2PEc3jq00GOLzrGzA+4b3CGpFzM3EyK9OnnwbP7SqCGb7PJgjmQ7IO8IWEmVYGaKtWONSm8zRLcKdH8xuk8iN1qCkBXMty/wfTEVTkIlMVEDbslYkVfj0rAPJ8B37bfe0Yz4CEMkCmARIB1rIOpMhnavXGuD50OP2PBBY/8DyC5aY97z9f04na/ffk+l7rWaHihjHufKIApt5OnfJ1w=
98 98 ced632394371a36953ce4d394f86278ae51a2aae 0 iQIVAwUAVFWpfSBXgaxoKi1yAQLCQw//cvCi/Di3z/2ZEDQt4Ayyxv18gzewqrYyoElgnEzr5uTynD9Mf25hprstKla/Y5C6q+y0K6qCHPimGOkz3H+wZ2GVUgLKAwMABkfSb5IZiLTGaB2DjAJKZRwB6h43wG/DSFggE3dYszWuyHW88c72ZzVF5CSNc4J1ARLjDSgnNYJQ6XdPw3C9KgiLFDXzynPpZbPg0AK5bdPUKJruMeIKPn36Hx/Tv5GXUrbc2/lcnyRDFWisaDl0X/5eLdA+r3ID0cSmyPLYOeCgszRiW++KGw+PPDsWVeM3ZaZ9SgaBWU7MIn9A7yQMnnSzgDbN+9v/VMT3zbk1WJXlQQK8oA+CCdHH9EY33RfZ6ST/lr3pSQbUG1hdK6Sw+H6WMkOnnEk6HtLwa4xZ3HjDpoPkhVV+S0C7D5WWOovbubxuBiW5v8tK4sIOS6bAaKevTBKRbo4Rs6qmS/Ish5Q+z5bKst80cyEdi4QSoPZ/W+6kh1KfOprMxynwPQhtEcDYW2gfLpgPIM7RdXPKukLlkV2qX3eF/tqApGU4KNdP4I3N80Ri0h+6tVU/K4TMYzlRV3ziLBumJ4TnBrTHU3X6AfZUfTgslQzokX8/7a3tbctX6kZuJPggLGisdFSdirHbrUc+y5VKuJtPr+LxxgZKRFbs2VpJRem6FvwGNyndWLv32v0GMtQ=
99 99 643c58303fb0ec020907af28b9e486be299ba043 0 iQIVAwUAVGKawCBXgaxoKi1yAQL7zxAAjpXKNvzm/PKVlTfDjuVOYZ9H8w9QKUZ0vfrNJrN6Eo6hULIostbdRc25FcMWocegTqvKbz3IG+L2TKOIdZJS9M9QS4URybUd37URq4Jai8kMiJY31KixNNnjO2G1B39aIXUhY+EPx12aY31/OVy4laXIVtN6qpSncjo9baXSOMZmx6RyA1dbyfwXRjT/aODCGHZXgLJHS/kHlkCsThVlqYQ4rUCDkXIeMqIGF1CR0KjfmKpp1fS14OMgpLgdnt9+pnBZ+qcf1YdpOeQob1zwunjMYOyYC74FyOTdwaynU2iDsuBrmkE8kgEedIn7+WWe9fp/6TQJMVOeTQPZBNSRRSUYCw5Tg/0L/+jLtzjc2mY4444sDPbR7scrtU+/GtvlR5z0Y5pofwEdFME7PZNOp9a4kMiSa7ZERyGdN7U1pDu9JU6BZRz+nPzW217PVnTF7YFV/GGUzMTk9i7EZb5M4T9r9gfxFSMPeT5ct712CdBfyRlsSbSWk8XclTXwW385kLVYNDtOukWrvEiwxpA14Xb/ZUXbIDZVf5rP2HrZHMkghzeUYPjRn/IlgYUt7sDNmqFZNIc9mRFrZC9uFQ/Nul5InZodNODQDM+nHpxaztt4xl4qKep8SDEPAQjNr8biC6T9MtLKbWbSKDlqYYNv0pb2PuGub3y9rvkF1Y05mgM=
100 100 902554884335e5ca3661d63be9978eb4aec3f68a 0 iQIVAwUAVH0KMyBXgaxoKi1yAQLUKxAAjgyYpmqD0Ji5OQ3995yX0dmwHOaaSuYpq71VUsOMYBskjH4xE2UgcTrX8RWUf0E+Ya91Nw3veTf+IZlYLaWuOYuJPRzw+zD1sVY8xprwqBOXNaA7n8SsTqZPSh6qgw4S0pUm0xJUOZzUP1l9S7BtIdJP7KwZ7hs9YZev4r9M3G15xOIPn5qJqBAtIeE6f5+ezoyOpSPZFtLFc4qKQ/YWzOT5uuSaYogXgVByXRFaO84+1TD93LR0PyVWxhwU9JrDU5d7P/bUTW1BXdjsxTbBnigWswKHC71EHpgz/HCYxivVL30qNdOm4Fow1Ec2GdUzGunSqTPrq18ScZDYW1x87f3JuqPM+ce/lxRWBBqP1yE30/8l/Us67m6enWXdGER8aL1lYTGOIWAhvJpfzv9KebaUq1gMFLo6j+OfwR3rYPiCHgi20nTNBa+LOceWFjCGzFa3T9UQWHW/MBElfAxK65uecbGRRYY9V1/+wxtTUiS6ixpmzL8S7uUd5n6oMaeeMiD82NLgPIbMyUHQv6eFEcCj0U9NT2uKbFRmclMs5V+8D+RTCsLJ55R9PD5OoRw/6K/coqqPShYmJvgYsFQPzXVpQdCRae31xdfGFmd5KUetqyrT+4GUdJWzSm0giSgovpEJNxXglrvNdvSO7fX3R1oahhwOwtGqMwNilcK+iDw=
101 101 6dad422ecc5adb63d9fa649eeb8e05a5f9bc4900 0 iQIVAwUAVJNALCBXgaxoKi1yAQKgmw/+OFbHHOMmN2zs2lI2Y0SoMALPNQBInMBq2E6RMCMbfcS9Cn75iD29DnvBwAYNWaWsYEGyheJ7JjGBiuNKPOrLaHkdjG+5ypbhAfNDyHDiteMsXfH7D1L+cTOAB8yvhimZHOTTVF0zb/uRyVIPNowAyervUVRjDptzdfcvjUS+X+/Ufgwms6Y4CcuzFLFCxpmryJhLtOpwUPLlzIqeNkFOYWkHanCgtZX03PNIWhorH3AWOc9yztwWPQ+kcKl3FMlyuNMPhS/ElxSF6GHGtreRbtP+ZLoSIOMb2QBKpGDpZLgJ3JQEHDcZ0h5CLZWL9dDUJR3M8pg1qglqMFSWMgRPTzxPS4QntPgT/Ewd3+U5oCZUh052fG41OeCZ0CnVCpqi5PjUIDhzQkONxRCN2zbjQ2GZY7glbXoqytissihEIVP9m7RmBVq1rbjOKr+yUetJ9gOZcsMtZiCEq4Uj2cbA1x32MQv7rxwAgQP1kgQ62b0sN08HTjQpI7/IkNALLIDHoQWWr45H97i34qK1dd5uCOnYk7juvhGNX5XispxNnC01/CUVNnqChfDHpgnDjgT+1H618LiTgUAD3zo4IVAhCqF5XWsS4pQEENOB3Msffi62fYowvJx7f/htWeRLZ2OA+B85hhDiD4QBdHCRoz3spVp0asNqDxX4f4ndj8RlzfM=
102 102 1265a3a71d75396f5d4cf6935ae7d9ba5407a547 0 iQIVAwUAVKXKYCBXgaxoKi1yAQIfsA/+PFfaWuZ6Jna12Y3MpKMnBCXYLWEJgMNlWHWzwU8lD26SKSlvMyHQsVZlkld2JmFugUCn1OV3OA4YWT6BA7VALq6Zsdcu5Dc8LRbyajBUkzGRpOUyWuFzjkCpGVbrQzbCR/bel/BBXzSqL4ipdtWgJ4y+WpZIhWkNXclBkR52b5hUTjN9vzhyhVVI7eURGwIEf7vVs1fDOcEGtaGY/ynzMTzyxIDsEEygCZau86wpKlYlqhCgxKDyzyGfpH3B1UlNGFt1afW8AWe1eHjdqC7TJZpMqmQ/Ju8vco8Xht6OXw4ZLHj7y39lpccfKTBLiK/cAKSg+xgyaH/BLhzoEkNAwYSFAB4i4IoV0KUC8nFxHfsoswBxJnMqU751ziMrpZ/XHZ1xQoEOdXgz2I04vlRn8xtynOVhcgjoAXwtbia7oNh/qCH/hl5/CdAtaawuCxJBf237F+cwur4PMAAvsGefRfZco/DInpr3qegr8rwInTxlO48ZG+o5xA4TPwT0QQTUjMdNfC146ZSbp65wG7VxJDocMZ8KJN/lqPaOvX+FVYWq4YnJhlldiV9DGgmym1AAaP0D3te2GcfHXpt/f6NYUPpgiBHy0GnOlNcQyGnnONg1A6oKVWB3k7WP28+PQbQEiCIFk2nkf5VZmye7OdHRGKOFfuprYFP1WwTWnVoNX9c=
103 103 db8e3f7948b1fdeb9ad12d448fc3525759908b9f 0 iQIVAwUAVLsaciBXgaxoKi1yAQKMIA//a90/GvySL9UID+iYvzV2oDaAPDD0T+4Xs43I7DT5NIoDz+3yq2VV54XevQe5lYiURmsb/Q9nX2VR/Qq1J9c/R6Gy+CIfmJ3HzMZ0aAX8ZlZgQPYZKh/2kY5Ojl++k6MTqbqcrICNs4+UE/4IAxPyOfu5gy7TpdJmRZo2J3lWVC2Jbhd02Mzb+tjtfbOM+QcQxPwt9PpqmQszJceyVYOSm3jvD1uJdSOC04tBQrQwrxktQ09Om0LUMMaB5zFXpJtqUzfw7l4U4AaddEmkd3vUfLtHxc21RB01c3cpe2dJnjifDfwseLsI8rS4jmi/91c74TeBatSOhvbqzEkm/p8xZFXE4Uh+EpWjTsVqmfQaRq6NfNCR7I/kvGv8Ps6w8mg8uX8fd8lx+GJbodj+Uy0X3oqHyqPMky/df5i79zADBDuz+yuxFfDD9i22DJPIYcilfGgwpIUuO2lER5nSMVmReuWTVBnT6SEN66Q4KR8zLtIRr+t1qUUCy6wYbgwrdHVCbgMF8RPOVZPjbs17RIqcHjch0Xc7bShKGhQg4WHDjXHK61w4tOa1Yp7jT6COkl01XC9BLcGxJYKFvNCbeDZQGvVgJNoEvHxBxD9rGMVRjfuxeJawc2fGzZJn0ySyLDW0pfd4EJNgTh9bLdPjWz2VlXqn4A6bgaLgTPqjmN0VBXw=
104 104 fbdd5195528fae4f41feebc1838215c110b25d6a 0 iQIVAwUAVM7fBCBXgaxoKi1yAQKoYw/+LeIGcjQmHIVFQULsiBtPDf+eGAADQoP3mKBy+eX/3Fa0qqUNfES2Q3Y6RRApyZ1maPRMt8BvvhZMgQsu9QIrmf3zsFxZGFwoyrIj4hM3xvAbEZXqmWiR85/Ywd4ImeLaZ0c7mkO1/HGF1n2Mv47bfM4hhNe7VGJSSrTY4srFHDfk4IG9f18DukJVzRD9/dZeBw6eUN1ukuLEgQAD5Sl47bUdKSetglOSR1PjXfZ1hjtz5ywUyBc5P9p3LC4wSvlcJKl22zEvB3L0hkoDcPsdIPEnJAeXxKlR1rQpoA3fEgrstGiSNUW/9Tj0VekAHLO95SExmQyoG/AhbjRRzIj4uQ0aevCJyiAhkv+ffOSf99PMW9L1k3tVjLhpMWEz9BOAWyX7cDFWj5t/iktI046O9HGN9SGVx18e9xM6pEgRcLA2TyjEmtkA4jX0JeN7WeCweMLiSxyGP7pSPSJdpJeXaFtRpSF62p/G0Z5wN9s05LHqDyqNVtCvg4WjkuV5LZSdLbMcYBWGBxQzCG6qowXFXIawmbaFiBZwTfOgNls9ndz5RGupAaxY317prxPFv/pXoesc1P8bdK09ZvjhbmmD66Q/BmS2dOMQ8rXRjuVdlR8j2QBtFZxekMcRD02nBAVnwHg1VWQMIRaGjdgmW4wOkirWVn7me177FnBxrxW1tG4=
105 105 5b4ed033390bf6e2879c8f5c28c84e1ee3b87231 0 iQIVAwUAVPQL9CBXgaxoKi1yAQJIXxAAtD2hWhaKa+lABmCOYG92FE/WdqY/91Xv5atTL8Xeko/MkirIKZiOuxNWX+J34TVevINZSWmMfDSc5TkGxktL9jW/pDB/CXn+CVZpxRabPYFH9HM2K3g8VaTV1MFtV2+feOMDIPCmq5ogMF9/kXjmifiEBrJcFsE82fdexJ3OHoOY4iHFxEhh3GzvNqEQygk4VeU6VYziNvSQj9G//PsK3Bmk7zm5ScsZcMVML3SIYFuej1b1PI1v0N8mmCRooVNBGhD/eA0iLtdh/hSb9s/8UgJ4f9HOcx9zqs8V4i14lpd/fo0+yvFuVrVbWGzrDrk5EKLENhVPwvc1KA32PTQ4Z9u7VQIBIxq3K5lL2VlCMIYc1BSaSQBjuiLm8VdN6iDuf5poNZhk1rvtpQgpxJzh362dlGtR/iTJuLCeW7gCqWUAorLTeHy0bLQ/jSOeTAGys8bUHtlRL4QbnhLbUmJmRYVvCJ+Yt1aTgTSNcoFjoLJarR1169BXgdCA38BgReUL6kB224UJSTzB1hJUyB2LvCWrXZMipZmR99Iwdq7MePD3+AoSIXQNUMY9blxuuF5x7W2ikNXmVWuab4Z8rQRtmGqEuIMBSunxAnZSn+i8057dFKlq+/yGy+WW3RQg+RnLnwZs1zCDTfu98/GT5k5hFpjXZeUWWiOVwQJ5HrqncCw=
106 106 07a92bbd02e5e3a625e0820389b47786b02b2cea 0 iQIVAwUAVPSP9SBXgaxoKi1yAQLkBQ//dRQExJHFepJfZ0gvGnUoYI4APsLmne5XtfeXJ8OtUyC4a6RylxA5BavDWgXwUh9BGhOX2cBSz1fyvzohrPrvNnlBrYKAvOIJGEAiBTXHYTxHINEKPtDF92Uz23T0Rn/wnSvvlbWF7Pvd+0DMJpFDEyr9n6jvVLR7mgxMaCqZbVaB1W/wTwDjni780WgVx8OPUXkLx3/DyarMcIiPeI5UN+FeHDovTsBWFC95msFLm80PMRPuHOejWp65yyEemGujZEPO2D5VVah7fshM2HTz63+bkEBYoqrftuv3vXKBRG78MIrUrKpqxmnCKNKDUUWJ4yk3+NwuOiHlKdly5kZ7MNFaL73XKo8HH287lDWz0lIazs91dQA9a9JOyTsp8YqGtIJGGCbhrUDtiQJ199oBU84mw3VH/EEzm4mPv4sW5fm7BnnoH/a+9vXySc+498rkdLlzFwxrQkWyJ/pFOx4UA3mCtGQK+OSwLPc+X4SRqA4fiyqKxVAL1kpLTSDL3QA82I7GzBaXsxUXzS4nmteMhUyzTdwAhKVydL0gC3d7NmkAFSyRjdGzutUUXshYxg0ywRgYebe8uzJcTj4nNRgaalYLdg3guuDulD+dJmILsrcLmA6KD/pvfDn8PYt+4ZjNIvN2E9GF6uXDu4Ux+AlOTLk9BChxUF8uBX9ev5cvWtQ=
107 107 2e2e9a0750f91a6fe0ad88e4de34f8efefdcab08 0 iQIVAwUAVRw4nyBXgaxoKi1yAQIFExAAkbCPtLjQlJvPaYCL1KhNR+ZVAmn7JrFH3XhvR26RayYbs4NxR3W1BhwhDy9+W+28szEx1kQvmr6t1bXAFywY0tNJOeuLU7uFfmbgAfYgkQ9kpsQNqFYkjbCyftw0S9vX9VOJ9DqUoDWuKfX7VzjkwE9dCfKI5F+dvzxnd6ZFjB85nyHBQuTZlzXl0+csY212RJ2G2j/mzEBVyeZj9l7Rm+1X8AC1xQMWRJGiyd0b7nhYqoOcceeJFAV1t9QO4+gjmkM5kL0orjxTnuVsxPTxcC5ca1BfidPWrZEto3duHWNiATGnCDylxxr52BxCAS+BWePW9J0PROtw1pYaZ9pF4N5X5LSXJzqX7ZiNGckxqIjry09+Tbsa8FS0VkkYBEiGotpuo4Jd05V6qpXfW2JqAfEVo6X6aGvPM2B7ZUtKi30I4J+WprrOP3WgZ/ZWHe1ERYKgjDqisn3t/D40q30WQUeQGltGsOX0Udqma2RjBugO5BHGzJ2yer4GdJXg7q1OMzrjAEuz1IoKvIB/o1pg86quVA4H2gQnL1B8t1M38/DIafyw7mrEY4Z3GL44Reev63XVvDE099Vbhqp7ufwq81Fpq7Xxa5vsr9SJ+8IqqQr8AcYSuK3G3L6BmIuSUAYMRqgl35FWoWkGyZIG5c6K6zI8w5Pb0aGi6Lb2Wfb9zbc=
108 108 e89f909edffad558b56f4affa8239e4832f88de0 0 iQIVAwUAVTBozCBXgaxoKi1yAQLHeg/+IvfpPmG7OSqCoHvMVETYdrqT7lKCwfCQWMFOC/2faWs1n4R/qQNm6ckE5OY888RK8tVQ7ue03Pg/iyWgQlYfS7Njd3WPjS4JsnEBxIvuGkIu6TPIXAUAH0PFTBh0cZEICDpPEVT2X3bPRwDHA+hUE9RrxM5zJ39Fpk/pTYCjQ9UKfEhXlEfka75YB39g2Y/ssaSbn5w/tAAx8sL72Y4G96D4IV2seLHZhB3VQ7UZKThEWn6UdVOoKj+urIwGaBYMeekGVtHSh6fnHOw3EtDO9mQ5HtAz2Bl4CwRYN8eSN+Dwgr+mdk8MWpQQJ+i1A8jUhUp8gn1Pe5GkIH4CWZ9+AvLLnshe2MkVaTT1g7EQk37tFkkdZDRBsOHIvpF71B9pEA1gMUlX4gKgh5YwukgpQlDmFCfY7XmX6eXw9Ub+EckEwYuGMz7Fbwe9J/Ce4DxvgJgq3/cu/jb3bmbewH6tZmcrlqziqqA8GySIwcURnF1c37e7+e7x1jhFJfCWpHzvCusjKhUp9tZsl9Rt1Bo/y41QY+avY7//ymhbwTMKgqjzCYoA+ipF4JfZlFiZF+JhvOSIFb0ltkfdqKD+qOjlkFaglvQU1bpGKLJ6cz4Xk2Jqt5zhcrpyDMGVv9aiWywCK2ZP34RNaJ6ZFwzwdpXihqgkm5dBGoZ4ztFUfmjXzIg=
109 109 8cc6036bca532e06681c5a8fa37efaa812de67b5 0 iQIVAwUAVUP0xCBXgaxoKi1yAQLIChAAme3kg1Z0V8t5PnWKDoIvscIeAsD2s6EhMy1SofmdZ4wvYD1VmGC6TgXMCY7ssvRBhxqwG3GxwYpwELASuw2GYfVot2scN7+b8Hs5jHtkQevKbxarYni+ZI9mw/KldnJixD1yW3j+LoJFh/Fu6GD2yrfGIhimFLozcwUu3EbLk7JzyHSn7/8NFjLJz0foAYfcbowU9/BFwNVLrQPnsUbWcEifsq5bYso9MBO9k+25yLgqHoqMbGpJcgjubNy1cWoKnlKS+lOJl0/waAk+aIjHXMzFpRRuJDjxEZn7V4VdV5d23nrBTcit1BfMzga5df7VrLPVRbom1Bi0kQ0BDeDex3hHNqHS5X+HSrd/njzP1xp8twG8hTE+njv85PWoGBTo1eUGW/esChIJKA5f3/F4B9ErgBNNOKnYmRgxixd562OWAwAQZK0r0roe2H/Mfg2VvgxT0kHd22NQLoAv0YI4jcXcCFrnV/80vHUQ8AsAYAbkLcz1jkfk3YwYDP8jbJCqcwJRt9ialYKJwvXlEe0TMeGdq7EjCO0z/pIpu82k2R/C0FtCFih3bUvJEmWoVVx8UGkDDQEORLbzxQCt0IOiQGFcoCCxgQmL0x9ZoljCWg5vZuuhU4uSOuRTuM+aa4xoLkeOcvgGRSOXrqfkV8JpWKoJB4dmY2qSuxw8LsAAzK0=
110 110 ed18f4acf435a2824c6f49fba40f42b9df5da7ad 0 iQIVAwUAVWy9mCBXgaxoKi1yAQIm+Q/+I/tV8DC51d4f/6T5OR+motlIx9U5za5p9XUUzfp3tzSY2PutVko/FclajVdFekZsK5pUzlh/GZhfe1jjyEEIr3UC3yWk8hMcvvS+2UDmfy81QxN7Uf0kz4mZOlME6d/fYDzf4cDKkkCXoec3kyZBw7L84mteUcrJoyb5K3fkQBrK5CG/CV7+uZN6b9+quKjtDhDEkAyc6phNanzWNgiHGucEbNgXsKM01HmV1TnN4GXTKx8y2UDalIJOPyes2OWHggibMHbaNnGnwSBAK+k29yaQ5FD0rsA+q0j3TijA1NfqvtluNEPbFOx/wJV4CxonYad93gWyEdgU34LRqqw1bx7PFUvew2/T3TJsxQLoCt67OElE7ScG8evuNEe8/4r3LDnzYFx7QMP5r5+B7PxVpj/DT+buS16BhYS8pXMMqLynFOQkX5uhEM7mNC0JTXQsBMHSDAcizVDrdFCF2OSfQjLpUfFP1VEWX7EInqj7hZrd+GE7TfBD8/rwSBSkkCX2aa9uKyt6Ius1GgQUuEETskAUvvpsNBzZxtvGpMMhqQLGlJYnBbhOmsbOyTSnXU66KJ5e/H3O0KRrF09i74v30DaY4uIH8xG6KpSkfw5s/oiLCtagfc0goUvvojk9pACDR3CKM/jVC63EVp2oUcjT72jUgSLxBgi7siLD8IW86wc=
111 111 540cd0ddac49c1125b2e013aa2ff18ecbd4dd954 0 iQIVAwUAVZRtzSBXgaxoKi1yAQJVLhAAtfn+8OzHIp6wRC4NUbkImAJRLsNTRPKeRSWPCF5O5XXQ84hp+86qjhndIE6mcJSAt4cVP8uky6sEa8ULd6b3ACRBvtgZtsecA9S/KtRjyE9CKr8nP+ogBNqJPaYlTz9RuwGedOd+8I9lYgsnRjfaHSByNMX08WEHtWqAWhSkAz/HO32ardS38cN97fckCgQtA8v7c77nBT7vcw4epgxyUQvMUxUhqmCVVhVfz8JXa5hyJxFrOtqgaVuQ1B5Y/EKxcyZT+JNHPtu3V1uc1awS/w16CEPstNBSFHax5MuT9UbY0mV2ZITP99EkM+vdomh82VHdnMo0i7Pz7XF45ychD4cteroO9gGqDDt9j7hd1rubBX1bfkPsd/APJlyeshusyTj+FqsUD/HDlvM9LRjY1HpU7i7yAlLQQ3851XKMLUPNFYu2r3bo8Wt/CCHtJvB4wYuH+7Wo3muudpU01ziJBxQrUWwPbUrG+7LvO1iEEVxB8l+8Vq0mU3Te7lJi1kGetm6xHNbtvQip5P2YUqvv+lLo/K8KoJDxsh63Y01JGwdmUDb8mnFlRx4J7hQJaoNEvz3cgnc4X8gDJD8sUOjGOPnbtz2QwTY+zj/5+FdLxWDCxNrHX5vvkVdJHcCqEfVvQTKfDMOUeKuhjI7GD7t3xRPfUxq19jjoLPe7aqn1Z1s=
112 112 96a38d44ba093bd1d1ecfd34119e94056030278b 0 iQIVAwUAVarUUyBXgaxoKi1yAQIfJw/+MG/0736F/9IvzgCTF6omIC+9kS8JH0n/JBGPhpbPAHK4xxjhOOz6m3Ia3c3HNoy+I6calwU6YV7k5dUzlyLhM0Z5oYpdrH+OBNxDEsD5SfhclfR63MK1kmgtD33izijsZ++6a+ZaVfyxpMTksKOktWSIDD63a5b/avb6nKY64KwJcbbeXPdelxvXV7TXYm0GvWc46BgvrHOJpYHCDaXorAn6BMq7EQF8sxdNK4GVMNMVk1njve0HOg3Kz8llPB/7QmddZXYLFGmWqICyUn1IsJDfePxzh8sOYVCbxAgitTJHJJmmH5gzVzw7t7ljtmxSJpcUGQJB2MphejmNFGfgvJPB9c6xOCfUqDjxN5m24V+UYesZntpfgs3lpfvE7785IpVnf6WfKG4PKty01ome/joHlDlrRTekKMlpiBapGMfv8EHvPBrOA+5yAHNfKsmcyCcjD1nvXYZ2/X9qY35AhdcBuNkyp55oPDOdtYIHfnOIxlYMKG1dusDx3Z4eveF0lQTzfRVoE5w+k9A2Ov3Zx0aiSkFFevJjrq5QBfs9dAiT8JYgBmWhaJzCtJm12lQirRMKR/br88Vwt/ry/UVY9cereMNvRYUGOGfC8CGGDCw4WDD+qWvyB3mmrXVuMlXxQRIZRJy5KazaQXsBWuIsx4kgGqC5Uo+yzpiQ1VMuCyI=
113 113 21aa1c313b05b1a85f8ffa1120d51579ddf6bf24 0 iQIVAwUAVbuouCBXgaxoKi1yAQL2ng//eI1w51F4YkDiUAhrZuc8RE/chEd2o4F6Jyu9laA03vbim598ntqGjX3+UkOyTQ/zGVeZfW2cNG8zkJjSLk138DHCYl2YPPD/yxqMOJp/a7U34+HrA0aE5Y2pcfx+FofZHRvRtt40UCngicjKivko8au7Ezayidpa/vQbc6dNvGrwwk4KMgOP2HYIfHgCirR5UmaWtNpzlLhf9E7JSNL5ZXij3nt6AgEPyn0OvmmOLyUARO/JTJ6vVyLEtwiXg7B3sF5RpmyFDhrkZ+MbFHgL4k/3y9Lb97WaZl8nXJIaNPOTPJqkApFY/56S12PKYK4js2OgU+QsX1XWvouAhEx6CC6Jk9EHhr6+9qxYFhBJw7RjbswUG6LvJy/kBe+Ei5UbYg9dATf3VxQ6Gqs19lebtzltERH2yNwaHyVeqqakPSonOaUyxGMRRosvNHyrTTor38j8d27KksgpocXzBPZcc1MlS3vJg2nIwZlc9EKM9z5R0J1KAi1Z/+xzBjiGRYg5EZY6ElAw30eCjGta7tXlBssJiKeHut7QTLxCZHQuX1tKxDDs1qlXlGCMbrFqo0EiF9hTssptRG3ZyLwMdzEjnh4ki6gzONZKDI8uayAS3N+CEtWcGUtiA9OwuiFXTwodmles/Mh14LEhiVZoDK3L9TPcY22o2qRuku/6wq6QKsg=
114 114 1a45e49a6bed023deb229102a8903234d18054d3 0 iQIVAwUAVeYa2SBXgaxoKi1yAQLWVA//Q7vU0YzngbxIbrTPvfFiNTJcT4bx9u1xMHRZf6QBIE3KtRHKTooJwH9lGR0HHM+8DWWZup3Vzo6JuWHMGoW0v5fzDyk2czwM9BgQQPfEmoJ/ZuBMevTkTZngjgHVwhP3tHFym8Rk9vVxyiZd35EcxP+4F817GCzD+K7XliIBqVggmv9YeQDXfEtvo7UZrMPPec79t8tzt2UadI3KC1jWUriTS1Fg1KxgXW6srD80D10bYyCkkdo/KfF6BGZ9SkF+U3b95cuqSmOfoyyQwUA3JbMXXOnIefnC7lqRC2QTC6mYDx5hIkBiwymXJBe8rpq/S94VVvPGfW6A5upyeCZISLEEnAz0GlykdpIy/NogzhmWpbAMOus05Xnen6xPdNig6c/M5ZleRxVobNrZSd7c5qI3aUUyfMKXlY1j9oiUTjSKH1IizwaI3aL/MM70eErBxXiLs2tpQvZeaVLn3kwCB5YhywO3LK0x+FNx4Gl90deAXMYibGNiLTq9grpB8fuLg9M90JBjFkeYkrSJ2yGYumYyP/WBA3mYEYGDLNstOby4riTU3WCqVl+eah6ss3l+gNDjLxiMtJZ/g0gQACaAvxQ9tYp5eeRMuLRTp79QQPxv97s8IyVwE/TlPlcSFlEXAzsBvqvsolQXRVi9AxA6M2davYabBYAgRf6rRfgujoU=
115 115 9a466b9f9792e3ad7ae3fc6c43c3ff2e136b718d 0 iQIVAwUAVg1oMSBXgaxoKi1yAQLPag/+Pv0+pR9b9Y5RflEcERUzVu92q+l/JEiP7PHP9pAZuXoQ0ikYBFo1Ygw8tkIG00dgEaLk/2b7E3OxaU9pjU3thoX//XpTcbkJtVhe7Bkjh9/S3dRpm2FWNL9n0qnywebziB45Xs8XzUwBZTYOkVRInYr/NzSo8KNbQH1B4u2g56veb8u/7GtEvBSGnMGVYKhVUZ3jxyDf371QkdafMOJPpogkZcVhXusvMZPDBYtTIzswyxBJ2jxHzjt8+EKs+FI3FxzvQ9Ze3M5Daa7xfiHI3sOgECO8GMVaJi0F49lttKx08KONw8xLlEof+cJ+qxLxQ42X5XOQglJ2/bv5ES5JiZYAti2XSXbZK96p4wexqL4hnaLVU/2iEUfqB9Sj6itEuhGOknPD9fQo1rZXYIS8CT5nGTNG4rEpLFN6VwWn1btIMNkEHw998zU7N3HAOk6adD6zGcntUfMBvQC3V4VK3o7hp8PGeySrWrOLcC/xLKM+XRonz46woJK5D8w8lCVYAxBWEGKAFtj9hv9R8Ye9gCW0Q8BvJ7MwGpn+7fLQ1BVZdV1LZQTSBUr5u8mNeDsRo4H2hITQRhUeElIwlMsUbbN078a4JPOUgPz1+Fi8oHRccBchN6I40QohL934zhcKXQ+NXYN8BgpCicPztSg8O8Y/qvhFP12Zu4tOH8P/dFY=
116 116 b66e3ca0b90c3095ea28dfd39aa24247bebf5c20 0 iQIVAwUAViarTyBXgaxoKi1yAQLZgRAAh7c7ebn7kUWI5M/b/T6qHGjFrU5azkjamzy9IG+KIa2hZgSMxyEM7JJUFqKP4TiWa3sW03bjKGSM/SjjDSSyheX+JIVSPNyKrBwneYhPq45Ius8eiHziClkt0CSsl2d9xDRpI0JmHbN0Pf8nh7rnbL+231GDAOT6dP+2S8K1HGa/0BgEcL9gpYs4/2GyjL+hBSUjyrabzvwe48DCN5W0tEJbGFw5YEADxdfbVbNEuXL81tR4PFGiJxPW0QKRLDB74MWmiWC0gi2ZC/IhbNBZ2sLb6694d4Bx4PVwtiARh63HNXVMEaBrFu1S9NcMQyHvAOc6Zw4izF/PCeTcdEnPk8J1t5PTz09Lp0EAKxe7CWIViy350ke5eiaxO3ySrNMX6d83BOHLDqEFMSWm+ad+KEMT4CJrK4X/n/XMgEFAaU5nWlIRqrLRIeU2Ifc625T0Xh4BgTqXPpytQxhgV5b+Fi6duNk4cy+QnHT4ymxI6BPD9HvSQwc+O7h37qjvJVZmpQX6AP8O75Yza8ZbcYKRIIxZzOkwNpzE5A/vpvP5bCRn7AGcT3ORWmAYr/etr3vxUvt2fQz6U/R4S915V+AeWBdcp+uExu6VZ42M0vhhh0lyzx1VRJGVdV+LoxFKkaC42d0yT+O1QEhSB7WL1D3/a/iWubv6ieB/cvNMhFaK9DA=
117 117 47dd34f2e7272be9e3b2a5a83cd0d20be44293f4 0 iQIVAwUAVjZiKiBXgaxoKi1yAQKBWQ/+JcE37vprSOA5e0ezs/avC7leR6hTlXy9O5bpFnvMpbVMTUp+KfBE4HxTT0KKXKh9lGtNaQ+lAmHuy1OQE1hBKPIaCUd8/1gunGsXgRM3TJ9LwjFd4qFpOMxvOouc6kW5kmea7V9W2fg6aFNjjc/4/0J3HMOIjmf2fFz87xqR1xX8iezJ57A4pUPNViJlOWXRzfa56cI6VUe5qOMD0NRXcY+JyI5qW25Y/aL5D9loeKflpzd53Ue+Pu3qlhddJd3PVkaAiVDH+DYyRb8sKgwuiEsyaBO18IBgC8eDmTohEJt6707A+WNhwBJwp9aOUhHC7caaKRYhEKuDRQ3op++VqwuxbFRXx22XYR9bEzQIlpsv9GY2k8SShU5MZqUKIhk8vppFI6RaID5bmALnLLmjmXfSPYSJDzDuCP5UTQgI3PKPOATorVrqMdKzfb7FiwtcTvtHAXpOgLaY9P9XIePbnei6Rx9TfoHYDvzFWRqzSjl21xR+ZUrJtG2fx7XLbMjEAZJcnjP++GRvNbHBOi57aX0l2LO1peQqZVMULoIivaoLFP3i16RuXXQ/bvKyHmKjJzGrLc0QCa0yfrvV2m30RRMaYlOv7ToJfdfZLXvSAP0zbAuDaXdjGnq7gpfIlNE3xM+kQ75Akcf4V4fK1p061EGBQvQz6Ov3PkPiWL/bxrQ=
118 118 1aa5083cbebbe7575c88f3402ab377539b484897 0 iQIVAwUAVkEdCCBXgaxoKi1yAQKdWg//crTr5gsnHQppuD1p+PPn3/7SMsWJ7bgbuaXgERDLC0zWMfhM2oMmu/4jqXnpangdBVvb0SojejgzxoBo9FfRQiIoKt0vxmmn+S8CrEwb99rpP4M7lgyMAInKPMXQdYxkoDNwL70Afmog6eBtlxjYnu8nmUE/swu6JoVns+tF8UOvIKFYbuCcGujo2pUOQC0xBGiHeHSGRDJOlWmY2d7D/PkQtQE/u/d4QZt7enTHMiV44XVJ8+0U0f1ZQE7V+hNWf+IjwcZtL95dnQzUKs6tXMIln/OwO+eJ3d61BfLvmABvCwUC9IepPssNSFBUfGqBAP5wXOzFIPSYn00IWpmZtCnpUNL99X1IV3RP+p99gnEDTScQFPYt5B0q5I1nFdRh1p48BSF/kjPA7V++UfBwMXrrYLKhUR9BjmrRzYnyXJKwbH6iCNj5hsXUkVrBdBi/FnMczgsVILfFcIXUfnJD3E/dG+1lmuObg6dEynxiGChTuaR4KkLa5ZRkUcUl6fWlSRsqSNbGEEbdwcI+nTCZqJUlLSghumhs0Z89Hs1nltBd1ALX2VLJEHrKMrFQ8NfEBeCB6ENqMJi5qPlq354MCdGOZ9RvisX/HlxE4Q61BW0+EwnyXSch6LFSOS3axOocUazMoK1XiOTJSv/5bAsnwb0ztDWeUj9fZEJL+SWtgB8=
119 119 2d437a0f3355834a9485bbbeb30a52a052c98f19 0 iQIVAwUAVl5U9CBXgaxoKi1yAQLocg//a4YFz9UVSIEzVEJMUPJnN2dBvEXRpwpb5CdKPd428+18K6VWZd5Mc6xNNRV5AV/hCYylgqDplIvyOvwCj7uN8nEOrLUQQ0Pp37M5ZIX8ZVCK/wgchJ2ltabUG1NrZ7/JA84U79VGLAECMnD0Z9WvZDESpVXmdXfxrk1eCc3omRB0ofNghEx+xpYworfZsu8aap1GHQuBsjPv4VyUWGpMq/KA01PdxRTELmrJnfSyr0nPKwxlI5KsbA1GOe+Mk3tp5HJ42DZqLtKSGPirf6E+6lRJeB0H7EpotN4wD3yZDsw6AgRb2C/ay/3T3Oz7CN+45mwuujV9Cxx5zs1EeOgZcqgA/hXMcwlQyvQDMrWpO8ytSBm6MhOuFOTB3HnUxfsnfSocLJsbNwGWKceAzACcXSqapveVAz/7h+InFgl/8Qce28UJdnX5wro5gP6UWt+xrvc7vfmVGgI3oxbiOUrfglhkjmrxBjEiDQy4BWH7HWMZUVxnqPQRcxIE10+dv0KtM/PBkbUtnbGJ88opFBGkFweje5vQcZy/duuPEIufRkPr8EV47QjOxlvldEjlLq3+QUdJZEgCIFw1X0y7Pix4dsPFjwOmAyo4El1ePrdFzG3dXSVA3eHvMDRnYnNlue9wHvKhYbBle5xTOZBgGuMzhDVe+54JLql5JYr4WrI1pvA=
120 120 ea389970c08449440587712117f178d33bab3f1e 0 iQIVAwUAVociGyBXgaxoKi1yAQJx9Q//TzMypcls5CQW3DM9xY1Q+RFeIw1LcDIev6NDBjUYxULb2WIK2qPw4Th5czF622SMd+XO/kiQeWYp9IW90MZOUVT1YGgUPKlKWMjkf0lZEPzprHjHq0+z/no1kBCBQg2uUOLsb6Y7zom4hFCyPsxXOk5nnxcFEK0VDbODa9zoKb/flyQ7rtzs+Z6BljIQ0TJAJsXs+6XgrW1XJ/f6nbeqsQyPklIBJuGKiaU1Pg8wQe6QqFaO1NYgM3hBETku6r3OTpUhu/2FTUZ7yDWGGzBqmifxzdHoj7/B+2qzRpII77PlZqoe6XF+UOObSFnhKvXKLjlGY5cy3SXBMbHkPcYtHua8wYR8LqO2bYYnsDd9qD0DJ+LlqH0ZMUkB2Cdk9q/cp1PGJWGlYYecHP87DLuWKwS+a6LhVI9TGkIUosVtLaIMsUUEz83RJFb4sSGOXtjk5DDznn9QW8ltXXMTdGQwFq1vmuiXATYenhszbvagrnbAnDyNFths4IhS1jG8237SB36nGmO3zQm5V7AMHfSrISB/8VPyY4Si7uvAV2kMWxuMhYuQbBwVx/KxbKrYjowuvJvCKaV101rWxvSeU2wDih20v+dnQKPveRNnO8AAK/ICflVVsISkd7hXcfk+SnhfxcPQTr+HQIJEW9wt5Q8WbgHk9wuR8kgXQEX6tCGpT/w=
121 121 158bdc8965720ca4061f8f8d806563cfc7cdb62e 0 iQIVAwUAVqBhFyBXgaxoKi1yAQLJpQ//S8kdgmVlS+CI0d2hQVGYWB/eK+tcntG+bZKLto4bvVy5d0ymlDL0x7VrJMOkwzkU1u/GaYo3L6CVEiM/JGCgB32bllrpx+KwQ0AyHswMZruo/6xrjDIYymLMEJ9yonXBZsG7pf2saYTHm3C5/ZIPkrDZSlssJHJDdeWqd75hUnx3nX8dZ4jIIxYDhtdB5/EmuEGOVlbeBHVpwfDXidSJUHJRwJvDqezUlN003sQdUvOHHtRqBrhsYEhHqPMOxDidAgCvjSfWZQKOTKaPE/gQo/BP3GU++Fg55jBz+SBXpdfQJI2Gd8FZfjLkhFa9vTTTcd10YCd4CZbYLpj/4R2xWj1U4oTVEFa6d+AA5Yyu8xG53XSCCPyzfagyuyfLqsaq5r1qDZO/Mh5KZCTvc9xSF5KXj57mKvzMDpiNeQcamGmsV4yXxymKJKGMQvbnzqp+ItIdbnfk38Nuac8rqNnGmFYwMIPa50680vSZT/NhrlPJ8FVTJlfHtSUZbdjPpsqw7BgjFWaVUdwgCKIGERiK7zfR0innj9rF5oVwT8EbKiaR1uVxOKnTwZzPCbdO1euNg/HutZLVQmugiLAv5Z38L3YZf5bH7zJdUydhiTI4mGn/mgncsKXoSarnnduhoYu9OsQZc9pndhxjAEuAslEIyBsLy81fR2HOhUzw5FGNgdY=
122 122 2408645de650d8a29a6ce9e7dce601d8dd0d1474 0 iQIVAwUAVq/xFSBXgaxoKi1yAQLsxhAAg+E6uJCtZZOugrrFi9S6C20SRPBwHwmw22PC5z3Ufp9Vf3vqSL/+zmWI9d/yezIVcTXgM9rKCvq58sZvo4FuO2ngPx7bL9LMJ3qx0IyHUKjwa3AwrzjSzvVhNIrRoimD+lVBI/GLmoszpMICM+Nyg3D41fNJKs6YpnwwsHNJkjMwz0n2SHAShWAgIilyANNVnwnzHE68AIkB/gBkUGtrjf6xB9mXQxAv4GPco/234FAkX9xSWsM0Rx+JLLrSBXoHmIlmu9LPjC0AKn8/DDke+fj7bFaF7hdJBUYOtlYH6f7NIvyZSpw0FHl7jPxoRCtXzIV+1dZEbbIMIXzNtzPFVDYDfMhLqpTgthkZ9x0UaMaHecCUWYYBp8G/IyVS40GJodl8xnRiXUkFejbK/NDdR1f9iZS0dtiFu66cATMdb6d+MG+zW0nDKiQmBt6bwynysqn4g3SIGQFEPyEoRy0bXiefHrlkeHbdfc4zgoejx3ywcRDMGvUbpWs5C43EPu44irKXcqC695vAny3A7nZpt/XP5meDdOF67DNQPvhFdjPPbJBpSsUi2hUlZ+599wUfr3lNVzeEzHT7XApTOf6ysuGtHH3qcVHpFqQSRL1MI0f2xL13UadgTVWYrnHEis7f+ncwlWiR0ucpJB3+dQQh3NVGVo89MfbIZPkA8iil03U=
123 123 b698abf971e7377d9b7ec7fc8c52df45255b0329 0 iQIVAwUAVrJ4YCBXgaxoKi1yAQJsKw/+JHSR0bIyarO4/VilFwsYxCprOnPxmUdS4qc4yjvpbf7Dqqr/OnOHJA29LrMoqWqsHgREepemjqiNindwNtlZec+KgmbF08ihSBBpls96UTTYTcytKRkkbrB+FhwB0iDl/o8RgGPniyG6M7gOp6p8pXQVRCOToIY1B/G0rtpkcU1N3GbiZntO5Fm/LPAVIE74VaDsamMopQ/wEB8qiERngX/M8SjO1ZSaVNW6KjRUsarLXQB9ziVJBolK/WnQsDwEeuWU2udpjBiOHnFC6h84uBpc8rLGhr419bKMJcjgl+0sl2zHGPY2edQYuJqVjVENzf4zzZA+xPgKw3GrSTpd37PEnGU/fufdJ0X+pp3kvmO1cV3TsvVMTCn7NvS6+w8SGdHdwKQQwelYI6vmJnjuOCATbafJiHMaOQ0GVYYk6PPoGrYcQ081x6dStCMaHIPOV1Wirwd2wq+SN9Ql8H6njftBf5Sa5tVWdW/zrhsltMsdZYZagZ/oFT3t83exL0rgZ96bZFs0j3HO3APELygIVuQ6ybPsFyToMDbURNDvr7ZqPKhQkkdHIUMqEez5ReuVgpbO9CWV/yWpB1/ZCpjNBZyDvw05kG2mOoC7AbHc8aLUS/8DetAmhwyb48LW4qjfUkO7RyxVSxqdnaBOMlsg1wsP2S+SlkZKsDHjcquZJ5U=
124 124 d493d64757eb45ada99fcb3693e479a51b7782da 0 iQIVAwUAVtYt4SBXgaxoKi1yAQL6TQ/9FzYE/xOSC2LYqPdPjCXNjGuZdN1WMf/8fUMYT83NNOoLEBGx37C0bAxgD4/P03FwYMuP37IjIcX8vN6fWvtG9Oo0o2n/oR3SKjpsheh2zxhAFX3vXhFD4U18wCz/DnM0O1qGJwJ49kk/99WNgDWeW4n9dMzTFpcaeZBCu1REbZQS40Z+ArXTDCr60g5TLN1XR1WKEzQJvF71rvaE6P8d3GLoGobTIJMLi5UnMwGsnsv2/EIPrWHQiAY9ZEnYq6deU/4RMh9c7afZie9I+ycIA/qVH6vXNt3/a2BP3Frmv8IvKPzqwnoWmIUamew9lLf1joD5joBy8Yu+qMW0/s6DYUGQ4Slk9qIfn6wh4ySgT/7FJUMcayx9ONDq7920RjRc+XFpD8B3Zhj2mM+0g9At1FgX2w2Gkf957oz2nlgTVh9sdPvP6UvWzhqszPMpdG5Vt0oc5vuyobW333qSkufCxi5gmH7do1DIzErMcy8b6IpZUDeQ/dakKwLQpZVVPF15IrNa/zsOW55SrGrL8/ErM/mXNQBBAqvRsOLq2njFqK2JaoG6biH21DMjHVZFw2wBRoLQxbOppfz2/e3mNkNy9HjgJTW3+0iHWvRzMSjwRbk9BlbkmH6kG5163ElHq3Ft3uuQyZBL9I5SQxlHi9s/CV0YSTYthpWR3ChKIMoqBQ0=
125 125 ae279d4a19e9683214cbd1fe8298cf0b50571432 0 iQIVAwUAVvqzViBXgaxoKi1yAQKUCxAAtctMD3ydbe+li3iYjhY5qT0wyHwPr9fcLqsQUJ4ZtD4sK3oxCRZFWFxNBk5bIIyiwusSEJPiPddoQ7NljSZlYDI0HR3R4vns55fmDwPG07Ykf7aSyqr+c2ppCGzn2/2ID476FNtzKqjF+LkVyadgI9vgZk5S4BgdSlfSRBL+1KtB1BlF5etIZnc5U9qs1uqzZJc06xyyF8HlrmMZkAvRUbsx/JzA5LgzZ2WzueaxZgYzYjDk0nPLgyPPBj0DVyWXnW/kdRNmKHNbaZ9aZlWmdPCEoq5iBm71d7Xoa61shmeuVZWvxHNqXdjVMHVeT61cRxjdfxTIkJwvlRGwpy7V17vTgzWFxw6QJpmr7kupRo3idsDydLDPHGUsxP3uMZFsp6+4rEe6qbafjNajkRyiw7kVGCxboOFN0rLVJPZwZGksEIkw58IHcPhZNT1bHHocWOA/uHJTAynfKsAdv/LDdGKcZWUCFOzlokw54xbPvdrBtEOnYNp15OY01IAJd2FCUki5WHvhELUggTjfank1Tc3/Rt1KrGOFhg80CWq6eMiuiWkHGvYq3fjNLbgjl3JJatUFoB+cX1ulDOGsLJEXQ4v5DNHgel0o2H395owNlStksSeW1UBVk0hUK/ADtVUYKAPEIFiboh1iDpEOl40JVnYdsGz3w5FLj2w+16/1vWs=
126 126 740156eedf2c450aee58b1a90b0e826f47c5da64 0 iQIVAwUAVxLGMCBXgaxoKi1yAQLhIg/8DDX+sCz7LmqO47/FfTo+OqGR+bTTqpfK3WebitL0Z6hbXPj7s45jijqIFGqKgMPqS5oom1xeuGTPHdYA0NNoc/mxSCuNLfuXYolpNWPN71HeSDRV9SnhMThG5HSxI+P0Ye4rbsCHrVV+ib1rV81QE2kZ9aZsJd0HnGd512xJ+2ML7AXweM/4lcLmMthN+oi/dv1OGLzfckrcr/fEATCLZt55eO7idx11J1Fk4ptQ6dQ/bKznlD4hneyy1HMPsGxw+bCXrMF2C/nUiRLHdKgGqZ+cDq6loQRfFlQoIhfoEnWC424qbjH4rvHgkZHqC59Oi/ti9Hi75oq9Tb79yzlCY/fGsdrlJpEzrTQdHFMHUoO9CC+JYObXHRo3ALnC5350ZBKxlkdpmucrHTgcDabfhRlx9vDxP4RDopm2hAjk2LJH7bdxnGEyZYkTOZ3hXKnVpt2hUQb4jyzzC9Kl47TFpPKNVKI+NLqRRZAIdXXiy24KD7WzzE6L0NNK0/IeqKBENLL8I1PmDQ6XmYTQVhTuad1jjm2PZDyGiXmJFZO1O/NGecVTvVynKsDT6XhEvzyEtjXqD98rrhbeMHTcmNSwwJMDvm9ws0075sLQyq2EYFG6ECWFypdA/jfumTmxOTkMtuy/V1Gyq7YJ8YaksZ7fXNY9VuJFP72grmlXc6Dvpr4=
127 127 f85de28eae32e7d3064b1a1321309071bbaaa069 0 iQIVAwUAVyZQaiBXgaxoKi1yAQJhCQ//WrRZ55k3VI/OgY+I/HvgFHOC0sbhe207Kedxvy00a3AtXM6wa5E95GNX04QxUfTWUf5ZHDfEgj0/mQywNrH1oJG47iPZSs+qXNLqtgAaXtrih6r4/ruUwFCRFxqK9mkhjG61SKicw3Q7uGva950g6ZUE5BsZ7XJWgoDcJzWKR+AH992G6H//Fhi4zFQAmB34++sm80wV6wMxVKA/qhQzetooTR2x9qrHpvCKMzKllleJe48yzPLJjQoaaVgXCDav0eIePFNw0WvVSldOEp/ADDdTGa65qsC1rO2BB1Cu5+frJ/vUoo0PwIgqgD6p2i41hfIKvkp6130TxmRVxUx+ma8gBYEpPIabV0flLU72gq8lMlGBBSnQ+fcZsfs/Ug0xRN0tzkEScmZFiDxRGk0y7IalXzv6irwOyC2fZCajXGJDzkROQXWMgy9eKkwuFhZBmPVYtrATSq3jHLVmJg5vfdeiVzA6NKxAgGm2z8AsRrijKK8WRqFYiH6xcWKG5u+FroPQdKa0nGCkPSTH3tvC6fAHTVm7JeXch5QE/LiS9Y575pM2PeIP+k+Fr1ugK0AEvYJAXa5UIIcdszPyI+TwPTtWaQ83X99qGAdmRWLvSYjqevOVr7F/fhO3XKFXRCcHA3EzVYnG7nWiVACYF3H2UgN4PWjStbx/Qhhdi9xAuks=
128 128 a56296f55a5e1038ea5016dace2076b693c28a56 0 iQIVAwUAVyZarCBXgaxoKi1yAQL87g/8D7whM3e08HVGDHHEkVUgqLIfueVy1mx0AkRvelmZmwaocFNGpZTd3AjSwy6qXbRNZFXrWU85JJvQCi3PSo/8bK43kwqLJ4lv+Hv2zVTvz30vbLWTSndH3oVRu38lIA7b5K9J4y50pMCwjKLG9iyp+aQG4RBz76fJMlhXy0gu38A8JZVKEeAnQCbtzxKXBzsC8k0/ku/bEQEoo9D4AAGlVTbl5AsHMp3Z6NWu7kEHAX/52/VKU2I0LxYqRxoL1tjTVGkAQfkOHz1gOhLXUgGSYmA9Fb265AYj9cnGWCfyNonlE0Rrk2kAsrjBTGiLyb8WvK/TZmRo4ZpNukzenS9UuAOKxA22Kf9+oN9kKBu1HnwqusYDH9pto1WInCZKV1al7DMBXbGFcnyTXk2xuiTGhVRG5LzCO2QMByBLXiYl77WqqJnzxK3v5lAc/immJl5qa3ATUlTnVBjAs+6cbsbCoY6sjXCT0ClndA9+iZZ1TjPnmLrSeFh5AoE8WHmnFV6oqGN4caX6wiIW5vO+x5Q2ruSsDrwXosXIYzm+0KYKRq9O+MaTwR44Dvq3/RyeIu/cif/Nc7B8bR5Kf7OiRf2T5u97MYAomwGcQfXqgUfm6y7D3Yg+IdAdAJKitxhRPsqqdxIuteXMvOvwukXNDiWP1zsKoYLI37EcwzvbGLUlZvg=
129 129 aaabed77791a75968a12b8c43ad263631a23ee81 0 iQIVAwUAVzpH4CBXgaxoKi1yAQLm5A/9GUYv9CeIepjcdWSBAtNhCBJcqgk2cBcV0XaeQomfxqYWfbW2fze6eE+TrXPKTX1ajycgqquMyo3asQolhHXwasv8+5CQxowjGfyVg7N/kyyjgmJljI+rCi74VfnsEhvG/J4GNr8JLVQmSICfALqQjw7XN8doKthYhwOfIY2vY419613v4oeBQXSsItKC/tfKw9lYvlk4qJKDffJQFyAekgv43ovWqHNkl4LaR6ubtjOsxCnxHfr7OtpX3muM9MLT/obBax5I3EsmiDTQBOjbvI6TcLczs5tVCnTa1opQsPUcEmdA4WpUEiTnLl9lk9le/BIImfYfEP33oVYmubRlKhJYnUiu89ao9L+48FBoqCY88HqbjQI1GO6icfRJN/+NLVeE9wubltbWFETH6e2Q+Ex4+lkul1tQMLPcPt10suMHnEo3/FcOTPt6/DKeMpsYgckHSJq5KzTg632xifyySmb9qkpdGGpY9lRal6FHw3rAhRBqucMgxso4BwC51h04RImtCUQPoA3wpb4BvCHba/thpsUFnHefOvsu3ei4JyHXZK84LPwOj31PcucNFdGDTW6jvKrF1vVUIVS9uMJkJXPu0V4i/oEQSUKifJZivROlpvj1eHy3KeMtjq2kjGyXY2KdzxpT8wX/oYJhCtm1XWMui5f24XBjE6xOcjjm8k4=
130 130 a9764ab80e11bcf6a37255db7dd079011f767c6c 0 iQIVAwUAV09KHyBXgaxoKi1yAQJBWg/+OywRrqU+zvnL1tHJ95PgatsF7S4ZAHZFR098+oCjUDtKpvnm71o2TKiY4D5cckyD2KNwLWg/qW6V+5+2EYU0Y/ViwPVcngib/ZeJP+Nr44TK3YZMRmfFuUEEzA7sZ2r2Gm8eswv//W79I0hXJeFd/o6FgLnn7AbOjcOn3IhWdGAP6jUHv9zyJigQv6K9wgyvAnK1RQE+2CgMcoyeqao/zs23IPXI6XUHOwfrQ7XrQ83+ciMqN7XNRx+TKsUQoYeUew4AanoDSMPAQ4kIudsP5tOgKeLRPmHX9zg6Y5S1nTpLRNdyAxuNuyZtkQxDYcG5Hft/SIx27tZUo3gywHL2U+9RYD2nvXqaWzT3sYB2sPBOiq7kjHRgvothkXemAFsbq2nKFrN0PRua9WG4l3ny0xYmDFPlJ/s0E9XhmQaqy+uXtVbA2XdLEvE6pQ0YWbHEKMniW26w6LJkx4IV6RX/7Kpq7byw/bW65tu/BzgISKau5FYLY4CqZJH7f8QBg3XWpzB91AR494tdsD+ugM45wrY/6awGQx9CY5SAzGqTyFuSFQxgB2rBurb01seZPf8nqG8V13UYXfX/O3/WMOBMr7U/RVqmAA0ZMYOyEwfVUmHqrFjkxpXX+JdNKRiA1GJp5sdRpCxSeXdQ/Ni6AAGZV2IyRb4G4Y++1vP4yPBalas=
131 131 26a5d605b8683a292bb89aea11f37a81b06ac016 0 iQIVAwUAV3bOsSBXgaxoKi1yAQLiDg//fxmcNpTUedsXqEwNdGFJsJ2E25OANgyv1saZHNfbYFWXIR8g4nyjNaj2SjtXF0wzOq5aHlMWXjMZPOT6pQBdTnOYDdgv+O8DGpgHs5x/f+uuxtpVkdxR6uRP0/ImlTEtDix8VQiN3nTu5A0N3C7E2y+D1JIIyTp6vyjzxvGQTY0MD/qgB55Dn6khx8c3phDtMkzmVEwL4ItJxVRVNw1m+2FOXHu++hJEruJdeMV0CKOV6LVbXHho+yt3jQDKhlIgJ65EPLKrf+yRalQtSWpu7y/vUMcEUde9XeQ5x05ebCiI4MkJ0ULQro/Bdx9vBHkAstUC7D+L5y45ZnhHjOwxz9c3GQMZQt1HuyORqbBhf9hvOkUQ2GhlDHc5U04nBe0VhEoCw9ra54n+AgUyqWr4CWimSW6pMTdquCzAAbcJWgdNMwDHrMalCYHhJksKFARKq3uSTR1Noz7sOCSIEQvOozawKSQfOwGxn/5bNepKh4uIRelC1uEDoqculqCLgAruzcMNIMndNVYaJ09IohJzA9jVApa+SZVPAeREg71lnS3d8jaWh1Lu5JFlAAKQeKGVJmNm40Y3HBjtHQDrI67TT59oDAhjo420Wf9VFCaj2k0weYBLWSeJhfUZ5x3PVpAHUvP/rnHPwNYyY0wVoQEvM/bnQdcpICmKhqcK+vKjDrM=
132 132 519bb4f9d3a47a6e83c2b414d58811ed38f503c2 0 iQIVAwUAV42tNyBXgaxoKi1yAQI/Iw//V0NtxpVD4sClotAwffBVW42Uv+SG+07CJoOuFYnmHZv/plOzXuuJlmm95L00/qyRCCTUyAGxK/eP5cAKP2V99ln6rNhh8gpgvmZlnYjU3gqFv8tCQ+fkwgRiWmgKjRL6/bK9FY5cO7ATLVu3kCkFd8CEgzlAaUqBfkNFxZxLDLvKqRlhXxVXhKjvkKg5DZ6eJqRQY7w3UqqR+sF1rMLtVyt490Wqv7YQKwcvY7MEKTyH4twGLx/RhBpBi+GccVKvWC011ffjSjxqAfQqrrSVt0Ld1Khj2/p1bDDYpTgtdDgCzclSXWEQpmSdFRBF5wYs/pDMUreI/E6mlWkB4hfZZk1NBRPRWYikXwnhU3ziubCGesZDyBYLrK1vT+tf6giseo22YQmDnOftbS999Pcn04cyCafeFuOjkubYaINB25T20GS5Wb4a0nHPRAOOVxzk/m/arwYgF0ZZZDDvJ48TRMDf3XOc1jc5qZ7AN/OQKbvh2B08vObnnPm3lmBY1qOnhwzJxpNiq+Z/ypokGXQkGBfKUo7rWHJy5iXLb3Biv9AhxY9d5pSTjBmTAYJEic3q03ztzlnfMyi+C13+YxFAbSSNGBP8Hejkkz0NvmB1TBuCKpnZA8spxY5rhZ/zMx+cCw8hQvWHHDUURps7SQvZEfrJSCGJFPDHL3vbfK+LNwI=
133 133 299546f84e68dbb9bd026f0f3a974ce4bdb93686 0 iQIcBAABCAAGBQJXn3rFAAoJELnJ3IJKpb3VmZoQAK0cdOfi/OURglnN0vYYGwdvSXTPpZauPEYEpwML3dW1j6HRnl5L+H8D8vlYzahK95X4+NNBhqtyyB6wmIVI0NkYfXfd6ACntJE/EnTdLIHIP2NAAoVsggIjiNr26ubRegaD5ya63Ofxz+Yq5iRsUUfHet7o+CyFhExyzdu+Vcz1/E9GztxNfTDVpC/mf+RMLwQTfHOhoTVbaamLCmGAIjw39w72X+vRMJoYNF44te6PvsfI67+6uuC0+9DjMnp5eL/hquSQ1qfks71rnWwxuiPcUDZloIueowVmt0z0sO4loSP1nZ5IP/6ZOoAzSjspqsxeay9sKP0kzSYLGsmCi29otyVSnXiKtyMCW5z5iM6k8XQcMi5mWy9RcpqlNYD7RUTn3g0+a8u7F6UEtske3/qoweJLPhtTmBNOfDNw4JXwOBSZea0QnIIjCeCc4ZGqfojPpbvcA4rkRpxI23YoMrT2v/kp4wgwrqK9fi8ctt8WbXpmGoAQDXWj2bWcuzj94HsAhLduFKv6sxoDz871hqjmjjnjQSU7TSNNnVzdzwqYkMB+BvhcNYxk6lcx3Aif3AayGdrWDubtU/ZRNoLzBwe6gm0udRMXBj4D/60GD6TIkYeL7HjJwfBb6Bf7qvQ6y7g0zbYG9uwBmMeduU7XchErGqQGSEyyJH3DG9OLaFOj
134 134 ccd436f7db6d5d7b9af89715179b911d031d44f1 0 iQIVAwUAV8h7F0emf/qjRqrOAQjmdhAAgYhom8fzL/YHeVLddm71ZB+pKDviKASKGSrBHY4D5Szrh/pYTedmG9IptYue5vzXpspHAaGvZN5xkwrz1/5nmnCsLA8DFaYT9qCkize6EYzxSBtA/W1S9Mv5tObinr1EX9rCSyI4HEJYE8i1IQM5h07SqUsMKDoasd4e29t6gRWg5pfOYq1kc2MTck35W9ff1Fii8S28dqbO3cLU6g5K0pT0JLCZIq7hyTNQdxHAYfebxkVl7PZrZR383IrnyotXVKFFc44qinv94T50uR4yUNYPQ8Gu0TgoGQQjBjk1Lrxot2xpgPQAy8vx+EOJgpg/yNZnYkmJZMxjDkTGVrwvXtOXZzmy2jti7PniET9hUBCU7aNHnoJJLzIf+Vb1CIRP0ypJl8GYCZx6HIYwOQH6EtcaeUqq3r+WXWv74ijIE7OApotmutM9buTvdOLdZddBzFPIjykc6cXO+W4E0kl6u9/OHtaZ3Nynh0ejBRafRWAVw2yU3T9SgQyICsmYWJCThkj14WqCJr2b7jfGlg9MkQOUG6/3f4xz2R3SgyUD8KiGsq/vdBE53zh0YA9gppLoum6AY+z61G1NhVGlrtps90txZBehuARUUz2dJC0pBMRy8XFwXMewDSIe6ATg25pHZsxHfhcalBpJncBl8pORs7oQl+GKBVxlnV4jm1pCzLU=
135 135 149433e68974eb5c63ccb03f794d8b57339a80c4 0 iQIcBAABAgAGBQJX8AfCAAoJELnJ3IJKpb3VnNAP/3umS8tohcZTr4m6DJm9u4XGr2m3FWQmjTEfimGpsOuBC8oCgsq0eAlORYcV68zDax+vQHQu3pqfPXaX+y4ZFDuz0ForNRiPJn+Q+tj1+NrOT1e8h4gH0nSK4rDxEGaa6x01fyC/xQMqN6iNfzbLLB7+WadZlyBRbHaUeZFDlPxPDf1rjDpu1vqwtOrVzSxMasRGEceiUegwsFdFMAefCq0ya/pKe9oV+GgGfR4qNrP7BfpOBcN/Po/ctkFCbLOhHbu6M7HpBSiD57BUy5lfhQQtSjzCKEVTyrWEH0ApjjXKuJzLSyq7xsHKQSOPMgGQprGehyzdCETlZOdauGrC0t9vBCr7kXEhXtycqxBC03vknA2eNeV610VX+HgO9VpCVZWHtENiArhALCcpoEsJvT29xCBYpSii/wnTpYJFT9yW8tjQCxH0zrmEZJvO1/nMINEBQFScB/nzUELn9asnghNf6vMpSGy0fSM27j87VAXCzJ5lqa6WCL/RrKgvYflow/m5AzUfMQhpqpH1vmh4ba1zZ4123lgnW4pNZDV9kmwXrEagGbWe1rnmsMzHugsECiYQyIngjWzHfpHgyEr49Uc5bMM1MlTypeHYYL4kV1jJ8Ou0SC4aV+49p8Onmb2NlVY7JKV7hqDCuZPI164YXMxhPNst4XK0/ENhoOE+8iB6
136 136 438173c415874f6ac653efc1099dec9c9150e90f 0 iQIVAwUAWAZ3okemf/qjRqrOAQj89xAAw/6QZ07yqvH+aZHeGQfgJ/X1Nze/hSMzkqbwGkuUOWD5ztN8+c39EXCn8JlqyLUPD7uGzhTV0299k5fGRihLIseXr0hy/cvVW16uqfeKJ/4/qL9zLS3rwSAgWbaHd1s6UQZVfGCb8V6oC1dkJxfrE9h6kugBqV97wStIRxmCpMDjsFv/zdNwsv6eEdxbiMilLn2/IbWXFOVKJzzv9iEY5Pu5McFR+nnrMyUZQhyGtVPLSkoEPsOysorfCZaVLJ6MnVaJunp9XEv94Pqx9+k+shsQvJHWkc0Nnb6uDHZYkLR5v2AbFsbJ9jDHsdr9A7qeQTiZay7PGI0uPoIrkmLya3cYbU1ADhwloAeQ/3gZLaJaKEjrXcFSsz7AZ9yq74rTwiPulF8uqZxJUodk2m/zy83HBrxxp/vgxWJ5JP2WXPtB8qKY+05umAt4rQS+fd2H/xOu2V2d5Mq1WmgknLBLC0ItaNaf91sSHtgEy22GtcvWQE7S6VWU1PoSYmOLITdJKAsmb7Eq+yKDW9nt0lOpUu2wUhBGctlgXgcWOmJP6gL6edIg66czAkVBp/fpKNl8Z/A0hhpuH7nW7GW/mzLVQnc+JW4wqUVkwlur3NRfvSt5ZyTY/SaR++nRf62h7PHIjU+f0kWQRdCcEQ0X38b8iAjeXcsOW8NCOPpm0zcz3i8=
137 137 eab27446995210c334c3d06f1a659e3b9b5da769 0 iQIcBAABCAAGBQJYGNsXAAoJELnJ3IJKpb3Vf30QAK/dq5vEHEkufLGiYxxkvIyiRaswS+8jamXeHMQrdK8CuokcQYhEv9xiUI6FMIoX4Zc0xfoFCBc+X4qE+Ed9SFYWgQkDs/roJq1C1mTYA+KANMqJkDt00QZq536snFQvjCXAA5fwR/DpgGOOuGMRfvbjh7x8mPyVoPr4HDQCGFXnTYdn193HpTOqUsipzIV5OJqQ9p0sfJjwKP4ZfD0tqqdjTkNwMyJuwuRaReXFvGGCjH2PqkZE/FwQG0NJJjt0xaMUmv5U5tXHC9tEVobVV/qEslqfbH2v1YPF5d8Jmdn7F76FU5J0nTd+3rIVjYGYSt01cR6wtGnzvr/7kw9kbChw4wYhXxnmIALSd48FpA1qWjlPcAdHfUUwObxOxfqmlnBGtAQFK+p5VXCsxDZEIT9MSxscfCjyDQZpkY5S5B3PFIRg6V9bdl5a4rEt27aucuKTHj1Ok2vip4WfaIKk28YMjjzuOQRbr6Pp7mJcCC1/ERHUJdLsaQP+dy18z6XbDjX3O2JDRNYbCBexQyV/Kfrt5EOS5fXiByQUHv+PyR+9Ju6QWkkcFBfgsxq25kFl+eos4V9lxPOY5jDpw2BWu9TyHtTWkjL/YxDUGwUO9WA/WzrcT4skr9FYrFV/oEgi8MkwydC0cFICDfd6tr9upqkkr1W025Im1UBXXJ89bTVj
138 138 b3b1ae98f6a0e14c1e1ba806a6c18e193b6dae5c 0 iQIVAwUAWECEaEemf/qjRqrOAQjuZw/+IWJKnKOsaUMcB9ly3Fo/eskqDL6A0j69IXTJDeBDGMoyGbQU/gZyX2yc6Sw3EhwTSCXu5vKpzg3a6e8MNrC1iHqli4wJ/jPY7XtmiqTYDixdsBLNk46VfOi73ooFe08wVDSNB65xpZsrtPDSioNmQ2kSJwSHb71UlauS4xGkM74vuDpWvX5OZRSfBqMh6NjG5RwBBnS8mzA0SW2dCI2jSc5SCGIzIZpzM0xUN21xzq0YQbrk9qEsmi7ks0eowdhUjeET2wSWwhOK4jS4IfMyRO7KueUB05yHs4mChj9kNFNWtSzXKwKBQbZzwO/1Y7IJjU+AsbWkiUu+6ipqBPQWzS28gCwGOrv5BcIJS+tzsvLUKWgcixyfy5UAqJ32gCdzKC54FUpT2zL6Ad0vXGM6WkpZA7yworN4RCFPexXbi0x2GSTLG8PyIoZ4Iwgtj5NtsEDHrz0380FxgnKUIC3ny2SVuPlyD+9wepD3QYcxdRk1BIzcFT9ZxNlgil3IXRVPwVejvQ/zr6/ILdhBnZ8ojjvVCy3b86B1OhZj/ZByYo5QaykVqWl0V9vJOZlZfvOpm2HiDhm/2uNrVWxG4O6EwhnekAdaJYmeLq1YbhIfGA6KVOaB9Yi5A5BxK9QGXBZ6sLj+dIUD3QR47r9yAqVQE8Gr/Oh6oQXBQqOQv7WzBBs=
139 139 e69874dc1f4e142746ff3df91e678a09c6fc208c 0 iQIVAwUAWG0oGUemf/qjRqrOAQh3uhAAu4TN7jkkgH7Hxn8S1cB6Ru0x8MQutzzzpjShhsE/G7nzCxsZ5eWdJ5ItwXmKhunb7T0og54CGcTxfmdPtCI7AhhHh9/TM2Hv1EBcsXCiwjG8E+P6X1UJkijgTGjNWuCvEDOsQAvgywslECBNnXp2QA5I5UdCMeqDdTAb8ujvbD8I4pxUx1xXKY18DgQGJh13mRlfkEVnPxUi2n8emnwPLjbVVkVISkMFUkaOl8a4fOeZC1xzDpoQocoH2Q8DYa9RCPPSHHSYPNMWGCdNGN2CoAurcHWWvc7jNU28/tBhTazfFv8LYh63lLQ8SIIPZHJAOxo45ufMspzUfNgoD6y3vlF5aW7DpdxwYHnueh7S1Fxgtd9cOnxmxQsgiF4LK0a+VXOi/Tli/fivZHDRCGHJvJgsMQm7pzkay9sGohes6jAnsOv2E8DwFC71FO/btrAp07IRFxH9WhUeMsXLMS9oBlubMxMM58M+xzSKApK6bz2MkLsx9cewmfmfbJnRIK1xDv+J+77pWWNGlxCCjl1WU+aA3M7G8HzwAqjL75ASOWtBrJlFXvlLgzobwwetg6cm44Rv1P39i3rDySZvi4BDlOQHWFupgMKiXnZ1PeL7eBDs/aawrE0V2ysNkf9An+XJZkos2JSLPWcoNigfXNUu5c1AqsERvHA246XJzqvCEK8=
140 140 a1dd2c0c479e0550040542e392e87bc91262517e 0 iQIcBAABCAAGBQJYgBBEAAoJELnJ3IJKpb3VJosP/10rr3onsVbL8E+ri1Q0TJc8uhqIsBVyD/vS1MJtbxRaAdIV92o13YOent0o5ASFF/0yzVKlOWPQRjsYYbYY967k1TruDaWxJAnpeFgMni2Afl/qyWrW4AY2xegZNZCfMmwJA+uSJDdAn+jPV40XbuCZ+OgyZo5S05dfclHFxdc8rPKeUsJtvs5PMmCL3iQl1sulp1ASjuhRtFWZgSFsC6rb2Y7evD66ikL93+0/BPEB4SVX17vB/XEzdmh4ntyt4+d1XAznLHS33IU8UHbTkUmLy+82WnNH7HBB2V7gO47m/HhvaYjEfeW0bqMzN3aOUf30Vy/wB4HHsvkBGDgL5PYVHRRovGcAuCmnYbOkawqbRewW5oDs7UT3HbShNpxCxfsYpo7deHr11zWA3ooWCSlIRRREU4BfwVmn+Ds1hT5HM28Q6zr6GQZegDUbiT9i1zU0EpyfTpH7gc6NTVQrO1z1p70NBnQMqXcHjWJwjSwLER2Qify9MjrGXTL6ofD5zVZKobeRmq94mf3lDq26H7coraM9X5h9xa49VgAcRHzn/WQ6wcFCKDQr6FT67hTUOlF7Jriv8/5h/ziSZr10fCObKeKWN8Skur29VIAHHY4NuUqbM55WohD+jZ2O3d4tze1eWm5MDgWD8RlrfYhQ+cLOwH65AOtts0LNZwlvJuC7
141 141 e1526da1e6d84e03146151c9b6e6950fe9a83d7d 0 iQIVAwUAWJIKpUemf/qjRqrOAQjjThAAvl1K/GZBrkanwEPXomewHkWKTEy1s5d5oWmPPGrSb9G4LM/3/abSbQ7fnzkS6IWi4Ao0za68w/MohaVGKoMAslRbelaTqlus0wE3zxb2yQ/j2NeZzFnFEuR/vbUug7uzH+onko2jXrt7VcPNXLOa1/g5CWwaf/YPfJO4zv+atlzBHvuFcQCkdbcOJkccCnBUoR7y0PJoBJX6K7wJQ+hWLdcY4nVaxkGPRmsZJo9qogXZMw1CwJVjofxRI0S/5vMtEqh8srYsg7qlTNv8eYnwdpfuunn2mI7Khx10Tz85PZDnr3SGRiFvdfmT30pI7jL3bhOHALkaoy2VevteJjIyMxANTvjIUBNQUi+7Kj3VIKmkL9NAMAQBbshiQL1wTrXdqOeC8Nm1BfCQEox2yiC6pDFbXVbguwJZ5VKFizTTK6f6BdNYKTVx8lNEdjAsWH8ojgGWwGXBbTkClULHezJ/sODaZzK/+M/IzbGmlF27jJYpdJX8fUoybZNw9lXwIfQQWHmQHEOJYCljD9G1tvYY70+xAFexgBX5Ib48UK4DRITVNecyQZL7bLTzGcM0TAE0EtD4M42wawsYP3Cva9UxShFLICQdPoa4Wmfs6uLbXG1DDLol/j7b6bL+6W8E3AlW+aAPc8GZm51/w3VlYqqciWTc12OJpu8FiD0pZ/iBw+E=
142 142 25703b624d27e3917d978af56d6ad59331e0464a 0 iQIcBAABCAAGBQJYuMSwAAoJELnJ3IJKpb3VL3YP/iKWY3+K3cLUBD3Ne5MhfS7N3t6rlk9YD4kmU8JnVeV1oAfg36VCylpbJLBnmQdvC8AfBJOkXi6DHp9RKXXmlsOeoppdWYGX5RMOzuwuGPBii6cA6KFd+WBpBJlRtklz61qGCAtv4q8V1mga0yucihghzt4lD/PPz7mk6yUBL8s3rK+bIHGdEhnK2dfnn/U2G0K/vGgsYZESORISuBclCrrc7M3/v1D+FBMCEYX9FXYU4PhYkKXK1mSqzCB7oENu/WP4ijl1nRnEIyzBV9pKO4ylnXTpbZAr/e4PofzjzPXb0zume1191C3wvgJ4eDautGide/Pxls5s6fJRaIowf5XVYQ5srX/NC9N3K77Hy01t5u8nwcyAhjmajZYuB9j37nmiwFawqS/y2eHovrUjkGdelV8OM7/iAexPRC8i2NcGk0m6XuzWy1Dxr8453VD8Hh3tTeafd6v5uHXSLjwogpu/th5rk/i9/5GBzc1MyJgRTwBhVHi/yFxfyakrSU7HT2cwX/Lb5KgWccogqfvrFYQABIBanxLIeZxTv8OIjC75EYknbxYtvvgb35ZdJytwrTHSZN0S7Ua2dHx2KUnHB6thbLu/v9fYrCgFF76DK4Ogd22Cbvv6NqRoglG26d0bqdwz/l1n3o416YjupteW8LMxHzuwiJy69WP1yi10eNDq
143 143 ed5b25874d998ababb181a939dd37a16ea644435 0 iQIcBAABCAAGBQJY4r/gAAoJELnJ3IJKpb3VtwYP/RuTmo252ExXQk/n5zGJZvZQnI86vO1+yGuyOlGFFBwf1v3sOLW1HD7fxF6/GdT8CSQrRqtC17Ya3qtayfY/0AEiSuH2bklBXSB1H5wPyguS5iLqyilCJY0SkHYBIDhJ0xftuIjsa805wdMm3OdclnTOkYT+K1WL8Ylbx/Ni2Lsx1rPpYdcQ/HlTkr5ca1ZbNOOSxSNI4+ilGlKbdSYeEsmqB2sDEiSaDEoxGGoSgzAE9+5Q2FfCGXV0bq4vfmEPoT9lhB4kANE+gcFUvsJTu8Z7EdF8y3CJLiy8+KHO/VLKTGJ1pMperbig9nAXl1AOt+izBFGJGTolbR/ShkkDWB/QVcqIF5CysAWMgnHAx7HjnMDBOANcKzhMMfOi3GUvOCNNIqIIoJHKRHaRk0YbMdt7z2mKpTrRQ9Zadz764jXOqqrPgQFM3jkBHzAvZz9yShrHGh42Y+iReAF9pAN0xPjyZ5Y2qp+DSl0bIQqrAet6Zd3QuoJtXczAeRrAvgn7O9MyLnMyE5s7xxI7o8M7zfWtChLF8ytJUzmRo3iVJNOJH+Zls9N30PGw6vubQAnB5ieaVTv8lnNpcAnEQD/i0tmRSxzyyqoOQbnItIPKFOsaYW+eX9sgJmObU3yDc5k3cs+yAFD2CM/uiUsLcTKyxPNcP1JHBYpwhOjIGczSHVS1
144 144 77eaf9539499a1b8be259ffe7ada787d07857f80 0 iQIcBAABCAAGBQJY9iz9AAoJELnJ3IJKpb3VYqEQAJNkB09sXgYRLA4kGQv3p4v02q9WZ1lHkAhOlNwIh7Zp+pGvT33nHZffByA0v+xtJNV9TNMIFFjkCg3jl5Z42CCe33ZlezGBAzXU+70QPvOR0ojlYk+FdMfeSyCBzWYokIpImwNmwNGKVrUAfywdikCsUC2aRjKg4Mn7GnqWl9WrBG6JEOOUamdx8qV2f6g/utRiqj4YQ86P0y4K3yakwc1LMM+vRfrwvsf1+DZ9t7QRENNKQ6gRnUdfryqSFIWn1VkBVMwIN5W3yIrTMfgH1wAZxbnYHrN5qDK7mcbP7bOA3XWJuEC+3QRnheRFd/21O1dMFuYjaKApXPHRlTGRMOaz2eydbfBopUS1BtfYEh4/B/1yJb9/HDw6LiAjea7ACHiaNec83z643005AvtUuWhjX3QTPkYlQzWaosanGy1IOGtXCPp1L0A+9gUpqyqycfPjQCbST5KRzYSZn3Ngmed5Bb6jsgvg5e5y0En/SQgK/pTKnxemAmFFVvIIrrWGRKj0AD0IFEHEepmwprPRs97EZPoBPFAGmVRuASBeIhFQxSDIXV0ebHJoUmz5w1rTy7U3Eq0ff6nW14kjWOUplatXz5LpWJ3VkZKrI+4gelto5xpTI6gJl2nmezhXQIlInk17cPuxmiHjeMdlOHZRh/zICLhQNL5fGne0ZL+qlrXY
145 145 616e788321cc4ae9975b7f0c54c849f36d82182b 0 iQIVAwUAWPZuQkemf/qjRqrOAQjFlg/9HXEegJMv8FP+uILPoaiA2UCiqWUL2MVJ0K1cvafkwUq+Iwir8sTe4VJ1v6V+ZRiOuzs4HMnoGJrIks4vHRbAxJ3J6xCfvrsbHdl59grv54vuoL5FlZvkdIe8L7/ovKrUmNwPWZX2v+ffFPrsEBeVlVrXpp4wOPhDxCKTmjYVOp87YqXfJsud7EQFPqpV4jX8DEDtJWT95OE9x0srBg0HpSE95d/BM4TuXTVNI8fV41YEqearKeFIhLxu37HxUmGmkAALCi8RJmm4hVpUHgk3tAVzImI8DglUqnC6VEfaYb+PKzIqHelhb66JO/48qN2S/JXihpNHAVUBysBT0b1xEnc6eNsF2fQEB+bEcf8IGj7/ILee1cmwPtoK2OXR2+xWWWjlu2keVcKeI0yAajJw/dP21yvVzVq0ypst7iD+EGHLJWJSmZscbyH5ICr+TJ5yQvIGZJtfsAdAUUTM2xpqSDW4mT5kYyg75URbQ3AKI7lOhJBmkkGQErE4zIQMkaAqcWziVF20xiRWfJoFxT2fK5weaRGIjELH49NLlyvZxYc4LlRo9lIdC7l/6lYDdTx15VuEj1zx/91y/d7OtPm+KCA2Bbdqth8m/fMD8trfQ6jSG/wgsvjZ+S0eoXa92qIR/igsCI+6EwP7duuzL2iyKOPXupQVNN10PKI7EuKv4Lk=
146 146 bb96d4a497432722623ae60d9bc734a1e360179e 0 iQIVAwUAWQkDfEemf/qjRqrOAQierQ/7BuQ0IW0T0cglgqIgkLuYLx2VXJCTEtRNCWmrH2UMK7fAdpAhN0xf+xedv56zYHrlyHpbskDbWvsKIHJdw/4bQitXaIFTyuMMtSR5vXy4Nly34O/Xs2uGb3Y5qwdubeK2nZr4lSPgiRHb/zI/B1Oy8GX830ljmIOY7B0nUWy4DrXcy/M41SnAMLFyD1K6T/8tkv7M4Fai7dQoF9EmIIkShVPktI3lqp3m7infZ4XnJqcqUB0NSfQZwZaUaoalOdCvEIe3ab5ewgl/CuvlDI4oqMQGjXCtNLbtiZSwo6hvudO6ewT+Zn/VdabkZyRtXUxu56ajjd6h22nU1+vknqDzo5tzw6oh1Ubzf8tzyv3Gmmr+tlOjzfK7tXXnT3vR9aEGli0qri0DzOpsDSY0pDC7EsS4LINPoNdsGQrGQdoX++AISROlNjvyuo4Vrp26tPHCSupkKOXuZaiozycAa2Q+aI1EvkPZSXe8SAXKDVtFn05ZB58YVkFzZKAYAxkE/ven59zb4aIbOgR12tZbJoZZsVHrlf/TcDtiXVfIMEMsCtJ1tPgD1rAsEURWRxK3mJ0Ev6KTHgNz4PeBhq1gIP/Y665aX2+cCjc4+vApPUienh5aOr1bQFpIDyYZsafHGMUFNCwRh8bX98oTGa0hjqz4ypwXE4Wztjdc+48UiHARp/Y=
147 147 c850f0ed54c1d42f9aa079ad528f8127e5775217 0 iQIVAwUAWTQINUemf/qjRqrOAQjZDw//b4pEgHYfWRVDEmLZtevysfhlJzbSyLAnWgNnRUVdSwl4WRF1r6ds/q7N4Ege5wQHjOpRtx4jC3y/riMbrLUlaeUXzCdqKgm4JcINS1nXy3IfkeDdUKyOR9upjaVhIEzCMRpyzabdYuflh5CoxayO7GFk2iZ8c1oAl4QzuLSspn9w+znqDg0HrMDbRNijStSulNjkqutih9UqT/PYizhE1UjL0NSnpYyD1vDljsHModJc2dhSzuZ1c4VFZHkienk+CNyeLtVKg8aC+Ej/Ppwq6FlE461T/RxOEzf+WFAc9F4iJibSN2kAFB4ySJ43y+OKkvzAwc5XbUx0y6OlWn2Ph+5T54sIwqasG3DjXyVrwVtAvCrcWUmOyS0RfkKoDVepMPIhFXyrhGqUYSq25Gt6tHVtIrlcWARIGGWlsE+PSHi87qcnSjs4xUzZwVvJWz4fuM1AUG/GTpyt4w3kB85XQikIINkmSTmsM/2/ar75T6jBL3kqOCGOL3n7bVZsGXllhkkQ7e/jqPPWnNXm8scDYdT3WENNu34zZp5ZmqdTXPAIIaqGswnU04KfUSEoYtOMri3E2VvrgMkiINm9BOKpgeTsMb3dkYRw2ZY3UAH9QfdX9BZywk6v3kkE5ghLWMUoQ4sqRlTo7mJKA8+EodjmIGRV/kAv1f7pigg6pIWWEyo=
148 148 26c49ed51a698ec016d2b4c6b44ca3c3f73cc788 0 iQIcBAABCAAGBQJZXQSmAAoJELnJ3IJKpb3VmTwP/jsxFTlKzWU8EnEhEViiP2YREOD3AXU7685DIMnoyVAsZgxrt0CG6Y92b5sINCeh5B0ORPQ7+xi2Xmz6tX8EeAR+/Dpdx6K623yExf8kq91zgfMvYkatNMu6ZVfywibYZAASq02oKoX7WqSPcQG/OwgtdFiGacCrG5iMH7wRv0N9hPc6D5vAV8/H/Inq8twpSG5SGDpCdKj7KPZiY8DFu/3OXatJtl+byg8zWT4FCYKkBPvmZp8/sRhDKBgwr3RvF1p84uuw/QxXjt+DmGxgtjvObjHr+shCMcKBAuZ4RtZmyEo/0L81uaTElHu1ejsEzsEKxs+8YifnH070PTFoV4VXQyXfTc8AyaqHE6rzX96a/HjQiJnL4dFeTZIrUhGK3AkObFLWJxVTo4J8+oliBQQldIh1H2yb1ZMfwapLnUGIqSieHDGZ6K2ccNJK8Q7IRhTCvYc0cjsnbwTpV4cebGqf3WXZhX0cZN+TNfhh/HGRzR1EeAAavjJqpDam1OBA5TmtJd/lHLIRVR5jyG+r4SK0XDlJ8uSfah7MpVH6aQ6UrycPyFusGXQlIqJ1DYQaBrI/SRJfIvRUmvVz9WgKLe83oC3Ui3aWR9rNjMb2InuQuXjeZaeaYfBAUYACcGfCZpZZvoEkMHCqtTng1rbbFnKMFk5kVy9YWuVgK9Iuh0O5
149 149 857876ebaed4e315f63157bd157d6ce553c7ab73 0 iQIVAwUAWW9XW0emf/qjRqrOAQhI7A//cKXIM4l8vrWWsc1Os4knXm/2UaexmAwV70TpviKL9RxCy5zBP/EapCaGRCH8uNPOQTkWGR9Aucm3CtxhggCMzULQxxeH86mEpWf1xILWLySPXW/t2f+2zxrwLSAxxqFJtuYv83Pe8CnS3y4BlgHnBKYXH8XXuW8uvfc0lHKblhrspGBIAinx7vPLoGQcpYrn9USWUKq5d9FaCLQCDT9501FHKf5dlYQajevCUDnewtn5ohelOXjTJQClW3aygv/z+98Kq7ZhayeIiZu+SeP+Ay7lZPklXcy6eyRiQtGCa1yesb9v53jKtgxWewV4o6zyuUesdknZ/IBeNUgw8LepqTIJo6/ckyvBOsSQcda81DuYNUChZLYTSXYPHEUmYiz6CvNoLEgHF/oO5p6CZXOPWbmLWrAFd+0+1Tuq8BSh+PSdEREM3ZLOikkXoVzTKBgu4zpMvmBnjliBg7WhixkcG0v5WunlV9/oHAIpsKdL7AatU+oCPulp+xDpTKzRazEemYiWG9zYKzwSMk9Nc17e2tk+EtFSPsPo4iVCXMgdIZSTNBvynKEFXZQVPWVa+bYRdAmbSY8awiX7exxYL10UcpnN2q/AH/F7rQzAmo8eZ3OtD0+3Nk3JRx0/CMyzKLPYDpdUgwmaPb+s2Bsy7f7TfmA7jTa69YqB1/zVwlWULr0=
150 150 5544af8622863796a0027566f6b646e10d522c4c 0 iQIcBAABCAAGBQJZjJflAAoJELnJ3IJKpb3V19kQALCvTdPrpce5+rBNbFtLGNFxTMDol1dUy87EUAWiArnfOzW3rKBdYxvxDL23BpgUfjRm1fAXdayVvlj6VC6Dyb195OLmc/I9z7SjFxsfmxWilF6U0GIa3W0x37i05EjfcccrBIuSLrvR6AWyJhjLOBCcyAqD/HcEom00/L+o2ry9CDQNLEeVuNewJiupcUqsTIG2yS26lWbtLZuoqS2T4Nlg8wjJhiSXlsZSuAF55iUJKlTQP6KyWReiaYuEVfm/Bybp0A2bFcZCYpWPwnwKBdSCHhIalH8PO57gh9J7xJVnyyBg5PU6n4l6PrGOmKhNiU/xyNe36tEAdMW6svcVvt8hiY0dnwWqR6wgnFFDu0lnTMUcjsy5M5FBY6wSw9Fph8zcNRzYyaeUbasNonPvrIrk21nT3ET3RzVR3ri2nJDVF+0GlpogGfk9k7wY3808091BMsyV3448ZPKQeWiK4Yy4UOUwbKV7YAsS5MdDnC1uKjl4GwLn9UCY/+Q2/2R0CBZ13Tox+Nbo6hBRuRGtFIbLK9j7IIUhhZrIZFSh8cDNkC+UMaS52L5z7ECvoYIUpw+MJ7NkMLHIVGZ2Nxn0C7IbGO6uHyR7D6bdNpxilU+WZStHk0ppZItRTm/htar4jifnaCI8F8OQNYmZ3cQhxx6qV2Tyow8arvWb1NYXrocG
151 151 943c91326b23954e6e1c6960d0239511f9530258 0 iQIcBAABCAAGBQJZjKKZAAoJELnJ3IJKpb3VGQkP/0iF6Khef0lBaRhbSAPwa7RUBb3iaBeuwmeic/hUjMoU1E5NR36bDDaF3u2di5mIYPBONFIeCPf9/DKyFkidueX1UnlAQa3mjh/QfKTb4/yO2Nrk7eH+QtrYxVUUYYjwgp4rS0Nd/++I1IUOor54vqJzJ7ZnM5O1RsE7VI1esAC/BTlUuO354bbm08B0owsZBwVvcVvpV4zeTvq5qyPxBJ3M0kw83Pgwh3JZB9IYhOabhSUBcA2fIPHgYGYnJVC+bLOeMWI1HJkJeoYfClNUiQUjAmi0cdTC733eQnHkDw7xyyFi+zkKu6JmU1opxkHSuj4Hrjul7Gtw3vVWWUPufz3AK7oymNp2Xr5y1HQLDtNJP3jicTTG1ae2TdX5Az3ze0I8VGbpR81/6ShAvY2cSKttV3I+2k4epxTTTf0xaZS1eUdnFOox6acElG2reNzx7EYYxpHj17K8N2qNzyY78iPgbJ+L39PBFoiGXMZJqWCxxIHoK1MxlXa8WwSnsXAU768dJvEn2N1x3fl+aeaWzeM4/5Qd83YjFuCeycuRnIo3rejSX3rWFAwZE0qQHKI5YWdKDLxIfdHTjdfMP7np+zLcHt0DV/dHmj2hKQgU0OK04fx7BrmdS1tw67Y9bL3H3TDohn7khU1FrqrKVuqSLbLsxnNyWRbZQF+DCoYrHlIW
152 3fee7f7d2da04226914c2258cc2884dc27384fd7 0 iQIcBAABCAAGBQJZjOJfAAoJELnJ3IJKpb3VvikP/iGjfahwkl2BDZYGq6Ia64a0bhEh0iltoWTCCDKMbHuuO+7h07fHpBl/XX5XPnS7imBUVWLOARhVL7aDPb0tu5NZzMKN57XUC/0FWFyf7lXXAVaOapR4kP8RtQvnoxfNSLRgiZQL88KIRBgFc8pbl8hLA6UbcHPsOk4dXKvmfPfHBHnzdUEDcSXDdyOBhuyOSzRs8egXVi3WeX6OaXG3twkw/uCF3pgOMOSyWVDwD+KvK+IBmSxCTKXzsb+pqpc7pPOFWhSXjpbuYUcI5Qy7mpd0bFL3qNqgvUNq2gX5mT6zH/TsVD10oSUjYYqKMO+gi34OgTVWRRoQfWBwrQwxsC/MxH6ZeOetl2YkS13OxdmYpNAFNQ8ye0vZigJRA+wHoC9dn0h8c5X4VJt/dufHeXc887EGJpLg6GDXi5Emr2ydAUhBJKlpi2yss22AmiQ4G9NE1hAjxqhPvkgBK/hpbr3FurV4hjTG6XKsF8I0WdbYz2CW/FEbp1+4T49ChhrwW0orZdEQX7IEjXr45Hs5sTInT90Hy2XG3Kovi0uVMt15cKsSEYDoFHkR4NgCZX2Y+qS5ryH8yqor3xtel3KsBIy6Ywn8pAo2f8flW3nro/O6x+0NKGV+ZZ0uo/FctuQLBrQVs025T1ai/6MbscQXvFVZVPKrUzlQaNPf/IwNOaRa
@@ -1,164 +1,165 b''
1 1 d40cc5aacc31ed673d9b5b24f98bee78c283062c 0.4f
2 2 1c590d34bf61e2ea12c71738e5a746cd74586157 0.4e
3 3 7eca4cfa8aad5fce9a04f7d8acadcd0452e2f34e 0.4d
4 4 b4d0c3786ad3e47beacf8412157326a32b6d25a4 0.4c
5 5 f40273b0ad7b3a6d3012fd37736d0611f41ecf54 0.5
6 6 0a28dfe59f8fab54a5118c5be4f40da34a53cdb7 0.5b
7 7 12e0fdbc57a0be78f0e817fd1d170a3615cd35da 0.6
8 8 4ccf3de52989b14c3d84e1097f59e39a992e00bd 0.6b
9 9 eac9c8efcd9bd8244e72fb6821f769f450457a32 0.6c
10 10 979c049974485125e1f9357f6bbe9c1b548a64c3 0.7
11 11 3a56574f329a368d645853e0f9e09472aee62349 0.8
12 12 6a03cff2b0f5d30281e6addefe96b993582f2eac 0.8.1
13 13 35fb62a3a673d5322f6274a44ba6456e5e4b3b37 0.9
14 14 2be3001847cb18a23c403439d9e7d0ace30804e9 0.9.1
15 15 36a957364b1b89c150f2d0e60a99befe0ee08bd3 0.9.2
16 16 27230c29bfec36d5540fbe1c976810aefecfd1d2 0.9.3
17 17 fb4b6d5fe100b0886f8bc3d6731ec0e5ed5c4694 0.9.4
18 18 23889160905a1b09fffe1c07378e9fc1827606eb 0.9.5
19 19 bae2e9c838e90a393bae3973a7850280413e091a 1.0
20 20 d5cbbe2c49cee22a9fbeb9ea41daa0ac4e26b846 1.0.1
21 21 d2375bbee6d47e62ba8e415c86e83a465dc4dce9 1.0.2
22 22 2a67430f92f15ea5159c26b09ec4839a0c549a26 1.1
23 23 3773e510d433969e277b1863c317b674cbee2065 1.1.1
24 24 11a4eb81fb4f4742451591489e2797dc47903277 1.1.2
25 25 11efa41037e280d08cfb07c09ad485df30fb0ea8 1.2
26 26 02981000012e3adf40c4849bd7b3d5618f9ce82d 1.2.1
27 27 196d40e7c885fa6e95f89134809b3ec7bdbca34b 1.3
28 28 3ef6c14a1e8e83a31226f5881b7fe6095bbfa6f6 1.3.1
29 29 31ec469f9b556f11819937cf68ee53f2be927ebf 1.4
30 30 439d7ea6fe3aa4ab9ec274a68846779153789de9 1.4.1
31 31 296a0b14a68621f6990c54fdba0083f6f20935bf 1.4.2
32 32 4aa619c4c2c09907034d9824ebb1dd0e878206eb 1.4.3
33 33 ff2704a8ded37fbebd8b6eb5ec733731d725da8a 1.5
34 34 2b01dab594167bc0dd33331dbaa6dca3dca1b3aa 1.5.1
35 35 39f725929f0c48c5fb3b90c071fc3066012456ca 1.5.2
36 36 fdcf80f26604f233dc4d8f0a5ef9d7470e317e8a 1.5.3
37 37 24fe2629c6fd0c74c90bd066e77387c2b02e8437 1.5.4
38 38 f786fc4b8764cd2a5526d259cf2f94d8a66924d9 1.6
39 39 bf1774d95bde614af3956d92b20e2a0c68c5fec7 1.6.1
40 40 c00f03a4982e467fb6b6bd45908767db6df4771d 1.6.2
41 41 ff5cec76b1c5b6be9c3bb923aae8c3c6d079d6b9 1.6.3
42 42 93d8bff78c96fe7e33237b257558ee97290048a4 1.6.4
43 43 333421b9e0f96c7bc788e5667c146a58a9440a55 1.7
44 44 4438875ec01bd0fc32be92b0872eb6daeed4d44f 1.7.1
45 45 6aff4f144ad356311318b0011df0bb21f2c97429 1.7.2
46 46 e3bf16703e2601de99e563cdb3a5d50b64e6d320 1.7.3
47 47 a6c855c32ea081da3c3b8ff628f1847ff271482f 1.7.4
48 48 2b2155623ee2559caf288fd333f30475966c4525 1.7.5
49 49 2616325766e3504c8ae7c84bd15ee610901fe91d 1.8
50 50 aa1f3be38ab127280761889d2dca906ca465b5f4 1.8.1
51 51 b032bec2c0a651ca0ddecb65714bfe6770f67d70 1.8.2
52 52 3cb1e95676ad089596bd81d0937cad37d6e3b7fb 1.8.3
53 53 733af5d9f6b22387913e1d11350fb8cb7c1487dd 1.8.4
54 54 de9eb6b1da4fc522b1cab16d86ca166204c24f25 1.9
55 55 4a43e23b8c55b4566b8200bf69fe2158485a2634 1.9.1
56 56 d629f1e89021103f1753addcef6b310e4435b184 1.9.2
57 57 351a9292e430e35766c552066ed3e87c557b803b 1.9.3
58 58 384082750f2c51dc917d85a7145748330fa6ef4d 2.0-rc
59 59 41453d55b481ddfcc1dacb445179649e24ca861d 2.0
60 60 195dbd1cef0c2f9f8bcf4ea303238105f716bda3 2.0.1
61 61 6344043924497cd06d781d9014c66802285072e4 2.0.2
62 62 db33555eafeaf9df1e18950e29439eaa706d399b 2.1-rc
63 63 2aa5b51f310fb3befd26bed99c02267f5c12c734 2.1
64 64 53e2cd303ecf8ca7c7eeebd785c34e5ed6b0f4a4 2.1.1
65 65 b9bd95e61b49c221c4cca24e6da7c946fc02f992 2.1.2
66 66 d9e2f09d5488c395ae9ddbb320ceacd24757e055 2.2-rc
67 67 00182b3d087909e3c3ae44761efecdde8f319ef3 2.2
68 68 5983de86462c5a9f42a3ad0f5e90ce5b1d221d25 2.2.1
69 69 85a358df5bbbe404ca25730c9c459b34263441dc 2.2.2
70 70 b013baa3898e117959984fc64c29d8c784d2f28b 2.2.3
71 71 a06e2681dd1786e2354d84a5fa9c1c88dd4fa3e0 2.3-rc
72 72 7f5094bb3f423fc799e471aac2aee81a7ce57a0b 2.3
73 73 072209ae4ddb654eb2d5fd35bff358c738414432 2.3.1
74 74 b3f0f9a39c4e1d0250048cd803ab03542d6f140a 2.3.2
75 75 d118a4f4fd16d9b558ec3f3e87bfee772861d2b7 2.4-rc
76 76 195ad823b5d58c68903a6153a25e3fb4ed25239d 2.4
77 77 0c10cf8191469e7c3c8844922e17e71a176cb7cb 2.4.1
78 78 a4765077b65e6ae29ba42bab7834717b5072d5ba 2.4.2
79 79 f5fbe15ca7449f2c9a3cf817c86d0ae68b307214 2.5-rc
80 80 a6088c05e43a8aee0472ca3a4f6f8d7dd914ebbf 2.5
81 81 7511d4df752e61fe7ae4f3682e0a0008573b0402 2.5.1
82 82 5b7175377babacce80a6c1e12366d8032a6d4340 2.5.2
83 83 50c922c1b5145dab8baefefb0437d363b6a6c21c 2.5.3
84 84 8a7bd2dccd44ed571afe7424cd7f95594f27c092 2.5.4
85 85 292cd385856d98bacb2c3086f8897bc660c2beea 2.6-rc
86 86 23f785b38af38d2fca6b8f3db56b8007a84cd73a 2.6
87 87 ddc7a6be20212d18f3e27d9d7e6f079a66d96f21 2.6.1
88 88 cceaf7af4c9e9e6fa2dbfdcfe9856c5da69c4ffd 2.6.2
89 89 009794acc6e37a650f0fae37872e733382ac1c0c 2.6.3
90 90 f0d7721d7322dcfb5af33599c2543f27335334bb 2.7-rc
91 91 f37b5a17e6a0ee17afde2cdde5393dd74715fb58 2.7
92 92 335a558f81dc73afeab4d7be63617392b130117f 2.7.1
93 93 e7fa36d2ad3a7944a52dca126458d6f482db3524 2.7.2
94 94 1596f2d8f2421314b1ddead8f7d0c91009358994 2.8-rc
95 95 d825e4025e39d1c39db943cdc89818abd0a87c27 2.8
96 96 209e04a06467e2969c0cc6501335be0406d46ef0 2.8.1
97 97 ca387377df7a3a67dbb90b6336b781cdadc3ef41 2.8.2
98 98 8862469e16f9236208581b20de5f96bd13cc039d 2.9-rc
99 99 3cec5134e9c4bceab6a00c60f52a4f80677a78f2 2.9
100 100 b96cb15ec9e04d8ac5ee08b34fcbbe4200588965 2.9.1
101 101 3f83fc5cfe715d292069ee8417c83804f6c6c1e4 2.9.2
102 102 564f55b251224f16508dd1311452db7780dafe2b 3.0-rc
103 103 2195ac506c6ababe86985b932f4948837c0891b5 3.0
104 104 269c80ee5b3cb3684fa8edc61501b3506d02eb10 3.0.1
105 105 2d8cd3d0e83c7336c0cb45a9f88638363f993848 3.0.2
106 106 6c36dc6cd61a0e1b563f1d51e55bdf4dacf12162 3.1-rc
107 107 3178e49892020336491cdc6945885c4de26ffa8b 3.1
108 108 5dc91146f35369949ea56b40172308158b59063a 3.1.1
109 109 f768c888aaa68d12dd7f509dcc7f01c9584357d0 3.1.2
110 110 7f8d16af8cae246fa5a48e723d48d58b015aed94 3.2-rc
111 111 ced632394371a36953ce4d394f86278ae51a2aae 3.2
112 112 643c58303fb0ec020907af28b9e486be299ba043 3.2.1
113 113 902554884335e5ca3661d63be9978eb4aec3f68a 3.2.2
114 114 6dad422ecc5adb63d9fa649eeb8e05a5f9bc4900 3.2.3
115 115 1265a3a71d75396f5d4cf6935ae7d9ba5407a547 3.2.4
116 116 db8e3f7948b1fdeb9ad12d448fc3525759908b9f 3.3-rc
117 117 fbdd5195528fae4f41feebc1838215c110b25d6a 3.3
118 118 5b4ed033390bf6e2879c8f5c28c84e1ee3b87231 3.3.1
119 119 07a92bbd02e5e3a625e0820389b47786b02b2cea 3.3.2
120 120 2e2e9a0750f91a6fe0ad88e4de34f8efefdcab08 3.3.3
121 121 e89f909edffad558b56f4affa8239e4832f88de0 3.4-rc
122 122 8cc6036bca532e06681c5a8fa37efaa812de67b5 3.4
123 123 ed18f4acf435a2824c6f49fba40f42b9df5da7ad 3.4.1
124 124 540cd0ddac49c1125b2e013aa2ff18ecbd4dd954 3.4.2
125 125 96a38d44ba093bd1d1ecfd34119e94056030278b 3.5-rc
126 126 21aa1c313b05b1a85f8ffa1120d51579ddf6bf24 3.5
127 127 1a45e49a6bed023deb229102a8903234d18054d3 3.5.1
128 128 9a466b9f9792e3ad7ae3fc6c43c3ff2e136b718d 3.5.2
129 129 b66e3ca0b90c3095ea28dfd39aa24247bebf5c20 3.6-rc
130 130 47dd34f2e7272be9e3b2a5a83cd0d20be44293f4 3.6
131 131 1aa5083cbebbe7575c88f3402ab377539b484897 3.6.1
132 132 2d437a0f3355834a9485bbbeb30a52a052c98f19 3.6.2
133 133 ea389970c08449440587712117f178d33bab3f1e 3.6.3
134 134 158bdc8965720ca4061f8f8d806563cfc7cdb62e 3.7-rc
135 135 2408645de650d8a29a6ce9e7dce601d8dd0d1474 3.7
136 136 b698abf971e7377d9b7ec7fc8c52df45255b0329 3.7.1
137 137 d493d64757eb45ada99fcb3693e479a51b7782da 3.7.2
138 138 ae279d4a19e9683214cbd1fe8298cf0b50571432 3.7.3
139 139 740156eedf2c450aee58b1a90b0e826f47c5da64 3.8-rc
140 140 f85de28eae32e7d3064b1a1321309071bbaaa069 3.8
141 141 a56296f55a5e1038ea5016dace2076b693c28a56 3.8.1
142 142 aaabed77791a75968a12b8c43ad263631a23ee81 3.8.2
143 143 a9764ab80e11bcf6a37255db7dd079011f767c6c 3.8.3
144 144 26a5d605b8683a292bb89aea11f37a81b06ac016 3.8.4
145 145 519bb4f9d3a47a6e83c2b414d58811ed38f503c2 3.9-rc
146 146 299546f84e68dbb9bd026f0f3a974ce4bdb93686 3.9
147 147 ccd436f7db6d5d7b9af89715179b911d031d44f1 3.9.1
148 148 149433e68974eb5c63ccb03f794d8b57339a80c4 3.9.2
149 149 438173c415874f6ac653efc1099dec9c9150e90f 4.0-rc
150 150 eab27446995210c334c3d06f1a659e3b9b5da769 4.0
151 151 b3b1ae98f6a0e14c1e1ba806a6c18e193b6dae5c 4.0.1
152 152 e69874dc1f4e142746ff3df91e678a09c6fc208c 4.0.2
153 153 a1dd2c0c479e0550040542e392e87bc91262517e 4.1-rc
154 154 e1526da1e6d84e03146151c9b6e6950fe9a83d7d 4.1
155 155 25703b624d27e3917d978af56d6ad59331e0464a 4.1.1
156 156 ed5b25874d998ababb181a939dd37a16ea644435 4.1.2
157 157 77eaf9539499a1b8be259ffe7ada787d07857f80 4.1.3
158 158 616e788321cc4ae9975b7f0c54c849f36d82182b 4.2-rc
159 159 bb96d4a497432722623ae60d9bc734a1e360179e 4.2
160 160 c850f0ed54c1d42f9aa079ad528f8127e5775217 4.2.1
161 161 26c49ed51a698ec016d2b4c6b44ca3c3f73cc788 4.2.2
162 162 857876ebaed4e315f63157bd157d6ce553c7ab73 4.3-rc
163 163 5544af8622863796a0027566f6b646e10d522c4c 4.3
164 164 943c91326b23954e6e1c6960d0239511f9530258 4.2.3
165 3fee7f7d2da04226914c2258cc2884dc27384fd7 4.3.1
@@ -1,712 +1,712 b''
1 1 # __init__.py - fsmonitor initialization and overrides
2 2 #
3 3 # Copyright 2013-2016 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''Faster status operations with the Watchman file monitor (EXPERIMENTAL)
9 9
10 10 Integrates the file-watching program Watchman with Mercurial to produce faster
11 11 status results.
12 12
13 13 On a particular Linux system, for a real-world repository with over 400,000
14 14 files hosted on ext4, vanilla `hg status` takes 1.3 seconds. On the same
15 15 system, with fsmonitor it takes about 0.3 seconds.
16 16
17 17 fsmonitor requires no configuration -- it will tell Watchman about your
18 18 repository as necessary. You'll need to install Watchman from
19 19 https://facebook.github.io/watchman/ and make sure it is in your PATH.
20 20
21 21 The following configuration options exist:
22 22
23 23 ::
24 24
25 25 [fsmonitor]
26 26 mode = {off, on, paranoid}
27 27
28 28 When `mode = off`, fsmonitor will disable itself (similar to not loading the
29 29 extension at all). When `mode = on`, fsmonitor will be enabled (the default).
30 30 When `mode = paranoid`, fsmonitor will query both Watchman and the filesystem,
31 31 and ensure that the results are consistent.
32 32
33 33 ::
34 34
35 35 [fsmonitor]
36 36 timeout = (float)
37 37
38 38 A value, in seconds, that determines how long fsmonitor will wait for Watchman
39 39 to return results. Defaults to `2.0`.
40 40
41 41 ::
42 42
43 43 [fsmonitor]
44 44 blacklistusers = (list of userids)
45 45
46 46 A list of usernames for which fsmonitor will disable itself altogether.
47 47
48 48 ::
49 49
50 50 [fsmonitor]
51 51 walk_on_invalidate = (boolean)
52 52
53 53 Whether or not to walk the whole repo ourselves when our cached state has been
54 54 invalidated, for example when Watchman has been restarted or .hgignore rules
55 55 have been changed. Walking the repo in that case can result in competing for
56 56 I/O with Watchman. For large repos it is recommended to set this value to
57 57 false. You may wish to set this to true if you have a very fast filesystem
58 58 that can outpace the IPC overhead of getting the result data for the full repo
59 59 from Watchman. Defaults to false.
60 60
61 61 fsmonitor is incompatible with the largefiles and eol extensions, and
62 62 will disable itself if any of those are active.
63 63
64 64 '''
65 65
66 66 # Platforms Supported
67 67 # ===================
68 68 #
69 69 # **Linux:** *Stable*. Watchman and fsmonitor are both known to work reliably,
70 70 # even under severe loads.
71 71 #
72 72 # **Mac OS X:** *Stable*. The Mercurial test suite passes with fsmonitor
73 73 # turned on, on case-insensitive HFS+. There has been a reasonable amount of
74 74 # user testing under normal loads.
75 75 #
76 76 # **Solaris, BSD:** *Alpha*. watchman and fsmonitor are believed to work, but
77 77 # very little testing has been done.
78 78 #
79 79 # **Windows:** *Alpha*. Not in a release version of watchman or fsmonitor yet.
80 80 #
81 81 # Known Issues
82 82 # ============
83 83 #
84 84 # * fsmonitor will disable itself if any of the following extensions are
85 85 # enabled: largefiles, inotify, eol; or if the repository has subrepos.
86 86 # * fsmonitor will produce incorrect results if nested repos that are not
87 87 # subrepos exist. *Workaround*: add nested repo paths to your `.hgignore`.
88 88 #
89 89 # The issues related to nested repos and subrepos are probably not fundamental
90 90 # ones. Patches to fix them are welcome.
91 91
92 92 from __future__ import absolute_import
93 93
94 94 import codecs
95 95 import hashlib
96 96 import os
97 97 import stat
98 98 import sys
99 99
100 100 from mercurial.i18n import _
101 101 from mercurial import (
102 102 context,
103 103 encoding,
104 104 error,
105 105 extensions,
106 106 localrepo,
107 107 merge,
108 108 pathutil,
109 109 pycompat,
110 110 scmutil,
111 111 util,
112 112 )
113 113 from mercurial import match as matchmod
114 114
115 115 from . import (
116 116 pywatchman,
117 117 state,
118 118 watchmanclient,
119 119 )
120 120
121 121 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
122 122 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
123 123 # be specifying the version(s) of Mercurial they are tested with, or
124 124 # leave the attribute unspecified.
125 125 testedwith = 'ships-with-hg-core'
126 126
127 127 # This extension is incompatible with the following blacklisted extensions
128 128 # and will disable itself when encountering one of these:
129 129 _blacklist = ['largefiles', 'eol']
130 130
131 131 def _handleunavailable(ui, state, ex):
132 132 """Exception handler for Watchman interaction exceptions"""
133 133 if isinstance(ex, watchmanclient.Unavailable):
134 134 if ex.warn:
135 135 ui.warn(str(ex) + '\n')
136 136 if ex.invalidate:
137 137 state.invalidate()
138 138 ui.log('fsmonitor', 'Watchman unavailable: %s\n', ex.msg)
139 139 else:
140 140 ui.log('fsmonitor', 'Watchman exception: %s\n', ex)
141 141
142 142 def _hashignore(ignore):
143 143 """Calculate hash for ignore patterns and filenames
144 144
145 145 If this information changes between Mercurial invocations, we can't
146 146 rely on Watchman information anymore and have to re-scan the working
147 147 copy.
148 148
149 149 """
150 150 sha1 = hashlib.sha1()
151 151 sha1.update(repr(ignore))
152 152 return sha1.hexdigest()
153 153
154 154 _watchmanencoding = pywatchman.encoding.get_local_encoding()
155 155 _fsencoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
156 156 _fixencoding = codecs.lookup(_watchmanencoding) != codecs.lookup(_fsencoding)
157 157
158 158 def _watchmantofsencoding(path):
159 159 """Fix path to match watchman and local filesystem encoding
160 160
161 161 watchman's paths encoding can differ from filesystem encoding. For example,
162 162 on Windows, it's always utf-8.
163 163 """
164 164 try:
165 165 decoded = path.decode(_watchmanencoding)
166 166 except UnicodeDecodeError as e:
167 167 raise error.Abort(str(e), hint='watchman encoding error')
168 168
169 169 try:
170 170 encoded = decoded.encode(_fsencoding, 'strict')
171 171 except UnicodeEncodeError as e:
172 172 raise error.Abort(str(e))
173 173
174 174 return encoded
175 175
176 176 def overridewalk(orig, self, match, subrepos, unknown, ignored, full=True):
177 177 '''Replacement for dirstate.walk, hooking into Watchman.
178 178
179 179 Whenever full is False, ignored is False, and the Watchman client is
180 180 available, use Watchman combined with saved state to possibly return only a
181 181 subset of files.'''
182 182 def bail():
183 183 return orig(match, subrepos, unknown, ignored, full=True)
184 184
185 185 if full or ignored or not self._watchmanclient.available():
186 186 return bail()
187 187 state = self._fsmonitorstate
188 188 clock, ignorehash, notefiles = state.get()
189 189 if not clock:
190 190 if state.walk_on_invalidate:
191 191 return bail()
192 192 # Initial NULL clock value, see
193 193 # https://facebook.github.io/watchman/docs/clockspec.html
194 194 clock = 'c:0:0'
195 195 notefiles = []
196 196
197 197 def fwarn(f, msg):
198 198 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
199 199 return False
200 200
201 201 def badtype(mode):
202 202 kind = _('unknown')
203 203 if stat.S_ISCHR(mode):
204 204 kind = _('character device')
205 205 elif stat.S_ISBLK(mode):
206 206 kind = _('block device')
207 207 elif stat.S_ISFIFO(mode):
208 208 kind = _('fifo')
209 209 elif stat.S_ISSOCK(mode):
210 210 kind = _('socket')
211 211 elif stat.S_ISDIR(mode):
212 212 kind = _('directory')
213 213 return _('unsupported file type (type is %s)') % kind
214 214
215 215 ignore = self._ignore
216 216 dirignore = self._dirignore
217 217 if unknown:
218 218 if _hashignore(ignore) != ignorehash and clock != 'c:0:0':
219 219 # ignore list changed -- can't rely on Watchman state any more
220 220 if state.walk_on_invalidate:
221 221 return bail()
222 222 notefiles = []
223 223 clock = 'c:0:0'
224 224 else:
225 225 # always ignore
226 226 ignore = util.always
227 227 dirignore = util.always
228 228
229 229 matchfn = match.matchfn
230 230 matchalways = match.always()
231 231 dmap = self._map
232 232 nonnormalset = getattr(self, '_nonnormalset', None)
233 233
234 234 copymap = self._copymap
235 235 getkind = stat.S_IFMT
236 236 dirkind = stat.S_IFDIR
237 237 regkind = stat.S_IFREG
238 238 lnkkind = stat.S_IFLNK
239 239 join = self._join
240 240 normcase = util.normcase
241 241 fresh_instance = False
242 242
243 243 exact = skipstep3 = False
244 244 if match.isexact(): # match.exact
245 245 exact = True
246 246 dirignore = util.always # skip step 2
247 247 elif match.prefix(): # match.match, no patterns
248 248 skipstep3 = True
249 249
250 250 if not exact and self._checkcase:
251 251 # note that even though we could receive directory entries, we're only
252 252 # interested in checking if a file with the same name exists. So only
253 253 # normalize files if possible.
254 254 normalize = self._normalizefile
255 255 skipstep3 = False
256 256 else:
257 257 normalize = None
258 258
259 259 # step 1: find all explicit files
260 260 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
261 261
262 262 skipstep3 = skipstep3 and not (work or dirsnotfound)
263 263 work = [d for d in work if not dirignore(d[0])]
264 264
265 265 if not work and (exact or skipstep3):
266 266 for s in subrepos:
267 267 del results[s]
268 268 del results['.hg']
269 269 return results
270 270
271 271 # step 2: query Watchman
272 272 try:
273 273 # Use the user-configured timeout for the query.
274 274 # Add a little slack over the top of the user query to allow for
275 275 # overheads while transferring the data
276 276 self._watchmanclient.settimeout(state.timeout + 0.1)
277 277 result = self._watchmanclient.command('query', {
278 278 'fields': ['mode', 'mtime', 'size', 'exists', 'name'],
279 279 'since': clock,
280 280 'expression': [
281 281 'not', [
282 282 'anyof', ['dirname', '.hg'],
283 283 ['name', '.hg', 'wholename']
284 284 ]
285 285 ],
286 286 'sync_timeout': int(state.timeout * 1000),
287 287 'empty_on_fresh_instance': state.walk_on_invalidate,
288 288 })
289 289 except Exception as ex:
290 290 _handleunavailable(self._ui, state, ex)
291 291 self._watchmanclient.clearconnection()
292 292 return bail()
293 293 else:
294 294 # We need to propagate the last observed clock up so that we
295 295 # can use it for our next query
296 296 state.setlastclock(result['clock'])
297 297 if result['is_fresh_instance']:
298 298 if state.walk_on_invalidate:
299 299 state.invalidate()
300 300 return bail()
301 301 fresh_instance = True
302 302 # Ignore any prior noteable files from the state info
303 303 notefiles = []
304 304
305 305 # for file paths which require normalization and we encounter a case
306 306 # collision, we store our own foldmap
307 307 if normalize:
308 308 foldmap = dict((normcase(k), k) for k in results)
309 309
310 310 switch_slashes = pycompat.ossep == '\\'
311 311 # The order of the results is, strictly speaking, undefined.
312 312 # For case changes on a case insensitive filesystem we may receive
313 313 # two entries, one with exists=True and another with exists=False.
314 314 # The exists=True entries in the same response should be interpreted
315 315 # as being happens-after the exists=False entries due to the way that
316 316 # Watchman tracks files. We use this property to reconcile deletes
317 317 # for name case changes.
318 318 for entry in result['files']:
319 319 fname = entry['name']
320 320 if _fixencoding:
321 321 fname = _watchmantofsencoding(fname)
322 322 if switch_slashes:
323 323 fname = fname.replace('\\', '/')
324 324 if normalize:
325 325 normed = normcase(fname)
326 326 fname = normalize(fname, True, True)
327 327 foldmap[normed] = fname
328 328 fmode = entry['mode']
329 329 fexists = entry['exists']
330 330 kind = getkind(fmode)
331 331
332 332 if not fexists:
333 333 # if marked as deleted and we don't already have a change
334 334 # record, mark it as deleted. If we already have an entry
335 335 # for fname then it was either part of walkexplicit or was
336 336 # an earlier result that was a case change
337 337 if fname not in results and fname in dmap and (
338 338 matchalways or matchfn(fname)):
339 339 results[fname] = None
340 340 elif kind == dirkind:
341 341 if fname in dmap and (matchalways or matchfn(fname)):
342 342 results[fname] = None
343 343 elif kind == regkind or kind == lnkkind:
344 344 if fname in dmap:
345 345 if matchalways or matchfn(fname):
346 346 results[fname] = entry
347 347 elif (matchalways or matchfn(fname)) and not ignore(fname):
348 348 results[fname] = entry
349 349 elif fname in dmap and (matchalways or matchfn(fname)):
350 350 results[fname] = None
351 351
352 352 # step 3: query notable files we don't already know about
353 353 # XXX try not to iterate over the entire dmap
354 354 if normalize:
355 355 # any notable files that have changed case will already be handled
356 356 # above, so just check membership in the foldmap
357 357 notefiles = set((normalize(f, True, True) for f in notefiles
358 358 if normcase(f) not in foldmap))
359 359 visit = set((f for f in notefiles if (f not in results and matchfn(f)
360 360 and (f in dmap or not ignore(f)))))
361 361
362 362 if nonnormalset is not None and not fresh_instance:
363 363 if matchalways:
364 364 visit.update(f for f in nonnormalset if f not in results)
365 365 visit.update(f for f in copymap if f not in results)
366 366 else:
367 367 visit.update(f for f in nonnormalset
368 368 if f not in results and matchfn(f))
369 369 visit.update(f for f in copymap
370 370 if f not in results and matchfn(f))
371 371 else:
372 372 if matchalways:
373 373 visit.update(f for f, st in dmap.iteritems()
374 374 if (f not in results and
375 375 (st[2] < 0 or st[0] != 'n' or fresh_instance)))
376 376 visit.update(f for f in copymap if f not in results)
377 377 else:
378 378 visit.update(f for f, st in dmap.iteritems()
379 379 if (f not in results and
380 380 (st[2] < 0 or st[0] != 'n' or fresh_instance)
381 381 and matchfn(f)))
382 382 visit.update(f for f in copymap
383 383 if f not in results and matchfn(f))
384 384
385 audit = pathutil.pathauditor(self._root).check
385 audit = pathutil.pathauditor(self._root, cached=True).check
386 386 auditpass = [f for f in visit if audit(f)]
387 387 auditpass.sort()
388 388 auditfail = visit.difference(auditpass)
389 389 for f in auditfail:
390 390 results[f] = None
391 391
392 392 nf = iter(auditpass).next
393 393 for st in util.statfiles([join(f) for f in auditpass]):
394 394 f = nf()
395 395 if st or f in dmap:
396 396 results[f] = st
397 397
398 398 for s in subrepos:
399 399 del results[s]
400 400 del results['.hg']
401 401 return results
402 402
403 403 def overridestatus(
404 404 orig, self, node1='.', node2=None, match=None, ignored=False,
405 405 clean=False, unknown=False, listsubrepos=False):
406 406 listignored = ignored
407 407 listclean = clean
408 408 listunknown = unknown
409 409
410 410 def _cmpsets(l1, l2):
411 411 try:
412 412 if 'FSMONITOR_LOG_FILE' in encoding.environ:
413 413 fn = encoding.environ['FSMONITOR_LOG_FILE']
414 414 f = open(fn, 'wb')
415 415 else:
416 416 fn = 'fsmonitorfail.log'
417 417 f = self.opener(fn, 'wb')
418 418 except (IOError, OSError):
419 419 self.ui.warn(_('warning: unable to write to %s\n') % fn)
420 420 return
421 421
422 422 try:
423 423 for i, (s1, s2) in enumerate(zip(l1, l2)):
424 424 if set(s1) != set(s2):
425 425 f.write('sets at position %d are unequal\n' % i)
426 426 f.write('watchman returned: %s\n' % s1)
427 427 f.write('stat returned: %s\n' % s2)
428 428 finally:
429 429 f.close()
430 430
431 431 if isinstance(node1, context.changectx):
432 432 ctx1 = node1
433 433 else:
434 434 ctx1 = self[node1]
435 435 if isinstance(node2, context.changectx):
436 436 ctx2 = node2
437 437 else:
438 438 ctx2 = self[node2]
439 439
440 440 working = ctx2.rev() is None
441 441 parentworking = working and ctx1 == self['.']
442 442 match = match or matchmod.always(self.root, self.getcwd())
443 443
444 444 # Maybe we can use this opportunity to update Watchman's state.
445 445 # Mercurial uses workingcommitctx and/or memctx to represent the part of
446 446 # the workingctx that is to be committed. So don't update the state in
447 447 # that case.
448 448 # HG_PENDING is set in the environment when the dirstate is being updated
449 449 # in the middle of a transaction; we must not update our state in that
450 450 # case, or we risk forgetting about changes in the working copy.
451 451 updatestate = (parentworking and match.always() and
452 452 not isinstance(ctx2, (context.workingcommitctx,
453 453 context.memctx)) and
454 454 'HG_PENDING' not in encoding.environ)
455 455
456 456 try:
457 457 if self._fsmonitorstate.walk_on_invalidate:
458 458 # Use a short timeout to query the current clock. If that
459 459 # takes too long then we assume that the service will be slow
460 460 # to answer our query.
461 461 # walk_on_invalidate indicates that we prefer to walk the
462 462 # tree ourselves because we can ignore portions that Watchman
463 463 # cannot and we tend to be faster in the warmer buffer cache
464 464 # cases.
465 465 self._watchmanclient.settimeout(0.1)
466 466 else:
467 467 # Give Watchman more time to potentially complete its walk
468 468 # and return the initial clock. In this mode we assume that
469 469 # the filesystem will be slower than parsing a potentially
470 470 # very large Watchman result set.
471 471 self._watchmanclient.settimeout(
472 472 self._fsmonitorstate.timeout + 0.1)
473 473 startclock = self._watchmanclient.getcurrentclock()
474 474 except Exception as ex:
475 475 self._watchmanclient.clearconnection()
476 476 _handleunavailable(self.ui, self._fsmonitorstate, ex)
477 477 # boo, Watchman failed. bail
478 478 return orig(node1, node2, match, listignored, listclean,
479 479 listunknown, listsubrepos)
480 480
481 481 if updatestate:
482 482 # We need info about unknown files. This may make things slower the
483 483 # first time, but whatever.
484 484 stateunknown = True
485 485 else:
486 486 stateunknown = listunknown
487 487
488 488 if updatestate:
489 489 ps = poststatus(startclock)
490 490 self.addpostdsstatus(ps)
491 491
492 492 r = orig(node1, node2, match, listignored, listclean, stateunknown,
493 493 listsubrepos)
494 494 modified, added, removed, deleted, unknown, ignored, clean = r
495 495
496 496 if not listunknown:
497 497 unknown = []
498 498
499 499 # don't do paranoid checks if we're not going to query Watchman anyway
500 500 full = listclean or match.traversedir is not None
501 501 if self._fsmonitorstate.mode == 'paranoid' and not full:
502 502 # run status again and fall back to the old walk this time
503 503 self.dirstate._fsmonitordisable = True
504 504
505 505 # shut the UI up
506 506 quiet = self.ui.quiet
507 507 self.ui.quiet = True
508 508 fout, ferr = self.ui.fout, self.ui.ferr
509 509 self.ui.fout = self.ui.ferr = open(os.devnull, 'wb')
510 510
511 511 try:
512 512 rv2 = orig(
513 513 node1, node2, match, listignored, listclean, listunknown,
514 514 listsubrepos)
515 515 finally:
516 516 self.dirstate._fsmonitordisable = False
517 517 self.ui.quiet = quiet
518 518 self.ui.fout, self.ui.ferr = fout, ferr
519 519
520 520 # clean isn't tested since it's set to True above
521 521 _cmpsets([modified, added, removed, deleted, unknown, ignored, clean],
522 522 rv2)
523 523 modified, added, removed, deleted, unknown, ignored, clean = rv2
524 524
525 525 return scmutil.status(
526 526 modified, added, removed, deleted, unknown, ignored, clean)
527 527
528 528 class poststatus(object):
529 529 def __init__(self, startclock):
530 530 self._startclock = startclock
531 531
532 532 def __call__(self, wctx, status):
533 533 clock = wctx.repo()._fsmonitorstate.getlastclock() or self._startclock
534 534 hashignore = _hashignore(wctx.repo().dirstate._ignore)
535 535 notefiles = (status.modified + status.added + status.removed +
536 536 status.deleted + status.unknown)
537 537 wctx.repo()._fsmonitorstate.set(clock, hashignore, notefiles)
538 538
539 539 def makedirstate(repo, dirstate):
540 540 class fsmonitordirstate(dirstate.__class__):
541 541 def _fsmonitorinit(self, fsmonitorstate, watchmanclient):
542 542 # _fsmonitordisable is used in paranoid mode
543 543 self._fsmonitordisable = False
544 544 self._fsmonitorstate = fsmonitorstate
545 545 self._watchmanclient = watchmanclient
546 546
547 547 def walk(self, *args, **kwargs):
548 548 orig = super(fsmonitordirstate, self).walk
549 549 if self._fsmonitordisable:
550 550 return orig(*args, **kwargs)
551 551 return overridewalk(orig, self, *args, **kwargs)
552 552
553 553 def rebuild(self, *args, **kwargs):
554 554 self._fsmonitorstate.invalidate()
555 555 return super(fsmonitordirstate, self).rebuild(*args, **kwargs)
556 556
557 557 def invalidate(self, *args, **kwargs):
558 558 self._fsmonitorstate.invalidate()
559 559 return super(fsmonitordirstate, self).invalidate(*args, **kwargs)
560 560
561 561 dirstate.__class__ = fsmonitordirstate
562 562 dirstate._fsmonitorinit(repo._fsmonitorstate, repo._watchmanclient)
563 563
564 564 def wrapdirstate(orig, self):
565 565 ds = orig(self)
566 566 # only override the dirstate when Watchman is available for the repo
567 567 if util.safehasattr(self, '_fsmonitorstate'):
568 568 makedirstate(self, ds)
569 569 return ds
570 570
571 571 def extsetup(ui):
572 572 extensions.wrapfilecache(
573 573 localrepo.localrepository, 'dirstate', wrapdirstate)
574 574 if pycompat.sysplatform == 'darwin':
575 575 # An assist for avoiding the dangling-symlink fsevents bug
576 576 extensions.wrapfunction(os, 'symlink', wrapsymlink)
577 577
578 578 extensions.wrapfunction(merge, 'update', wrapupdate)
579 579
580 580 def wrapsymlink(orig, source, link_name):
581 581 ''' if we create a dangling symlink, also touch the parent dir
582 582 to encourage fsevents notifications to work more correctly '''
583 583 try:
584 584 return orig(source, link_name)
585 585 finally:
586 586 try:
587 587 os.utime(os.path.dirname(link_name), None)
588 588 except OSError:
589 589 pass
590 590
591 591 class state_update(object):
592 592 ''' This context manager is responsible for dispatching the state-enter
593 593 and state-leave signals to the watchman service '''
594 594
595 595 def __init__(self, repo, node, distance, partial):
596 596 self.repo = repo
597 597 self.node = node
598 598 self.distance = distance
599 599 self.partial = partial
600 600 self._lock = None
601 601 self.need_leave = False
602 602
603 603 def __enter__(self):
604 604 # We explicitly need to take a lock here, before we proceed to update
605 605 # watchman about the update operation, so that we don't race with
606 606 # some other actor. merge.update is going to take the wlock almost
607 607 # immediately anyway, so this is effectively extending the lock
608 608 # around a couple of short sanity checks.
609 609 self._lock = self.repo.wlock()
610 610 self.need_leave = self._state('state-enter')
611 611 return self
612 612
613 613 def __exit__(self, type_, value, tb):
614 614 try:
615 615 if self.need_leave:
616 616 status = 'ok' if type_ is None else 'failed'
617 617 self._state('state-leave', status=status)
618 618 finally:
619 619 if self._lock:
620 620 self._lock.release()
621 621
622 622 def _state(self, cmd, status='ok'):
623 623 if not util.safehasattr(self.repo, '_watchmanclient'):
624 624 return False
625 625 try:
626 626 commithash = self.repo[self.node].hex()
627 627 self.repo._watchmanclient.command(cmd, {
628 628 'name': 'hg.update',
629 629 'metadata': {
630 630 # the target revision
631 631 'rev': commithash,
632 632 # approximate number of commits between current and target
633 633 'distance': self.distance,
634 634 # success/failure (only really meaningful for state-leave)
635 635 'status': status,
636 636 # whether the working copy parent is changing
637 637 'partial': self.partial,
638 638 }})
639 639 return True
640 640 except Exception as e:
641 641 # Swallow any errors; fire and forget
642 642 self.repo.ui.log(
643 643 'watchman', 'Exception %s while running %s\n', e, cmd)
644 644 return False
645 645
646 646 # Bracket working copy updates with calls to the watchman state-enter
647 647 # and state-leave commands. This allows clients to perform more intelligent
648 648 # settling during bulk file change scenarios
649 649 # https://facebook.github.io/watchman/docs/cmd/subscribe.html#advanced-settling
650 650 def wrapupdate(orig, repo, node, branchmerge, force, ancestor=None,
651 651 mergeancestor=False, labels=None, matcher=None, **kwargs):
652 652
653 653 distance = 0
654 654 partial = True
655 655 if matcher is None or matcher.always():
656 656 partial = False
657 657 wc = repo[None]
658 658 parents = wc.parents()
659 659 if len(parents) == 2:
660 660 anc = repo.changelog.ancestor(parents[0].node(), parents[1].node())
661 661 ancrev = repo[anc].rev()
662 662 distance = abs(repo[node].rev() - ancrev)
663 663 elif len(parents) == 1:
664 664 distance = abs(repo[node].rev() - parents[0].rev())
665 665
666 666 with state_update(repo, node, distance, partial):
667 667 return orig(
668 668 repo, node, branchmerge, force, ancestor, mergeancestor,
669 669 labels, matcher, **kwargs)
670 670
671 671 def reposetup(ui, repo):
672 672 # We don't work with largefiles or inotify
673 673 exts = extensions.enabled()
674 674 for ext in _blacklist:
675 675 if ext in exts:
676 676 ui.warn(_('The fsmonitor extension is incompatible with the %s '
677 677 'extension and has been disabled.\n') % ext)
678 678 return
679 679
680 680 if repo.local():
681 681 # We don't work with subrepos either.
682 682 #
683 683 # if repo[None].substate can cause a dirstate parse, which is too
684 684 # slow. Instead, look for a file called hgsubstate,
685 685 if repo.wvfs.exists('.hgsubstate') or repo.wvfs.exists('.hgsub'):
686 686 return
687 687
688 688 fsmonitorstate = state.state(repo)
689 689 if fsmonitorstate.mode == 'off':
690 690 return
691 691
692 692 try:
693 693 client = watchmanclient.client(repo)
694 694 except Exception as ex:
695 695 _handleunavailable(ui, fsmonitorstate, ex)
696 696 return
697 697
698 698 repo._fsmonitorstate = fsmonitorstate
699 699 repo._watchmanclient = client
700 700
701 701 dirstate, cached = localrepo.isfilecached(repo, 'dirstate')
702 702 if cached:
703 703 # at this point since fsmonitorstate wasn't present,
704 704 # repo.dirstate is not a fsmonitordirstate
705 705 makedirstate(repo, dirstate)
706 706
707 707 class fsmonitorrepo(repo.__class__):
708 708 def status(self, *args, **kwargs):
709 709 orig = super(fsmonitorrepo, self).status
710 710 return overridestatus(orig, self, *args, **kwargs)
711 711
712 712 repo.__class__ = fsmonitorrepo
@@ -1,3762 +1,3762 b''
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import itertools
12 12 import os
13 13 import re
14 14 import tempfile
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 hex,
19 19 nullid,
20 20 nullrev,
21 21 short,
22 22 )
23 23
24 24 from . import (
25 25 bookmarks,
26 26 changelog,
27 27 copies,
28 28 crecord as crecordmod,
29 29 dirstateguard,
30 30 encoding,
31 31 error,
32 32 formatter,
33 33 graphmod,
34 34 match as matchmod,
35 35 obsolete,
36 36 patch,
37 37 pathutil,
38 38 phases,
39 39 pycompat,
40 40 registrar,
41 41 revlog,
42 42 revset,
43 43 scmutil,
44 44 smartset,
45 45 templatekw,
46 46 templater,
47 47 util,
48 48 vfs as vfsmod,
49 49 )
# short alias for the BytesIO/StringIO factory used throughout this module
stringio = util.stringio

# templates of common command options
#
# each entry is a fancyopts tuple:
#   (short flag, long flag, default value, help text[, metavariable])

dryrunopts = [
    ('n', 'dry-run', None,
     _('do not perform actions, just print output')),
]

remoteopts = [
    ('e', 'ssh', '',
     _('specify ssh command to use'), _('CMD')),
    ('', 'remotecmd', '',
     _('specify hg command to run on the remote side'), _('CMD')),
    ('', 'insecure', None,
     _('do not verify server certificate (ignoring web.cacerts config)')),
]

walkopts = [
    ('I', 'include', [],
     _('include names matching the given patterns'), _('PATTERN')),
    ('X', 'exclude', [],
     _('exclude names matching the given patterns'), _('PATTERN')),
]

commitopts = [
    ('m', 'message', '',
     _('use text as commit message'), _('TEXT')),
    ('l', 'logfile', '',
     _('read commit message from file'), _('FILE')),
]

commitopts2 = [
    ('d', 'date', '',
     _('record the specified date as commit date'), _('DATE')),
    ('u', 'user', '',
     _('record the specified user as committer'), _('USER')),
]

# hidden for now
formatteropts = [
    ('T', 'template', '',
     _('display with template (EXPERIMENTAL)'), _('TEMPLATE')),
]

templateopts = [
    ('', 'style', '',
     _('display using template map file (DEPRECATED)'), _('STYLE')),
    ('T', 'template', '',
     _('display with template'), _('TEMPLATE')),
]

logopts = [
    ('p', 'patch', None, _('show patch')),
    ('g', 'git', None, _('use git extended diff format')),
    ('l', 'limit', '',
     _('limit number of changes displayed'), _('NUM')),
    ('M', 'no-merges', None, _('do not show merges')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
    ('G', 'graph', None, _("show the revision DAG")),
] + templateopts

diffopts = [
    ('a', 'text', None, _('treat all files as text')),
    ('g', 'git', None, _('use git extended diff format')),
    ('', 'binary', None, _('generate binary diffs in git mode (default)')),
    ('', 'nodates', None, _('omit dates from diff headers'))
]

diffwsopts = [
    ('w', 'ignore-all-space', None,
     _('ignore white space when comparing lines')),
    ('b', 'ignore-space-change', None,
     _('ignore changes in the amount of white space')),
    ('B', 'ignore-blank-lines', None,
     _('ignore changes whose lines are all blank')),
]

diffopts2 = [
    ('', 'noprefix', None, _('omit a/ and b/ prefixes from filenames')),
    ('p', 'show-function', None, _('show which function each change is in')),
    ('', 'reverse', None, _('produce a diff that undoes the changes')),
] + diffwsopts + [
    ('U', 'unified', '',
     _('number of lines of context to show'), _('NUM')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
    ('', 'root', '', _('produce diffs relative to subdirectory'), _('DIR')),
]

mergetoolopts = [
    ('t', 'tool', '', _('specify merge tool')),
]

similarityopts = [
    ('s', 'similarity', '',
     _('guess renamed files by similarity (0<=s<=100)'), _('SIMILARITY'))
]

subrepoopts = [
    ('S', 'subrepos', None,
     _('recurse into subrepositories'))
]

debugrevlogopts = [
    ('c', 'changelog', False, _('open changelog')),
    ('m', 'manifest', False, _('open manifest')),
    ('', 'dir', '', _('open directory manifest')),
]

# special string such that everything below this line will be ignored in the
# editor text
_linebelow = "^HG: ------------------------ >8 ------------------------$"
162 162
def ishunk(x):
    """Return True if *x* is a record/crecord hunk object."""
    return isinstance(x, (crecordmod.uihunk, patch.recordhunk))
166 166
def newandmodified(chunks, originalchunks):
    """Return the set of filenames whose hunks introduce a new file and
    do not appear in *originalchunks* (i.e. were added while recording)."""
    result = set()
    for c in chunks:
        isnewfilehunk = ishunk(c) and c.header.isnewfile()
        if isnewfilehunk and c not in originalchunks:
            result.add(c.header.filename())
    return result
174 174
def parsealiases(cmd):
    """Split a command-table key like '^log|history' into its alias names."""
    stripped = cmd.lstrip("^")
    return stripped.split("|")
177 177
def setupwrapcolorwrite(ui):
    """Wrap ui.write so diff output can be labeled/colorized.

    Returns the original write method so the caller can restore it.
    """
    originalwrite = ui.write

    def labeledwrite(*args, **kw):
        baselabel = kw.pop('label', '')
        for chunk, extralabel in patch.difflabel(lambda: args):
            originalwrite(chunk, label=baselabel + extralabel)

    setattr(ui, 'write', labeledwrite)
    return originalwrite
190 190
def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
    """Dispatch hunk filtering to the curses UI or the plain-text prompt."""
    if not usecurses:
        return patch.filterpatch(ui, originalhunks, operation)

    if testfile:
        chunkselector = crecordmod.testdecorator(testfile,
                                                 crecordmod.testchunkselector)
    else:
        chunkselector = crecordmod.chunkselector

    return crecordmod.filterpatch(ui, originalhunks, chunkselector, operation)
203 203
def recordfilter(ui, originalhunks, operation=None):
    """Prompt the user to filter *originalhunks*; return (chunks, opts).

    *operation* is used to build ui messages indicating what kind of
    filtering is going on: reverting, committing, shelving, etc.
    (see patch.filterpatch).
    """
    usecurses = crecordmod.checkcurses(ui)
    testfile = ui.config('experimental', 'crecordtest')
    oldwrite = setupwrapcolorwrite(ui)
    try:
        return filterchunks(ui, originalhunks, usecurses, testfile, operation)
    finally:
        # always restore ui.write, even if chunk selection raised
        ui.write = oldwrite
220 220
def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
             filterfn, *pats, **opts):
    """Interactively select changes, then commit them via *commitfunc*.

    *filterfn* is the hunk-selection UI (e.g. recordfilter); *cmdsuggest*
    is the command name to suggest when running non-interactively;
    *backupall* backs up every changed file rather than only those
    touched by the selected hunks. Aborts when the terminal is not
    interactive or no username is configured.
    """
    from . import merge as mergemod
    opts = pycompat.byteskwargs(opts)
    if not ui.interactive():
        if cmdsuggest:
            msg = _('running non-interactively, use %s instead') % cmdsuggest
        else:
            msg = _('running non-interactively')
        raise error.Abort(msg)

    # make sure username is set before going interactive
    if not opts.get('user'):
        ui.username() # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """

        checkunfinished(repo, commit=True)
        wctx = repo[None]
        merge = len(wctx.parents()) > 1
        if merge:
            raise error.Abort(_('cannot partially commit a merge '
                                '(use "hg commit" instead)'))

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        force = opts.get('force')
        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        status = repo.status(match=match)
        if not force:
            repo.checkcommitpatterns(wctx, vdirs, match, status, fail)
        # diff options chosen so parsepatch/internalpatch round-trip cleanly
        diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        diffopts.showfunc = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        originalchunks = patch.parsepatch(originaldiff)

        # 1. filter patch, since we are intending to apply subset of it
        try:
            chunks, newopts = filterfn(ui, originalchunks)
        except patch.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)
        opts.update(newopts)

        # We need to keep a backup of files that have been newly added and
        # modified during the recording process because there is a previous
        # version without the edit in the workdir
        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        contenders = set()
        for h in chunks:
            try:
                contenders.update(set(h.files()))
            except AttributeError:
                # headers have files(); bare hunks do not
                pass

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_('no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            tobackup = [f for f in newfiles if f in modified or f in \
                            newlyaddedandmodifiedfiles]
        backups = {}
        if tobackup:
            backupdir = repo.vfs.join('record-backups')
            try:
                os.mkdir(backupdir)
            except OSError as err:
                if err.errno != errno.EEXIST:
                    raise
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
                                               dir=backupdir)
                os.close(fd)
                ui.debug('backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname, copystat=True)
                backups[f] = tmpname

            fp = stringio()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # 2.5 optionally review / modify patch in text editor
            if opts.get('review', False):
                patchtext = (crecordmod.diffhelptext
                             + crecordmod.patchhelptext
                             + fp.read())
                reviewedpatch = ui.edit(patchtext, "",
                                        extra={"suffix": ".diff"},
                                        repopath=repo.path)
                fp.truncate(0)
                fp.write(reviewedpatch)
                fp.seek(0)

            [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
            # 3a. apply filtered patch to clean repo  (clean)
            if backups:
                # Equivalent to hg.revert
                m = scmutil.matchfiles(repo, backups.keys())
                mergemod.update(repo, repo.dirstate.p1(),
                                False, True, matcher=m)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug('applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except patch.PatchError as err:
                    raise error.Abort(str(err))
            del fp

            # 4. We prepared working directory according to filtered
            #    patch. Now is the time to delegate the job to
            #    commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **opts)
        finally:
            # 5. finally restore backed-up files
            try:
                dirstate = repo.dirstate
                for realname, tmpname in backups.iteritems():
                    ui.debug('restoring %r to %r\n' % (tmpname, realname))

                    if dirstate[realname] == 'n':
                        # without normallookup, restoring timestamp
                        # may cause partially committed files
                        # to be treated as unmodified
                        dirstate.normallookup(realname)

                    # copystat=True here and above are a hack to trick any
                    # editors that have f open that we haven't modified them.
                    #
                    # Also note that this racy as an editor could notice the
                    # file's mtime before we've finished writing it.
                    util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                # best-effort cleanup: never let restore errors mask the
                # commit result
                pass

    def recordinwlock(ui, repo, message, match, opts):
        with repo.wlock():
            return recordfunc(ui, repo, message, match, opts)

    return commit(ui, repo, recordinwlock, pats, opts)
403 403
def tersestatus(root, statlist, status, ignorefn, ignore):
    """
    Returns a list of statuses with directory collapsed if all the files in the
    directory has the same status.

    *root* is the working-directory root, *statlist* the per-status file
    lists (indexed as in `indexes` below), *status* the status letters the
    user asked to collapse, *ignorefn* the ignore matcher and *ignore*
    whether ignored files are being shown.
    """

    def numfiles(dirname):
        """
        Calculates the number of tracked files in a given directory which also
        includes files which were removed or deleted. Considers ignored files
        if ignore argument is True or 'i' is present in status argument.
        """
        # membership test (not .get truthiness): a cached count of 0 is a
        # valid cache hit and must not be recomputed on every call
        if dirname in lencache:
            return lencache[dirname]
        if 'i' in status or ignore:
            def match(localpath):
                absolutepath = os.path.join(root, localpath)
                if os.path.isdir(absolutepath) and isemptydir(absolutepath):
                    return True
                return False
        else:
            def match(localpath):
                # there can be directory whose all the files are ignored and
                # hence the drectory should also be ignored while counting
                # number of files or subdirs in it's parent directory. This
                # checks the same.
                # XXX: We need a better logic here.
                if os.path.isdir(os.path.join(root, localpath)):
                    return isignoreddir(localpath)
                else:
                    # XXX: there can be files which have the ignored pattern but
                    # are not ignored. That leads to bug in counting number of
                    # tracked files in the directory.
                    return ignorefn(localpath)
        lendir = 0
        abspath = os.path.join(root, dirname)
        # There might be cases when a directory does not exists as the whole
        # directory can be removed and/or deleted.
        try:
            for f in os.listdir(abspath):
                localpath = os.path.join(dirname, f)
                if not match(localpath):
                    lendir += 1
        except OSError:
            pass
        # removed/deleted entries are not on disk; count them from absentdir
        lendir += len(absentdir.get(dirname, []))
        lencache[dirname] = lendir
        return lendir

    def isemptydir(abspath):
        """
        Check whether a directory is empty or not, i.e. there is no files in the
        directory and all its subdirectories.
        """
        for f in os.listdir(abspath):
            fullpath = os.path.join(abspath, f)
            if os.path.isdir(fullpath):
                # recursion here
                ret = isemptydir(fullpath)
                if not ret:
                    return False
            else:
                return False
        return True

    def isignoreddir(localpath):
        """Return True if `localpath` directory is ignored or contains only
        ignored files and should hence be considered ignored.
        """
        dirpath = os.path.join(root, localpath)
        if ignorefn(dirpath):
            return True
        for f in os.listdir(dirpath):
            filepath = os.path.join(dirpath, f)
            if os.path.isdir(filepath):
                # recursion here
                ret = isignoreddir(os.path.join(localpath, f))
                if not ret:
                    return False
            else:
                if not ignorefn(os.path.join(localpath, f)):
                    return False
        return True

    def absentones(removedfiles, missingfiles):
        """
        Returns a dictionary of directories with files in it which are either
        removed or missing (deleted) in them.
        """
        absentdir = {}
        absentfiles = removedfiles + missingfiles
        while absentfiles:
            f = absentfiles.pop()
            par = os.path.dirname(f)
            if par == '':
                continue
            # we need to store files rather than number of files as some files
            # or subdirectories in a directory can be counted twice. This is
            # also we have used sets here.
            try:
                absentdir[par].add(f)
            except KeyError:
                absentdir[par] = set([f])
            # process the parent chain too, so every ancestor directory
            # knows about its absent entries
            absentfiles.append(par)
        return absentdir

    indexes = {'m': 0, 'a': 1, 'r': 2, 'd': 3, 'u': 4, 'i': 5, 'c': 6}
    # get a dictonary of directories and files which are missing as os.listdir()
    # won't be able to list them.
    absentdir = absentones(statlist[2], statlist[3])
    finalrs = [[]] * len(indexes)
    didsomethingchanged = False
    # dictionary to store number of files and subdir in a directory so that we
    # don't compute that again.
    lencache = {}

    for st in pycompat.bytestr(status):

        try:
            ind = indexes[st]
        except KeyError:
            # TODO: Need a better error message here
            raise error.Abort("'%s' not recognized" % st)

        sfiles = statlist[ind]
        if not sfiles:
            continue
        pardict = {}
        for a in sfiles:
            par = os.path.dirname(a)
            pardict.setdefault(par, []).append(a)

        rs = []
        newls = []
        for par, files in pardict.iteritems():
            lenpar = numfiles(par)
            if lenpar == len(files):
                newls.append(par)

        if not newls:
            continue

        while newls:
            newel = newls.pop()
            if newel == '':
                continue
            parn = os.path.dirname(newel)
            pardict[newel] = []
            # Adding pycompat.ossep as newel is a directory.
            pardict.setdefault(parn, []).append(newel + pycompat.ossep)
            lenpar = numfiles(parn)
            if lenpar == len(pardict[parn]):
                newls.append(parn)

        # dict.values() for Py3 compatibility
        for files in pardict.values():
            rs.extend(files)

        rs.sort()
        finalrs[ind] = rs
        didsomethingchanged = True

    # If nothing is changed, make sure the order of files is preserved.
    if not didsomethingchanged:
        return statlist

    for x in xrange(len(indexes)):
        if not finalrs[x]:
            finalrs[x] = statlist[x]

    return finalrs
575 575
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    choice = {}
    debugchoice = {}

    # short-circuit exact matches, "log" alias beats "^log|history"
    keys = [cmd] if cmd in table else table.keys()

    allcmds = []
    for entry in keys:
        aliases = parsealiases(entry)
        allcmds.extend(aliases)
        if cmd in aliases:
            found = cmd
        elif not strict:
            # prefix match against any alias
            found = next((a for a in aliases if a.startswith(cmd)), None)
        else:
            found = None
        if found is None:
            continue
        isdebug = aliases[0].startswith("debug") or found.startswith("debug")
        bucket = debugchoice if isdebug else choice
        bucket[found] = (aliases, table[entry])

    if not choice and debugchoice:
        choice = debugchoice

    return choice, allcmds
613 613
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    choice, allcmds = findpossible(cmd, table, strict)

    if cmd in choice:
        return choice[cmd]

    matches = sorted(choice)
    if len(matches) > 1:
        raise error.AmbiguousCommand(cmd, matches)
    if matches:
        return choice[matches[0]]

    raise error.UnknownCommand(cmd, allcmds)
629 629
def findrepo(p):
    """Walk up from *p* and return the first directory containing '.hg',
    or None if the filesystem root is reached without finding one."""
    while True:
        if os.path.isdir(os.path.join(p, ".hg")):
            return p
        parent = os.path.dirname(p)
        if parent == p:
            # reached the root without finding a repository
            return None
        p = parent
637 637
def bailifchanged(repo, merge=True, hint=None):
    """ enforce the precondition that working directory must be clean.

    'merge' can be set to false if a pending uncommitted merge should be
    ignored (such as when 'update --check' runs).

    'hint' is the usual hint given to Abort exception.
    """

    if merge and repo.dirstate.p2() != nullid:
        raise error.Abort(_('outstanding uncommitted merge'), hint=hint)
    # modified, added, removed or deleted files -> dirty working directory
    if any(repo.status()[:4]):
        raise error.Abort(_('uncommitted changes'), hint=hint)
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        wctx.sub(subpath).bailifchanged(hint=hint)
655 655
def logmessage(ui, opts):
    """ get the log message according to -m and -l option """
    message = opts.get('message')
    logfile = opts.get('logfile')

    if message and logfile:
        raise error.Abort(_('options --message and --logfile are mutually '
                            'exclusive'))
    if message or not logfile:
        return message
    # no -m given: read the message from the log file (or stdin)
    try:
        if isstdiofilename(logfile):
            return ui.fin.read()
        return '\n'.join(util.readfile(logfile).splitlines())
    except IOError as inst:
        raise error.Abort(_("can't read commit message '%s': %s") %
                          (logfile, inst.strerror))
674 674
def mergeeditform(ctxorbool, baseformname):
    """return appropriate editform name (referencing a committemplate)

    'ctxorbool' is either a ctx to be committed, or a bool indicating whether
    merging is committed.

    This returns baseformname with '.merge' appended if it is a merge,
    otherwise '.normal' is appended.
    """
    if isinstance(ctxorbool, bool):
        ismerge = ctxorbool
    else:
        ismerge = len(ctxorbool.parents()) > 1
    return baseformname + (".merge" if ismerge else ".normal")
691 691
def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
                    editform='', **opts):
    """get appropriate commit message editor according to '--edit' option

    'finishdesc' is a function to be called with edited commit message
    (= 'description' of the new changeset) just after editing, but
    before checking empty-ness. It should return actual text to be
    stored into history. This allows to change description before
    storing.

    'extramsg' is a extra message to be shown in the editor instead of
    'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
    is automatically added.

    'editform' is a dot-separated list of names, to distinguish
    the purpose of commit text editing.

    'getcommiteditor' returns 'commitforceeditor' regardless of
    'edit', if one of 'finishdesc' or 'extramsg' is specified, because
    they are specific for usage in MQ.
    """
    if edit or finishdesc or extramsg:
        def forcededitor(r, c, s):
            return commitforceeditor(r, c, s, finishdesc=finishdesc,
                                     extramsg=extramsg, editform=editform)
        return forcededitor
    if editform:
        def formeditor(r, c, s):
            return commiteditor(r, c, s, editform=editform)
        return formeditor
    return commiteditor
722 722
def loglimit(opts):
    """get the log limit according to option -l/--limit"""
    raw = opts.get('limit')
    if not raw:
        # no (or empty) --limit: unlimited
        return None
    try:
        limit = int(raw)
    except ValueError:
        raise error.Abort(_('limit must be a positive integer'))
    if limit <= 0:
        raise error.Abort(_('limit must be positive'))
    return limit
736 736
def makefilename(repo, pat, node, desc=None,
                 total=None, seqno=None, revwidth=None, pathname=None):
    """Expand %-escapes in the output filename pattern *pat*.

    Supported escapes: %% literal, %b repo basename, and — when the
    corresponding argument is given — %H/%h/%R/%r/%m (node/rev/desc),
    %N/%n (total/sequence number), %s/%d/%p (pathname parts).
    Raises error.Abort on an unknown escape.
    """
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
    }
    if node:
        expander.update({
            'H': lambda: hex(node),
            'R': lambda: str(repo.changelog.rev(node)),
            'h': lambda: short(node),
            'm': lambda: re.sub('[^\w]', '_', str(desc)),
            'r': lambda: str(repo.changelog.rev(node)).zfill(revwidth or 0),
        })
    if total is not None:
        expander['N'] = lambda: str(total)
    if seqno is not None:
        expander['n'] = lambda: str(seqno)
    if total is not None and seqno is not None:
        # zero-pad the sequence number to the width of the total
        expander['n'] = lambda: str(seqno).zfill(len(str(total)))
    if pathname is not None:
        expander['s'] = lambda: os.path.basename(pathname)
        expander['d'] = lambda: os.path.dirname(pathname) or '.'
        expander['p'] = lambda: pathname

    try:
        pieces = []
        i = 0
        patlen = len(pat)
        while i < patlen:
            c = pat[i:i + 1]
            if c == '%':
                i += 1
                c = expander[pat[i:i + 1]]()
            pieces.append(c)
            i += 1
        return ''.join(pieces)
    except KeyError as inst:
        raise error.Abort(_("invalid format spec '%%%s' in output filename") %
                          inst.args[0])
782 782
def isstdiofilename(pat):
    """True if the given pat looks like a filename denoting stdin/stdout"""
    if not pat:
        return True
    return pat == '-'
786 786
class _unclosablefile(object):
    """Proxy for a file object whose close() and context-manager exit are
    no-ops; everything else is forwarded to the wrapped object (used to
    hand out stdio streams that callers must not close)."""

    def __init__(self, fp):
        self._fp = fp

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # deliberately do not close the underlying stream
        pass

    def __iter__(self):
        return iter(self._fp)

    def __getattr__(self, attr):
        # delegate everything else (read, write, seek, ...) to the wrapped fp
        return getattr(self._fp, attr)

    def close(self):
        # never close stdio
        pass
805 805
def makefileobj(repo, pat, node=None, desc=None, total=None,
                seqno=None, revwidth=None, mode='wb', modemap=None,
                pathname=None):
    """Open the output/input file described by pattern *pat*.

    '-' (or an empty pattern) yields an unclosable wrapper around stdio;
    otherwise the %-expanded filename is opened, with *modemap* letting
    repeated writes to the same file switch from 'wb' to 'ab'.
    """
    if isstdiofilename(pat):
        writable = mode not in ('r', 'rb')
        stream = repo.ui.fout if writable else repo.ui.fin
        return _unclosablefile(stream)

    fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
    if modemap is not None:
        mode = modemap.get(fn, mode)
        if mode == 'wb':
            # subsequent opens of the same file append instead of truncating
            modemap[fn] = 'ab'
    return open(fn, mode)
824 824
def openrevlog(repo, cmd, file_, opts):
    """opens the changelog, manifest, a filelog or a given revlog

    Exactly one source is chosen from opts ('changelog', 'manifest',
    'dir') or from *file_*; conflicting/insufficient combinations abort.
    As a last resort *file_* is opened as a raw revlog from the cwd.
    """
    cl = opts['changelog']
    mf = opts['manifest']
    dir = opts['dir']
    msg = None
    if cl and mf:
        msg = _('cannot specify --changelog and --manifest at the same time')
    elif cl and dir:
        msg = _('cannot specify --changelog and --dir at the same time')
    elif cl or mf or dir:
        if file_:
            msg = _('cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _('cannot specify --changelog or --manifest or --dir '
                    'without a repository')
    if msg:
        raise error.Abort(msg)

    r = None
    if repo:
        if cl:
            r = repo.unfiltered().changelog
        elif dir:
            if 'treemanifest' not in repo.requirements:
                raise error.Abort(_("--dir can only be used on repos with "
                                   "treemanifest enabled"))
            dirlog = repo.manifestlog._revlog.dirlog(dir)
            if len(dirlog):
                r = dirlog
        elif mf:
            r = repo.manifestlog._revlog
        elif file_:
            filelog = repo.file(file_)
            if len(filelog):
                r = filelog
    if not r:
        if not file_:
            raise error.CommandError(cmd, _('invalid arguments'))
        if not os.path.isfile(file_):
            raise error.Abort(_("revlog '%s' not found") % file_)
        # fall back to a bare on-disk revlog; file_[:-2] + ".i" assumes the
        # argument ends in a two-char suffix like ".i"/".d" — presumably
        # callers guarantee this (NOTE(review): confirm against callers)
        r = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False),
                          file_[:-2] + ".i")
    return r
869 869
870 870 def copy(ui, repo, pats, opts, rename=False):
871 871 # called with the repo lock held
872 872 #
873 873 # hgsep => pathname that uses "/" to separate directories
874 874 # ossep => pathname that uses os.sep to separate directories
875 875 cwd = repo.getcwd()
876 876 targets = {}
877 877 after = opts.get("after")
878 878 dryrun = opts.get("dry_run")
879 879 wctx = repo[None]
880 880
881 881 def walkpat(pat):
882 882 srcs = []
883 883 if after:
884 884 badstates = '?'
885 885 else:
886 886 badstates = '?r'
887 887 m = scmutil.match(wctx, [pat], opts, globbed=True)
888 888 for abs in wctx.walk(m):
889 889 state = repo.dirstate[abs]
890 890 rel = m.rel(abs)
891 891 exact = m.exact(abs)
892 892 if state in badstates:
893 893 if exact and state == '?':
894 894 ui.warn(_('%s: not copying - file is not managed\n') % rel)
895 895 if exact and state == 'r':
896 896 ui.warn(_('%s: not copying - file has been marked for'
897 897 ' remove\n') % rel)
898 898 continue
899 899 # abs: hgsep
900 900 # rel: ossep
901 901 srcs.append((abs, rel, exact))
902 902 return srcs
903 903
904 904 # abssrc: hgsep
905 905 # relsrc: ossep
906 906 # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        """Copy or rename a single file to otarget, updating the dirstate.

        abssrc is repo-relative (hgsep); relsrc and otarget are os-native
        (ossep).  Returns True to report a failure; all other outcomes
        (including warned-about skips) return None.
        """
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if '/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit('/', 1)
            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions (two sources mapping to the same target)
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            if (repo.dirstate.normalize(abssrc) ==
                repo.dirstate.normalize(abstarget)):
                # source and target only differ in case; this is only a
                # legitimate operation for a rename
                if not rename:
                    ui.warn(_("%s: can't copy - same file\n") % reltarget)
                    return
                exists = False
                samefile = True

        # refuse to clobber an existing or tracked target without --force
        if not after and exists or after and state in 'mn':
            if not opts['force']:
                if state in 'mn':
                    msg = _('%s: not overwriting - file already committed\n')
                    if after:
                        flags = '--after --force'
                    else:
                        flags = '--force'
                    if rename:
                        hint = _('(hg rename %s to replace the file by '
                                 'recording a rename)\n') % flags
                    else:
                        hint = _('(hg copy %s to replace the file by '
                                 'recording a copy)\n') % flags
                else:
                    msg = _('%s: not overwriting - file exists\n')
                    if rename:
                        hint = _('(hg rename --after to record the rename)\n')
                    else:
                        hint = _('(hg copy --after to record the copy)\n')
                ui.warn(msg % reltarget)
                ui.warn(hint)
                return

        if after:
            # --after only records an operation that already happened on disk
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    # perform the case-only rename through a temporary name
                    tmp = target + "~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    util.copyfile(src, target)
                srcexists = True
            except IOError as inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working directory\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, inst.strerror))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                             dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                repo.wvfs.unlinkpath(abssrc)
            wctx.forget([abssrc])
1012 1012
1013 1013 # pat: ossep
1014 1014 # dest ossep
1015 1015 # srcs: list of (hgsep, hgsep, ossep, bool)
1016 1016 # return: function that takes hgsep and returns ossep
1017 1017 def targetpathfn(pat, dest, srcs):
1018 1018 if os.path.isdir(pat):
1019 1019 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1020 1020 abspfx = util.localpath(abspfx)
1021 1021 if destdirexists:
1022 1022 striplen = len(os.path.split(abspfx)[0])
1023 1023 else:
1024 1024 striplen = len(abspfx)
1025 1025 if striplen:
1026 1026 striplen += len(pycompat.ossep)
1027 1027 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
1028 1028 elif destdirexists:
1029 1029 res = lambda p: os.path.join(dest,
1030 1030 os.path.basename(util.localpath(p)))
1031 1031 else:
1032 1032 res = lambda p: dest
1033 1033 return res
1034 1034
1035 1035 # pat: ossep
1036 1036 # dest ossep
1037 1037 # srcs: list of (hgsep, hgsep, ossep, bool)
1038 1038 # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        """Return a function mapping an hgsep source path to its ossep
        target path, guessing how an already-performed copy laid out the
        destination (the --after variant)."""
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # count how many sources already exist at the target
                    # location implied by stripping `striplen` characters
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(pycompat.ossep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    # both layouts are plausible; keep whichever strip
                    # length matches more files already on disk
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(pycompat.ossep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res
1078 1078
    pats = scmutil.expandpats(pats)
    if not pats:
        raise error.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise error.Abort(_('no destination specified'))
    # the last pattern is the destination; everything before it is a source
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise error.Abort(_('with multiple sources, destination must be an '
                                'existing directory'))
        if util.endswithsep(dest):
            raise error.Abort(_('destination %s is not a directory') % dest)

    # --after uses the guessing variant since the copy already happened
    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise error.Abort(_('no files to copy'))

    # copyfile() returns True on failure
    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors != 0
1115 1115
## facility to let extensions process additional data into an import patch
# list of identifiers to be executed in order
extrapreimport = [] # run before commit
extrapostimport = [] # run after commit
# mapping from identifier to the actual import function
#
# 'preimport' are run before the commit is made and are provided the following
# arguments:
# - repo: the localrepository instance,
# - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
# - extra: the future extra dictionary of the changeset, please mutate it,
# - opts: the import options.
# XXX ideally, we would just pass a ctx ready to be computed, that would allow
# mutation of in memory commit and more. Feel free to rework the code to get
# there.
extrapreimportmap = {}
# 'postimport' are run after the commit is made and are provided the following
# argument:
# - ctx: the changectx created by import.
extrapostimportmap = {}
1136 1136
def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it is a simple code translation
    from the import command. Feel free to make it better.

    :hunk: a patch (as a binary string)
    :parents: nodes that will be parent of the created commit
    :opts: the full dict of option passed to the import command
    :msgs: list to save commit message to.
           (used in case we need to save it when failing)
    :updatefunc: a function that updates a repo to a given node
                 updatefunc(<repo>, <node>)

    Returns a (summary message or None, committed node or None, rejects
    boolean) tuple.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context
    extractdata = patch.extract(ui, hunk)
    tmpname = extractdata.get('filename')
    message = extractdata.get('message')
    # command line options take precedence over patch metadata
    user = opts.get('user') or extractdata.get('user')
    date = opts.get('date') or extractdata.get('date')
    branch = extractdata.get('branch')
    nodeid = extractdata.get('nodeid')
    p1 = extractdata.get('p1')
    p2 = extractdata.get('p2')

    nocommit = opts.get('no_commit')
    importbranch = opts.get('import_branch')
    # without --bypass, the patch is applied to the working directory
    update = not opts.get('bypass')
    strip = opts["strip"]
    prefix = opts["prefix"]
    sim = float(opts.get('similarity') or 0)
    if not tmpname:
        # patch.extract found no actual patch content
        return (None, None, False)

    rejects = False

    try:
        cmdline_message = logmessage(ui, opts)
        if cmdline_message:
            # pickup the cmdline msg
            message = cmdline_message
        elif message:
            # pickup the patch msg
            message = message.strip()
        else:
            # launch the editor
            message = None
        ui.debug('message:\n%s\n' % message)

        if len(parents) == 1:
            parents.append(repo[nullid])
        if opts.get('exact'):
            if not nodeid or not p1:
                raise error.Abort(_('not a Mercurial patch'))
            p1 = repo[p1]
            p2 = repo[p2 or nullid]
        elif p2:
            try:
                p1 = repo[p1]
                p2 = repo[p2]
                # Without any options, consider p2 only if the
                # patch is being applied on top of the recorded
                # first parent.
                if p1 != parents[0]:
                    p1 = parents[0]
                    p2 = repo[nullid]
            except error.RepoError:
                p1, p2 = parents
            if p2.node() == nullid:
                ui.warn(_("warning: import the patch as a normal revision\n"
                          "(use --exact to import the patch as a merge)\n"))
        else:
            p1, p2 = parents

        n = None
        if update:
            if p1 != parents[0]:
                updatefunc(repo, p1.node())
            if p2 != parents[1]:
                repo.setparents(p1.node(), p2.node())

            if opts.get('exact') or importbranch:
                repo.dirstate.setbranch(branch or 'default')

            partial = opts.get('partial', False)
            files = set()
            try:
                patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
                            files=files, eolmode=None, similarity=sim / 100.0)
            except patch.PatchError as e:
                if not partial:
                    raise error.Abort(str(e))
                if partial:
                    # --partial commits what applied and records rejects
                    rejects = True

            files = list(files)
            if nocommit:
                if message:
                    msgs.append(message)
            else:
                if opts.get('exact') or p2:
                    # If you got here, you either use --force and know what
                    # you are doing or used --exact or a merge patch while
                    # being updated to its first parent.
                    m = None
                else:
                    m = scmutil.matchfiles(repo, files or [])
                editform = mergeeditform(repo[None], 'import.normal')
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform=editform, **opts)
                extra = {}
                for idfunc in extrapreimport:
                    extrapreimportmap[idfunc](repo, extractdata, extra, opts)
                overrides = {}
                if partial:
                    overrides[('ui', 'allowemptycommit')] = True
                with repo.ui.configoverride(overrides, 'import'):
                    n = repo.commit(message, user,
                                    date, match=m,
                                    editor=editor, extra=extra)
                for idfunc in extrapostimport:
                    extrapostimportmap[idfunc](repo[n])
        else:
            # --bypass: commit directly from a filestore, without touching
            # the working directory
            if opts.get('exact') or importbranch:
                branch = branch or 'default'
            else:
                branch = p1.branch()
            store = patch.filestore()
            try:
                files = set()
                try:
                    patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
                                    files, eolmode=None)
                except patch.PatchError as e:
                    raise error.Abort(str(e))
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform='import.bypass')
                memctx = context.memctx(repo, (p1.node(), p2.node()),
                                        message,
                                        files=files,
                                        filectxfn=store,
                                        user=user,
                                        date=date,
                                        branch=branch,
                                        editor=editor)
                n = memctx.commit()
            finally:
                store.close()
        if opts.get('exact') and nocommit:
            # --exact with --no-commit is still useful in that it does merge
            # and branch bits
            ui.warn(_("warning: can't check exact import with --no-commit\n"))
        elif opts.get('exact') and hex(n) != nodeid:
            raise error.Abort(_('patch is damaged or loses information'))
        msg = _('applied to working directory')
        if n:
            # i18n: refers to a short changeset id
            msg = _('created %s') % short(n)
        return (msg, n, rejects)
    finally:
        os.unlink(tmpname)
1306 1306
# facility to let extensions include additional data in an exported patch
# list of identifiers to be executed in order
extraexport = []
# mapping from identifier to the actual export function
# the function has to return a string to be added to the header or None
# it is given two arguments (sequencenumber, changectx)
extraexportmap = {}
1314 1314
def _exportsingle(repo, ctx, match, switch_parent, rev, seqno, write, diffopts):
    """Emit one changeset as an "HG changeset patch" through *write*.

    Header lines come first, then extension-provided headers, the
    description, and finally the diff against the selected parent.
    """
    node = scmutil.binnode(ctx)
    pnodes = [p.node() for p in ctx.parents() if p]
    if switch_parent:
        # diff against the second parent instead of the first
        pnodes.reverse()
    prev = pnodes[0] if pnodes else nullid
    branch = ctx.branch()

    write("# HG changeset patch\n")
    write("# User %s\n" % ctx.user())
    write("# Date %d %d\n" % ctx.date())
    write("# %s\n" % util.datestr(ctx.date()))
    if branch and branch != 'default':
        write("# Branch %s\n" % branch)
    write("# Node ID %s\n" % hex(node))
    write("# Parent %s\n" % hex(prev))
    if len(pnodes) > 1:
        write("# Parent %s\n" % hex(pnodes[1]))

    # let extensions contribute extra header lines
    for headerid in extraexport:
        header = extraexportmap[headerid](seqno, ctx)
        if header is not None:
            write('# %s\n' % header)

    write(ctx.description().rstrip())
    write("\n\n")

    for chunk, label in patch.diffui(repo, prev, node, match, opts=diffopts):
        write(chunk, label=label)
1347 1347
def export(repo, revs, fntemplate='hg-%h.patch', fp=None, switch_parent=False,
           opts=None, match=None):
    '''export changesets as hg patches

    Args:
      repo: The repository from which we're exporting revisions.
      revs: A list of revisions to export as revision numbers.
      fntemplate: An optional string to use for generating patch file names.
      fp: An optional file-like object to which patches should be written.
      switch_parent: If True, show diffs against second parent when not nullid.
                     Default is false, which always shows diff against p1.
      opts: diff options to use for generating the patch.
      match: If specified, only export changes to files matching this matcher.

    Returns:
      Nothing.

    Side Effect:
      "HG Changeset Patch" data is emitted to one of the following
      destinations:
        fp is specified: All revs are written to the specified
                         file-like object.
        fntemplate specified: Each rev is written to a unique file named using
                              the given template.
        Neither fp nor template specified: All revs written to repo.ui.write()
    '''

    total = len(revs)
    revwidth = max(len(str(rev)) for rev in revs)
    filemode = {}

    # pick the write callable once for the fp / plain-ui destinations;
    # the per-file case rebinds it inside the loop
    write = None
    dest = '<unnamed>'
    if fp:
        dest = getattr(fp, 'name', dest)
        def write(s, **kw):
            fp.write(s)
    elif not fntemplate:
        write = repo.ui.write

    for seqno, rev in enumerate(revs, 1):
        ctx = repo[rev]
        fo = None
        if not fp and fntemplate:
            # one output file per revision, named from the template
            desc_lines = ctx.description().rstrip().split('\n')
            desc = desc_lines[0] #Commit always has a first line.
            fo = makefileobj(repo, fntemplate, ctx.node(), desc=desc,
                             total=total, seqno=seqno, revwidth=revwidth,
                             mode='wb', modemap=filemode)
            dest = fo.name
            def write(s, **kw):
                fo.write(s)
        if not dest.startswith('<'):
            repo.ui.note("%s\n" % dest)
        _exportsingle(
            repo, ctx, match, switch_parent, rev, seqno, write, opts)
        if fo is not None:
            fo.close()
1406 1406
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   root='', listsubrepos=False):
    '''show diff or diffstat.

    Writes the diff (or, when *stat* is true, the diffstat) between node1
    and node2 to *fp*, or to *ui* when fp is None, recursing into
    subrepositories when *listsubrepos* is true.
    '''
    if fp is None:
        write = ui.write
    else:
        def write(s, **kw):
            fp.write(s)

    if root:
        relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
    else:
        relroot = ''
    if relroot != '':
        # XXX relative roots currently don't work if the root is within a
        # subrepo
        uirelroot = match.uipath(relroot)
        relroot += '/'
        for matchroot in match.files():
            if not matchroot.startswith(relroot):
                ui.warn(_('warning: %s not inside relative root %s\n') % (
                    match.uipath(matchroot), uirelroot))

    if stat:
        # diffstat needs no context lines
        diffopts = diffopts.copy(context=0)
        width = 80
        if not ui.plain():
            width = ui.termwidth()
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix, relroot=relroot)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width):
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, diffopts, prefix=prefix,
                                         relroot=relroot):
            write(chunk, label=label)

    if listsubrepos:
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
            tempnode2 = node2
            try:
                if node2 is not None:
                    tempnode2 = ctx2.substate[subpath][1]
            except KeyError:
                # A subrepo that existed in node1 was deleted between node1 and
                # node2 (inclusive). Thus, ctx2's substate won't contain that
                # subpath. The best we can do is to ignore it.
                tempnode2 = None
            submatch = matchmod.subdirmatcher(subpath, match)
            sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)
1463 1463
1464 1464 def _changesetlabels(ctx):
1465 1465 labels = ['log.changeset', 'changeset.%s' % ctx.phasestr()]
1466 1466 if ctx.obsolete():
1467 1467 labels.append('changeset.obsolete')
1468 1468 if ctx.isunstable():
1469 1469 labels.append('changeset.troubled')
1470 1470 for instability in ctx.instabilities():
1471 1471 labels.append('trouble.%s' % instability)
1472 1472 return ' '.join(labels)
1473 1473
class changeset_printer(object):
    '''show changeset information when templating not requested.'''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        """matchfn/diffopts control patch output; when *buffered* is true,
        show() accumulates output per revision for a later flush()."""
        self.ui = ui
        self.repo = repo
        self.buffered = buffered
        self.matchfn = matchfn
        self.diffopts = diffopts
        # per-rev buffered output (filled by show() when buffered)
        self.header = {}
        self.hunk = {}
        self.lastheader = None
        self.footer = None

    def flush(self, ctx):
        '''write any buffered header/hunk for ctx; return 1 if a hunk was
        written, 0 otherwise'''
        rev = ctx.rev()
        if rev in self.header:
            h = self.header[rev]
            # only repeat a header when it actually changed
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]
            return 1
        return 0

    def close(self):
        '''write the accumulated footer, if any'''
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, matchfn=None, **props):
        '''show ctx, buffering the output per rev when self.buffered is set'''
        props = pycompat.byteskwargs(props)
        if self.buffered:
            self.ui.pushbuffer(labeled=True)
            self._show(ctx, copies, matchfn, props)
            self.hunk[ctx.rev()] = self.ui.popbuffer()
        else:
            self._show(ctx, copies, matchfn, props)

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        changenode = ctx.node()
        rev = ctx.rev()
        if self.ui.debugflag:
            hexfunc = hex
        else:
            hexfunc = short
        # as of now, wctx.node() and wctx.rev() return None, but we want to
        # show the same values as {node} and {rev} templatekw
        revnode = (scmutil.intrev(ctx), hexfunc(scmutil.binnode(ctx)))

        if self.ui.quiet:
            self.ui.write("%d:%s\n" % revnode, label='log.node')
            return

        date = util.datestr(ctx.date())

        # i18n: column positioning for "hg log"
        self.ui.write(_("changeset: %d:%s\n") % revnode,
                      label=_changesetlabels(ctx))

        # branches are shown first before any other names due to backwards
        # compatibility
        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            # i18n: column positioning for "hg log"
            self.ui.write(_("branch: %s\n") % branch,
                          label='log.branch')

        for nsname, ns in self.repo.names.iteritems():
            # branches has special logic already handled above, so here we just
            # skip it
            if nsname == 'branches':
                continue
            # we will use the templatename as the color name since those two
            # should be the same
            for name in ns.names(self.repo, changenode):
                self.ui.write(ns.logfmt % name,
                              label='log.%s' % ns.colorname)
        if self.ui.debugflag:
            # i18n: column positioning for "hg log"
            self.ui.write(_("phase: %s\n") % ctx.phasestr(),
                          label='log.phase')
        for pctx in scmutil.meaningfulparents(self.repo, ctx):
            label = 'log.parent changeset.%s' % pctx.phasestr()
            # i18n: column positioning for "hg log"
            self.ui.write(_("parent: %d:%s\n")
                          % (pctx.rev(), hexfunc(pctx.node())),
                          label=label)

        if self.ui.debugflag and rev is not None:
            mnode = ctx.manifestnode()
            # i18n: column positioning for "hg log"
            self.ui.write(_("manifest: %d:%s\n") %
                          (self.repo.manifestlog._revlog.rev(mnode),
                           hex(mnode)),
                          label='ui.debug log.manifest')
        # i18n: column positioning for "hg log"
        self.ui.write(_("user: %s\n") % ctx.user(),
                      label='log.user')
        # i18n: column positioning for "hg log"
        self.ui.write(_("date: %s\n") % date,
                      label='log.date')

        if ctx.isunstable():
            # i18n: column positioning for "hg log"
            instabilities = ctx.instabilities()
            self.ui.write(_("instability: %s\n") % ', '.join(instabilities),
                          label='log.trouble')

        # hook point for extensions to add extra output
        self._exthook(ctx)

        if self.ui.debugflag:
            files = ctx.p1().status(ctx)[:3]
            for key, value in zip([# i18n: column positioning for "hg log"
                                   _("files:"),
                                   # i18n: column positioning for "hg log"
                                   _("files+:"),
                                   # i18n: column positioning for "hg log"
                                   _("files-:")], files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)),
                                  label='ui.debug log.files')
        elif ctx.files() and self.ui.verbose:
            # i18n: column positioning for "hg log"
            self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
                          label='ui.note log.files')
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            # i18n: column positioning for "hg log"
            self.ui.write(_("copies: %s\n") % ' '.join(copies),
                          label='ui.note log.copies')

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                # i18n: column positioning for "hg log"
                self.ui.write(_("extra: %s=%s\n")
                              % (key, util.escapestr(value)),
                              label='ui.debug log.extra')

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"),
                              label='ui.note log.description')
                self.ui.write(description,
                              label='ui.note log.description')
                self.ui.write("\n\n")
            else:
                # i18n: column positioning for "hg log"
                self.ui.write(_("summary: %s\n") %
                              description.splitlines()[0],
                              label='log.summary')
        self.ui.write("\n")

        self.showpatch(ctx, matchfn)

    def _exthook(self, ctx):
        '''empty method used by extension as a hook point
        '''
        pass

    def showpatch(self, ctx, matchfn):
        '''write diffstat and/or diff for ctx, as requested by
        self.diffopts ('stat' / 'patch')'''
        if not matchfn:
            matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.diffallopts(self.ui, self.diffopts)
            node = ctx.node()
            prev = ctx.p1().node()
            if stat:
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
            if diff:
                if stat:
                    self.ui.write("\n")
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
                self.ui.write("\n")
1658 1658
class jsonchangeset(changeset_printer):
    '''format changeset information.'''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        self.cache = {}
        # _first tracks whether any changeset has been emitted yet, so
        # _show()/close() know whether to open the list or add a separator
        self._first = True

    def close(self):
        '''terminate the JSON list (or emit an empty one)'''
        if not self._first:
            self.ui.write("\n]\n")
        else:
            self.ui.write("[]\n")

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        rev = ctx.rev()
        if rev is None:
            # working directory context: no numeric rev or node hash
            jrev = jnode = 'null'
        else:
            jrev = '%d' % rev
            jnode = '"%s"' % hex(ctx.node())
        j = encoding.jsonescape

        if self._first:
            self.ui.write("[\n {")
            self._first = False
        else:
            self.ui.write(",\n {")

        if self.ui.quiet:
            self.ui.write(('\n "rev": %s') % jrev)
            self.ui.write((',\n "node": %s') % jnode)
            self.ui.write('\n }')
            return

        self.ui.write(('\n "rev": %s') % jrev)
        self.ui.write((',\n "node": %s') % jnode)
        self.ui.write((',\n "branch": "%s"') % j(ctx.branch()))
        self.ui.write((',\n "phase": "%s"') % ctx.phasestr())
        self.ui.write((',\n "user": "%s"') % j(ctx.user()))
        self.ui.write((',\n "date": [%d, %d]') % ctx.date())
        self.ui.write((',\n "desc": "%s"') % j(ctx.description()))

        self.ui.write((',\n "bookmarks": [%s]') %
                      ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
        self.ui.write((',\n "tags": [%s]') %
                      ", ".join('"%s"' % j(t) for t in ctx.tags()))
        self.ui.write((',\n "parents": [%s]') %
                      ", ".join('"%s"' % c.hex() for c in ctx.parents()))

        if self.ui.debugflag:
            if rev is None:
                jmanifestnode = 'null'
            else:
                jmanifestnode = '"%s"' % hex(ctx.manifestnode())
            self.ui.write((',\n "manifest": %s') % jmanifestnode)

            self.ui.write((',\n "extra": {%s}') %
                          ", ".join('"%s": "%s"' % (j(k), j(v))
                                    for k, v in ctx.extra().items()))

            files = ctx.p1().status(ctx)
            self.ui.write((',\n "modified": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[0]))
            self.ui.write((',\n "added": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[1]))
            self.ui.write((',\n "removed": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[2]))

        elif self.ui.verbose:
            self.ui.write((',\n "files": [%s]') %
                          ", ".join('"%s"' % j(f) for f in ctx.files()))

            if copies:
                self.ui.write((',\n "copies": {%s}') %
                              ", ".join('"%s": "%s"' % (j(k), j(v))
                                        for k, v in copies))

        matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
            node, prev = ctx.node(), ctx.p1().node()
            if stat:
                # capture the diffstat output so it can be JSON-escaped
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
                self.ui.write((',\n "diffstat": "%s"')
                              % j(self.ui.popbuffer()))
            if diff:
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
                self.ui.write((',\n "diff": "%s"') % j(self.ui.popbuffer()))

        self.ui.write("\n }")
1757 1757
class changeset_templater(changeset_printer):
    '''format changeset information.'''

    # Arguments before "buffered" used to be positional. Consider not
    # adding/removing arguments before "buffered" to not break callers.
    def __init__(self, ui, repo, tmplspec, matchfn=None, diffopts=None,
                 buffered=False):
        """Load the templater for *tmplspec* and resolve the template
        parts (header/footer/docheader/docfooter/separator)."""
        diffopts = diffopts or {}

        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        self.t = formatter.loadtemplater(ui, tmplspec,
                                         cache=templatekw.defaulttempl)
        self._counter = itertools.count()
        self.cache = {}

        self._tref = tmplspec.ref
        self._parts = {'header': '', 'footer': '',
                       tmplspec.ref: tmplspec.ref,
                       'docheader': '', 'docfooter': '',
                       'separator': ''}
        if tmplspec.mapfile:
            # find correct templates for current mode, for backward
            # compatibility with 'log -v/-q/--debug' using a mapfile
            tmplmodes = [
                (True, ''),
                (self.ui.verbose, '_verbose'),
                (self.ui.quiet, '_quiet'),
                (self.ui.debugflag, '_debug'),
            ]
            for mode, postfix in tmplmodes:
                for t in self._parts:
                    cur = t + postfix
                    if mode and cur in self.t:
                        self._parts[t] = cur
        else:
            partnames = [p for p in self._parts.keys() if p != tmplspec.ref]
            m = formatter.templatepartsmap(tmplspec, self.t, partnames)
            self._parts.update(m)

        if self._parts['docheader']:
            self.ui.write(templater.stringify(self.t(self._parts['docheader'])))

    def close(self):
        '''render the docfooter template into the footer, then close'''
        if self._parts['docfooter']:
            if not self.footer:
                self.footer = ""
            self.footer += templater.stringify(self.t(self._parts['docfooter']))
        return super(changeset_templater, self).close()

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        props = props.copy()
        props.update(templatekw.keywords)
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['ui'] = self.repo.ui
        props['index'] = index = next(self._counter)
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache
        props = pycompat.strkwargs(props)

        # write separator, which wouldn't work well with the header part below
        # since there's inherently a conflict between header (across items) and
        # separator (per item)
        if self._parts['separator'] and index > 0:
            self.ui.write(templater.stringify(self.t(self._parts['separator'])))

        # write header
        if self._parts['header']:
            h = templater.stringify(self.t(self._parts['header'], **props))
            if self.buffered:
                self.header[ctx.rev()] = h
            else:
                if self.lastheader != h:
                    self.lastheader = h
                    self.ui.write(h)

        # write changeset metadata, then patch if requested
        key = self._parts[self._tref]
        self.ui.write(templater.stringify(self.t(key, **props)))
        self.showpatch(ctx, matchfn)

        if self._parts['footer']:
            if not self.footer:
                self.footer = templater.stringify(
                    self.t(self._parts['footer'], **props))
1845 1845
def logtemplatespec(tmpl, mapfile):
    """Build a formatter.templatespec for a changeset template.

    A map file implies the 'changeset' topic; a bare template gets an
    empty reference name and no map file.
    """
    if not mapfile:
        return formatter.templatespec('', tmpl, None)
    return formatter.templatespec('changeset', tmpl, mapfile)
1851 1851
def _lookuplogtemplate(ui, tmpl, style):
    """Find the template matching the given template spec or style

    See formatter.lookuptemplate() for details.
    """
    # neither --template nor --style was given: consult the ui settings,
    # where a configured template is stronger than a configured style
    if not tmpl and not style:
        configured = ui.config('ui', 'logtemplate')
        if configured:
            return logtemplatespec(templater.unquotestring(configured), None)
        style = util.expandpath(ui.config('ui', 'style'))

    if style and not tmpl:
        mapfile = style
        # a bare style name refers to a shipped "map-cmdline.<style>" file
        if not os.path.split(mapfile)[0]:
            found = (templater.templatepath('map-cmdline.' + mapfile)
                     or templater.templatepath(mapfile))
            if found:
                mapfile = found
        return logtemplatespec(None, mapfile)

    if not tmpl:
        return logtemplatespec(None, None)

    return formatter.lookuptemplate(ui, 'changeset', tmpl)
1879 1879
def makelogtemplater(ui, repo, tmpl, buffered=False):
    """Create a changeset_templater from a literal template 'tmpl'"""
    return changeset_templater(ui, repo, logtemplatespec(tmpl, None),
                               buffered=buffered)
1884 1884
def show_changeset(ui, repo, opts, buffered=False):
    """show one changeset using template or regular display.

    Display format will be the first non-empty hit of:
    1. option 'template'
    2. option 'style'
    3. [ui] setting 'logtemplate'
    4. [ui] setting 'style'
    If all of these values are either the unset or the empty string,
    regular display via changeset_printer() is done.
    """
    # a file matcher is only needed when file contents will be shown
    matchfn = None
    if opts.get('patch') or opts.get('stat'):
        matchfn = scmutil.matchall(repo)

    if opts.get('template') == 'json':
        return jsonchangeset(ui, repo, matchfn, opts, buffered)

    spec = _lookuplogtemplate(ui, opts.get('template'), opts.get('style'))

    # nothing resolved to a template: fall back to the plain printer
    if not (spec.ref or spec.tmpl or spec.mapfile):
        return changeset_printer(ui, repo, matchfn, opts, buffered)

    return changeset_templater(ui, repo, spec, matchfn, opts, buffered)
1910 1910
def showmarker(fm, marker, index=None):
    """utility function to display obsolescence marker in a readable way

    To be used by debug function."""
    if index is not None:
        fm.write('index', '%i ', index)
    # predecessor node of the marker
    fm.write('precnode', '%s ', hex(marker.prednode()))
    succs = marker.succnodes()
    # successors are only written when present (condwrite)
    fm.condwrite(succs, 'succnodes', '%s ',
                 fm.formatlist(map(hex, succs), name='node'))
    fm.write('flag', '%X ', marker.flags())
    parents = marker.parentnodes()
    if parents is not None:
        fm.write('parentnodes', '{%s} ',
                 fm.formatlist(map(hex, parents), name='node', sep=', '))
    fm.write('date', '(%s) ', fm.formatdate(marker.date()))
    # 'date' is displayed separately above, so drop it from the metadata
    meta = marker.metadata().copy()
    meta.pop('date', None)
    fm.write('metadata', '{%s}', fm.formatdict(meta, fmt='%r: %r', sep=', '))
    fm.plain('\n')
1931 1931
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""

    datematch = util.matchdate(date)
    matched = {}

    def prep(ctx, fns):
        # record the date of every changeset matching the spec
        ctxdate = ctx.date()
        if datematch(ctxdate[0]):
            matched[ctx.rev()] = ctxdate

    # return the first matching revision reported by the walk
    for ctx in walkchangerevs(repo, scmutil.matchall(repo), {'rev': None},
                              prep):
        rev = ctx.rev()
        if rev in matched:
            ui.status(_("found revision %s from %s\n") %
                      (rev, util.datestr(matched[rev])))
            return '%d' % rev

    raise error.Abort(_("revision matching date not found"))
1952 1952
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield window sizes that double on each step, capped at sizelimit."""
    size = windowsize
    while True:
        yield size
        if size < sizelimit:
            size *= 2
1958 1958
class FileWalkError(Exception):
    """Raised by walkfilerevs() when history can't be walked via filelogs.

    Callers (see walkchangerevs()) catch this and fall back to the slow
    path that reads the changelog.
    """
    pass
1961 1961
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.
    '''
    wanted = set()
    # rename sources discovered while following; iterfiles() walks them too
    copies = []
    minrev, maxrev = min(revs), max(revs)
    def filerevgen(filelog, last):
        """
        Only files, no patterns.  Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append((linkrev, parentlinkrevs,
                         follow and filelog.renamed(n)))

        return reversed(revs)
    def iterfiles():
        # yield (filename, filenode) pairs for the matched files, then
        # for any rename sources appended to 'copies' during the walk
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = {filelog.linkrev(last)}

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
2058 2058
class _followfilter(object):
    # Incrementally decides whether revisions are connected, through
    # parent links, to the first revision fed to match(). With
    # onlyfirst, only first parents are considered.
    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        self.startrev = nullrev  # anchor; set by the first match() call
        self.roots = set()       # revs known to be connected so far
        self.onlyfirst = onlyfirst

    def match(self, rev):
        def realparents(rev):
            # first parent only when onlyfirst; otherwise both parents,
            # with null parents dropped
            if self.onlyfirst:
                return self.repo.changelog.parentrevs(rev)[0:1]
            else:
                return filter(lambda x: x != nullrev,
                              self.repo.changelog.parentrevs(rev))

        # first call: remember the anchor revision and accept it
        if self.startrev == nullrev:
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: all descendants
            if not self.roots:
                self.roots.add(self.startrev)
            for parent in realparents(rev):
                if parent in self.roots:
                    self.roots.add(rev)
                    return True
        else:
            # backwards: all parents
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            if rev in self.roots:
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False
2096 2096
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    follow = opts.get('follow') or opts.get('follow_first')
    revs = _logrevs(repo, opts)
    if not revs:
        return []
    wanted = set()
    # patterns (or exact/prefix matches with --removed) force reading
    # the changelog itself ("slow path")
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    fncache = {}
    change = repo.changectx

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if match.always():
        # No files, no patterns.  Display all revs.
        wanted = revs
    elif not slowpath:
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of them
            # existed in history, otherwise simply return
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise error.Abort(_('can only follow copies/renames for explicit '
                                'filenames'))

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                if value in self.set:
                    return True
                elif not value in self.revs:
                    return False
                else:
                    self.revs.discard(value)
                    ctx = change(value)
                    matches = filter(match, ctx.files())
                    if matches:
                        fncache[value] = matches
                        self.set.add(value)
                        return True
                    return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        # NOTE(review): 'wanted - [x]' requires 'wanted' to support
        # subtracting a list; lazywantedset (slow path) defines no '-'
        # operator -- verify that pruning works on that path.
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                wanted = wanted - [x]

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and match.always():
            ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            # gather up to windowsize wanted revisions from the iterator
            for i in xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                elif want(rev):
                    nrevs.append(rev)
            # prepare() is called in ascending order within the window...
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            # ...then contexts are yielded in the requested order
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
2234 2234
def _makefollowlogfilematcher(repo, files, followfirst):
    # With --patch --follow FILE we have to know which file of each
    # displayed revision must be diffed. We approximate this by redoing
    # the graph traversal already done by the --follow revset and
    # remembering, per revision, the names the followed files had there
    # (not strictly "correct", but good enough).
    revtonames = {}
    populated = [False]
    pctx = repo['.']

    def _populate():
        # map each ancestor revision of the followed files to the
        # path(s) those files had in that revision
        for fn in files:
            fctx = pctx[fn]
            revtonames.setdefault(fctx.introrev(), set()).add(fctx.path())
            for anc in fctx.ancestors(followfirst=followfirst):
                revtonames.setdefault(anc.rev(), set()).add(anc.path())

    def filematcher(rev):
        # fill the cache lazily, on first use
        if not populated[0]:
            populated[0] = True
            _populate()
        return scmutil.matchfiles(repo, revtonames.get(rev, []))

    return filematcher
2262 2262
def _makenofollowlogfilematcher(repo, pats, opts):
    '''hook for extensions to override the filematcher for non-follow cases'''
    # the default implementation provides no matcher; the caller then
    # falls back to a plain match-based one (see _makelogrevset)
    return None
2266 2266
def _makelogrevset(repo, pats, opts, revs):
    """Return (expr, filematcher) where expr is a revset string built
    from log options and file patterns or None. If --stat or --patch
    are not passed filematcher is None. Otherwise it is a callable
    taking a revision number and returning a match objects filtering
    the files to be detailed when displaying the revision.
    """
    # maps a log option to (revset template, operator joining list values)
    opt2revset = {
        'no_merges': ('not merge()', None),
        'only_merges': ('merge()', None),
        '_ancestors': ('ancestors(%(val)s)', None),
        '_fancestors': ('_firstancestors(%(val)s)', None),
        '_descendants': ('descendants(%(val)s)', None),
        '_fdescendants': ('_firstdescendants(%(val)s)', None),
        '_matchfiles': ('_matchfiles(%(val)s)', None),
        'date': ('date(%(val)r)', None),
        'branch': ('branch(%(val)r)', ' or '),
        '_patslog': ('filelog(%(val)r)', ' or '),
        '_patsfollow': ('follow(%(val)r)', ' or '),
        '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
        'keyword': ('keyword(%(val)r)', ' or '),
        'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
        'user': ('user(%(val)r)', ' or '),
    }

    opts = dict(opts)
    # follow or not follow?
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('follow_first'):
        followfirst = 1
    else:
        followfirst = 0
    # --follow with FILE behavior depends on revs...
    it = iter(revs)
    startrev = next(it)
    # ascending revs mean --follow selects descendants of startrev,
    # otherwise ancestors (used to index fnopats below)
    followdescendants = startrev < next(it, startrev)

    # branch and only_branch are really aliases and must be handled at
    # the same time
    opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
    opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
    # pats/include/exclude are passed to match.match() directly in
    # _matchfiles() revset but walkchangerevs() builds its matcher with
    # scmutil.match(). The difference is input pats are globbed on
    # platforms without shell expansion (windows).
    wctx = repo[None]
    match, pats = scmutil.matchandpats(wctx, pats, opts)
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    if not slowpath:
        for f in match.files():
            if follow and f not in wctx:
                # If the file exists, it may be a directory, so let it
                # take the slow path.
                if os.path.exists(repo.wjoin(f)):
                    slowpath = True
                    continue
                else:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % f)
            filelog = repo.file(f)
            if not filelog:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % f)
                slowpath = True

        # We decided to fall back to the slowpath because at least one
        # of the paths was not a file. Check to see if at least one of them
        # existed in history - in that case, we'll continue down the
        # slowpath; otherwise, we can turn off the slowpath
        if slowpath:
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                slowpath = False

    fpats = ('_patsfollow', '_patsfollowfirst')
    fnopats = (('_ancestors', '_fancestors'),
               ('_descendants', '_fdescendants'))
    if slowpath:
        # See walkchangerevs() slow path.
        #
        # pats/include/exclude cannot be represented as separate
        # revset expressions as their filtering logic applies at file
        # level. For instance "-I a -X a" matches a revision touching
        # "a" and "b" while "file(a) and not file(b)" does
        # not. Besides, filesets are evaluated against the working
        # directory.
        matchargs = ['r:', 'd:relpath']
        for p in pats:
            matchargs.append('p:' + p)
        for p in opts.get('include', []):
            matchargs.append('i:' + p)
        for p in opts.get('exclude', []):
            matchargs.append('x:' + p)
        matchargs = ','.join(('%r' % p) for p in matchargs)
        opts['_matchfiles'] = matchargs
        if follow:
            opts[fnopats[0][followfirst]] = '.'
    else:
        if follow:
            if pats:
                # follow() revset interprets its file argument as a
                # manifest entry, so use match.files(), not pats.
                opts[fpats[followfirst]] = list(match.files())
            else:
                op = fnopats[followdescendants][followfirst]
                opts[op] = 'rev(%d)' % startrev
        else:
            opts['_patslog'] = list(pats)

    filematcher = None
    if opts.get('patch') or opts.get('stat'):
        # When following files, track renames via a special matcher.
        # If we're forced to take the slowpath it means we're following
        # at least one pattern/directory, so don't bother with rename tracking.
        if follow and not match.always() and not slowpath:
            # _makefollowlogfilematcher expects its files argument to be
            # relative to the repo root, so use match.files(), not pats.
            filematcher = _makefollowlogfilematcher(repo, match.files(),
                                                    followfirst)
        else:
            filematcher = _makenofollowlogfilematcher(repo, pats, opts)
            if filematcher is None:
                filematcher = lambda rev: match

    # translate each recognized option into a revset fragment and AND
    # them all together
    expr = []
    for op, val in sorted(opts.iteritems()):
        if not val:
            continue
        if op not in opt2revset:
            continue
        revop, andor = opt2revset[op]
        if '%(val)' not in revop:
            expr.append(revop)
        else:
            if not isinstance(val, list):
                e = revop % {'val': val}
            else:
                e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
            expr.append(e)

    if expr:
        expr = '(' + ' and '.join(expr) + ')'
    else:
        expr = None
    return expr, filematcher
2418 2418
def _logrevs(repo, opts):
    """Return the default set of revisions a log command should visit."""
    # Default --rev value depends on --follow but --follow behavior
    # depends on revisions resolved from --rev...
    following = opts.get('follow') or opts.get('follow_first')
    if opts.get('rev'):
        # an explicit --rev wins
        return scmutil.revrange(repo, opts['rev'])
    if following:
        if repo.dirstate.p1() == nullid:
            # following from an unborn working directory: nothing to show
            return smartset.baseset()
        return repo.revs('reverse(:.)')
    allrevs = smartset.spanset(repo)
    allrevs.reverse()
    return allrevs
2433 2433
def getgraphlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return smartset.baseset(), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if opts.get('rev'):
        # User-specified revs might be unsorted, but don't sort before
        # _makelogrevset because it might depend on the order of revs
        if not (revs.isdescending() or revs.istopo()):
            revs.sort(reverse=True)
    if expr:
        matcher = revset.match(repo.ui, expr, order=revset.followorder)
        revs = matcher(repo, revs)
    if limit is not None:
        # honor --limit by truncating after the revset filtering
        limitedrevs = []
        for idx, rev in enumerate(revs):
            if idx >= limit:
                break
            limitedrevs.append(rev)
        revs = smartset.baseset(limitedrevs)

    return revs, expr, filematcher
2464 2464
def getlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return smartset.baseset([]), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if expr:
        matcher = revset.match(repo.ui, expr, order=revset.followorder)
        revs = matcher(repo, revs)
    if limit is not None:
        # honor --limit by keeping only the first 'limit' revisions
        revs = smartset.baseset(list(itertools.islice(revs, limit)))

    return revs, expr, filematcher
2490 2490
def _graphnodeformatter(ui, displayer):
    """Return a function(repo, ctx) rendering a graph node for ctx."""
    spec = ui.config('ui', 'graphnodetemplate')
    if not spec:
        return templatekw.showgraphnode  # fast path for "{graphnode}"

    spec = templater.unquotestring(spec)
    templ = formatter.maketemplater(ui, spec)
    cache = {}
    if isinstance(displayer, changeset_templater):
        cache = displayer.cache # reuse cache of slow templates
    props = templatekw.keywords.copy()
    props['templ'] = templ
    props['cache'] = cache
    def formatnode(repo, ctx):
        # per-node properties; revcache is reset for every node
        props['ctx'] = ctx
        props['repo'] = repo
        props['ui'] = repo.ui
        props['revcache'] = {}
        return templ.render(props)
    return formatnode
2511 2511
def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None,
                 filematcher=None):
    """Render changesets from 'dag' as an ASCII graph.

    'dag' yields (rev, type, ctx, parents) tuples; 'displayer' (buffered)
    formats each changeset and 'edgefn' converts it into the edge data
    consumed by graphmod.ascii().
    """
    formatnode = _graphnodeformatter(ui, displayer)
    state = graphmod.asciistate()
    styles = state['styles']

    # only set graph styling if HGPLAIN is not set.
    if ui.plain('graph'):
        # set all edge styles to |, the default pre-3.8 behaviour
        styles.update(dict.fromkeys(styles, '|'))
    else:
        edgetypes = {
            'parent': graphmod.PARENT,
            'grandparent': graphmod.GRANDPARENT,
            'missing': graphmod.MISSINGPARENT
        }
        for name, key in edgetypes.items():
            # experimental config: experimental.graphstyle.*
            styles[key] = ui.config('experimental', 'graphstyle.%s' % name,
                                    styles[key])
            if not styles[key]:
                styles[key] = None

    # experimental config: experimental.graphshorten
    state['graphshorten'] = ui.configbool('experimental', 'graphshorten')

    for rev, type, ctx, parents in dag:
        char = formatnode(repo, ctx)
        copies = None
        # NOTE(review): 'ctx.rev()' is falsy for revision 0, so copies are
        # never collected for it -- confirm this is intentional
        if getrenamed and ctx.rev():
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename[0]))
        revmatchfn = None
        if filematcher is not None:
            revmatchfn = filematcher(ctx.rev())
        displayer.show(ctx, copies=copies, matchfn=revmatchfn)
        # pull the buffered output for this rev back out and hand it to
        # the graph renderer line by line
        lines = displayer.hunk.pop(rev).split('\n')
        if not lines[-1]:
            del lines[-1]
        displayer.flush(ctx)
        edges = edgefn(type, char, lines, state, rev, parents)
        for type, char, lines, coldata in edges:
            graphmod.ascii(ui, state, type, char, lines, coldata)
    displayer.close()
2559 2559
def graphlog(ui, repo, pats, opts):
    """Run a graph-style log; parameters are identical to the log command's."""
    revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
    revdag = graphmod.dagwalker(repo, revs)

    # build a rename-lookup function only when --copies was requested
    getrenamed = None
    if opts.get('copies'):
        if opts.get('rev'):
            endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
        else:
            endrev = None
        getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)

    ui.pager('log')
    displayer = show_changeset(ui, repo, opts, buffered=True)
    displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed,
                 filematcher)
2576 2576
def checkunsupportedgraphflags(pats, opts):
    """Abort if an option incompatible with -G/--graph was given."""
    for flag in ("newest_first",):
        if opts.get(flag):
            raise error.Abort(_("-G/--graph option is incompatible with --%s")
                              % flag.replace("_", "-"))
2582 2582
def graphrevs(repo, nodes, opts):
    """Return a graphmod node iterator over 'nodes', reversed in place
    and truncated to the --limit option."""
    limit = loglimit(opts)
    nodes.reverse()
    return graphmod.nodes(repo, nodes if limit is None else nodes[:limit])
2589 2589
def add(ui, repo, match, prefix, explicitonly, **opts):
    """Add files matched by 'match' to the working context; return the
    list of paths that could not be added.

    With 'explicitonly', only exactly-named files are added. 'prefix' is
    prepended to subrepo paths in messages and passed to wctx.add().
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []

    # collect files the matcher reports as bad, while still invoking the
    # matcher's original bad-file callback
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(dirstate.walk(badmatch, sorted(wctx.substate),
                                  True, False, full=False)):
        exact = match.exact(f)
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                # audit the name for case collisions
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(f))

    # recurse into subrepositories; without --subrepos, subrepos only
    # add files that are matched explicitly
    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            if opts.get(r'subrepos'):
                bad.extend(sub.add(ui, submatch, prefix, False, **opts))
            else:
                bad.extend(sub.add(ui, submatch, prefix, True, **opts))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not opts.get(r'dry_run'):
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad
2632 2632
def addwebdirpath(repo, serverpath, webconf):
    """Register *repo* (and, recursively, its subrepos) in *webconf*.

    Maps *serverpath* to the repository root, then walks every revision
    that touched .hgsub and asks each referenced subrepo to register
    itself under the same server path.
    """
    root = repo.root
    webconf[serverpath] = root
    repo.ui.debug('adding %s = %s\n' % (serverpath, root))

    # any revision touching .hgsub may reference subrepositories
    for rev in repo.revs('filelog("path:.hgsub")'):
        ctx = repo[rev]
        for subpath in ctx.substate:
            ctx.sub(subpath).addwebdirpath(serverpath, webconf)
2641 2641
def forget(ui, repo, match, prefix, explicitonly):
    """Stop tracking files matched by *match* without deleting them.

    *prefix* is prepended when reporting subrepository paths.  When
    *explicitonly* is true, only files named exactly on the command line
    are forgotten.

    Returns a pair (bad, forgot): files that could not be forgotten, and
    files that actually were (including subrepo files, prefixed with
    their subrepo path).
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []
    # record names the matcher rejects while still delegating to its own
    # bad-file handler
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    # recurse into subrepositories, accumulating their bad/forgotten
    # files with the subrepo path prepended
    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subbad, subforgot = sub.forget(submatch, prefix)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        # warn about explicitly-named files that are not tracked
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % match.rel(f))
                    bad.append(f)

    for f in forget:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(f))

    rejected = wctx.forget(forget, prefix)
    bad.extend(f for f in rejected if f in match.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2689 2689
def files(ui, ctx, m, fm, fmt, subrepos):
    """List the files of *ctx* selected by matcher *m* via formatter *fm*.

    *fmt* is the template used for the path field.  When *subrepos* is
    true, matching subrepositories are listed recursively as well.

    Returns 0 if at least one file was listed, 1 otherwise.
    """
    rev = ctx.rev()
    ret = 1
    ds = ctx.repo().dirstate

    for f in ctx.matches(m):
        # in the working directory (rev is None), skip files already
        # marked for removal in the dirstate
        if rev is None and ds[f] == 'r':
            continue
        fm.startitem()
        if ui.verbose:
            fc = ctx[f]
            fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
        fm.data(abspath=f)
        fm.write('path', fmt, m.rel(f))
        ret = 0

    for subpath in sorted(ctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        # visit a subrepo when requested globally, named exactly, or when
        # the matcher names files inside it
        if (subrepos or m.exact(subpath) or any(submatch.files())):
            sub = ctx.sub(subpath)
            try:
                recurse = m.exact(subpath) or subrepos
                if sub.printfiles(ui, submatch, fm, fmt, recurse) == 0:
                    ret = 0
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % m.abs(subpath))

    return ret
2719 2719
def remove(ui, repo, m, prefix, after, force, subrepos, warnings=None):
    """Schedule removal of the files matched by *m* (hg remove).

    *after* records deletions already made on disk (--after); *force*
    removes regardless of local modifications (--force); *subrepos*
    recurses into subrepositories.  When *warnings* is supplied (by a
    recursing caller), warnings are accumulated into it instead of being
    printed here.

    Returns 0 on success, 1 if any file could not be removed.
    """
    join = lambda f: os.path.join(prefix, f)
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    # only the outermost call (warnings is None) prints the warnings
    if warnings is None:
        warnings = []
        warn = True
    else:
        warn = False

    subs = sorted(wctx.substate)
    total = len(subs)
    count = 0
    for subpath in subs:
        count += 1
        submatch = matchmod.subdirmatcher(subpath, m)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            ui.progress(_('searching'), count, total=total, unit=_('subrepos'))
            sub = wctx.sub(subpath)
            try:
                if sub.removefiles(submatch, prefix, after, force, subrepos,
                                   warnings):
                    ret = 1
            except error.LookupError:
                warnings.append(_("skipping missing subrepository: %s\n")
                               % join(subpath))
    ui.progress(_('searching'), None)

    # warn about failure to delete explicit files/dirs
    deleteddirs = util.dirs(deleted)
    files = m.files()
    total = len(files)
    count = 0
    for f in files:
        def insubrepo():
            for subpath in wctx.substate:
                if f.startswith(subpath + '/'):
                    return True
            return False

        count += 1
        ui.progress(_('deleting'), count, total=total, unit=_('files'))
        isdir = f in deleteddirs or wctx.hasdir(f)
        if (f in repo.dirstate or isdir or f == '.'
            or insubrepo() or f in subs):
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                warnings.append(_('not removing %s: no tracked files\n')
                                % m.rel(f))
            else:
                warnings.append(_('not removing %s: file is untracked\n')
                                % m.rel(f))
            # missing files will generate a warning elsewhere
            ret = 1
    ui.progress(_('deleting'), None)

    # decide which files actually get removed from tracking
    # (NOTE: was named 'list', shadowing the builtin; renamed)
    if force:
        deletelist = modified + deleted + clean + added
    elif after:
        deletelist = deleted
        remaining = modified + added + clean
        total = len(remaining)
        count = 0
        for f in remaining:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file still exists\n')
                            % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)
    else:
        deletelist = deleted + clean
        total = len(modified) + len(added)
        count = 0
        for f in modified:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file is modified (use -f'
                              ' to force removal)\n') % m.rel(f))
            ret = 1
        for f in added:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_("not removing %s: file has been marked for add"
                              " (use 'hg forget' to undo add)\n") % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)

    deletelist = sorted(deletelist)
    total = len(deletelist)
    count = 0
    for f in deletelist:
        count += 1
        if ui.verbose or not m.exact(f):
            ui.progress(_('deleting'), count, total=total, unit=_('files'))
            ui.status(_('removing %s\n') % m.rel(f))
    ui.progress(_('deleting'), None)

    with repo.wlock():
        if not after:
            for f in deletelist:
                if f in added:
                    continue # we never unlink added files on remove
                repo.wvfs.unlinkpath(f, ignoremissing=True)
        repo[None].forget(deletelist)

    if warn:
        for warning in warnings:
            ui.warn(warning)

    return ret
2837 2837
def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
    """Write out the contents of files in *ctx* selected by *matcher*.

    Output goes through formatter *basefm*, optionally reopened per file
    when an output filename template *fntemplate* is given.  *prefix* is
    prepended to paths for filename templates and subrepo reporting.
    Honors the 'decode' key of *opts*.

    Returns 0 if at least one file was written, 1 otherwise.
    """
    err = 1

    def write(path):
        # emit one file's data, honoring --output and --decode
        filename = None
        if fntemplate:
            filename = makefilename(repo, fntemplate, ctx.node(),
                                    pathname=os.path.join(prefix, path))
        with formatter.maybereopen(basefm, filename, opts) as fm:
            data = ctx[path].data()
            if opts.get('decode'):
                data = repo.wwritedata(path, data)
            fm.startitem()
            fm.write('data', '%s', data)
            fm.data(abspath=path, path=matcher.rel(path))

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mfl = repo.manifestlog
        mfnode = ctx.manifestnode()
        try:
            if mfnode and mfl[mfnode].find(file)[0]:
                write(file)
                return 0
        except KeyError:
            # fall through to the full walk below
            pass

    for abs in ctx.walk(matcher):
        write(abs)
        err = 0

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, matcher)

            if not sub.cat(submatch, basefm, fntemplate,
                           os.path.join(prefix, sub._path), **opts):
                err = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % os.path.join(prefix, subpath))

    return err
2884 2884
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes

    Parses --date, builds the commit message and matcher, optionally
    runs addremove under a dirstate guard, then delegates the actual
    commit to *commitfunc* and returns its result.
    '''
    rawdate = opts.get('date')
    if rawdate:
        opts['date'] = util.parsedate(rawdate)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    # extract addremove carefully -- this function can be called from a
    # command that doesn't support addremove, so the option may be absent
    dsguard = None
    try:
        if opts.get('addremove'):
            dsguard = dirstateguard.dirstateguard(repo, 'commit')
            if scmutil.addremove(repo, matcher, "", opts) != 0:
                raise error.Abort(
                    _("failed to mark all new/missing files as added/removed"))

        result = commitfunc(ui, repo, message, matcher, opts)
        if dsguard:
            dsguard.close()
        return result
    finally:
        if dsguard:
            dsguard.release()
2910 2910
def samefile(f, ctx1, ctx2):
    """True if file *f* is identical (content and flags) in both contexts.

    A file absent from both contexts also counts as "same"; a file
    present in exactly one of them is different.
    """
    in1 = f in ctx1.manifest()
    in2 = f in ctx2.manifest()
    if not in1:
        # same only when it is missing on both sides
        return not in2
    if not in2:
        return False
    fctx1 = ctx1.filectx(f)
    fctx2 = ctx2.filectx(f)
    return not fctx1.cmp(fctx2) and fctx1.flags() == fctx2.flags()
2922 2922
def amend(ui, repo, commitfunc, old, extra, pats, opts):
    """Rewrite changeset *old*, folding in any working-directory changes.

    First makes a temporary intermediate commit of the working-directory
    changes (with hooks and the active bookmark suspended), then commits
    a replacement changeset on top of old's first parent that combines
    old and the intermediate commit.  *extra* is updated in place with
    old's and the intermediate commit's extras plus 'amend_source'.

    Returns the node of the new changeset, or old's node when nothing
    would change.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    # amend will reuse the existing user if not specified, but the obsolete
    # marker creation requires that the current user's name is specified.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ui.username() # raise exception if username not set

    ui.note(_('amending changeset %s\n') % old)
    base = old.p1()

    newid = None
    with repo.wlock(), repo.lock(), repo.transaction('amend'):
        # See if we got a message from -m or -l, if not, open the editor
        # with the message of the changeset to amend
        message = logmessage(ui, opts)
        # ensure logfile does not conflict with later enforcement of the
        # message. potential logfile content has been processed by
        # `logmessage` anyway.
        opts.pop('logfile')
        # First, do a regular commit to record all changes in the working
        # directory (if there are any)
        ui.callhooks = False
        activebookmark = repo._bookmarks.active
        try:
            repo._bookmarks.active = None
            opts['message'] = 'temporary amend commit for %s' % old
            node = commit(ui, repo, commitfunc, pats, opts)
        finally:
            repo._bookmarks.active = activebookmark
            ui.callhooks = True
        ctx = repo[node]

        # Participating changesets:
        #
        # node/ctx o - new (intermediate) commit that contains changes
        #          |   from working dir to go into amending commit
        #          |   (or a workingctx if there were no changes)
        #          |
        # old      o - changeset to amend
        #          |
        # base     o - parent of amending changeset

        # Update extra dict from amended commit (e.g. to preserve graft
        # source)
        extra.update(old.extra())

        # Also update it from the intermediate commit or from the wctx
        extra.update(ctx.extra())

        if len(old.parents()) > 1:
            # ctx.files() isn't reliable for merges, so fall back to the
            # slower repo.status() method
            files = set([fn for st in repo.status(base, old)[:3]
                         for fn in st])
        else:
            files = set(old.files())

        # Second, we use either the commit we just did, or if there were no
        # changes the parent of the working directory as the version of the
        # files in the final amend commit
        if node:
            ui.note(_('copying changeset %s to %s\n') % (ctx, base))

            user = ctx.user()
            date = ctx.date()
            # Recompute copies (avoid recording a -> b -> a)
            copied = copies.pathcopies(base, ctx)
            # Fix: this used to read "if old.p2:", truth-testing the bound
            # method itself (always true) instead of whether a second
            # parent exists, so p2 copy tracing ran even for non-merges.
            if old.p2().node() != nullid:
                copied.update(copies.pathcopies(old.p2(), ctx))

            # Prune files which were reverted by the updates: if old
            # introduced file X and our intermediate commit, node,
            # renamed that file, then those two files are the same and
            # we can discard X from our list of files. Likewise if X
            # was deleted, it's no longer relevant
            files.update(ctx.files())
            files = [f for f in files if not samefile(f, ctx, base)]

            def filectxfn(repo, ctx_, path):
                # serve files from the intermediate commit; a missing
                # path means the file was deleted
                try:
                    fctx = ctx[path]
                    flags = fctx.flags()
                    mctx = context.memfilectx(repo,
                                              fctx.path(), fctx.data(),
                                              islink='l' in flags,
                                              isexec='x' in flags,
                                              copied=copied.get(path))
                    return mctx
                except KeyError:
                    return None
        else:
            ui.note(_('copying changeset %s to %s\n') % (old, base))

            # Use version of files as in the old cset
            def filectxfn(repo, ctx_, path):
                try:
                    return old.filectx(path)
                except KeyError:
                    return None

        user = opts.get('user') or old.user()
        date = opts.get('date') or old.date()
        editform = mergeeditform(old, 'commit.amend')
        editor = getcommiteditor(editform=editform,
                                 **pycompat.strkwargs(opts))
        if not message:
            editor = getcommiteditor(edit=True, editform=editform)
            message = old.description()

        pureextra = extra.copy()
        extra['amend_source'] = old.hex()

        new = context.memctx(repo,
                             parents=[base.node(), old.p2().node()],
                             text=message,
                             files=files,
                             filectxfn=filectxfn,
                             user=user,
                             date=date,
                             extra=extra,
                             editor=editor)

        newdesc = changelog.stripdesc(new.description())
        if ((not node)
            and newdesc == old.description()
            and user == old.user()
            and date == old.date()
            and pureextra == old.extra()):
            # nothing changed. continuing here would create a new node
            # anyway because of the amend_source noise.
            #
            # This is not what we expect from amend.
            return old.node()

        ph = repo.ui.config('phases', 'new-commit', phases.draft)
        try:
            if opts.get('secret'):
                commitphase = 'secret'
            else:
                commitphase = old.phase()
            repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
            newid = repo.commitctx(new)
        finally:
            # always restore the original new-commit phase setting
            repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
        if newid != old.node():
            # Reroute the working copy parent to the new changeset
            repo.setparents(newid, nullid)
            mapping = {old.node(): (newid,)}
            if node:
                mapping[node] = ()
            scmutil.cleanupnodes(repo, mapping, 'amend')
        return newid
3077 3077
def commiteditor(repo, ctx, subs, editform=''):
    """Return ctx's description, or open an editor when it is empty."""
    existing = ctx.description()
    if existing:
        return existing
    return commitforceeditor(repo, ctx, subs, editform=editform,
                             unchangedmessagedetection=True)
3083 3083
def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
                      editform='', unchangedmessagedetection=False):
    """Open the user's editor to obtain a commit message for *ctx*.

    Builds the initial editor text from a [committemplate] template
    matching *editform* (most-specific first), or the built-in layout.
    *finishdesc*, if given, post-processes the edited text.  Raises
    error.Abort on an empty message, or (when
    *unchangedmessagedetection* is set) when a templated message comes
    back unedited.  Returns the cleaned commit message.
    """
    if not extramsg:
        extramsg = _("Leave message empty to abort commit.")

    # try 'changeset.<editform parts>' from most to least specific
    forms = [e for e in editform.split('.') if e]
    forms.insert(0, 'changeset')
    templatetext = None
    while forms:
        ref = '.'.join(forms)
        if repo.ui.config('committemplate', ref):
            templatetext = committext = buildcommittemplate(
                repo, ctx, subs, extramsg, ref)
            break
        forms.pop()
    else:
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root
    olddir = pycompat.getcwd()
    os.chdir(repo.root)

    # make in-memory changes visible to external process
    tr = repo.currenttransaction()
    repo.dirstate.write(tr)
    pending = tr and tr.writepending() and repo.root

    editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
                              editform=editform, pending=pending,
                              repopath=repo.path)
    text = editortext

    # strip away anything below this special string (used for editors that want
    # to display the diff)
    stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
    if stripbelow:
        text = text[:stripbelow.start()]

    # drop the HG: helper lines, then restore the working directory
    text = re.sub("(?m)^HG:.*(\n|$)", "", text)
    os.chdir(olddir)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise error.Abort(_("empty commit message"))
    if unchangedmessagedetection and editortext == templatetext:
        raise error.Abort(_("commit message unchanged"))

    return text
3133 3133
def buildcommittemplate(repo, ctx, subs, extramsg, ref):
    """Render the [committemplate] template *ref* for *ctx* and return it."""
    ui = repo.ui
    spec = formatter.templatespec(ref, None, None)
    tmpl = changeset_templater(ui, repo, spec, None, {}, False)
    # expose every [committemplate] key to the template engine
    overrides = ((k, templater.unquotestring(v))
                 for k, v in repo.ui.configitems('committemplate'))
    tmpl.t.cache.update(overrides)

    # the template expects a string; normalize a falsy extramsg
    extramsg = extramsg or ''

    ui.pushbuffer()
    tmpl.show(ctx, extramsg=extramsg)
    return ui.popbuffer()
3147 3147
def hgprefix(msg):
    """Prefix each non-empty line of *msg* with "HG: ", dropping blanks."""
    kept = [line for line in msg.split("\n") if line]
    return "\n".join("HG: %s" % line for line in kept)
3150 3150
def buildcommittext(repo, ctx, subs, extramsg):
    """Build the default (non-templated) commit editor text for *ctx*."""
    lines = []
    append = lines.append
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    if ctx.description():
        append(ctx.description())
        append("")
    append("")  # empty line between message and comments
    append(hgprefix(_("Enter commit message."
                      " Lines beginning with 'HG:' are removed.")))
    append(hgprefix(extramsg))
    append("HG: --")
    append(hgprefix(_("user: %s") % ctx.user()))
    if ctx.p2():
        append(hgprefix(_("branch merge")))
    if ctx.branch():
        append(hgprefix(_("branch '%s'") % ctx.branch()))
    if bookmarks.isactivewdirparent(repo):
        append(hgprefix(_("bookmark '%s'") % repo._activebookmark))
    lines.extend(hgprefix(_("subrepo %s") % s) for s in subs)
    lines.extend(hgprefix(_("added %s") % f) for f in added)
    lines.extend(hgprefix(_("changed %s") % f) for f in modified)
    lines.extend(hgprefix(_("removed %s") % f) for f in removed)
    if not added and not modified and not removed:
        append(hgprefix(_("no files changed")))
    append("")

    return "\n".join(lines)
3178 3178
def commitstatus(repo, node, branch, bheads=None, opts=None):
    """Print user feedback after committing *node*.

    Emits 'created new head' when the commit added a branch head (per
    the decision table below), reports reopened closed branch heads,
    and in verbose/debug mode prints the committed changeset id.
    """
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    if (not opts.get('amend') and bheads and node not in bheads and not
        [x for x in parents if x.node() in bheads and x.branch() == branch]):
        repo.ui.status(_('created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N y additional topo root
        #
        # B N y additional branch root
        # C N y additional topo head
        # H N n usual case
        #
        # B B y weird additional branch root
        # C B y branch merge
        # H B n merge with named branch
        #
        # C C y additional head from merge
        # C H n merge with a head
        #
        # H H n head merge: head count decreases

    if not opts.get('close_branch'):
        for r in parents:
            if r.closesbranch() and r.branch() == branch:
                repo.ui.status(_('reopening closed branch head %d\n') % r)

    if repo.ui.debugflag:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
    elif repo.ui.verbose:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
3226 3226
def postcommitstatus(repo, pats, opts):
    """Return working-directory status limited to *pats* after a commit."""
    matcher = scmutil.match(repo[None], pats, opts)
    return repo.status(match=matcher)
3229 3229
3230 3230 def revert(ui, repo, ctx, parents, *pats, **opts):
3231 3231 parent, p2 = parents
3232 3232 node = ctx.node()
3233 3233
3234 3234 mf = ctx.manifest()
3235 3235 if node == p2:
3236 3236 parent = p2
3237 3237
3238 3238 # need all matching names in dirstate and manifest of target rev,
3239 3239 # so have to walk both. do not print errors if files exist in one
3240 3240 # but not other. in both cases, filesets should be evaluated against
3241 3241 # workingctx to get consistent result (issue4497). this means 'set:**'
3242 3242 # cannot be used to select missing files from target rev.
3243 3243
3244 3244 # `names` is a mapping for all elements in working copy and target revision
3245 3245 # The mapping is in the form:
3246 3246 # <asb path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
3247 3247 names = {}
3248 3248
3249 3249 with repo.wlock():
3250 3250 ## filling of the `names` mapping
3251 3251 # walk dirstate to fill `names`
3252 3252
3253 3253 interactive = opts.get('interactive', False)
3254 3254 wctx = repo[None]
3255 3255 m = scmutil.match(wctx, pats, opts)
3256 3256
3257 3257 # we'll need this later
3258 3258 targetsubs = sorted(s for s in wctx.substate if m(s))
3259 3259
3260 3260 if not m.always():
3261 3261 matcher = matchmod.badmatch(m, lambda x, y: False)
3262 3262 for abs in wctx.walk(matcher):
3263 3263 names[abs] = m.rel(abs), m.exact(abs)
3264 3264
3265 3265 # walk target manifest to fill `names`
3266 3266
3267 3267 def badfn(path, msg):
3268 3268 if path in names:
3269 3269 return
3270 3270 if path in ctx.substate:
3271 3271 return
3272 3272 path_ = path + '/'
3273 3273 for f in names:
3274 3274 if f.startswith(path_):
3275 3275 return
3276 3276 ui.warn("%s: %s\n" % (m.rel(path), msg))
3277 3277
3278 3278 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
3279 3279 if abs not in names:
3280 3280 names[abs] = m.rel(abs), m.exact(abs)
3281 3281
3282 3282 # Find status of all file in `names`.
3283 3283 m = scmutil.matchfiles(repo, names)
3284 3284
3285 3285 changes = repo.status(node1=node, match=m,
3286 3286 unknown=True, ignored=True, clean=True)
3287 3287 else:
3288 3288 changes = repo.status(node1=node, match=m)
3289 3289 for kind in changes:
3290 3290 for abs in kind:
3291 3291 names[abs] = m.rel(abs), m.exact(abs)
3292 3292
3293 3293 m = scmutil.matchfiles(repo, names)
3294 3294
3295 3295 modified = set(changes.modified)
3296 3296 added = set(changes.added)
3297 3297 removed = set(changes.removed)
3298 3298 _deleted = set(changes.deleted)
3299 3299 unknown = set(changes.unknown)
3300 3300 unknown.update(changes.ignored)
3301 3301 clean = set(changes.clean)
3302 3302 modadded = set()
3303 3303
3304 3304 # We need to account for the state of the file in the dirstate,
3305 3305 # even when we revert against something else than parent. This will
3306 3306 # slightly alter the behavior of revert (doing back up or not, delete
3307 3307 # or just forget etc).
3308 3308 if parent == node:
3309 3309 dsmodified = modified
3310 3310 dsadded = added
3311 3311 dsremoved = removed
3312 3312 # store all local modifications, useful later for rename detection
3313 3313 localchanges = dsmodified | dsadded
3314 3314 modified, added, removed = set(), set(), set()
3315 3315 else:
3316 3316 changes = repo.status(node1=parent, match=m)
3317 3317 dsmodified = set(changes.modified)
3318 3318 dsadded = set(changes.added)
3319 3319 dsremoved = set(changes.removed)
3320 3320 # store all local modifications, useful later for rename detection
3321 3321 localchanges = dsmodified | dsadded
3322 3322
3323 3323 # only take into account for removes between wc and target
3324 3324 clean |= dsremoved - removed
3325 3325 dsremoved &= removed
3326 3326 # distinct between dirstate remove and other
3327 3327 removed -= dsremoved
3328 3328
3329 3329 modadded = added & dsmodified
3330 3330 added -= modadded
3331 3331
3332 3332 # tell newly modified apart.
3333 3333 dsmodified &= modified
3334 3334 dsmodified |= modified & dsadded # dirstate added may need backup
3335 3335 modified -= dsmodified
3336 3336
3337 3337 # We need to wait for some post-processing to update this set
3338 3338 # before making the distinction. The dirstate will be used for
3339 3339 # that purpose.
3340 3340 dsadded = added
3341 3341
3342 3342 # in case of merge, files that are actually added can be reported as
3343 3343 # modified, we need to post process the result
3344 3344 if p2 != nullid:
3345 3345 mergeadd = set(dsmodified)
3346 3346 for path in dsmodified:
3347 3347 if path in mf:
3348 3348 mergeadd.remove(path)
3349 3349 dsadded |= mergeadd
3350 3350 dsmodified -= mergeadd
3351 3351
3352 3352 # if f is a rename, update `names` to also revert the source
3353 3353 cwd = repo.getcwd()
3354 3354 for f in localchanges:
3355 3355 src = repo.dirstate.copied(f)
3356 3356 # XXX should we check for rename down to target node?
3357 3357 if src and src not in names and repo.dirstate[src] == 'r':
3358 3358 dsremoved.add(src)
3359 3359 names[src] = (repo.pathto(src, cwd), True)
3360 3360
3361 3361 # determine the exact nature of the deleted changesets
3362 3362 deladded = set(_deleted)
3363 3363 for path in _deleted:
3364 3364 if path in mf:
3365 3365 deladded.remove(path)
3366 3366 deleted = _deleted - deladded
3367 3367
3368 3368 # distinguish between file to forget and the other
3369 3369 added = set()
3370 3370 for abs in dsadded:
3371 3371 if repo.dirstate[abs] != 'a':
3372 3372 added.add(abs)
3373 3373 dsadded -= added
3374 3374
3375 3375 for abs in deladded:
3376 3376 if repo.dirstate[abs] == 'a':
3377 3377 dsadded.add(abs)
3378 3378 deladded -= dsadded
3379 3379
3380 3380 # For files marked as removed, we check if an unknown file is present at
3381 3381 # the same path. If a such file exists it may need to be backed up.
3382 3382 # Making the distinction at this stage helps have simpler backup
3383 3383 # logic.
3384 3384 removunk = set()
3385 3385 for abs in removed:
3386 3386 target = repo.wjoin(abs)
3387 3387 if os.path.lexists(target):
3388 3388 removunk.add(abs)
3389 3389 removed -= removunk
3390 3390
3391 3391 dsremovunk = set()
3392 3392 for abs in dsremoved:
3393 3393 target = repo.wjoin(abs)
3394 3394 if os.path.lexists(target):
3395 3395 dsremovunk.add(abs)
3396 3396 dsremoved -= dsremovunk
3397 3397
3398 3398 # action to be actually performed by revert
3399 3399 # (<list of file>, message>) tuple
3400 3400 actions = {'revert': ([], _('reverting %s\n')),
3401 3401 'add': ([], _('adding %s\n')),
3402 3402 'remove': ([], _('removing %s\n')),
3403 3403 'drop': ([], _('removing %s\n')),
3404 3404 'forget': ([], _('forgetting %s\n')),
3405 3405 'undelete': ([], _('undeleting %s\n')),
3406 3406 'noop': (None, _('no changes needed to %s\n')),
3407 3407 'unknown': (None, _('file not managed: %s\n')),
3408 3408 }
3409 3409
3410 3410 # "constant" that convey the backup strategy.
3411 3411 # All set to `discard` if `no-backup` is set do avoid checking
3412 3412 # no_backup lower in the code.
3413 3413 # These values are ordered for comparison purposes
3414 3414 backupinteractive = 3 # do backup if interactively modified
3415 3415 backup = 2 # unconditionally do backup
3416 3416 check = 1 # check if the existing file differs from target
3417 3417 discard = 0 # never do backup
3418 3418 if opts.get('no_backup'):
3419 3419 backupinteractive = backup = check = discard
3420 3420 if interactive:
3421 3421 dsmodifiedbackup = backupinteractive
3422 3422 else:
3423 3423 dsmodifiedbackup = backup
3424 3424 tobackup = set()
3425 3425
3426 3426 backupanddel = actions['remove']
3427 3427 if not opts.get('no_backup'):
3428 3428 backupanddel = actions['drop']
3429 3429
3430 3430 disptable = (
3431 3431 # dispatch table:
3432 3432 # file state
3433 3433 # action
3434 3434 # make backup
3435 3435
3436 3436 ## Sets that results that will change file on disk
3437 3437 # Modified compared to target, no local change
3438 3438 (modified, actions['revert'], discard),
3439 3439 # Modified compared to target, but local file is deleted
3440 3440 (deleted, actions['revert'], discard),
3441 3441 # Modified compared to target, local change
3442 3442 (dsmodified, actions['revert'], dsmodifiedbackup),
3443 3443 # Added since target
3444 3444 (added, actions['remove'], discard),
3445 3445 # Added in working directory
3446 3446 (dsadded, actions['forget'], discard),
3447 3447 # Added since target, have local modification
3448 3448 (modadded, backupanddel, backup),
3449 3449 # Added since target but file is missing in working directory
3450 3450 (deladded, actions['drop'], discard),
3451 3451 # Removed since target, before working copy parent
3452 3452 (removed, actions['add'], discard),
3453 3453 # Same as `removed` but an unknown file exists at the same path
3454 3454 (removunk, actions['add'], check),
3455 3455 # Removed since targe, marked as such in working copy parent
3456 3456 (dsremoved, actions['undelete'], discard),
3457 3457 # Same as `dsremoved` but an unknown file exists at the same path
3458 3458 (dsremovunk, actions['undelete'], check),
3459 3459 ## the following sets does not result in any file changes
3460 3460 # File with no modification
3461 3461 (clean, actions['noop'], discard),
3462 3462 # Existing file, not tracked anywhere
3463 3463 (unknown, actions['unknown'], discard),
3464 3464 )
3465 3465
3466 3466 for abs, (rel, exact) in sorted(names.items()):
3467 3467 # target file to be touch on disk (relative to cwd)
3468 3468 target = repo.wjoin(abs)
3469 3469 # search the entry in the dispatch table.
3470 3470 # if the file is in any of these sets, it was touched in the working
3471 3471 # directory parent and we are sure it needs to be reverted.
3472 3472 for table, (xlist, msg), dobackup in disptable:
3473 3473 if abs not in table:
3474 3474 continue
3475 3475 if xlist is not None:
3476 3476 xlist.append(abs)
3477 3477 if dobackup:
3478 3478 # If in interactive mode, don't automatically create
3479 3479 # .orig files (issue4793)
3480 3480 if dobackup == backupinteractive:
3481 3481 tobackup.add(abs)
3482 3482 elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
3483 3483 bakname = scmutil.origpath(ui, repo, rel)
3484 3484 ui.note(_('saving current version of %s as %s\n') %
3485 3485 (rel, bakname))
3486 3486 if not opts.get('dry_run'):
3487 3487 if interactive:
3488 3488 util.copyfile(target, bakname)
3489 3489 else:
3490 3490 util.rename(target, bakname)
3491 3491 if ui.verbose or not exact:
3492 3492 if not isinstance(msg, basestring):
3493 3493 msg = msg(abs)
3494 3494 ui.status(msg % rel)
3495 3495 elif exact:
3496 3496 ui.warn(msg % rel)
3497 3497 break
3498 3498
3499 3499 if not opts.get('dry_run'):
3500 3500 needdata = ('revert', 'add', 'undelete')
3501 3501 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
3502 3502 _performrevert(repo, parents, ctx, actions, interactive, tobackup)
3503 3503
3504 3504 if targetsubs:
3505 3505 # Revert the subrepos on the revert list
3506 3506 for sub in targetsubs:
3507 3507 try:
3508 3508 wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
3509 3509 except KeyError:
3510 3510 raise error.Abort("subrepository '%s' does not exist in %s!"
3511 3511 % (sub, short(ctx.node())))
3512 3512
3513 3513 def _revertprefetch(repo, ctx, *files):
3514 3514 """Let extension changing the storage layer prefetch content"""
3515 3515 pass
3516 3516
def _performrevert(repo, parents, ctx, actions, interactive=False,
                   tobackup=None):
    """function that actually perform all the actions computed for revert

    This is an independent function to let extension to plug in and react to
    the imminent revert.

    Make sure you have the working directory locked when calling this function.

    'parents' is the (p1, p2) pair of working directory parents; 'ctx' is
    the context being reverted to; 'actions' maps action names ('revert',
    'add', 'remove', 'drop', 'forget', 'undelete') to (filelist, msg)
    pairs; 'tobackup' is the set of files to back up before modifying.
    """
    parent, p2 = parents
    node = ctx.node()
    # files the user declined to touch in interactive mode; the matcher
    # below excludes them via this (mutated) list
    excluded_files = []
    matcher_opts = {"exclude": excluded_files}

    def checkout(f):
        # write the target revision's content of f into the working dir
        fc = ctx[f]
        repo.wwrite(f, fc.data(), fc.flags())

    def doremove(f):
        # best-effort unlink, then mark removed in the dirstate
        try:
            repo.wvfs.unlinkpath(f)
        except OSError:
            pass
        repo.dirstate.remove(f)

    audit_path = pathutil.pathauditor(repo.root)
    audit_path = pathutil.pathauditor(repo.root, cached=True)
    for f in actions['forget'][0]:
        if interactive:
            choice = repo.ui.promptchoice(
                _("forget added file %s (Yn)?$$ &Yes $$ &No") % f)
            if choice == 0:
                repo.dirstate.drop(f)
            else:
                excluded_files.append(repo.wjoin(f))
        else:
            repo.dirstate.drop(f)
    for f in actions['remove'][0]:
        audit_path(f)
        if interactive:
            choice = repo.ui.promptchoice(
                _("remove added file %s (Yn)?$$ &Yes $$ &No") % f)
            if choice == 0:
                doremove(f)
            else:
                excluded_files.append(repo.wjoin(f))
        else:
            doremove(f)
    for f in actions['drop'][0]:
        audit_path(f)
        repo.dirstate.remove(f)

    normal = None
    if node == parent:
        # We're reverting to our parent. If possible, we'd like status
        # to report the file as clean. We have to use normallookup for
        # merges to avoid losing information about merged/dirty files.
        if p2 != nullid:
            normal = repo.dirstate.normallookup
        else:
            normal = repo.dirstate.normal

    newlyaddedandmodifiedfiles = set()
    if interactive:
        # Prompt the user for changes to revert
        torevert = [repo.wjoin(f) for f in actions['revert'][0]]
        m = scmutil.match(ctx, torevert, matcher_opts)
        diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        operation = 'discard'
        reversehunks = True
        if node != parent:
            operation = 'revert'
            reversehunks = repo.ui.configbool('experimental',
                'revertalternateinteractivemode')
        if reversehunks:
            diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
        else:
            diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
        originalchunks = patch.parsepatch(diff)

        try:

            chunks, opts = recordfilter(repo.ui, originalchunks,
                                        operation=operation)
            if reversehunks:
                chunks = patch.reversehunks(chunks)

        except patch.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)

        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        if tobackup is None:
            tobackup = set()
        # Apply changes
        fp = stringio()
        for c in chunks:
            # Create a backup file only if this hunk should be backed up
            if ishunk(c) and c.header.filename() in tobackup:
                abs = c.header.filename()
                target = repo.wjoin(abs)
                bakname = scmutil.origpath(repo.ui, repo, m.rel(abs))
                util.copyfile(target, bakname)
                tobackup.remove(abs)
            c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)
        if dopatch:
            try:
                patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
            except patch.PatchError as err:
                raise error.Abort(str(err))
        del fp
    else:
        for f in actions['revert'][0]:
            checkout(f)
            if normal:
                normal(f)

    for f in actions['add'][0]:
        # Don't checkout modified files, they are already created by the diff
        if f not in newlyaddedandmodifiedfiles:
            checkout(f)
        repo.dirstate.add(f)

    normal = repo.dirstate.normallookup
    if node == parent and p2 == nullid:
        normal = repo.dirstate.normal
    for f in actions['undelete'][0]:
        checkout(f)
        normal(f)

    # restore copy records for everything we just (re)created
    copied = copies.pathcopies(repo[parent], ctx)

    for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
        if f in copied:
            repo.dirstate.copy(copied[f], f)
3654 3654
class command(registrar.command):
    """registrar.command variant that flags cmdutil-registered commands.

    The flag set here is inspected by the deprecation-warning logic in
    extensions.py.
    """

    def _doregister(self, func, name, *args, **kwargs):
        # mark for deprecwarn in extensions.py
        func._deprecatedregistrar = True
        return super(command, self)._doregister(func, name, *args, **kwargs)
3659 3659
# a list of (ui, repo, otherpeer, opts, missing) functions called by
# commands.outgoing. "missing" is "missing" of the result of
# "findcommonoutgoing()"
outgoinghooks = util.hooks()

# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()

# a list of (ui, repo, opts, changes) functions called by commands.summary.
#
# functions should return tuple of booleans below, if 'changes' is None:
# (whether-incomings-are-needed, whether-outgoings-are-needed)
#
# otherwise, 'changes' is a tuple of tuples below:
# - (sourceurl, sourcebranch, sourcepeer, incoming)
# - (desturl, destbranch, destpeer, outgoing)
summaryremotehooks = util.hooks()

# A list of state files kept by multistep operations like graft.
# Since graft cannot be aborted, it is considered 'clearable' by update.
# note: bisect is intentionally excluded
# Tuple layout: (state file, clearable, allowcommit, error, hint)
unfinishedstates = [
    ('graftstate', True, False, _('graft in progress'),
     _("use 'hg graft --continue' or 'hg update' to abort")),
    ('updatestate', True, False, _('last update was interrupted'),
     _("use 'hg update' to get a consistent checkout"))
    ]
3688 3688
def checkunfinished(repo, commit=False):
    '''Abort if an unfinished multistep operation (like graft) is found.

    With commit=True, states whose allowcommit flag is set are skipped.
    It's probably good to check this right before bailifchanged().
    '''
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        skip = commit and allowcommit
        if not skip and repo.vfs.exists(statefile):
            raise error.Abort(msg, hint=hint)
3699 3699
def clearunfinished(repo):
    '''Check for unfinished operations (as above), and clear the ones
    that are clearable.
    '''
    # First pass: refuse to proceed if any non-clearable state exists.
    for statefile, clearable, _allowcommit, msg, hint in unfinishedstates:
        if not clearable and repo.vfs.exists(statefile):
            raise error.Abort(msg, hint=hint)
    # Second pass: remove every clearable state file that is present.
    for statefile, clearable, _allowcommit, _msg, _hint in unfinishedstates:
        if clearable and repo.vfs.exists(statefile):
            util.unlink(repo.vfs.join(statefile))
3710 3710
# (state file, continue command) pairs: the presence of .hg/{state file}
# means the given command resumes the interrupted operation.
afterresolvedstates = [
    ('graftstate',
     _('hg graft --continue')),
    ]
3715 3715
def howtocontinue(repo):
    '''Check for an unfinished operation and return the command to finish
    it.

    afterresolvedstates tuples define a .hg/{file} and the corresponding
    command needed to finish it.

    Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
    a boolean.
    '''
    contmsg = _("continue: %s")
    pending = next((cmd for statefile, cmd in afterresolvedstates
                    if repo.vfs.exists(statefile)), None)
    if pending is not None:
        return contmsg % pending, True
    if repo[None].dirty(missing=True, merge=False, branch=False):
        return contmsg % _("hg commit"), False
    return None, None
3733 3733
def checkafterresolved(repo):
    '''Inform the user about the next action after completing hg resolve

    If there's a matching afterresolvedstates, the message goes through
    repo.ui.warn; otherwise through repo.ui.note.
    '''
    msg, warning = howtocontinue(repo)
    if msg is None:
        return
    reporter = repo.ui.warn if warning else repo.ui.note
    reporter("%s\n" % msg)
3748 3748
def wrongtooltocontinue(repo, task):
    '''Raise an abort suggesting how to properly continue if there is an
    active task.

    Uses howtocontinue() to find the active task.

    If there's no task (repo.ui.note for 'hg commit'), it does not offer
    a hint.
    '''
    msg, warning = howtocontinue(repo)
    # only surface the "continue: ..." message as a hint when it would
    # have been a warning
    hint = msg if warning else None
    raise error.Abort(_('no %s in progress') % task, hint=hint)
@@ -1,1342 +1,1342 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 match as matchmod,
22 22 pathutil,
23 23 policy,
24 24 pycompat,
25 25 scmutil,
26 26 txnutil,
27 27 util,
28 28 )
29 29
# C or pure-Python parsers implementation, selected by the module policy
parsers = policy.importmod(r'parsers')

propertycache = util.propertycache
filecache = scmutil.filecache
# sizes and mtimes are stored masked to 31 bits (see normal())
_rangemask = 0x7fffffff

dirstatetuple = parsers.dirstatetuple
37 37
class repocache(filecache):
    """filecache for files in .hg/"""
    def join(self, obj, fname):
        # resolve fname through the dirstate's .hg opener
        return obj._opener.join(fname)
42 42
class rootcache(filecache):
    """filecache for files in the repository root"""
    def join(self, obj, fname):
        # resolve fname relative to the repository root
        return obj._join(fname)
47 47
48 48 def _getfsnow(vfs):
49 49 '''Get "now" timestamp on filesystem'''
50 50 tmpfd, tmpname = vfs.mkstemp()
51 51 try:
52 52 return os.fstat(tmpfd).st_mtime
53 53 finally:
54 54 os.close(tmpfd)
55 55 vfs.unlink(tmpname)
56 56
def nonnormalentries(dmap):
    '''Compute the nonnormal dirstate entries from the dmap'''
    try:
        # fast path: available when the C parsers module is in use
        return parsers.nonnormalotherparententries(dmap)
    except AttributeError:
        # pure-Python fallback: scan every entry
        nonnorm = set()
        otherparent = set()
        for fname, entry in dmap.iteritems():
            state = entry[0]
            # not plain clean 'n', or mtime unset (-1)
            if state != 'n' or entry[3] == -1:
                nonnorm.add(fname)
            # size -2 marks a file coming from the other merge parent
            if state == 'n' and entry[2] == -2:
                otherparent.add(fname)
        return nonnorm, otherparent
70 70
71 71 class dirstate(object):
72 72
    def __init__(self, opener, ui, root, validate, sparsematchfn):
        '''Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.

        validate is applied to parent nodes before they are returned;
        sparsematchfn returns the matcher for the sparse checkout.
        '''
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True when in-memory state diverges from the on-disk dirstate
        self._dirty = False
        # True when the parents were changed (see setparents/_read)
        self._dirtypl = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # nesting depth of parentchange() context managers
        self._parentwriters = 0
        self._filename = 'dirstate'
        self._pendingfilename = '%s.pending' % self._filename
        self._plchangecallbacks = {}
        # parents before the current parentchange, or None
        self._origpl = None
        # files touched since the last write, for write-out bookkeeping
        self._updatedfiles = set()

        # for consistent view between _pl() and _read() invocations
        self._pendingmode = None
101 101
    @contextlib.contextmanager
    def parentchange(self):
        '''Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        '''
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1

    def beginparentchange(self):
        '''Marks the beginning of a set of changes that involve changing
        the dirstate parents. If there is an exception during this time,
        the dirstate will not be written when the wlock is released. This
        prevents writing an incoherent dirstate where the parent doesn't
        match the contents.

        Deprecated since 4.3: use the parentchange() context manager.
        '''
        self._ui.deprecwarn('beginparentchange is obsoleted by the '
                            'parentchange context manager.', '4.3')
        self._parentwriters += 1

    def endparentchange(self):
        '''Marks the end of a set of changes that involve changing the
        dirstate parents. Once all parent changes have been marked done,
        the wlock will be free to write the dirstate on release.

        Deprecated since 4.3: use the parentchange() context manager.
        '''
        self._ui.deprecwarn('endparentchange is obsoleted by the '
                            'parentchange context manager.', '4.3')
        if self._parentwriters > 0:
            self._parentwriters -= 1

    def pendingparentchange(self):
        '''Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        '''
        return self._parentwriters > 0
145 145
    @propertycache
    def _map(self):
        '''Return the dirstate contents as a map from filename to
        (state, mode, size, time).'''
        self._read()
        return self._map

    @propertycache
    def _copymap(self):
        # filename -> copy source mapping; populated by _read()
        self._read()
        return self._copymap

    @propertycache
    def _identity(self):
        # stat snapshot of the dirstate file; per _read(), identity is
        # used only for writing
        self._read()
        return self._identity

    @propertycache
    def _nonnormalset(self):
        # files whose state is not a clean 'n' or whose mtime is -1
        # (see nonnormalentries)
        nonnorm, otherparents = nonnormalentries(self._map)
        self._otherparentset = otherparents
        return nonnorm

    @propertycache
    def _otherparentset(self):
        # files in state 'n' with size -2, i.e. coming from the other
        # merge parent (see nonnormalentries)
        nonnorm, otherparents = nonnormalentries(self._map)
        self._nonnormalset = nonnorm
        return otherparents

    @propertycache
    def _filefoldmap(self):
        # normalized-case filename -> actual filename, skipping removed
        # ('r') entries
        try:
            makefilefoldmap = parsers.make_file_foldmap
        except AttributeError:
            pass
        else:
            # C fast path
            return makefilefoldmap(self._map, util.normcasespec,
                                   util.normcasefallback)

        f = {}
        normcase = util.normcase
        for name, s in self._map.iteritems():
            if s[0] != 'r':
                f[normcase(name)] = name
        f['.'] = '.' # prevents useless util.fspath() invocation
        return f

    @propertycache
    def _dirfoldmap(self):
        # normalized-case directory name -> actual directory name
        f = {}
        normcase = util.normcase
        for name in self._dirs:
            f[normcase(name)] = name
        return f

    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
213 213
214 214 @repocache('branch')
215 215 def _branch(self):
216 216 try:
217 217 return self._opener.read("branch").strip() or "default"
218 218 except IOError as inst:
219 219 if inst.errno != errno.ENOENT:
220 220 raise
221 221 return "default"
222 222
    @propertycache
    def _pl(self):
        # The working directory parents (p1, p2), read from the first 40
        # bytes of the dirstate file; (nullid, nullid) when absent.
        try:
            fp = self._opendirstatefile()
            st = fp.read(40)
            fp.close()
            l = len(st)
            if l == 40:
                return st[:20], st[20:40]
            elif l > 0 and l < 40:
                # a short read means a truncated/corrupt dirstate
                raise error.Abort(_('working directory state appears damaged!'))
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return [nullid, nullid]

    @propertycache
    def _dirs(self):
        # directory map derived from _map; 'r' is the skip state
        # (see util.dirs)
        return util.dirs(self._map, 'r')

    def dirs(self):
        return self._dirs

    @rootcache('.hgignore')
    def _ignore(self):
        # matcher built from all ignore files; matches nothing when there
        # are none
        files = self._ignorefiles()
        if not files:
            return matchmod.never(self._root, '')

        pats = ['include:%s' % f for f in files]
        return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)

    @propertycache
    def _slash(self):
        # True when paths should be shown with '/' rather than os.sep
        return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'

    @propertycache
    def _checklink(self):
        # whether symlinks can be used in the working directory
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        # whether the exec bit is honored in the working directory
        return util.checkexec(self._root)

    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems
        return not util.fscasesensitive(self._join('.hg'))

    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
275 275
    def flagfunc(self, buildfallback):
        '''Return a callable mapping a path to its flags: 'l' (symlink),
        'x' (executable) or ''.

        When the filesystem supports both symlinks and the exec bit, the
        flags are read with os.lstat; otherwise buildfallback() supplies
        the missing half (or everything, if neither is supported).
        '''
        if self._checklink and self._checkexec:
            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return 'l'
                    if util.statisexec(st):
                        return 'x'
                except OSError:
                    pass
                return ''
            return f

        fallback = buildfallback()
        if self._checklink:
            # symlinks from the filesystem, exec bit from the fallback
            def f(x):
                if os.path.islink(self._join(x)):
                    return 'l'
                if 'x' in fallback(x):
                    return 'x'
                return ''
            return f
        if self._checkexec:
            # symlinks from the fallback, exec bit from the filesystem
            def f(x):
                if 'l' in fallback(x):
                    return 'l'
                if util.isexec(self._join(x)):
                    return 'x'
                return ''
            return f
        else:
            return fallback
309 309
    @propertycache
    def _cwd(self):
        # internal config: ui.forcecwd
        forcecwd = self._ui.config('ui', 'forcecwd')
        if forcecwd:
            return forcecwd
        return pycompat.getcwd()

    def getcwd(self):
        '''Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        '''
        cwd = self._cwd
        if cwd == self._root:
            return ''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += pycompat.ossep
        if cwd.startswith(rootsep):
            # cwd is inside the repo: return the root-relative remainder
            return cwd[len(rootsep):]
        else:
            # we're outside the repo. return an absolute path.
            return cwd

    def pathto(self, f, cwd=None):
        # Convert repo path f into a display path relative to cwd,
        # honoring the ui.slash preference.
        if cwd is None:
            cwd = self.getcwd()
        path = util.pathto(self._root, cwd, f)
        if self._slash:
            return util.pconvert(path)
        return path
345 345
    def __getitem__(self, key):
        '''Return the current state of key (a filename) in the dirstate.

        States are:
        n normal
        m needs merging
        r marked for removal
        a marked for addition
        ? not tracked
        '''
        return self._map.get(key, ("?",))[0]

    def __contains__(self, key):
        return key in self._map

    def __iter__(self):
        # iterate filenames in sorted order
        return iter(sorted(self._map))

    def items(self):
        return self._map.iteritems()

    iteritems = items

    def parents(self):
        # both parents, passed through the validate callable
        return [self._validate(p) for p in self._pl]

    def p1(self):
        return self._validate(self._pl[0])

    def p2(self):
        return self._validate(self._pl[1])

    def branch(self):
        # branch name converted to the local encoding
        return encoding.tolocal(self._branch)
380 380
    def setparents(self, p1, p2=nullid):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, 'm' merged entries are
        adjusted to normal, and previous copy records are discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if self._parentwriters == 0:
            raise ValueError("cannot set dirstate parent without "
                             "calling dirstate.beginparentchange")

        self._dirty = self._dirtypl = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            self._origpl = self._pl
        self._pl = p1, p2
        copies = {}
        if oldp2 != nullid and p2 == nullid:
            # leaving a merge: fix up entries that refer to the old p2
            candidatefiles = self._nonnormalset.union(self._otherparentset)
            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard 'm' markers when moving away from a merge state
                if s[0] == 'm':
                    if f in self._copymap:
                        copies[f] = self._copymap[f]
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s[0] == 'n' and s[2] == -2:
                    if f in self._copymap:
                        copies[f] = self._copymap[f]
                    self.add(f)
        return copies

    def setbranch(self, branch):
        # Persist the working directory branch to .hg/branch atomically.
        self._branch = encoding.fromlocal(branch)
        f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + '\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache['_branch']
            if ce:
                ce.refresh()
        except: # re-raises
            f.discard()
            raise
434 434
    def _opendirstatefile(self):
        # Open the dirstate file, preferring a pending version inside an
        # open transaction; refuse to mix pending and non-pending reads.
        fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
        if self._pendingmode is not None and self._pendingmode != mode:
            fp.close()
            raise error.Abort(_('working directory state may be '
                                'changed parallelly'))
        self._pendingmode = mode
        return fp

    def _read(self):
        # Parse the on-disk dirstate into _map/_copymap, and set _pl
        # unless the parents were changed in memory (_dirtypl).
        self._map = {}
        self._copymap = {}
        # ignore HG_PENDING because identity is used only for writing
        self._identity = util.filestat.frompath(
            self._opener.join(self._filename))
        try:
            fp = self._opendirstatefile()
            try:
                st = fp.read()
            finally:
                fp.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            return
        if not st:
            return

        if util.safehasattr(parsers, 'dict_new_presized'):
            # Make an estimate of the number of files in the dirstate based on
            # its size. From a linear regression on a set of real-world repos,
            # all over 10,000 files, the size of a dirstate entry is 85
            # bytes. The cost of resizing is significantly higher than the cost
            # of filling in a larger presized dict, so subtract 20% from the
            # size.
            #
            # This heuristic is imperfect in many ways, so in a future dirstate
            # format update it makes sense to just record the number of entries
            # on write.
            self._map = parsers.dict_new_presized(len(st) / 71)

        # Python's garbage collector triggers a GC each time a certain number
        # of container objects (the number being defined by
        # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
        # for each file in the dirstate. The C version then immediately marks
        # them as not to be tracked by the collector. However, this has no
        # effect on when GCs are triggered, only on what objects the GC looks
        # into. This means that O(number of files) GCs are unavoidable.
        # Depending on when in the process's lifetime the dirstate is parsed,
        # this can get very expensive. As a workaround, disable GC while
        # parsing the dirstate.
        #
        # (we cannot decorate the function directly since it is in a C module)
        parse_dirstate = util.nogc(parsers.parse_dirstate)
        p = parse_dirstate(self._map, self._copymap, st)
        if not self._dirtypl:
            self._pl = p
492 492
    def invalidate(self):
        '''Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it.'''

        # drop every cached property so it is recomputed on next access
        for a in ("_map", "_copymap", "_identity",
                  "_filefoldmap", "_dirfoldmap", "_branch",
                  "_pl", "_dirs", "_ignore", "_nonnormalset",
                  "_otherparentset"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = 0
        self._dirty = False
        self._updatedfiles.clear()
        self._parentwriters = 0
        self._origpl = None

    def copy(self, source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""
        if source == dest:
            return
        self._dirty = True
        if source is not None:
            self._copymap[dest] = source
            self._updatedfiles.add(source)
            self._updatedfiles.add(dest)
        elif dest in self._copymap:
            del self._copymap[dest]
            self._updatedfiles.add(dest)

    def copied(self, file):
        # copy source of file, or None when file is not a copy
        return self._copymap.get(file, None)

    def copies(self):
        # the full filename -> copy-source mapping
        return self._copymap
530 530
    def _droppath(self, f):
        # Keep the cached directory and fold maps in sync when f stops
        # being tracked, and record f as updated for write-out.
        if self[f] not in "?r" and "_dirs" in self.__dict__:
            self._dirs.delpath(f)

        if "_filefoldmap" in self.__dict__:
            normed = util.normcase(f)
            if normed in self._filefoldmap:
                del self._filefoldmap[normed]

        self._updatedfiles.add(f)

    def _addpath(self, f, state, mode, size, mtime):
        # Insert or update the dirstate entry for f, validating the
        # filename and file/directory clashes when f becomes tracked.
        oldstate = self[f]
        if state == 'a' or oldstate == 'r':
            scmutil.checkfilename(f)
            if f in self._dirs:
                raise error.Abort(_('directory %r already in dirstate') % f)
            # shadows
            for d in util.finddirs(f):
                if d in self._dirs:
                    break
                if d in self._map and self[d] != 'r':
                    raise error.Abort(
                        _('file %r in dirstate clashes with %r') % (d, f))
        if oldstate in "?r" and "_dirs" in self.__dict__:
            self._dirs.addpath(f)
        self._dirty = True
        self._updatedfiles.add(f)
        self._map[f] = dirstatetuple(state, mode, size, mtime)
        # keep the auxiliary sets consistent with the new entry
        if state != 'n' or mtime == -1:
            self._nonnormalset.add(f)
        if size == -2:
            self._otherparentset.add(f)
564 564
    def normal(self, f):
        '''Mark a file normal and clean.'''
        s = os.lstat(self._join(f))
        mtime = s.st_mtime
        # size and mtime are stored masked to 31 bits (_rangemask)
        self._addpath(f, 'n', s.st_mode,
                      s.st_size & _rangemask, mtime & _rangemask)
        if f in self._copymap:
            del self._copymap[f]
        if f in self._nonnormalset:
            self._nonnormalset.remove(f)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime

    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self._pl[1] != nullid and f in self._map:
            # if there is a merge going on and the file was either
            # in state 'm' (-1) or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map[f]
            if entry[0] == 'r' and entry[2] in (-1, -2):
                source = self._copymap.get(f)
                if entry[2] == -1:
                    self.merge(f)
                elif entry[2] == -2:
                    self.otherparent(f)
                if source:
                    self.copy(source, f)
                return
            if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
                return
        self._addpath(f, 'n', 0, -1, -1)
        if f in self._copymap:
            del self._copymap[f]
        if f in self._nonnormalset:
            self._nonnormalset.remove(f)
604 604
    def otherparent(self, f):
        '''Mark as coming from the other parent, always dirty.

        Only valid while a merge is in progress (second parent set).
        '''
        if self._pl[1] == nullid:
            raise error.Abort(_("setting %r to other parent "
                                "only allowed in merges") % f)
        if f in self and self[f] == 'n':
            # merge-like: file was already tracked as normal
            self._addpath(f, 'm', 0, -2, -1)
        else:
            # add-like: size -2 marks "from other parent"
            self._addpath(f, 'n', 0, -2, -1)

        if f in self._copymap:
            del self._copymap[f]
619 619
620 620 def add(self, f):
621 621 '''Mark a file added.'''
622 622 self._addpath(f, 'a', 0, -1, -1)
623 623 if f in self._copymap:
624 624 del self._copymap[f]
625 625
    def remove(self, f):
        '''Mark a file removed.'''
        self._dirty = True
        self._droppath(f)
        size = 0
        if self._pl[1] != nullid and f in self._map:
            # backup the previous state in the size field of the 'r'
            # entry, so normallookup() can restore it later
            entry = self._map[f]
            if entry[0] == 'm': # merge
                size = -1
            elif entry[0] == 'n' and entry[2] == -2: # other parent
                size = -2
                self._otherparentset.add(f)
        self._map[f] = dirstatetuple('r', 0, size, 0)
        self._nonnormalset.add(f)
        # keep the copy record when a merge/other-parent state was backed up
        if size == 0 and f in self._copymap:
            del self._copymap[f]
643 643
644 644 def merge(self, f):
645 645 '''Mark a file merged.'''
646 646 if self._pl[1] == nullid:
647 647 return self.normallookup(f)
648 648 return self.otherparent(f)
649 649
650 650 def drop(self, f):
651 651 '''Drop a file from the dirstate'''
652 652 if f in self._map:
653 653 self._dirty = True
654 654 self._droppath(f)
655 655 del self._map[f]
656 656 if f in self._nonnormalset:
657 657 self._nonnormalset.remove(f)
658 658 if f in self._copymap:
659 659 del self._copymap[f]
660 660
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Recover the on-disk case of *path* on a casefolding filesystem.

        *normed* is the case-normalized form of *path*; *exists* is a
        tri-state (True/False/None=stat it here) for whether the path
        exists on disk.  When the path exists, the folded result is
        cached in *storemap* keyed by *normed*.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and '/' in path:
                d, f = path.rsplit('/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + "/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if '/' in normed:
                d, f = normed.rsplit('/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + "/" + d
                folded = d + "/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            storemap[normed] = folded

        return folded
686 686
687 687 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
688 688 normed = util.normcase(path)
689 689 folded = self._filefoldmap.get(normed, None)
690 690 if folded is None:
691 691 if isknown:
692 692 folded = path
693 693 else:
694 694 folded = self._discoverpath(path, normed, ignoremissing, exists,
695 695 self._filefoldmap)
696 696 return folded
697 697
    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        # check the file fold map first, then fall back to the directory map
        normed = util.normcase(path)
        folded = self._filefoldmap.get(normed, None)
        if folded is None:
            folded = self._dirfoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                # store discovered result in dirfoldmap so that future
                # normalizefile calls don't start matching directories
                folded = self._discoverpath(path, normed, ignoremissing, exists,
                                            self._dirfoldmap)
        return folded
712 712
713 713 def normalize(self, path, isknown=False, ignoremissing=False):
714 714 '''
715 715 normalize the case of a pathname when on a casefolding filesystem
716 716
717 717 isknown specifies whether the filename came from walking the
718 718 disk, to avoid extra filesystem access.
719 719
720 720 If ignoremissing is True, missing path are returned
721 721 unchanged. Otherwise, we try harder to normalize possibly
722 722 existing path components.
723 723
724 724 The normalized case is determined based on the following precedence:
725 725
726 726 - version of name already stored in the dirstate
727 727 - version of name stored on disk
728 728 - version provided via command arguments
729 729 '''
730 730
731 731 if self._checkcase:
732 732 return self._normalize(path, isknown, ignoremissing)
733 733 return path
734 734
    def clear(self):
        """Reset all in-memory dirstate data to an empty state."""
        self._map = {}
        self._nonnormalset = set()
        self._otherparentset = set()
        if "_dirs" in self.__dict__:
            # drop the lazily-computed directory cache so it is rebuilt
            delattr(self, "_dirs")
        self._copymap = {}
        self._pl = [nullid, nullid]
        self._lastnormaltime = 0
        self._updatedfiles.clear()
        self._dirty = True
746 746
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Reset the dirstate to *parent*, refreshing entries for files.

        Only *changedfiles* (default: all of *allfiles*) are touched:
        those still in *allfiles* are marked for lookup, the rest are
        dropped from the dirstate.
        """
        if changedfiles is None:
            # Rebuild entire dirstate
            changedfiles = allfiles
        # clear() resets _lastnormaltime; preserve it across the rebuild
        lastnormaltime = self._lastnormaltime
        self.clear()
        self._lastnormaltime = lastnormaltime

        if self._origpl is None:
            self._origpl = self._pl
        self._pl = (parent, nullid)
        for f in changedfiles:
            if f in allfiles:
                self.normallookup(f)
            else:
                self.drop(f)

        self._dirty = True
765 765
    def identity(self):
        '''Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        '''
        return self._identity
773 773
    def write(self, tr):
        """Write dirty dirstate out, possibly delayed via transaction *tr*."""
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            dmap = self._map
            for f in self._updatedfiles:
                e = dmap.get(f)
                if e is not None and e[0] == 'n' and e[3] == now:
                    # mtime -1 forces status() to re-examine the file
                    dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
                    self._nonnormalset.add(f)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')
            return

        st = self._opener(filename, "w", atomictemp=True, checkambig=True)
        self._writedirstate(st)
806 806
807 807 def addparentchangecallback(self, category, callback):
808 808 """add a callback to be called when the wd parents are changed
809 809
810 810 Callback will be called with the following arguments:
811 811 dirstate, (oldp1, oldp2), (newp1, newp2)
812 812
813 813 Category is a unique identifier to allow overwriting an old callback
814 814 with a newer callback.
815 815 """
816 816 self._plchangecallbacks[category] = callback
817 817
    def _writedirstate(self, st):
        """Serialize the dirstate into the open file object *st*."""
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(self._plchangecallbacks.iteritems()):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st).st_mtime & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in self._map.iteritems():
                if e[0] == 'n' and e[3] == now:
                    import time # to avoid useless import
                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end # trust our estimate that the end is near now
                    break

        st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
        # recompute the derived sets from the freshly packed map
        self._nonnormalset, self._otherparentset = nonnormalentries(self._map)
        st.close()
        self._lastnormaltime = 0
        self._dirty = self._dirtypl = False
850 850
851 851 def _dirignore(self, f):
852 852 if f == '.':
853 853 return False
854 854 if self._ignore(f):
855 855 return True
856 856 for p in util.finddirs(f):
857 857 if self._ignore(p):
858 858 return True
859 859 return False
860 860
    def _ignorefiles(self):
        """Return the list of ignore-pattern files to consult.

        Includes the repository's .hgignore (when present) plus any
        ui.ignore / ui.ignore.* paths from the configuration.
        """
        files = []
        if os.path.exists(self._join('.hgignore')):
            files.append(self._join('.hgignore'))
        for name, path in self._ui.configitems("ui"):
            if name == 'ignore' or name.startswith('ignore.'):
                # we need to use os.path.join here rather than self._join
                # because path is arbitrary and user-specified
                files.append(os.path.join(self._rootdir, util.expandpath(path)))
        return files
871 871
    def _ignorefileandline(self, f):
        """Return (ignorefile, lineno, line) for the rule that ignores *f*.

        Scans all ignore files breadth-first (following subincludes) and
        returns the origin of the first matching pattern, or
        (None, -1, "") when nothing matches.
        """
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(i, self._ui.warn,
                                                sourceinfo=True)
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, 'glob')
                if kind == "subinclude":
                    if p not in visited:
                        # queue the subincluded file for scanning
                        files.append(p)
                    continue
                m = matchmod.match(self._root, '', [], [pattern],
                                   warn=self._ui.warn)
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, "")
891 891
    def _walkexplicit(self, match, subrepos):
        '''Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found.'''

        def badtype(mode):
            # human-readable message for an unsupported directory entry type
            kind = _('unknown')
            if stat.S_ISCHR(mode):
                kind = _('character device')
            elif stat.S_ISBLK(mode):
                kind = _('block device')
            elif stat.S_ISFIFO(mode):
                kind = _('fifo')
            elif stat.S_ISSOCK(mode):
                kind = _('socket')
            elif stat.S_ISDIR(mode):
                kind = _('directory')
            return _('unsupported file type (type is %s)') % kind

        # bind hot attributes to locals for the loop below
        matchedir = match.explicitdir
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop requested files that live inside a listed subrepo; both
        # lists are sorted so a single merge-style pass suffices
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + "/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or '.' in files:
            files = ['.']
        results = dict.fromkeys(subrepos)
        results['.hg'] = None

        alldirs = None
        for ff in files:
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['.']
            if normalize and ff != '.':
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    if matchedir:
                        matchedir(nf)
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst: # nf not found on disk - it is dirstate only
                if nf in dmap: # does it exactly match a missing file?
                    results[nf] = None
                else: # does it match a missing directory?
                    if alldirs is None:
                        alldirs = util.dirs(dmap)
                    if nf in alldirs:
                        if matchedir:
                            matchedir(nf)
                        notfoundadd(nf)
                    else:
                        badfn(ff, inst.strerror)

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            # group found files by their case-normalized form
            for f, st in results.iteritems():
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in normed.iteritems():
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(path, norm, True, None,
                                                    self._dirfoldmap)
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
1021 1021
    def walk(self, match, subrepos, unknown, ignored, full=True):
        '''
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        '''
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # bind hot attributes to locals for the traversal loops below
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact(): # match.exact
            exact = True
            dirignore = util.always # skip step 2
        elif match.prefix(): # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                nd = work.pop()
                if not match.visitdir(nd):
                    continue
                skip = None
                if nd == '.':
                    nd = ''
                else:
                    skip = '.hg'
                try:
                    entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(self.pathto(nd), inst.strerror)
                        continue
                    raise
                for f, kind, st in entries:
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(nd and (nd + "/" + f) or f, True,
                                           True)
                    else:
                        nf = nd and (nd + "/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif ((matchalways or matchfn(nf))
                                  and not ignore(nf)):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        for s in subrepos:
            del results[s]
        del results['.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (normalizefile and
                        normalizefile(nf, True, True) in results):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1185 1185
    def status(self, match, subrepos, ignored, clean, unknown):
        '''Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        '''
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        # bind bound methods/attributes to locals for the hot loop below
        dmap = self._map
        ladd = lookup.append # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append
        iadd = ignored.append
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in self.walk(match, subrepos, listunknown, listignored,
                                full=full).iteritems():
            if fn not in dmap:
                # file is not tracked: either ignored or unknown
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dmap[fn]
            state = t[0]
            mode = t[1]
            size = t[2]
            time = t[3]

            if not st and state in "nma":
                # tracked but gone from disk
                dadd(fn)
            elif state == 'n':
                if (size >= 0 and
                    ((size != st.st_size and size != st.st_size & _rangemask)
                     or ((mode ^ st.st_mode) & 0o100 and checkexec))
                    or size == -2 # other parent
                    or fn in copymap):
                    madd(fn)
                elif time != st.st_mtime and time != st.st_mtime & _rangemask:
                    ladd(fn)
                elif st.st_mtime == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == 'm':
                madd(fn)
            elif state == 'a':
                aadd(fn)
            elif state == 'r':
                radd(fn)

        return (lookup, scmutil.status(modified, added, removed, deleted,
                                       unknown, ignored, clean))
1277 1277
1278 1278 def matches(self, match):
1279 1279 '''
1280 1280 return files in the dirstate (in whatever state) filtered by match
1281 1281 '''
1282 1282 dmap = self._map
1283 1283 if match.always():
1284 1284 return dmap.keys()
1285 1285 files = match.files()
1286 1286 if match.isexact():
1287 1287 # fast path -- filter the other way around, since typically files is
1288 1288 # much smaller than dmap
1289 1289 return [f for f in files if f in dmap]
1290 1290 if match.prefix() and all(fn in dmap for fn in files):
1291 1291 # fast path -- all the values are known to be files, so just return
1292 1292 # that
1293 1293 return list(files)
1294 1294 return [f for f in dmap if match(f)]
1295 1295
1296 1296 def _actualfilename(self, tr):
1297 1297 if tr:
1298 1298 return self._pendingfilename
1299 1299 else:
1300 1300 return self._filename
1301 1301
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file'''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(self._opener(filename, "w", atomictemp=True,
                                             checkambig=True))

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator('dirstate', (self._filename,),
                                self._writedirstate, location='plain')

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location='plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(self._opener.join(filename),
                      self._opener.join(backupname), hardlink=True)
1331 1331
    def restorebackup(self, tr, backupname):
        '''Restore dirstate by backup file'''
        # this "invalidate()" prevents "wlock.release()" from writing
        # changes of dirstate out after restoring from backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        self._opener.rename(backupname, filename, checkambig=True)
1339 1339
1340 1340 def clearbackup(self, tr, backupname):
1341 1341 '''Clear backup file'''
1342 1342 self._opener.unlink(backupname)
@@ -1,2264 +1,2265 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 )
24 24 from . import (
25 25 bookmarks,
26 26 branchmap,
27 27 bundle2,
28 28 changegroup,
29 29 changelog,
30 30 color,
31 31 context,
32 32 dirstate,
33 33 dirstateguard,
34 34 encoding,
35 35 error,
36 36 exchange,
37 37 extensions,
38 38 filelog,
39 39 hook,
40 40 lock as lockmod,
41 41 manifest,
42 42 match as matchmod,
43 43 merge as mergemod,
44 44 mergeutil,
45 45 namespaces,
46 46 obsolete,
47 47 pathutil,
48 48 peer,
49 49 phases,
50 50 pushkey,
51 51 pycompat,
52 52 repoview,
53 53 revset,
54 54 revsetlang,
55 55 scmutil,
56 56 sparse,
57 57 store,
58 58 subrepo,
59 59 tags as tagsmod,
60 60 transaction,
61 61 txnutil,
62 62 util,
63 63 vfs as vfsmod,
64 64 )
65 65
# convenience aliases re-exported for callers of this module
release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()
74 74
class _basefilecache(scmutil.filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """
    # Descriptor protocol: redirect every access to the unfiltered repo so
    # all filtered views share a single cache entry.
    def __get__(self, repo, type=None):
        if repo is None:
            # accessed on the class, not an instance
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())
86 86
class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            # register as vfs-relative ('plain') cached files
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        # resolve fname against the repo's working vfs
        return obj.vfs.join(fname)
96 96
class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            # register as store-relative ('') cached files
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        # resolve fname against the repo's store
        return obj.sjoin(fname)
106 106
def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    entry = repo.unfiltered()._filecache.get(name, None)
    if entry:
        return entry.obj, True
    return None, False
116 116
class unfilteredpropertycache(util.propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            # already unfiltered: let the base cache compute/store the value
            return super(unfilteredpropertycache, self).__get__(unfi)
        # filtered view: read the value cached on the unfiltered repo
        return getattr(unfi, self.name)
125 125
class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # store directly on the (possibly filtered) instance
        object.__setattr__(obj, self.name, value)
131 131
132 132
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    unfi = repo.unfiltered()
    return name in unfi.__dict__
136 136
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def inner(repo, *args, **kwargs):
        unfi = repo.unfiltered()
        return orig(unfi, *args, **kwargs)
    return inner
142 142
# capabilities advertised by a modern local peer
moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
# legacy peers additionally expose 'changegroupsubset'
legacycaps = moderncaps.union({'changegroupsubset'})
146 146
147 147 class localpeer(peer.peerrepository):
148 148 '''peer for a local repo; reflects only the most recent API'''
149 149
    def __init__(self, repo, caps=None):
        if caps is None:
            caps = moderncaps.copy()
        peer.peerrepository.__init__(self)
        # operate on the 'served' filtered view, not the raw repo
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
157 157
    def close(self):
        # delegate to the underlying repository
        self._repo.close()
160 160
    def _capabilities(self):
        # capabilities were computed (and restricted) in __init__
        return self._caps
163 163
    def local(self):
        # a local peer exposes its backing repository directly
        return self._repo
166 166
    def canpush(self):
        # pushing to a local repository is always possible
        return True
169 169
    def url(self):
        # delegate to the underlying repository
        return self._repo.url()
172 172
    def lookup(self, key):
        # delegate to the underlying repository
        return self._repo.lookup(key)
175 175
    def branchmap(self):
        # delegate to the underlying repository
        return self._repo.branchmap()
178 178
179 179 def heads(self):
180 180 return self._repo.heads()
181 181
182 182 def known(self, nodes):
183 183 return self._repo.known(nodes)
184 184
185 185 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
186 186 **kwargs):
187 187 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
188 188 common=common, bundlecaps=bundlecaps,
189 189 **kwargs)
190 190 cb = util.chunkbuffer(chunks)
191 191
192 192 if exchange.bundle2requested(bundlecaps):
193 193 # When requesting a bundle2, getbundle returns a stream to make the
194 194 # wire level function happier. We need to build a proper object
195 195 # from it in local peer.
196 196 return bundle2.getunbundler(self.ui, cb)
197 197 else:
198 198 return changegroup.getunbundler('01', cb, None)
199 199
200 200 # TODO We might want to move the next two calls into legacypeer and add
201 201 # unbundle instead.
202 202
203 203 def unbundle(self, cg, heads, url):
204 204 """apply a bundle on a repo
205 205
206 206 This function handles the repo locking itself."""
207 207 try:
208 208 try:
209 209 cg = exchange.readbundle(self.ui, cg, None)
210 210 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
211 211 if util.safehasattr(ret, 'getchunks'):
212 212 # This is a bundle20 object, turn it into an unbundler.
213 213 # This little dance should be dropped eventually when the
214 214 # API is finally improved.
215 215 stream = util.chunkbuffer(ret.getchunks())
216 216 ret = bundle2.getunbundler(self.ui, stream)
217 217 return ret
218 218 except Exception as exc:
219 219 # If the exception contains output salvaged from a bundle2
220 220 # reply, we need to make sure it is printed before continuing
221 221 # to fail. So we build a bundle2 with such output and consume
222 222 # it directly.
223 223 #
224 224 # This is not very elegant but allows a "simple" solution for
225 225 # issue4594
226 226 output = getattr(exc, '_bundle2salvagedoutput', ())
227 227 if output:
228 228 bundler = bundle2.bundle20(self._repo.ui)
229 229 for out in output:
230 230 bundler.addpart(out)
231 231 stream = util.chunkbuffer(bundler.getchunks())
232 232 b = bundle2.getunbundler(self.ui, stream)
233 233 bundle2.processbundle(self._repo, b)
234 234 raise
235 235 except error.PushRaced as exc:
236 236 raise error.ResponseError(_('push failed:'), str(exc))
237 237
238 238 def pushkey(self, namespace, key, old, new):
239 239 return self._repo.pushkey(namespace, key, old, new)
240 240
241 241 def listkeys(self, namespace):
242 242 return self._repo.listkeys(namespace)
243 243
244 244 def debugwireargs(self, one, two, three=None, four=None, five=None):
245 245 '''used to test argument passing over the wire'''
246 246 return "%s %s %s %s %s" % (one, two, three, four, five)
247 247
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        # Advertise the legacy capability set (modern + changegroupsubset).
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
266 266
267 267 # Increment the sub-version when the revlog v2 format changes to lock out old
268 268 # clients.
269 269 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
270 270
271 271 class localrepository(object):
272 272
273 273 supportedformats = {
274 274 'revlogv1',
275 275 'generaldelta',
276 276 'treemanifest',
277 277 'manifestv2',
278 278 REVLOGV2_REQUIREMENT,
279 279 }
280 280 _basesupported = supportedformats | {
281 281 'store',
282 282 'fncache',
283 283 'shared',
284 284 'relshared',
285 285 'dotencode',
286 286 'exp-sparse',
287 287 }
288 288 openerreqs = {
289 289 'revlogv1',
290 290 'generaldelta',
291 291 'treemanifest',
292 292 'manifestv2',
293 293 }
294 294
295 295 # a list of (ui, featureset) functions.
296 296 # only functions defined in module of enabled extensions are invoked
297 297 featuresetupfuncs = set()
298 298
299 299 # list of prefix for file which can be written without 'wlock'
300 300 # Extensions should extend this list when needed
301 301 _wlockfreeprefix = {
302 302 # We migh consider requiring 'wlock' for the next
303 303 # two, but pretty much all the existing code assume
304 304 # wlock is not needed so we keep them excluded for
305 305 # now.
306 306 'hgrc',
307 307 'requires',
308 308 # XXX cache is a complicatged business someone
309 309 # should investigate this in depth at some point
310 310 'cache/',
311 311 # XXX shouldn't be dirstate covered by the wlock?
312 312 'dirstate',
313 313 # XXX bisect was still a bit too messy at the time
314 314 # this changeset was introduced. Someone should fix
315 315 # the remainig bit and drop this line
316 316 'bisect.state',
317 317 }
318 318
319 319 def __init__(self, baseui, path, create=False):
320 320 self.requirements = set()
321 321 self.filtername = None
322 322 # wvfs: rooted at the repository root, used to access the working copy
323 323 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
324 324 # vfs: rooted at .hg, used to access repo files outside of .hg/store
325 325 self.vfs = None
326 326 # svfs: usually rooted at .hg/store, used to access repository history
327 327 # If this is a shared repository, this vfs may point to another
328 328 # repository's .hg/store directory.
329 329 self.svfs = None
330 330 self.root = self.wvfs.base
331 331 self.path = self.wvfs.join(".hg")
332 332 self.origroot = path
333 333 # These auditor are not used by the vfs,
334 334 # only used when writing this comment: basectx.match
335 335 self.auditor = pathutil.pathauditor(self.root, self._checknested)
336 336 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
337 realfs=False)
337 realfs=False, cached=True)
338 338 self.baseui = baseui
339 339 self.ui = baseui.copy()
340 340 self.ui.copy = baseui.copy # prevent copying repo configuration
341 self.vfs = vfsmod.vfs(self.path)
341 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
342 342 if (self.ui.configbool('devel', 'all-warnings') or
343 343 self.ui.configbool('devel', 'check-locks')):
344 344 self.vfs.audit = self._getvfsward(self.vfs.audit)
345 345 # A list of callback to shape the phase if no data were found.
346 346 # Callback are in the form: func(repo, roots) --> processed root.
347 347 # This list it to be filled by extension during repo setup
348 348 self._phasedefaults = []
349 349 try:
350 350 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
351 351 self._loadextensions()
352 352 except IOError:
353 353 pass
354 354
355 355 if self.featuresetupfuncs:
356 356 self.supported = set(self._basesupported) # use private copy
357 357 extmods = set(m.__name__ for n, m
358 358 in extensions.extensions(self.ui))
359 359 for setupfunc in self.featuresetupfuncs:
360 360 if setupfunc.__module__ in extmods:
361 361 setupfunc(self.ui, self.supported)
362 362 else:
363 363 self.supported = self._basesupported
364 364 color.setup(self.ui)
365 365
366 366 # Add compression engines.
367 367 for name in util.compengines:
368 368 engine = util.compengines[name]
369 369 if engine.revlogheader():
370 370 self.supported.add('exp-compression-%s' % name)
371 371
372 372 if not self.vfs.isdir():
373 373 if create:
374 374 self.requirements = newreporequirements(self)
375 375
376 376 if not self.wvfs.exists():
377 377 self.wvfs.makedirs()
378 378 self.vfs.makedir(notindexed=True)
379 379
380 380 if 'store' in self.requirements:
381 381 self.vfs.mkdir("store")
382 382
383 383 # create an invalid changelog
384 384 self.vfs.append(
385 385 "00changelog.i",
386 386 '\0\0\0\2' # represents revlogv2
387 387 ' dummy changelog to prevent using the old repo layout'
388 388 )
389 389 else:
390 390 raise error.RepoError(_("repository %s not found") % path)
391 391 elif create:
392 392 raise error.RepoError(_("repository %s already exists") % path)
393 393 else:
394 394 try:
395 395 self.requirements = scmutil.readrequires(
396 396 self.vfs, self.supported)
397 397 except IOError as inst:
398 398 if inst.errno != errno.ENOENT:
399 399 raise
400 400
401 401 cachepath = self.vfs.join('cache')
402 402 self.sharedpath = self.path
403 403 try:
404 404 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
405 405 if 'relshared' in self.requirements:
406 406 sharedpath = self.vfs.join(sharedpath)
407 407 vfs = vfsmod.vfs(sharedpath, realpath=True)
408 408 cachepath = vfs.join('cache')
409 409 s = vfs.base
410 410 if not vfs.exists():
411 411 raise error.RepoError(
412 412 _('.hg/sharedpath points to nonexistent directory %s') % s)
413 413 self.sharedpath = s
414 414 except IOError as inst:
415 415 if inst.errno != errno.ENOENT:
416 416 raise
417 417
418 418 if 'exp-sparse' in self.requirements and not sparse.enabled:
419 419 raise error.RepoError(_('repository is using sparse feature but '
420 420 'sparse is not enabled; enable the '
421 421 '"sparse" extensions to access'))
422 422
423 423 self.store = store.store(
424 self.requirements, self.sharedpath, vfsmod.vfs)
424 self.requirements, self.sharedpath,
425 lambda base: vfsmod.vfs(base, cacheaudited=True))
425 426 self.spath = self.store.path
426 427 self.svfs = self.store.vfs
427 428 self.sjoin = self.store.join
428 429 self.vfs.createmode = self.store.createmode
429 self.cachevfs = vfsmod.vfs(cachepath)
430 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
430 431 self.cachevfs.createmode = self.store.createmode
431 432 if (self.ui.configbool('devel', 'all-warnings') or
432 433 self.ui.configbool('devel', 'check-locks')):
433 434 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
434 435 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
435 436 else: # standard vfs
436 437 self.svfs.audit = self._getsvfsward(self.svfs.audit)
437 438 self._applyopenerreqs()
438 439 if create:
439 440 self._writerequirements()
440 441
441 442 self._dirstatevalidatewarned = False
442 443
443 444 self._branchcaches = {}
444 445 self._revbranchcache = None
445 446 self.filterpats = {}
446 447 self._datafilters = {}
447 448 self._transref = self._lockref = self._wlockref = None
448 449
449 450 # A cache for various files under .hg/ that tracks file changes,
450 451 # (used by the filecache decorator)
451 452 #
452 453 # Maps a property name to its util.filecacheentry
453 454 self._filecache = {}
454 455
455 456 # hold sets of revision to be filtered
456 457 # should be cleared when something might have changed the filter value:
457 458 # - new changesets,
458 459 # - phase change,
459 460 # - new obsolescence marker,
460 461 # - working directory parent change,
461 462 # - bookmark changes
462 463 self.filteredrevcache = {}
463 464
464 465 # post-dirstate-status hooks
465 466 self._postdsstatus = []
466 467
467 468 # Cache of types representing filtered repos.
468 469 self._filteredrepotypes = weakref.WeakKeyDictionary()
469 470
470 471 # generic mapping between names and nodes
471 472 self.names = namespaces.namespaces()
472 473
473 474 # Key to signature value.
474 475 self._sparsesignaturecache = {}
475 476 # Signature to cached matcher instance.
476 477 self._sparsematchercache = {}
477 478
    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        # Hold the repo weakly: the ward is stored on the vfs, which the repo
        # owns, so a strong reference would create a cycle.
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            # Bail out silently while the repo is mid-construction or gone.
            # NOTE(review): these early exits drop origfunc's return value.
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs
512 513
    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        # Weak reference avoids a repo <-> svfs reference cycle.
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            # Every write under the store must hold the repo lock.
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs
531 532
    def close(self):
        # Flush in-memory caches to disk before the repo object goes away.
        self._writecaches()

    def _loadextensions(self):
        # Load every extension enabled in this repository's configuration.
        extensions.loadall(self.ui)

    def _writecaches(self):
        # Only the rev-branch cache is persisted here, and only when it was
        # actually instantiated.
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        """Filter/extend the advertised peer capability set ``caps``."""
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps
548 549
    def _applyopenerreqs(self):
        """Translate requirements and config into store-vfs (revlog) options."""
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas')
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan', -1)
        # A negative value leaves the delta-chain span unlimited.
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True

    def _writerequirements(self):
        # Persist self.requirements into .hg/requires.
        scmutil.writerequires(self.vfs, self.requirements)
583 584
    def _checknested(self, path):
        """Determine if path is a legal nested repository.

        ``path`` is an absolute filesystem path; returns True only when it
        lies under this repo's root and matches a subrepository recorded in
        the working directory's substate.
        """
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    # path is inside a subrepo; let the subrepo decide.
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
621 622
    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # Python <3.4 easily leaks types via __mro__. See
        # https://bugs.python.org/issue17950. We cache dynamically
        # created types so this method doesn't leak on every
        # invocation.

        key = self.unfiltered().__class__
        if key not in self._filteredrepotypes:
            # Build a new type with the repoview mixin and the base
            # class of this repo. Give it a name containing the
            # filter name to aid debugging.
            bases = (repoview.repoview, key)
            cls = type(r'%sfilteredrepo' % name, bases, {})
            self._filteredrepotypes[key] = cls

        return self._filteredrepotypes[key](self, name)
648 649
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        # Recomputed automatically whenever either bookmark file changes.
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        # The sparse matcher is resolved lazily so later sparse config
        # changes are picked up by the dirstate.
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        """Return ``node`` if the changelog knows it, else nullid.

        Warns (once per repo instance) when the dirstate points at an
        unknown working parent."""
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid
700 701
    def __getitem__(self, changeid):
        """Return the changectx for ``changeid`` (None = working directory,
        a slice = list of changectxs, anything else = single changectx)."""
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            # Fall back to the working context when wdir was requested.
            return context.workingctx(self)

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.LookupError is raised if an ambiguous node specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        # A repository object is always truthy, even when empty.
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # Number of revisions in the changelog.
        return len(self.changelog)

    def __iter__(self):
        # Iterate revision numbers.
        return iter(self.changelog)
735 736
    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        # ui=None: no alias expansion (see docstring above).
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs, repo=self,
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

    def url(self):
        # Local repositories are identified by a file: URL.
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
791 792
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # Derived caches, populated lazily by tagslist()/nodetags().
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            # A filtered view may hide nodes the cached tags point to, so
            # recompute instead of trusting the cache.
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
865 866
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            # Sort by revision, then drop the rev from the cached tuples.
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            # Invert the tag -> node mapping, sorting each node's tag list.
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)
905 906
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        # Lazily create a single rev->branch cache shared by all repo views.
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        """Resolve any changeid (rev, node, tag, ...) to a node."""
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        # If ``key`` names a branch (here or on ``remote``), return it;
        # otherwise return the branch of the revision ``key`` resolves to.
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()
944 945
945 946 def known(self, nodes):
946 947 cl = self.changelog
947 948 nm = cl.nodemap
948 949 filtered = cl.filteredrevs
949 950 result = []
950 951 for n in nodes:
951 952 r = nm.get(n)
952 953 resp = not (r is None or r in filtered)
953 954 result.append(resp)
954 955 return result
955 956
    def local(self):
        """Return self; a local repository answers truthy here."""
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None
978 979
    def wjoin(self, f, *insidef):
        # Join ``f`` (and further components) onto the working dir root.
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        """Return the filelog for tracked file ``f`` (leading '/' stripped)."""
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        # Convenience alias for repo[changeid].
        return self[changeid]
989 990
    def setparents(self, p1, p2=nullid):
        """Set the working directory parents to ``p1``/``p2`` and fix up
        dirstate copy records accordingly."""
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                # Dropping the second parent: discard copy records whose
                # source and destination are both absent from p1.
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)
1005 1006
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        # Delegate to the dirstate, which tracks the original cwd.
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        # Translate repo-relative path ``f`` to one relative to ``cwd``.
        return self.dirstate.pathto(f, cwd)
1016 1017
    def _loadfilter(self, filter):
        """Load (and cache) the filter patterns configured in section
        ``filter`` (typically 'encode' or 'decode')."""
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables filtering for this pattern.
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        # A registered in-process data filter takes
                        # precedence over spawning an external command.
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
1040 1041
1041 1042 def _filter(self, filterpats, filename, data):
1042 1043 for mf, fn, cmd in filterpats:
1043 1044 if mf(filename):
1044 1045 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1045 1046 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1046 1047 break
1047 1048
1048 1049 return data
1049 1050
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # cached [encode] filter pipeline, applied when reading a file
        # out of the working directory (see wread)
        return self._loadfilter('encode')
1053 1054
    @unfilteredpropertycache
    def _decodefilterpats(self):
        # cached [decode] filter pipeline, applied when writing data to
        # the working directory (see wwrite/wwritedata)
        return self._loadfilter('decode')
1057 1058
1058 1059 def adddatafilter(self, name, filter):
1059 1060 self._datafilters[name] = filter
1060 1061
1061 1062 def wread(self, filename):
1062 1063 if self.wvfs.islink(filename):
1063 1064 data = self.wvfs.readlink(filename)
1064 1065 else:
1065 1066 data = self.wvfs.read(filename)
1066 1067 return self._filter(self._encodefilterpats, filename, data)
1067 1068
1068 1069 def wwrite(self, filename, data, flags, backgroundclose=False):
1069 1070 """write ``data`` into ``filename`` in the working directory
1070 1071
1071 1072 This returns length of written (maybe decoded) data.
1072 1073 """
1073 1074 data = self._filter(self._decodefilterpats, filename, data)
1074 1075 if 'l' in flags:
1075 1076 self.wvfs.symlink(data, filename)
1076 1077 else:
1077 1078 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1078 1079 if 'x' in flags:
1079 1080 self.wvfs.setflags(filename, False, True)
1080 1081 return len(data)
1081 1082
1082 1083 def wwritedata(self, filename, data):
1083 1084 return self._filter(self._decodefilterpats, filename, data)
1084 1085
1085 1086 def currenttransaction(self):
1086 1087 """return the current transaction or None if non exists"""
1087 1088 if self._transref:
1088 1089 tr = self._transref()
1089 1090 else:
1090 1091 tr = None
1091 1092
1092 1093 if tr and tr.running():
1093 1094 return tr
1094 1095 return None
1095 1096
    def transaction(self, desc, report=None):
        """Open a new transaction on the store, or nest into the running one.

        ``desc`` is a short string naming the operation (e.g. 'commit',
        'strip'); ``report`` optionally overrides ui.warn for rollback
        output.  The store lock must already be held.  Returns the
        transaction object, which the caller must close/release.
        """
        if (self.ui.configbool('devel', 'all-warnings')
            or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            scmutil.registersummarycallback(self, tr, desc)
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movement from a code perspective. So we fallback to a
        # tracking at the repository level. One could envision to track changes
        # to the '.hgtags' file through changegroup apply but that fails to
        # cope with case where transaction expose new heads without changegroup
        # being involved (eg: phase movement).
        #
        # For now, We gate the feature behind a flag since this likely comes
        # with performance impacts. The current code run more often than needed
        # and do not use caches as much as it could. The current focus is on
        # the behavior of the feature so we disable it by default. The flag
        # will be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tag were touched
        # in this one. Changes are recorded in a line base format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follow:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once buiding set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we needs it to still exist on the transaction
                        # is close (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hooks since the 'tr.hookargs'
            # dict is copied before these run. In addition we needs the data
            # available to in memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles)
        tr.changes['revs'] = set()
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize mean that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happen during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
1262 1263
1263 1264 def _journalfiles(self):
1264 1265 return ((self.svfs, 'journal'),
1265 1266 (self.vfs, 'journal.dirstate'),
1266 1267 (self.vfs, 'journal.branch'),
1267 1268 (self.vfs, 'journal.desc'),
1268 1269 (self.vfs, 'journal.bookmarks'),
1269 1270 (self.svfs, 'journal.phaseroots'))
1270 1271
1271 1272 def undofiles(self):
1272 1273 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1273 1274
    @unfilteredmethod
    def _writejournal(self, desc):
        # Snapshot everything a rollback needs before the transaction
        # starts: dirstate, current branch, repo length plus *desc* (used
        # by the rollback message), bookmarks and phase roots.  The
        # matching 'undo.*' copies are produced from these files when the
        # transaction closes (see _journalfiles/undofiles).
        self.dirstate.savebackup(None, 'journal.dirstate')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))
1285 1286
    def recover(self):
        """Recover from an interrupted (crashed) transaction.

        Under the store lock, rolls the partially written 'journal' files
        back.  Returns True when a transaction was recovered, False when
        there was nothing to do.
        """
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
1300 1301
    def rollback(self, dryrun=False, force=False):
        """Undo the last committed transaction (see _rollback).

        Returns 0 on success, 1 when no undo information exists.  Takes
        wlock then lock; a dirstateguard protects the dirstate while the
        store files are being restored.
        """
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            # release in reverse acquisition order
            release(dsguard, lock, wlock)
1315 1316
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        """Restore the store (and possibly dirstate) from the undo files.

        ``dryrun`` only reports what would happen; ``force`` skips the
        safety check against rolling back a commit that is not the
        working directory parent; ``dsguard`` is the dirstateguard (or
        None) protecting the dirstate restore.  Returns 0 on success.
        """
        ui = self.ui
        try:
            # undo.desc holds "<old repo length>\n<desc>[\n<detail>]"
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        # only restore the dirstate when the rollback removed a working
        # directory parent from the changelog
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
1387 1388
1388 1389 def _buildcacheupdater(self, newtransaction):
1389 1390 """called during transaction to build the callback updating cache
1390 1391
1391 1392 Lives on the repository to help extension who might want to augment
1392 1393 this logic. For this purpose, the created transaction is passed to the
1393 1394 method.
1394 1395 """
1395 1396 # we must avoid cyclic reference between repo and transaction.
1396 1397 reporef = weakref.ref(self)
1397 1398 def updater(tr):
1398 1399 repo = reporef()
1399 1400 repo.updatecaches(tr)
1400 1401 return updater
1401 1402
1402 1403 @unfilteredmethod
1403 1404 def updatecaches(self, tr=None):
1404 1405 """warm appropriate caches
1405 1406
1406 1407 If this function is called after a transaction closed. The transaction
1407 1408 will be available in the 'tr' argument. This can be used to selectively
1408 1409 update caches relevant to the changes in that transaction.
1409 1410 """
1410 1411 if tr is not None and tr.hookargs.get('source') == 'strip':
1411 1412 # During strip, many caches are invalid but
1412 1413 # later call to `destroyed` will refresh them.
1413 1414 return
1414 1415
1415 1416 if tr is None or tr.changes['revs']:
1416 1417 # updating the unfiltered branchmap should refresh all the others,
1417 1418 self.ui.debug('updating the branch cache\n')
1418 1419 branchmap.updatecache(self.filtered('served'))
1419 1420
1420 1421 def invalidatecaches(self):
1421 1422
1422 1423 if '_tagscache' in vars(self):
1423 1424 # can't use delattr on proxy
1424 1425 del self.__dict__['_tagscache']
1425 1426
1426 1427 self.unfiltered()._branchcaches.clear()
1427 1428 self.invalidatevolatilesets()
1428 1429 self._sparsesignaturecache.clear()
1429 1430
1430 1431 def invalidatevolatilesets(self):
1431 1432 self.filteredrevcache.clear()
1432 1433 obsolete.clearobscaches(self)
1433 1434
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different to dirstate.invalidate() that it doesn't always
        rereads the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            # first drop the property caches the dirstate object keeps in
            # its own _filecache, then discard the dirstate itself
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')
1450 1451
    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        # iterate over a copy: entries may be deleted during the loop
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()
1484 1485
1485 1486 def invalidateall(self):
1486 1487 '''Fully invalidates both store and non-store parts, causing the
1487 1488 subsequent operation to reread any outside changes.'''
1488 1489 # extension should hook this to invalidate its caches
1489 1490 self.invalidate()
1490 1491 self.invalidatedirstate()
1491 1492
1492 1493 @unfilteredmethod
1493 1494 def _refreshfilecachestats(self, tr):
1494 1495 """Reload stats of cached files so that they are flagged as valid"""
1495 1496 for k, ce in self._filecache.items():
1496 1497 if k == 'dirstate' or k not in self.__dict__:
1497 1498 continue
1498 1499 ce.refresh()
1499 1500
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        """Acquire ``lockname`` on ``vfs`` and return the lock object.

        First attempts a non-blocking acquisition.  When that fails and
        ``wait`` is true, reports who holds the lock and retries with the
        configured ui.timeout.  ``releasefn``/``acquirefn`` are forwarded
        to the lock; ``parentenvvar`` names the environment variable a
        parent process uses to hand down an inheritable lock.  Raises
        error.LockHeld when the lock is busy and ``wait`` is false.
        """
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            # timeout 0 == non-blocking first attempt
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l
1531 1532
1532 1533 def _afterlock(self, callback):
1533 1534 """add a callback to be run when the repository is fully unlocked
1534 1535
1535 1536 The callback will be executed when the outermost lock is released
1536 1537 (with wlock being higher level than 'lock')."""
1537 1538 for ref in (self._wlockref, self._lockref):
1538 1539 l = ref and ref()
1539 1540 if l and l.held:
1540 1541 l.postrelease.append(callback)
1541 1542 break
1542 1543 else: # no lock have been found.
1543 1544 callback()
1544 1545
1545 1546 def lock(self, wait=True):
1546 1547 '''Lock the repository store (.hg/store) and return a weak reference
1547 1548 to the lock. Use this before modifying the store (e.g. committing or
1548 1549 stripping). If you are opening a transaction, get a lock as well.)
1549 1550
1550 1551 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1551 1552 'wlock' first to avoid a dead-lock hazard.'''
1552 1553 l = self._currentlock(self._lockref)
1553 1554 if l is not None:
1554 1555 l.lock()
1555 1556 return l
1556 1557
1557 1558 l = self._lock(self.svfs, "lock", wait, None,
1558 1559 self.invalidate, _('repository %s') % self.origroot)
1559 1560 self._lockref = weakref.ref(l)
1560 1561 return l
1561 1562
1562 1563 def _wlockchecktransaction(self):
1563 1564 if self.currenttransaction() is not None:
1564 1565 raise error.LockInheritanceContractViolation(
1565 1566 'wlock cannot be inherited in the middle of a transaction')
1566 1567
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # reentrant acquisition: reuse the already-held lock
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            # on final release: persist pending dirstate changes (or drop
            # them when a parent change is still in flight), then refresh
            # the cached stat so the written dirstate reads as valid
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
1602 1603
1603 1604 def _currentlock(self, lockref):
1604 1605 """Returns the lock if it's held, or None if it's not."""
1605 1606 if lockref is None:
1606 1607 return None
1607 1608 l = lockref()
1608 1609 if l is None or not l.held:
1609 1610 return None
1610 1611 return l
1611 1612
1612 1613 def currentwlock(self):
1613 1614 """Returns the wlock if it's held, or None if it's not."""
1614 1615 return self._currentlock(self._wlockref)
1615 1616
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        ``fctx`` is the file context to commit; ``manifest1``/``manifest2``
        are the parents' manifests; ``linkrev`` links the new filelog
        revision to its changelog revision.  Appends the file name to
        ``changelist`` when the file actually changed and returns the
        filelog node to record in the new manifest.
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            # the context already points at a stored filelog node; reuse it
            # when it matches one of the parents instead of re-adding
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1703 1704
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable

        For every file explicitly listed in ``match``, call ``fail`` with
        a message when it is deleted, untracked, or a directory with no
        matching committable file.  ``vdirs`` is the list of directories
        visited by the matcher.
        """
        if match.isexact() or match.prefix():
            # NOTE(review): 'matched' is only bound on this branch; the code
            # below appears to rely on match.files() being empty for other
            # matcher kinds -- confirm against callers before refactoring
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))
1724 1725
1725 1726 @unfilteredmethod
1726 1727 def commit(self, text="", user=None, date=None, match=None, force=False,
1727 1728 editor=False, extra=None):
1728 1729 """Add a new revision to current repository.
1729 1730
1730 1731 Revision information is gathered from the working directory,
1731 1732 match can be used to filter the committed files. If editor is
1732 1733 supplied, it is called to get a commit message.
1733 1734 """
1734 1735 if extra is None:
1735 1736 extra = {}
1736 1737
1737 1738 def fail(f, msg):
1738 1739 raise error.Abort('%s: %s' % (f, msg))
1739 1740
1740 1741 if not match:
1741 1742 match = matchmod.always(self.root, '')
1742 1743
1743 1744 if not force:
1744 1745 vdirs = []
1745 1746 match.explicitdir = vdirs.append
1746 1747 match.bad = fail
1747 1748
1748 1749 wlock = lock = tr = None
1749 1750 try:
1750 1751 wlock = self.wlock()
1751 1752 lock = self.lock() # for recent changelog (see issue4368)
1752 1753
1753 1754 wctx = self[None]
1754 1755 merge = len(wctx.parents()) > 1
1755 1756
1756 1757 if not force and merge and not match.always():
1757 1758 raise error.Abort(_('cannot partially commit a merge '
1758 1759 '(do not specify files or patterns)'))
1759 1760
1760 1761 status = self.status(match=match, clean=force)
1761 1762 if force:
1762 1763 status.modified.extend(status.clean) # mq may commit clean files
1763 1764
1764 1765 # check subrepos
1765 1766 subs = []
1766 1767 commitsubs = set()
1767 1768 newstate = wctx.substate.copy()
1768 1769 # only manage subrepos and .hgsubstate if .hgsub is present
1769 1770 if '.hgsub' in wctx:
1770 1771 # we'll decide whether to track this ourselves, thanks
1771 1772 for c in status.modified, status.added, status.removed:
1772 1773 if '.hgsubstate' in c:
1773 1774 c.remove('.hgsubstate')
1774 1775
1775 1776 # compare current state to last committed state
1776 1777 # build new substate based on last committed state
1777 1778 oldstate = wctx.p1().substate
1778 1779 for s in sorted(newstate.keys()):
1779 1780 if not match(s):
1780 1781 # ignore working copy, use old state if present
1781 1782 if s in oldstate:
1782 1783 newstate[s] = oldstate[s]
1783 1784 continue
1784 1785 if not force:
1785 1786 raise error.Abort(
1786 1787 _("commit with new subrepo %s excluded") % s)
1787 1788 dirtyreason = wctx.sub(s).dirtyreason(True)
1788 1789 if dirtyreason:
1789 1790 if not self.ui.configbool('ui', 'commitsubrepos'):
1790 1791 raise error.Abort(dirtyreason,
1791 1792 hint=_("use --subrepos for recursive commit"))
1792 1793 subs.append(s)
1793 1794 commitsubs.add(s)
1794 1795 else:
1795 1796 bs = wctx.sub(s).basestate()
1796 1797 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1797 1798 if oldstate.get(s, (None, None, None))[1] != bs:
1798 1799 subs.append(s)
1799 1800
1800 1801 # check for removed subrepos
1801 1802 for p in wctx.parents():
1802 1803 r = [s for s in p.substate if s not in newstate]
1803 1804 subs += [s for s in r if match(s)]
1804 1805 if subs:
1805 1806 if (not match('.hgsub') and
1806 1807 '.hgsub' in (wctx.modified() + wctx.added())):
1807 1808 raise error.Abort(
1808 1809 _("can't commit subrepos without .hgsub"))
1809 1810 status.modified.insert(0, '.hgsubstate')
1810 1811
1811 1812 elif '.hgsub' in status.removed:
1812 1813 # clean up .hgsubstate when .hgsub is removed
1813 1814 if ('.hgsubstate' in wctx and
1814 1815 '.hgsubstate' not in (status.modified + status.added +
1815 1816 status.removed)):
1816 1817 status.removed.insert(0, '.hgsubstate')
1817 1818
1818 1819 # make sure all explicit patterns are matched
1819 1820 if not force:
1820 1821 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1821 1822
1822 1823 cctx = context.workingcommitctx(self, status,
1823 1824 text, user, date, extra)
1824 1825
1825 1826 # internal config: ui.allowemptycommit
1826 1827 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1827 1828 or extra.get('close') or merge or cctx.files()
1828 1829 or self.ui.configbool('ui', 'allowemptycommit'))
1829 1830 if not allowemptycommit:
1830 1831 return None
1831 1832
1832 1833 if merge and cctx.deleted():
1833 1834 raise error.Abort(_("cannot commit merge with missing files"))
1834 1835
1835 1836 ms = mergemod.mergestate.read(self)
1836 1837 mergeutil.checkunresolved(ms)
1837 1838
1838 1839 if editor:
1839 1840 cctx._text = editor(self, cctx, subs)
1840 1841 edited = (text != cctx._text)
1841 1842
1842 1843 # Save commit message in case this transaction gets rolled back
1843 1844 # (e.g. by a pretxncommit hook). Leave the content alone on
1844 1845 # the assumption that the user will use the same editor again.
1845 1846 msgfn = self.savecommitmessage(cctx._text)
1846 1847
1847 1848 # commit subs and write new state
1848 1849 if subs:
1849 1850 for s in sorted(commitsubs):
1850 1851 sub = wctx.sub(s)
1851 1852 self.ui.status(_('committing subrepository %s\n') %
1852 1853 subrepo.subrelpath(sub))
1853 1854 sr = sub.commit(cctx._text, user, date)
1854 1855 newstate[s] = (newstate[s][0], sr)
1855 1856 subrepo.writestate(self, newstate)
1856 1857
1857 1858 p1, p2 = self.dirstate.parents()
1858 1859 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1859 1860 try:
1860 1861 self.hook("precommit", throw=True, parent1=hookp1,
1861 1862 parent2=hookp2)
1862 1863 tr = self.transaction('commit')
1863 1864 ret = self.commitctx(cctx, True)
1864 1865 except: # re-raises
1865 1866 if edited:
1866 1867 self.ui.write(
1867 1868 _('note: commit message saved in %s\n') % msgfn)
1868 1869 raise
1869 1870 # update bookmarks, dirstate and mergestate
1870 1871 bookmarks.update(self, [p1, p2], ret)
1871 1872 cctx.markcommitted(ret)
1872 1873 ms.reset()
1873 1874 tr.close()
1874 1875
1875 1876 finally:
1876 1877 lockmod.release(tr, lock, wlock)
1877 1878
1878 1879 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1879 1880 # hack for command that use a temporary commit (eg: histedit)
1880 1881 # temporary commit got stripped before hook release
1881 1882 if self.changelog.hasnode(ret):
1882 1883 self.hook("commit", node=node, parent1=parent1,
1883 1884 parent2=parent2)
1884 1885 self._afterlock(commithook)
1885 1886 return ret
1886 1887
1887 1888 @unfilteredmethod
1888 1889 def commitctx(self, ctx, error=False):
1889 1890 """Add a new revision to current repository.
1890 1891 Revision information is passed via the context argument.
1891 1892 """
1892 1893
1893 1894 tr = None
1894 1895 p1, p2 = ctx.p1(), ctx.p2()
1895 1896 user = ctx.user()
1896 1897
1897 1898 lock = self.lock()
1898 1899 try:
1899 1900 tr = self.transaction("commit")
1900 1901 trp = weakref.proxy(tr)
1901 1902
1902 1903 if ctx.manifestnode():
1903 1904 # reuse an existing manifest revision
1904 1905 mn = ctx.manifestnode()
1905 1906 files = ctx.files()
1906 1907 elif ctx.files():
1907 1908 m1ctx = p1.manifestctx()
1908 1909 m2ctx = p2.manifestctx()
1909 1910 mctx = m1ctx.copy()
1910 1911
1911 1912 m = mctx.read()
1912 1913 m1 = m1ctx.read()
1913 1914 m2 = m2ctx.read()
1914 1915
1915 1916 # check in files
1916 1917 added = []
1917 1918 changed = []
1918 1919 removed = list(ctx.removed())
1919 1920 linkrev = len(self)
1920 1921 self.ui.note(_("committing files:\n"))
1921 1922 for f in sorted(ctx.modified() + ctx.added()):
1922 1923 self.ui.note(f + "\n")
1923 1924 try:
1924 1925 fctx = ctx[f]
1925 1926 if fctx is None:
1926 1927 removed.append(f)
1927 1928 else:
1928 1929 added.append(f)
1929 1930 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1930 1931 trp, changed)
1931 1932 m.setflag(f, fctx.flags())
1932 1933 except OSError as inst:
1933 1934 self.ui.warn(_("trouble committing %s!\n") % f)
1934 1935 raise
1935 1936 except IOError as inst:
1936 1937 errcode = getattr(inst, 'errno', errno.ENOENT)
1937 1938 if error or errcode and errcode != errno.ENOENT:
1938 1939 self.ui.warn(_("trouble committing %s!\n") % f)
1939 1940 raise
1940 1941
1941 1942 # update manifest
1942 1943 self.ui.note(_("committing manifest\n"))
1943 1944 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1944 1945 drop = [f for f in removed if f in m]
1945 1946 for f in drop:
1946 1947 del m[f]
1947 1948 mn = mctx.write(trp, linkrev,
1948 1949 p1.manifestnode(), p2.manifestnode(),
1949 1950 added, drop)
1950 1951 files = changed + removed
1951 1952 else:
1952 1953 mn = p1.manifestnode()
1953 1954 files = []
1954 1955
1955 1956 # update changelog
1956 1957 self.ui.note(_("committing changelog\n"))
1957 1958 self.changelog.delayupdate(tr)
1958 1959 n = self.changelog.add(mn, files, ctx.description(),
1959 1960 trp, p1.node(), p2.node(),
1960 1961 user, ctx.date(), ctx.extra().copy())
1961 1962 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1962 1963 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1963 1964 parent2=xp2)
1964 1965 # set the new commit is proper phase
1965 1966 targetphase = subrepo.newcommitphase(self.ui, ctx)
1966 1967 if targetphase:
1967 1968 # retract boundary do not alter parent changeset.
1968 1969 # if a parent have higher the resulting phase will
1969 1970 # be compliant anyway
1970 1971 #
1971 1972 # if minimal phase was 0 we don't need to retract anything
1972 1973 phases.registernew(self, tr, targetphase, [n])
1973 1974 tr.close()
1974 1975 return n
1975 1976 finally:
1976 1977 if tr:
1977 1978 tr.release()
1978 1979 lock.release()
1979 1980
1980 1981 @unfilteredmethod
1981 1982 def destroying(self):
1982 1983 '''Inform the repository that nodes are about to be destroyed.
1983 1984 Intended for use by strip and rollback, so there's a common
1984 1985 place for anything that has to be done before destroying history.
1985 1986
1986 1987 This is mostly useful for saving state that is in memory and waiting
1987 1988 to be flushed when the current lock is released. Because a call to
1988 1989 destroyed is imminent, the repo will be invalidated causing those
1989 1990 changes to stay in memory (waiting for the next unlock), or vanish
1990 1991 completely.
1991 1992 '''
1992 1993 # When using the same lock to commit and strip, the phasecache is left
1993 1994 # dirty after committing. Then when we strip, the repo is invalidated,
1994 1995 # causing those changes to disappear.
1995 1996 if '_phasecache' in vars(self):
1996 1997 self._phasecache.write()
1997 1998
1998 1999 @unfilteredmethod
1999 2000 def destroyed(self):
2000 2001 '''Inform the repository that nodes have been destroyed.
2001 2002 Intended for use by strip and rollback, so there's a common
2002 2003 place for anything that has to be done after destroying history.
2003 2004 '''
2004 2005 # When one tries to:
2005 2006 # 1) destroy nodes thus calling this method (e.g. strip)
2006 2007 # 2) use phasecache somewhere (e.g. commit)
2007 2008 #
2008 2009 # then 2) will fail because the phasecache contains nodes that were
2009 2010 # removed. We can either remove phasecache from the filecache,
2010 2011 # causing it to reload next time it is accessed, or simply filter
2011 2012 # the removed nodes now and write the updated cache.
2012 2013 self._phasecache.filterunknown(self)
2013 2014 self._phasecache.write()
2014 2015
2015 2016 # refresh all repository caches
2016 2017 self.updatecaches()
2017 2018
2018 2019 # Ensure the persistent tag cache is updated. Doing it now
2019 2020 # means that the tag cache only has to worry about destroyed
2020 2021 # heads immediately after a strip/rollback. That in turn
2021 2022 # guarantees that "cachetip == currenttip" (comparing both rev
2022 2023 # and node) always means no nodes have been added or destroyed.
2023 2024
2024 2025 # XXX this is suboptimal when qrefresh'ing: we strip the current
2025 2026 # head, refresh the tag cache, then immediately add a new head.
2026 2027 # But I think doing it this way is necessary for the "instant
2027 2028 # tag cache retrieval" case to work.
2028 2029 self.invalidate()
2029 2030
2030 2031 def walk(self, match, node=None):
2031 2032 '''
2032 2033 walk recursively through the directory tree or a given
2033 2034 changeset, finding all files matched by the match
2034 2035 function
2035 2036 '''
2036 2037 self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
2037 2038 return self[node].walk(match)
2038 2039
2039 2040 def status(self, node1='.', node2=None, match=None,
2040 2041 ignored=False, clean=False, unknown=False,
2041 2042 listsubrepos=False):
2042 2043 '''a convenience method that calls node1.status(node2)'''
2043 2044 return self[node1].status(node2, match, ignored, clean, unknown,
2044 2045 listsubrepos)
2045 2046
2046 2047 def addpostdsstatus(self, ps):
2047 2048 """Add a callback to run within the wlock, at the point at which status
2048 2049 fixups happen.
2049 2050
2050 2051 On status completion, callback(wctx, status) will be called with the
2051 2052 wlock held, unless the dirstate has changed from underneath or the wlock
2052 2053 couldn't be grabbed.
2053 2054
2054 2055 Callbacks should not capture and use a cached copy of the dirstate --
2055 2056 it might change in the meanwhile. Instead, they should access the
2056 2057 dirstate via wctx.repo().dirstate.
2057 2058
2058 2059 This list is emptied out after each status run -- extensions should
2059 2060 make sure it adds to this list each time dirstate.status is called.
2060 2061 Extensions should also make sure they don't call this for statuses
2061 2062 that don't involve the dirstate.
2062 2063 """
2063 2064
2064 2065 # The list is located here for uniqueness reasons -- it is actually
2065 2066 # managed by the workingctx, but that isn't unique per-repo.
2066 2067 self._postdsstatus.append(ps)
2067 2068
2068 2069 def postdsstatus(self):
2069 2070 """Used by workingctx to get the list of post-dirstate-status hooks."""
2070 2071 return self._postdsstatus
2071 2072
2072 2073 def clearpostdsstatus(self):
2073 2074 """Used by workingctx to clear post-dirstate-status hooks."""
2074 2075 del self._postdsstatus[:]
2075 2076
2076 2077 def heads(self, start=None):
2077 2078 if start is None:
2078 2079 cl = self.changelog
2079 2080 headrevs = reversed(cl.headrevs())
2080 2081 return [cl.node(rev) for rev in headrevs]
2081 2082
2082 2083 heads = self.changelog.heads(start)
2083 2084 # sort the output in rev descending order
2084 2085 return sorted(heads, key=self.changelog.rev, reverse=True)
2085 2086
2086 2087 def branchheads(self, branch=None, start=None, closed=False):
2087 2088 '''return a (possibly filtered) list of heads for the given branch
2088 2089
2089 2090 Heads are returned in topological order, from newest to oldest.
2090 2091 If branch is None, use the dirstate branch.
2091 2092 If start is not None, return only heads reachable from start.
2092 2093 If closed is True, return heads that are marked as closed as well.
2093 2094 '''
2094 2095 if branch is None:
2095 2096 branch = self[None].branch()
2096 2097 branches = self.branchmap()
2097 2098 if branch not in branches:
2098 2099 return []
2099 2100 # the cache returns heads ordered lowest to highest
2100 2101 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2101 2102 if start is not None:
2102 2103 # filter out the heads that cannot be reached from startrev
2103 2104 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2104 2105 bheads = [h for h in bheads if h in fbheads]
2105 2106 return bheads
2106 2107
2107 2108 def branches(self, nodes):
2108 2109 if not nodes:
2109 2110 nodes = [self.changelog.tip()]
2110 2111 b = []
2111 2112 for n in nodes:
2112 2113 t = n
2113 2114 while True:
2114 2115 p = self.changelog.parents(n)
2115 2116 if p[1] != nullid or p[0] == nullid:
2116 2117 b.append((t, n, p[0], p[1]))
2117 2118 break
2118 2119 n = p[0]
2119 2120 return b
2120 2121
2121 2122 def between(self, pairs):
2122 2123 r = []
2123 2124
2124 2125 for top, bottom in pairs:
2125 2126 n, l, i = top, [], 0
2126 2127 f = 1
2127 2128
2128 2129 while n != bottom and n != nullid:
2129 2130 p = self.changelog.parents(n)[0]
2130 2131 if i == f:
2131 2132 l.append(n)
2132 2133 f = f * 2
2133 2134 n = p
2134 2135 i += 1
2135 2136
2136 2137 r.append(l)
2137 2138
2138 2139 return r
2139 2140
2140 2141 def checkpush(self, pushop):
2141 2142 """Extensions can override this function if additional checks have
2142 2143 to be performed before pushing, or call it if they override push
2143 2144 command.
2144 2145 """
2145 2146 pass
2146 2147
2147 2148 @unfilteredpropertycache
2148 2149 def prepushoutgoinghooks(self):
2149 2150 """Return util.hooks consists of a pushop with repo, remote, outgoing
2150 2151 methods, which are called before pushing changesets.
2151 2152 """
2152 2153 return util.hooks()
2153 2154
2154 2155 def pushkey(self, namespace, key, old, new):
2155 2156 try:
2156 2157 tr = self.currenttransaction()
2157 2158 hookargs = {}
2158 2159 if tr is not None:
2159 2160 hookargs.update(tr.hookargs)
2160 2161 hookargs['namespace'] = namespace
2161 2162 hookargs['key'] = key
2162 2163 hookargs['old'] = old
2163 2164 hookargs['new'] = new
2164 2165 self.hook('prepushkey', throw=True, **hookargs)
2165 2166 except error.HookAbort as exc:
2166 2167 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2167 2168 if exc.hint:
2168 2169 self.ui.write_err(_("(%s)\n") % exc.hint)
2169 2170 return False
2170 2171 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2171 2172 ret = pushkey.push(self, namespace, key, old, new)
2172 2173 def runhook():
2173 2174 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2174 2175 ret=ret)
2175 2176 self._afterlock(runhook)
2176 2177 return ret
2177 2178
2178 2179 def listkeys(self, namespace):
2179 2180 self.hook('prelistkeys', throw=True, namespace=namespace)
2180 2181 self.ui.debug('listing keys for "%s"\n' % namespace)
2181 2182 values = pushkey.list(self, namespace)
2182 2183 self.hook('listkeys', namespace=namespace, values=values)
2183 2184 return values
2184 2185
2185 2186 def debugwireargs(self, one, two, three=None, four=None, five=None):
2186 2187 '''used to test argument passing over the wire'''
2187 2188 return "%s %s %s %s %s" % (one, two, three, four, five)
2188 2189
2189 2190 def savecommitmessage(self, text):
2190 2191 fp = self.vfs('last-message.txt', 'wb')
2191 2192 try:
2192 2193 fp.write(text)
2193 2194 finally:
2194 2195 fp.close()
2195 2196 return self.pathto(fp.name[len(self.root) + 1:])
2196 2197
2197 2198 # used to avoid circular references so destructors work
2198 2199 def aftertrans(files):
2199 2200 renamefiles = [tuple(t) for t in files]
2200 2201 def a():
2201 2202 for vfs, src, dest in renamefiles:
2202 2203 # if src and dest refer to a same file, vfs.rename is a no-op,
2203 2204 # leaving both src and dest on disk. delete dest to make sure
2204 2205 # the rename couldn't be such a no-op.
2205 2206 vfs.tryunlink(dest)
2206 2207 try:
2207 2208 vfs.rename(src, dest)
2208 2209 except OSError: # journal file does not yet exist
2209 2210 pass
2210 2211 return a
2211 2212
2212 2213 def undoname(fn):
2213 2214 base, name = os.path.split(fn)
2214 2215 assert name.startswith('journal')
2215 2216 return os.path.join(base, name.replace('journal', 'undo', 1))
2216 2217
2217 2218 def instance(ui, path, create):
2218 2219 return localrepository(ui, util.urllocalpath(path), create)
2219 2220
2220 2221 def islocal(path):
2221 2222 return True
2222 2223
2223 2224 def newreporequirements(repo):
2224 2225 """Determine the set of requirements for a new local repository.
2225 2226
2226 2227 Extensions can wrap this function to specify custom requirements for
2227 2228 new repositories.
2228 2229 """
2229 2230 ui = repo.ui
2230 2231 requirements = {'revlogv1'}
2231 2232 if ui.configbool('format', 'usestore'):
2232 2233 requirements.add('store')
2233 2234 if ui.configbool('format', 'usefncache'):
2234 2235 requirements.add('fncache')
2235 2236 if ui.configbool('format', 'dotencode'):
2236 2237 requirements.add('dotencode')
2237 2238
2238 2239 compengine = ui.config('experimental', 'format.compression')
2239 2240 if compengine not in util.compengines:
2240 2241 raise error.Abort(_('compression engine %s defined by '
2241 2242 'experimental.format.compression not available') %
2242 2243 compengine,
2243 2244 hint=_('run "hg debuginstall" to list available '
2244 2245 'compression engines'))
2245 2246
2246 2247 # zlib is the historical default and doesn't need an explicit requirement.
2247 2248 if compengine != 'zlib':
2248 2249 requirements.add('exp-compression-%s' % compengine)
2249 2250
2250 2251 if scmutil.gdinitconfig(ui):
2251 2252 requirements.add('generaldelta')
2252 2253 if ui.configbool('experimental', 'treemanifest'):
2253 2254 requirements.add('treemanifest')
2254 2255 if ui.configbool('experimental', 'manifestv2'):
2255 2256 requirements.add('manifestv2')
2256 2257
2257 2258 revlogv2 = ui.config('experimental', 'revlogv2')
2258 2259 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2259 2260 requirements.remove('revlogv1')
2260 2261 # generaldelta is implied by revlogv2.
2261 2262 requirements.discard('generaldelta')
2262 2263 requirements.add(REVLOGV2_REQUIREMENT)
2263 2264
2264 2265 return requirements
@@ -1,215 +1,221 b''
1 1 from __future__ import absolute_import
2 2
3 3 import errno
4 4 import os
5 5 import posixpath
6 6 import stat
7 7
8 8 from .i18n import _
9 9 from . import (
10 10 encoding,
11 11 error,
12 12 pycompat,
13 13 util,
14 14 )
15 15
16 16 def _lowerclean(s):
17 17 return encoding.hfsignoreclean(s.lower())
18 18
19 19 class pathauditor(object):
20 20 '''ensure that a filesystem path contains no banned components.
21 21 the following properties of a path are checked:
22 22
23 23 - ends with a directory separator
24 24 - under top-level .hg
25 25 - starts at the root of a windows drive
26 26 - contains ".."
27 27
28 28 More check are also done about the file system states:
29 29 - traverses a symlink (e.g. a/symlink_here/b)
30 30 - inside a nested repository (a callback can be used to approve
31 31 some nested repositories, e.g., subrepositories)
32 32
33 33 The file system checks are only done when 'realfs' is set to True (the
34 34 default). They should be disable then we are auditing path for operation on
35 35 stored history.
36
37 If 'cached' is set to True, audited paths and sub-directories are cached.
38 Be careful to not keep the cache of unmanaged directories for long because
39 audited paths may be replaced with symlinks.
36 40 '''
37 41
38 def __init__(self, root, callback=None, realfs=True):
42 def __init__(self, root, callback=None, realfs=True, cached=False):
39 43 self.audited = set()
40 44 self.auditeddir = set()
41 45 self.root = root
42 46 self._realfs = realfs
47 self._cached = cached
43 48 self.callback = callback
44 49 if os.path.lexists(root) and not util.fscasesensitive(root):
45 50 self.normcase = util.normcase
46 51 else:
47 52 self.normcase = lambda x: x
48 53
49 54 def __call__(self, path, mode=None):
50 55 '''Check the relative path.
51 56 path may contain a pattern (e.g. foodir/**.txt)'''
52 57
53 58 path = util.localpath(path)
54 59 normpath = self.normcase(path)
55 60 if normpath in self.audited:
56 61 return
57 62 # AIX ignores "/" at end of path, others raise EISDIR.
58 63 if util.endswithsep(path):
59 64 raise error.Abort(_("path ends in directory separator: %s") % path)
60 65 parts = util.splitpath(path)
61 66 if (os.path.splitdrive(path)[0]
62 67 or _lowerclean(parts[0]) in ('.hg', '.hg.', '')
63 68 or os.pardir in parts):
64 69 raise error.Abort(_("path contains illegal component: %s") % path)
65 70 # Windows shortname aliases
66 71 for p in parts:
67 72 if "~" in p:
68 73 first, last = p.split("~", 1)
69 74 if last.isdigit() and first.upper() in ["HG", "HG8B6C"]:
70 75 raise error.Abort(_("path contains illegal component: %s")
71 76 % path)
72 77 if '.hg' in _lowerclean(path):
73 78 lparts = [_lowerclean(p.lower()) for p in parts]
74 79 for p in '.hg', '.hg.':
75 80 if p in lparts[1:]:
76 81 pos = lparts.index(p)
77 82 base = os.path.join(*parts[:pos])
78 83 raise error.Abort(_("path '%s' is inside nested repo %r")
79 84 % (path, base))
80 85
81 86 normparts = util.splitpath(normpath)
82 87 assert len(parts) == len(normparts)
83 88
84 89 parts.pop()
85 90 normparts.pop()
86 91 prefixes = []
87 92 # It's important that we check the path parts starting from the root.
88 93 # This means we won't accidentally traverse a symlink into some other
89 94 # filesystem (which is potentially expensive to access).
90 95 for i in range(len(parts)):
91 96 prefix = pycompat.ossep.join(parts[:i + 1])
92 97 normprefix = pycompat.ossep.join(normparts[:i + 1])
93 98 if normprefix in self.auditeddir:
94 99 continue
95 100 if self._realfs:
96 101 self._checkfs(prefix, path)
97 102 prefixes.append(normprefix)
98 103
99 self.audited.add(normpath)
100 # only add prefixes to the cache after checking everything: we don't
101 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
102 self.auditeddir.update(prefixes)
104 if self._cached:
105 self.audited.add(normpath)
106 # only add prefixes to the cache after checking everything: we don't
107 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
108 self.auditeddir.update(prefixes)
103 109
104 110 def _checkfs(self, prefix, path):
105 111 """raise exception if a file system backed check fails"""
106 112 curpath = os.path.join(self.root, prefix)
107 113 try:
108 114 st = os.lstat(curpath)
109 115 except OSError as err:
110 116 # EINVAL can be raised as invalid path syntax under win32.
111 117 # They must be ignored for patterns can be checked too.
112 118 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
113 119 raise
114 120 else:
115 121 if stat.S_ISLNK(st.st_mode):
116 122 msg = _('path %r traverses symbolic link %r') % (path, prefix)
117 123 raise error.Abort(msg)
118 124 elif (stat.S_ISDIR(st.st_mode) and
119 125 os.path.isdir(os.path.join(curpath, '.hg'))):
120 126 if not self.callback or not self.callback(curpath):
121 127 msg = _("path '%s' is inside nested repo %r")
122 128 raise error.Abort(msg % (path, prefix))
123 129
124 130 def check(self, path):
125 131 try:
126 132 self(path)
127 133 return True
128 134 except (OSError, error.Abort):
129 135 return False
130 136
131 137 def canonpath(root, cwd, myname, auditor=None):
132 138 '''return the canonical path of myname, given cwd and root'''
133 139 if util.endswithsep(root):
134 140 rootsep = root
135 141 else:
136 142 rootsep = root + pycompat.ossep
137 143 name = myname
138 144 if not os.path.isabs(name):
139 145 name = os.path.join(root, cwd, name)
140 146 name = os.path.normpath(name)
141 147 if auditor is None:
142 148 auditor = pathauditor(root)
143 149 if name != rootsep and name.startswith(rootsep):
144 150 name = name[len(rootsep):]
145 151 auditor(name)
146 152 return util.pconvert(name)
147 153 elif name == root:
148 154 return ''
149 155 else:
150 156 # Determine whether `name' is in the hierarchy at or beneath `root',
151 157 # by iterating name=dirname(name) until that causes no change (can't
152 158 # check name == '/', because that doesn't work on windows). The list
153 159 # `rel' holds the reversed list of components making up the relative
154 160 # file name we want.
155 161 rel = []
156 162 while True:
157 163 try:
158 164 s = util.samefile(name, root)
159 165 except OSError:
160 166 s = False
161 167 if s:
162 168 if not rel:
163 169 # name was actually the same as root (maybe a symlink)
164 170 return ''
165 171 rel.reverse()
166 172 name = os.path.join(*rel)
167 173 auditor(name)
168 174 return util.pconvert(name)
169 175 dirname, basename = util.split(name)
170 176 rel.append(basename)
171 177 if dirname == name:
172 178 break
173 179 name = dirname
174 180
175 181 # A common mistake is to use -R, but specify a file relative to the repo
176 182 # instead of cwd. Detect that case, and provide a hint to the user.
177 183 hint = None
178 184 try:
179 185 if cwd != root:
180 186 canonpath(root, root, myname, auditor)
181 187 hint = (_("consider using '--cwd %s'")
182 188 % os.path.relpath(root, cwd))
183 189 except error.Abort:
184 190 pass
185 191
186 192 raise error.Abort(_("%s not under root '%s'") % (myname, root),
187 193 hint=hint)
188 194
189 195 def normasprefix(path):
190 196 '''normalize the specified path as path prefix
191 197
192 198 Returned value can be used safely for "p.startswith(prefix)",
193 199 "p[len(prefix):]", and so on.
194 200
195 201 For efficiency, this expects "path" argument to be already
196 202 normalized by "os.path.normpath", "os.path.realpath", and so on.
197 203
198 204 See also issue3033 for detail about need of this function.
199 205
200 206 >>> normasprefix('/foo/bar').replace(os.sep, '/')
201 207 '/foo/bar/'
202 208 >>> normasprefix('/').replace(os.sep, '/')
203 209 '/'
204 210 '''
205 211 d, p = os.path.splitdrive(path)
206 212 if len(p) != len(pycompat.ossep):
207 213 return path + pycompat.ossep
208 214 else:
209 215 return path
210 216
211 217 # forward two methods from posixpath that do what we need, but we'd
212 218 # rather not let our internals know that we're thinking in posix terms
213 219 # - instead we'll let them be oblivious.
214 220 join = posixpath.join
215 221 dirname = posixpath.dirname
@@ -1,668 +1,675 b''
1 1 # posix.py - Posix utility function implementations for Mercurial
2 2 #
3 3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import fcntl
12 12 import getpass
13 13 import grp
14 14 import os
15 15 import pwd
16 16 import re
17 17 import select
18 18 import stat
19 19 import sys
20 20 import tempfile
21 21 import unicodedata
22 22
23 23 from .i18n import _
24 24 from . import (
25 25 encoding,
26 error,
26 27 pycompat,
27 28 )
28 29
29 30 posixfile = open
30 31 normpath = os.path.normpath
31 32 samestat = os.path.samestat
32 33 try:
33 34 oslink = os.link
34 35 except AttributeError:
35 36 # Some platforms build Python without os.link on systems that are
36 37 # vaguely unix-like but don't have hardlink support. For those
37 38 # poor souls, just say we tried and that it failed so we fall back
38 39 # to copies.
39 40 def oslink(src, dst):
40 41 raise OSError(errno.EINVAL,
41 42 'hardlinks not supported: %s to %s' % (src, dst))
42 43 unlink = os.unlink
43 44 rename = os.rename
44 45 removedirs = os.removedirs
45 46 expandglobs = False
46 47
47 48 umask = os.umask(0)
48 49 os.umask(umask)
49 50
50 51 def split(p):
51 52 '''Same as posixpath.split, but faster
52 53
53 54 >>> import posixpath
54 55 >>> for f in ['/absolute/path/to/file',
55 56 ... 'relative/path/to/file',
56 57 ... 'file_alone',
57 58 ... 'path/to/directory/',
58 59 ... '/multiple/path//separators',
59 60 ... '/file_at_root',
60 61 ... '///multiple_leading_separators_at_root',
61 62 ... '']:
62 63 ... assert split(f) == posixpath.split(f), f
63 64 '''
64 65 ht = p.rsplit('/', 1)
65 66 if len(ht) == 1:
66 67 return '', p
67 68 nh = ht[0].rstrip('/')
68 69 if nh:
69 70 return nh, ht[1]
70 71 return ht[0] + '/', ht[1]
71 72
72 73 def openhardlinks():
73 74 '''return true if it is safe to hold open file handles to hardlinks'''
74 75 return True
75 76
76 77 def nlinks(name):
77 78 '''return number of hardlinks for the given file'''
78 79 return os.lstat(name).st_nlink
79 80
80 81 def parsepatchoutput(output_line):
81 82 """parses the output produced by patch and returns the filename"""
82 83 pf = output_line[14:]
83 84 if pycompat.sysplatform == 'OpenVMS':
84 85 if pf[0] == '`':
85 86 pf = pf[1:-1] # Remove the quotes
86 87 else:
87 88 if pf.startswith("'") and pf.endswith("'") and " " in pf:
88 89 pf = pf[1:-1] # Remove the quotes
89 90 return pf
90 91
91 92 def sshargs(sshcmd, host, user, port):
92 93 '''Build argument list for ssh'''
93 94 args = user and ("%s@%s" % (user, host)) or host
94 return port and ("%s -p %s" % (args, port)) or args
95 if '-' in args[:1]:
96 raise error.Abort(
97 _('illegal ssh hostname or username starting with -: %s') % args)
98 args = shellquote(args)
99 if port:
100 args = '-p %s %s' % (shellquote(port), args)
101 return args
95 102
96 103 def isexec(f):
97 104 """check whether a file is executable"""
98 105 return (os.lstat(f).st_mode & 0o100 != 0)
99 106
100 107 def setflags(f, l, x):
101 108 st = os.lstat(f)
102 109 s = st.st_mode
103 110 if l:
104 111 if not stat.S_ISLNK(s):
105 112 # switch file to link
106 113 fp = open(f)
107 114 data = fp.read()
108 115 fp.close()
109 116 unlink(f)
110 117 try:
111 118 os.symlink(data, f)
112 119 except OSError:
113 120 # failed to make a link, rewrite file
114 121 fp = open(f, "w")
115 122 fp.write(data)
116 123 fp.close()
117 124 # no chmod needed at this point
118 125 return
119 126 if stat.S_ISLNK(s):
120 127 # switch link to file
121 128 data = os.readlink(f)
122 129 unlink(f)
123 130 fp = open(f, "w")
124 131 fp.write(data)
125 132 fp.close()
126 133 s = 0o666 & ~umask # avoid restatting for chmod
127 134
128 135 sx = s & 0o100
129 136 if st.st_nlink > 1 and bool(x) != bool(sx):
130 137 # the file is a hardlink, break it
131 138 with open(f, "rb") as fp:
132 139 data = fp.read()
133 140 unlink(f)
134 141 with open(f, "wb") as fp:
135 142 fp.write(data)
136 143
137 144 if x and not sx:
138 145 # Turn on +x for every +r bit when making a file executable
139 146 # and obey umask.
140 147 os.chmod(f, s | (s & 0o444) >> 2 & ~umask)
141 148 elif not x and sx:
142 149 # Turn off all +x bits
143 150 os.chmod(f, s & 0o666)
144 151
145 152 def copymode(src, dst, mode=None):
146 153 '''Copy the file mode from the file at path src to dst.
147 154 If src doesn't exist, we're using mode instead. If mode is None, we're
148 155 using umask.'''
149 156 try:
150 157 st_mode = os.lstat(src).st_mode & 0o777
151 158 except OSError as inst:
152 159 if inst.errno != errno.ENOENT:
153 160 raise
154 161 st_mode = mode
155 162 if st_mode is None:
156 163 st_mode = ~umask
157 164 st_mode &= 0o666
158 165 os.chmod(dst, st_mode)
159 166
def checkexec(path):
    """
    Check whether the given path is on a filesystem with UNIX-like exec flags

    Requires a directory (like /foo/.hg)

    Returns True only when a freshly-set exec bit actually persists on
    the filesystem; any IOError/OSError along the way yields False.
    """

    # VFAT on some Linux versions can flip mode but it doesn't persist
    # a FS remount. Frequently we can detect it if files are created
    # with exec bit on.

    try:
        EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
        cachedir = os.path.join(path, '.hg', 'cache')
        if os.path.isdir(cachedir):
            # use two cache files so the (expensive) probe only has to
            # run once per repository
            checkisexec = os.path.join(cachedir, 'checkisexec')
            checknoexec = os.path.join(cachedir, 'checknoexec')

            try:
                m = os.stat(checkisexec).st_mode
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
                # checkisexec does not exist - fall through ...
            else:
                # checkisexec exists, check if it actually is exec
                if m & EXECFLAGS != 0:
                    # ensure checknoexec exists, check it isn't exec
                    try:
                        m = os.stat(checknoexec).st_mode
                    except OSError as e:
                        if e.errno != errno.ENOENT:
                            raise
                        open(checknoexec, 'w').close() # might fail
                        m = os.stat(checknoexec).st_mode
                    if m & EXECFLAGS == 0:
                        # check-exec is exec and check-no-exec is not exec
                        return True
                    # checknoexec exists but is exec - delete it
                    unlink(checknoexec)
                # checkisexec exists but is not exec - delete it
                unlink(checkisexec)

            # check using one file, leave it as checkisexec
            checkdir = cachedir
        else:
            # check directly in path and don't leave checkisexec behind
            checkdir = path
            checkisexec = None
        # probe: create a file, flip its exec bit on, and see whether
        # the filesystem actually kept it
        fh, fn = tempfile.mkstemp(dir=checkdir, prefix='hg-checkexec-')
        try:
            os.close(fh)
            m = os.stat(fn).st_mode
            if m & EXECFLAGS == 0:
                os.chmod(fn, m & 0o777 | EXECFLAGS)
                if os.stat(fn).st_mode & EXECFLAGS != 0:
                    if checkisexec is not None:
                        # keep the probe file around as the cache marker
                        os.rename(fn, checkisexec)
                    fn = None
                    return True
        finally:
            if fn is not None:
                unlink(fn)
    except (IOError, OSError):
        # we don't care, the user probably won't be able to commit anyway
        return False
226 233
def checklink(path):
    """check whether the given path is on a symlink-capable filesystem

    Returns True/False; caches the positive result as .hg/cache/checklink
    when a cache directory is available.
    """
    # mktemp is not racy because symlink creation will fail if the
    # file already exists
    while True:
        cachedir = os.path.join(path, '.hg', 'cache')
        checklink = os.path.join(cachedir, 'checklink')
        # try fast path, read only
        if os.path.islink(checklink):
            return True
        if os.path.isdir(cachedir):
            checkdir = cachedir
        else:
            checkdir = path
            cachedir = None
        fscheckdir = pycompat.fsdecode(checkdir)
        name = tempfile.mktemp(dir=fscheckdir,
                               prefix=r'checklink-')
        name = pycompat.fsencode(name)
        try:
            fd = None
            if cachedir is None:
                # no cache dir: link to a throwaway NamedTemporaryFile
                fd = tempfile.NamedTemporaryFile(dir=fscheckdir,
                                                 prefix=r'hg-checklink-')
                target = pycompat.fsencode(os.path.basename(fd.name))
            else:
                # create a fixed file to link to; doesn't matter if it
                # already exists.
                target = 'checklink-target'
                try:
                    open(os.path.join(cachedir, target), 'w').close()
                except IOError as inst:
                    # use .errno, not inst[0]: exceptions are not
                    # subscriptable on Python 3
                    if inst.errno == errno.EACCES:
                        # If we can't write to cachedir, just pretend
                        # that the fs is readonly and by association
                        # that the fs won't support symlinks. This
                        # seems like the least dangerous way to avoid
                        # data loss.
                        return False
                    raise
            try:
                os.symlink(target, name)
                if cachedir is None:
                    unlink(name)
                else:
                    try:
                        os.rename(name, checklink)
                    except OSError:
                        unlink(name)
                return True
            except OSError as inst:
                # link creation might race, try again
                if inst.errno == errno.EEXIST:
                    continue
                raise
            finally:
                if fd is not None:
                    fd.close()
        except AttributeError:
            # no os.symlink on this platform
            return False
        except OSError as inst:
            # sshfs might report failure while successfully creating the link
            if inst.errno == errno.EIO and os.path.exists(name):
                unlink(name)
            return False
292 299
def checkosfilename(path):
    '''Check that the base-relative path is a valid filename on this platform.
    Returns None if the path is ok, or a UI string describing the problem.'''
    pass # on posix platforms, every path is ok

def setbinary(fd):
    # no-op: POSIX file descriptors have no text/binary distinction
    pass

def pconvert(path):
    # paths already use '/' separators on POSIX; nothing to convert
    return path

def localpath(path):
    # repository-internal paths are already valid local paths on POSIX
    return path
306 313
def samefile(fpath1, fpath2):
    """Returns whether path1 and path2 refer to the same file. This is only
    guaranteed to work for files, not directories."""
    return os.path.samefile(fpath1, fpath2)

def samedevice(fpath1, fpath2):
    """Returns whether fpath1 and fpath2 are on the same device. This is only
    guaranteed to work for files, not directories."""
    # compare the st_dev fields of the two (symlink-aware) stat results
    return os.lstat(fpath1).st_dev == os.lstat(fpath2).st_dev
318 325
# os.path.normcase is a no-op, which doesn't help us on non-native filesystems
def normcase(path):
    # fold case so names compare equal on case-insensitive filesystems
    return path.lower()

# what normcase does to ASCII strings
normcasespec = encoding.normcasespecs.lower
# fallback normcase function for non-ASCII strings
normcasefallback = normcase
327 334
if pycompat.sysplatform == 'darwin':

    def normcase(path):
        '''
        Normalize a filename for OS X-compatible comparison:
        - escape-encode invalid characters
        - decompose to NFD
        - lowercase
        - omit ignored characters [200c-200f, 202a-202e, 206a-206f,feff]

        >>> normcase('UPPER')
        'upper'
        >>> normcase('Caf\xc3\xa9')
        'cafe\\xcc\\x81'
        >>> normcase('\xc3\x89')
        'e\\xcc\\x81'
        >>> normcase('\xb8\xca\xc3\xca\xbe\xc8.JPG') # issue3918
        '%b8%ca%c3\\xca\\xbe%c8.jpg'
        '''

        try:
            return encoding.asciilower(path)  # exception for non-ASCII
        except UnicodeDecodeError:
            # non-ASCII bytes: take the slow unicode-aware path
            return normcasefallback(path)

    normcasespec = encoding.normcasespecs.lower

    def normcasefallback(path):
        # Slow path for byte strings containing non-ASCII data.
        try:
            u = path.decode('utf-8')
        except UnicodeDecodeError:
            # OS X percent-encodes any bytes that aren't valid utf-8
            s = ''
            pos = 0
            l = len(path)
            # walk the bytes, copying valid utf-8 sequences and
            # percent-escaping everything else
            while pos < l:
                try:
                    c = encoding.getutf8char(path, pos)
                    pos += len(c)
                except ValueError:
                    c = '%%%02X' % ord(path[pos])
                    pos += 1
                s += c

            u = s.decode('utf-8')

        # Decompose then lowercase (HFS+ technote specifies lower)
        enc = unicodedata.normalize('NFD', u).lower().encode('utf-8')
        # drop HFS+ ignored characters
        return encoding.hfsignoreclean(enc)
378 385
if pycompat.sysplatform == 'cygwin':
    # workaround for cygwin, in which mount point part of path is
    # treated as case sensitive, even though underlying NTFS is case
    # insensitive.

    # default mount points
    cygwinmountpoints = sorted([
        "/usr/bin",
        "/usr/lib",
        "/cygdrive",
    ], reverse=True)

    # use upper-ing as normcase as same as NTFS workaround
    def normcase(path):
        # Uppercase for comparison, but keep a matching mount-point
        # prefix verbatim since Cygwin treats it as case sensitive.
        pathlen = len(path)
        if (pathlen == 0) or (path[0] != pycompat.ossep):
            # treat as relative
            return encoding.upper(path)

        # to preserve case of mountpoint part
        for mp in cygwinmountpoints:
            if not path.startswith(mp):
                continue

            mplen = len(mp)
            if mplen == pathlen: # mount point itself
                return mp
            if path[mplen] == pycompat.ossep:
                return mp + encoding.upper(path[mplen:])

        return encoding.upper(path)

    normcasespec = encoding.normcasespecs.other
    normcasefallback = normcase

    # Cygwin translates native ACLs to POSIX permissions,
    # but these translations are not supported by native
    # tools, so the exec bit tends to be set erroneously.
    # Therefore, disable executable bit access on Cygwin.
    def checkexec(path):
        return False

    # Similarly, Cygwin's symlink emulation is likely to create
    # problems when Mercurial is used from both Cygwin and native
    # Windows, with other native tools, or on shared volumes
    def checklink(path):
        return False
426 433
_needsshellquote = None
def shellquote(s):
    """Quote *s* so it survives the POSIX shell (no-op when already safe)."""
    if pycompat.sysplatform == 'OpenVMS':
        return '"%s"' % s
    global _needsshellquote
    if _needsshellquote is None:
        # lazily compile the "contains shell-special byte" matcher
        _needsshellquote = re.compile(br'[^a-zA-Z0-9._/+-]').search
    if s and not _needsshellquote(s):
        # "s" shouldn't have to be quoted
        return s
    return "'%s'" % s.replace("'", "'\\''")
439 446
def quotecommand(cmd):
    # shell command lines need no extra quoting on POSIX
    return cmd

def popen(command, mode='r'):
    # thin wrapper so every platform module exposes the same popen()
    return os.popen(command, mode)
445 452
def testpid(pid):
    '''return False if pid dead, True if running or not sure'''
    if pycompat.sysplatform == 'OpenVMS':
        return True
    try:
        # signal 0 performs permission/existence checks only
        os.kill(pid, 0)
    except OSError as err:
        # ESRCH: no such process; any other error means it exists
        return err.errno != errno.ESRCH
    return True
455 462
def explainexit(code):
    """return a 2-tuple (desc, code) describing a subprocess status
    (codes from kill are negative - not os.system/wait encoding)"""
    if code < 0:
        return _("killed by signal %d") % -code, -code
    return _("exited with status %d") % code, code
462 469
def isowner(st):
    """Tell whether the stat result *st* belongs to the invoking user."""
    return os.getuid() == st.st_uid
466 473
def findexe(command):
    '''Find executable for command searching like which does.
    If command is a basename then PATH is searched for command.
    PATH isn't searched if command is an absolute or relative path.
    If command isn't found None is returned.'''
    if pycompat.sysplatform == 'OpenVMS':
        return command

    def findexisting(executable):
        'Will return executable if existing file'
        if os.path.isfile(executable) and os.access(executable, os.X_OK):
            return executable
        return None

    # a path separator means the caller gave an explicit location
    if pycompat.ossep in command:
        return findexisting(command)

    if pycompat.sysplatform == 'plan9':
        return findexisting(os.path.join('/bin', command))

    for entry in encoding.environ.get('PATH', '').split(pycompat.ospathsep):
        found = findexisting(os.path.join(entry, command))
        if found is not None:
            return found
    return None
492 499
def setsignalhandler():
    # POSIX needs no special signal setup; the Windows module overrides this
    pass
495 502
# file kinds we track: regular files and symlinks only
_wantedkinds = {stat.S_IFREG, stat.S_IFLNK}

def statfiles(files):
    '''Stat each file in files. Yield each stat, or None if a file does not
    exist or has a type we don't care about.'''
    for nf in files:
        try:
            st = os.lstat(nf)
        except OSError as err:
            # missing file or a path component that isn't a directory
            if err.errno not in (errno.ENOENT, errno.ENOTDIR):
                raise
            yield None
            continue
        if stat.S_IFMT(st.st_mode) in _wantedkinds:
            yield st
        else:
            yield None
513 520
def getuser():
    '''return name of current user'''
    # getpass consults LOGNAME/USER and the pw database; encode to the
    # local byte representation used throughout Mercurial
    return pycompat.fsencode(getpass.getuser())
517 524
def username(uid=None):
    """Return the name of the user with the given uid.

    If uid is None, return the name of the current user."""
    if uid is None:
        uid = os.getuid()
    try:
        return pwd.getpwuid(uid).pw_name
    except KeyError:
        # unknown uid: fall back to its decimal representation
        return str(uid)

def groupname(gid=None):
    """Return the name of the group with the given gid.

    If gid is None, return the name of the current group."""
    if gid is None:
        gid = os.getgid()
    try:
        return grp.getgrgid(gid).gr_name
    except KeyError:
        # unknown gid: fall back to its decimal representation
        return str(gid)
541 548
def groupmembers(name):
    """Return the list of members of the group with the given
    name, KeyError if the group does not exist.
    """
    entry = grp.getgrnam(name)
    return list(entry.gr_mem)
547 554
def spawndetached(args):
    # P_DETACH only exists on Windows; on POSIX the getattr default of 0
    # degrades this to a plain P_NOWAIT spawn
    return os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0),
                      args[0], args)

def gethgcmd():
    # command used to launch this process, as an argv list
    return sys.argv[:1]

def makedir(path, notindexed):
    # 'notindexed' only matters on Windows; ignored on POSIX
    os.mkdir(path)

def lookupreg(key, name=None, scope=None):
    # Windows registry lookup; there is no equivalent on POSIX
    return None

def hidewindow():
    """Hide current shell window.

    Used to hide the window opened when starting asynchronous
    child process under Windows, unneeded on other systems.
    """
    pass
568 575
class cachestat(object):
    """Fingerprint of a file's stat data, used to detect modification."""

    def __init__(self, path):
        self.stat = os.stat(path)

    def cacheable(self):
        # a zero inode means the platform gives us no useful identity
        return bool(self.stat.st_ino)

    __hash__ = object.__hash__

    def __eq__(self, other):
        # Only dev, ino, size, mtime and atime are likely to change. Out
        # of these, we shouldn't compare atime but should compare the
        # rest. However, one of the other fields changing indicates
        # something fishy going on, so return False if anything but atime
        # changes.
        try:
            fields = ('st_mode', 'st_ino', 'st_dev', 'st_nlink', 'st_uid',
                      'st_gid', 'st_size', 'st_mtime', 'st_ctime')
            return all(getattr(self.stat, f) == getattr(other.stat, f)
                       for f in fields)
        except AttributeError:
            # 'other' has no stat data: not comparable, so not equal
            return False

    def __ne__(self, other):
        return not self == other
599 606
def executablepath():
    # only meaningful on Windows; POSIX callers get None
    return None

def statislink(st):
    '''check whether a stat result is a symlink'''
    if not st:
        return st
    return stat.S_ISLNK(st.st_mode)

def statisexec(st):
    '''check whether a stat result is an executable file'''
    if not st:
        return st
    return st.st_mode & 0o100 != 0
610 617
def poll(fds):
    """block until something happens on any file descriptor

    This is a generic helper that will check for any activity
    (read, write. exception) and return the list of touched files.

    In unsupported cases, it will raise a NotImplementedError"""
    try:
        while True:
            try:
                # watch every fd for readability, writability and errors
                rd, wr, ex = select.select(fds, fds, fds)
            except select.error as inst:
                if inst.args[0] == errno.EINTR:
                    # interrupted by a signal: retry
                    continue
                raise
            else:
                break
    except ValueError: # out of range file descriptor
        raise NotImplementedError()
    return sorted(set(rd) | set(wr) | set(ex))
630 637
def readpipe(pipe):
    """Read all available data from a pipe."""
    # We can't fstat() a pipe because Linux will always report 0.
    # So, we set the pipe to non-blocking mode and read everything
    # that's available.
    #
    # Fix: save the F_GETFL result as the flags to restore. The old code
    # saved the return value of fcntl(F_SETFL) (0 on success), so the
    # finally clause stomped the pipe's original flags.
    oldflags = fcntl.fcntl(pipe, fcntl.F_GETFL)
    fcntl.fcntl(pipe, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)

    try:
        chunks = []
        while True:
            try:
                s = pipe.read()
                if not s:
                    break
                chunks.append(s)
            except IOError:
                # nothing more available right now
                break

        return ''.join(chunks)
    finally:
        # restore the pipe's original flags
        fcntl.fcntl(pipe, fcntl.F_SETFL, oldflags)
654 661
def bindunixsocket(sock, path):
    """Bind the UNIX domain socket to the specified path"""
    # use relative path instead of full path at bind() if possible, since
    # AF_UNIX path has very small length limit (107 chars) on common
    # platforms (see sys/un.h)
    dirname, basename = os.path.split(path)
    bakwdfd = None
    if dirname:
        bakwdfd = os.open('.', os.O_DIRECTORY)
        os.chdir(dirname)
    try:
        sock.bind(basename)
    finally:
        # compare against None: os.open() may legitimately return fd 0,
        # and the old truthiness test would then leak it and leave the
        # process chdir'd; the try/finally also restores cwd if bind fails
        if bakwdfd is not None:
            os.fchdir(bakwdfd)
            os.close(bakwdfd)
@@ -1,1105 +1,1105 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import re
15 15 import socket
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 wdirid,
23 23 wdirrev,
24 24 )
25 25
26 26 from . import (
27 27 encoding,
28 28 error,
29 29 match as matchmod,
30 30 obsolete,
31 31 obsutil,
32 32 pathutil,
33 33 phases,
34 34 pycompat,
35 35 revsetlang,
36 36 similar,
37 37 util,
38 38 )
39 39
40 40 if pycompat.osname == 'nt':
41 41 from . import scmwindows as scmplatform
42 42 else:
43 43 from . import scmposix as scmplatform
44 44
45 45 termsize = scmplatform.termsize
46 46
class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        values = (modified, added, removed, deleted, unknown, ignored, clean)
        return tuple.__new__(cls, values)

    @property
    def modified(self):
        '''files whose content has been modified'''
        return self[0]

    @property
    def added(self):
        '''files newly added to tracking'''
        return self[1]

    @property
    def removed(self):
        '''files removed from tracking'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                 'unknown=%r, ignored=%r, clean=%r>') % self)
99 99
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2

    Yields (subpath, subrepo) pairs; paths present only in ctx2 are
    yielded last as null subrepos based on ctx1.
    """
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    # set aside subpaths that exist only in ctx2
    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
124 124
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    # count live secret changesets that were deliberately left out, so
    # the user learns why "no changes" might be surprising
    secretlist = []
    if excluded:
        for n in excluded:
            ctx = repo[n]
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))
141 141
def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    try:
        try:
            return func()
        except: # re-raises
            # record the traceback (shown with --traceback) before the
            # handlers below turn the exception into a message
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _('timed out waiting for lock held by %r') % inst.locker
        else:
            reason = _('lock held by %r') % inst.locker
        ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
        if not inst.locker:
            ui.warn(_("(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        ui.warn(_("abort: could not lock %s: %s\n") %
                (inst.desc or inst.filename, inst.strerror))
    except error.OutOfBandError as inst:
        if inst.args:
            msg = _("abort: remote error:\n")
        else:
            msg = _("abort: remote error\n")
        ui.warn(msg)
        if inst.args:
            ui.warn(''.join(inst.args))
        if inst.hint:
            ui.warn('(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.warn(_("abort: %s!\n") % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.warn(_("abort: %s") % inst.args[0])
        if not isinstance(inst.args[1], basestring):
            ui.warn(" %r\n" % (inst.args[1],))
        elif not inst.args[1]:
            ui.warn(_(" empty string\n"))
        else:
            ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
    except error.CensoredNodeError as inst:
        ui.warn(_("abort: file censored %s!\n") % inst)
    except error.RevlogError as inst:
        ui.warn(_("abort: %s!\n") % inst)
    except error.InterventionRequired as inst:
        # user action needed (e.g. unresolved merge): distinct exit code 1
        ui.warn("%s\n" % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
        return 1
    except error.WdirUnsupported:
        ui.warn(_("abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        ui.warn(_("abort: %s\n") % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
    except ImportError as inst:
        ui.warn(_("abort: %s!\n") % inst)
        m = str(inst).split()[-1]
        if m in "mpatch bdiff".split():
            ui.warn(_("(did you forget to compile extensions?)\n"))
        elif m in "zlib".split():
            ui.warn(_("(is your Python install correct?)\n"))
    except IOError as inst:
        if util.safehasattr(inst, "code"):
            # has a 'code' attribute - presumably an HTTP error
            ui.warn(_("abort: %s\n") % inst)
        elif util.safehasattr(inst, "reason"):
            # has a 'reason' attribute - presumably a URL/SSL error
            try: # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except (AttributeError, IndexError):
                # it might be anything, for example a string
                reason = inst.reason
            if isinstance(reason, unicode):
                # SSLError of Python 2.7.9 contains a unicode
                reason = encoding.unitolocal(reason)
            ui.warn(_("abort: error: %s\n") % reason)
        elif (util.safehasattr(inst, "args")
              and inst.args and inst.args[0] == errno.EPIPE):
            # broken pipe (e.g. output piped to a pager that quit): silent
            pass
        elif getattr(inst, "strerror", None):
            if getattr(inst, "filename", None):
                ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
            else:
                ui.warn(_("abort: %s\n") % inst.strerror)
        else:
            raise
    except OSError as inst:
        if getattr(inst, "filename", None) is not None:
            ui.warn(_("abort: %s: '%s'\n") % (inst.strerror, inst.filename))
        else:
            ui.warn(_("abort: %s\n") % inst.strerror)
    except MemoryError:
        ui.warn(_("abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case catch this and and pass exit code to caller.
        return inst.code
    except socket.error as inst:
        ui.warn(_("abort: %s\n") % inst.args[-1])

    # an exception was handled above without a specific exit code
    return -1
249 249
def checknewlabel(repo, lbl, kind):
    """Abort if *lbl* is not an acceptable new label (bookmark/branch/tag)."""
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ('tip', '.', 'null'):
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise error.Abort(_("%r cannot be used in a name") % c)
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        # purely numeric names would be ambiguous with revision numbers
        raise error.Abort(_("cannot use an integer as a name"))
263 263
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    for banned in ('\r', '\n'):
        if banned in f:
            raise error.Abort(
                _("'\\n' and '\\r' disallowed in filenames: %r") % f)
268 268
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %r" % (msg, f)
    if abort:
        raise error.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
280 280
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames')
    lowered = val.lower()
    parsed = util.parsebool(val)
    # on Windows, non-portable names are always fatal
    abort = pycompat.osname == 'nt' or lowered == 'abort'
    warn = parsed or lowered == 'warn'
    if parsed is None and not (warn or abort or lowered == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
293 293
class casecollisionauditor(object):
    """Detect new filenames that would collide case-insensitively with
    tracked files; warns or aborts depending on the 'abort' flag."""
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        # lower every tracked name in one pass by joining on NUL
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        # Audit one filename; raises error.Abort or warns on collision.
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
317 317
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    key = None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if revs:
        s = hashlib.sha1()
        for rev in revs:
            # hash bytes, not str: sha1.update() rejects str on Python 3
            # (b'%d' formatting is a no-op change on Python 2)
            s.update(b'%d;' % rev)
        key = s.digest()
    return key
341 341
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only surface walk errors for the root path itself
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # return True (and remember the dir) only when it was not
            # seen before; used to break symlink cycles
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # without samestat we cannot dedup, so disable symlink following
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # walk the link target manually; os.walk won't
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
389 389
def binnode(ctx):
    """Return binary node id for a given basectx"""
    node = ctx.node()
    # the working directory has no real node; substitute the sentinel
    return wdirid if node is None else node

def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    rev = ctx.rev()
    # the working directory has no real rev; substitute the sentinel
    return wdirrev if rev is None else rev
404 404
def revsingle(repo, revspec, default='.'):
    """Resolve revspec to a single changectx (the last of the matched set)."""
    if not revspec and revspec != 0:
        return repo[default]
    matched = revrange(repo, [revspec])
    if not matched:
        raise error.Abort(_('empty revision set'))
    return repo[matched.last()]
413 413
def _pairspec(revspec):
    """Tell whether revspec parses to a top-level range expression."""
    tree = revsetlang.parse(revspec)
    if not tree:
        return tree
    return tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
417 417
def revpair(repo, revs):
    # Resolve user revision specs into a (first, second) node pair;
    # second is None when only a single revision was intended.
    if not revs:
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    # pick the endpoints of the resolved set, honoring its ordering
    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        # e.g. "hg diff -r a -r b" where one spec matched nothing
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
447 447
def revrange(repo, specs):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # bare integers are shorthand for rev(N)
    allspecs = [revsetlang.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    return repo.anyrevs(allspecs, user=True)
475 475
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        # a merge: both parents matter
        return parents
    if repo.ui.debugflag:
        # debug output always shows both slots, padding with null
        return [parents[0], repo['null']]
    if parents[0].rev() >= intrev(ctx) - 1:
        # linear history: the parent is implied, report nothing
        return []
    return parents
491 491
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # an explicit kind prefix disables glob expansion
            expanded.append(kindpat)
            continue
        try:
            matches = glob.glob(pat)
        except re.error:
            matches = [pat]
        if matches:
            expanded.extend(matches)
        else:
            # no filesystem match: keep the pattern verbatim
            expanded.append(kindpat)
    return expanded
510 510
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    # default bad-file callback warns via the repo's ui; note it closes
    # over 'm', which is bound below before the matcher is ever used
    def bad(f, msg):
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        pats = []
    return m, pats
535 535
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    m, _pats = matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)
    return m
540 540
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always(repo.root, repo.getcwd())
544 544
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
548 548
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath) if not specified
    '''
    origbackuppath = ui.config('ui', 'origbackuppath')
    if origbackuppath is None:
        # default: side-by-side with the original file
        return filepath + ".orig"

    relpath = os.path.relpath(filepath, start=repo.root)
    fullorigpath = repo.wjoin(origbackuppath, relpath)

    backupdir = repo.vfs.dirname(fullorigpath)
    if not repo.vfs.exists(backupdir):
        ui.note(_('creating directory: %s\n') % backupdir)
        util.makedirs(backupdir)

    return fullorigpath + ".orig"
568 568
569 569 class _containsnode(object):
570 570 """proxy __contains__(node) to container.__contains__ which accepts revs"""
571 571
572 572 def __init__(self, repo, revcontainer):
573 573 self._torev = repo.changelog.rev
574 574 self._revcontains = revcontainer.__contains__
575 575
576 576 def __contains__(self, node):
577 577 return self._revcontains(self._torev(node))
578 578
def cleanupnodes(repo, mapping, operation):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    mapping is {oldnode: [newnode]} or a iterable of nodes if they do not have
    replacements. operation is a string, like "rebase".
    """
    if not util.safehasattr(mapping, 'items'):
        # plain iterable of nodes: treat each as having no successor
        mapping = {n: () for n in mapping}

    with repo.transaction('cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        changes = []
        allnewnodes = [n for ns in mapping.values() for n in ns]
        for oldnode, newnodes in mapping.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks # avoid import cycle
            if len(newnodes) > 1:
                # usually a split, take the one with biggest rev number
                newnode = next(repo.set('max(%ln)', newnodes)).node()
            elif len(newnodes) == 0:
                # move bookmark backwards
                roots = list(repo.set('max((::%n) - %ln)', oldnode,
                                      list(mapping)))
                newnode = roots[0].node() if roots else nullid
            else:
                newnode = newnodes[0]
            repo.ui.debug('moving bookmarks %r from %s to %s\n' %
                          (oldbmarks, hex(oldnode), hex(newnode)))
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
                                   allnewnodes, newnode, oldnode)
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                changes.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    changes.append((b, None))

        if changes:
            bmarks.applychanges(repo, tr, changes)

        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obssolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the node in topology order, that might be useful for
            # some obsstore logic.
            # NOTE: the filtering and sorting might belong to createmarkers.
            # Unfiltered repo is needed since nodes in mapping might be hidden.
            unfi = repo.unfiltered()
            isobs = unfi.obsstore.successors.__contains__
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0])
            rels = [(unfi[n], tuple(unfi[m] for m in s))
                    for n, s in sorted(mapping.items(), key=sortfunc)
                    if s or not isobs(n)]
            obsolete.createmarkers(repo, rels, operation=operation)
        else:
            from . import repair # avoid import cycle
            repair.delayedstrip(repo.ui, repo, list(mapping), operation)
648 648
def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    """Add new files, forget missing ones and record renames, recursing
    into subrepos. Returns 1 if anything was rejected, else 0."""
    if opts is None:
        opts = {}
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    unknownset = set(unknown + forgotten)
    toreport = unknownset.copy()
    toreport.update(deleted)
    for abspath in sorted(toreport):
        if repo.ui.verbose or not m.exact(abspath):
            if abspath in unknownset:
                msg = _('adding %s\n') % m.uipath(abspath)
            else:
                msg = _('removing %s\n') % m.uipath(abspath)
            repo.ui.status(msg)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
704 704
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    # NOTE: the lambda closes over 'rejected', which is assigned on the next
    # line; late binding makes this safe as the matcher is not used yet
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toreport = unknownset.copy()
        toreport.update(deleted)
        for abspath in sorted(toreport):
            if abspath in unknownset:
                msg = _('adding %s\n') % abspath
            else:
                msg = _('removing %s\n') % abspath
            repo.ui.status(msg)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0
733 733
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    for abspath, st in walkresults.iteritems():
        dstate = dirstate[abspath]
        if dstate == '?' and audit_path.check(abspath):
            # untracked file passing the path audit
            unknown.append(abspath)
        elif dstate != 'r' and not st:
            # tracked but gone from disk
            deleted.append(abspath)
        elif dstate == 'r' and st:
            # marked removed yet present on disk
            forgotten.append(abspath)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abspath)
        elif dstate == 'a':
            added.append(abspath)

    return added, unknown, deleted, removed, forgotten
762 762
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity <= 0:
        return renames
    for old, new, score in similar.findrenames(repo, added, removed,
                                               similarity):
        if (repo.ui.verbose or not matcher.exact(old)
            or not matcher.exact(new)):
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (matcher.rel(old), matcher.rel(new),
                            score * 100))
        renames[new] = old
    return renames
777 777
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for newname, oldname in renames.iteritems():
            wctx.copy(oldname, newname)
787 787
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
        return
    if repo.dirstate[origsrc] == 'a' and origsrc == src:
        # source was only just added: there is no committed data to copy
        if not ui.quiet:
            ui.warn(_("%s has not been committed yet, so no copy "
                      "data will be stored for %s.\n")
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
        if repo.dirstate[dst] in '?r' and not dryrun:
            wctx.add([dst])
    elif not dryrun:
        wctx.copy(origsrc, dst)
806 806
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for req in requirements:
        if req in supported:
            continue
        if not req or not req[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(req)
    if missings:
        missings.sort()
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
825 825
def writerequires(opener, requirements):
    """Write the requirements, one per line sorted, to .hg/requires."""
    with opener('requires', 'w') as fp:
        fp.write(''.join("%s\n" % req for req in sorted(requirements)))
830 830
class filecachesubentry(object):
    """Stat-based change tracker for a single file path."""

    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        # None means "cacheability unknown so far"
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)
            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()

    def refresh(self):
        """Re-stat the file if it is (assumed) cacheable."""
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable
        # we don't know yet, assume it is for now
        return True

    def changed(self):
        """Return True if the file changed (or cannot be cached at all)."""
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()
            # check again
            if not self._cacheable:
                return True
        elif not self._cacheable and self._cacheable is not None:
            return True

        if self.cachestat == newstat:
            return False
        self.cachestat = newstat
        return True

    @staticmethod
    def stat(path):
        # missing files are represented as None; other errors propagate
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
885 885
class filecacheentry(object):
    """Change tracker over a group of file paths."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(entry.changed() for entry in self._entries)

    def refresh(self):
        for entry in self._entries:
            entry.refresh()
902 902
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behavior as
    propertycache).

    '''
    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        # decorator entry point: remember the wrapped function and its name
        self.func = func
        self.name = func.__name__.encode('ascii')
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # fast path: value already materialized in the instance dict
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)
        if entry:
            # recompute only when a tracked file changed on disk
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, p) for p in self.paths]
            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

        obj._filecache[self.name] = entry
        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name in obj._filecache:
            entry = obj._filecache[self.name]
        else:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, p) for p in self.paths]
            entry = filecacheentry(paths, False)
            obj._filecache[self.name] = entry

        entry.obj = value               # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
981 981
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    """Run cmd as a subprocess while exporting the lock inheritance token
    in ``envvar``. Requires ``lock`` to currently be held."""
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    environ = dict(environ) if environ else {}
    with lock.inherit() as locker:
        environ[envvar] = locker
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)
991 991
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
                    **kwargs)
1000 1000
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    if ui.configbool('format', 'generaldelta'):
        return True
    return ui.configbool('format', 'usegeneraldelta')
1007 1007
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    return ui.configbool('format', 'generaldelta')
1013 1013
class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""
    firstlinekey = '__firstline'

    def __init__(self, vfs, path, keys=None):
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or reuturned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                raise error.CorruptedState(_("empty simplekeyvalue file"))
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines which only contain '\n' therefore are not skipped
            # by 'if line'
            parsed = dict(line[:-1].split('=', 1) for line in lines
                          if line.strip())
            if self.firstlinekey in parsed:
                e = _("%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(parsed)
        except ValueError as e:
            raise error.CorruptedState(str(e))
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form"""
        lines = []
        if firstline is not None:
            lines.append('%s\n' % firstline)

        for k, v in data.items():
            # reject keys/values that would corrupt the file format
            if k == self.firstlinekey:
                raise error.ProgrammingError(
                    "key name '%s' is reserved" % self.firstlinekey)
            if not k[0].isalpha():
                raise error.ProgrammingError(
                    "keys must start with a letter in a key-value file")
            if not k.isalnum():
                raise error.ProgrammingError(
                    "invalid key name in a simple key-value file")
            if '\n' in v:
                raise error.ProgrammingError(
                    "invalid value in a simple key-value file")
            lines.append("%s=%s\n" % (k, v))
        with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
            fp.write(''.join(lines))
1082 1082
# transaction-name prefixes for which an "obsoleted N changesets"
# summary should be reported
_reportobsoletedsource = [
    'debugobsolete',
    'pull',
    'push',
    'serve',
    'unbundle',
]

def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed
    """
    for source in _reportobsoletedsource:
        if not txnname.startswith(source):
            continue
        # use a weakref so the callback does not keep the repo alive
        reporef = weakref.ref(repo)
        def reportsummary(tr):
            """the actual callback reporting the summary"""
            repo = reporef()
            obsoleted = obsutil.getobsoleted(repo, tr)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))
        otr.addpostclose('00-txnreport', reportsummary)
        break
@@ -1,325 +1,324 b''
1 1 # sshpeer.py - ssh repository proxy class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import re
11 11
12 12 from .i18n import _
13 13 from . import (
14 14 error,
15 15 pycompat,
16 16 util,
17 17 wireproto,
18 18 )
19 19
20 20 def _serverquote(s):
21 21 if not s:
22 22 return s
23 23 '''quote a string for the remote shell ... which we assume is sh'''
24 24 if re.match('[a-zA-Z0-9@%_+=:,./-]*$', s):
25 25 return s
26 26 return "'%s'" % s.replace("'", "'\\''")
27 27
def _forwardoutput(ui, pipe):
    """display all data currently available on pipe as remote output.

    This is non blocking."""
    buffered = util.readpipe(pipe)
    if not buffered:
        return
    for line in buffered.splitlines():
        ui.status(_("remote: "), line, '\n')
36 36
class doublepipe(object):
    """Operate a side-channel pipe in addition of a main one

    The side-channel pipe contains server output to be forwarded to the user
    input. The double pipe will behave as the "main" pipe, but will ensure the
    content of the "side" pipe is properly processed while we wait for blocking
    call on the "main" pipe.

    If large amounts of data are read from "main", the forward will cease after
    the first bytes start to appear. This simplifies the implementation
    without affecting actual output of sshpeer too much as we rarely issue
    large read for data not yet emitted by the server.

    The main pipe is expected to be a 'bufferedinputpipe' from the util module
    that handle all the os specific bits. This class lives in this module
    because it focus on behavior specific to the ssh protocol."""

    def __init__(self, ui, main, side):
        self._ui = ui
        self._main = main
        self._side = side

    def _wait(self):
        """wait until some data are available on main or side

        return a pair of boolean (ismainready, issideready)

        (This will only wait for data if the setup is supported by `util.poll`)
        """
        if getattr(self._main, 'hasbuffer', False): # getattr for classic pipe
            return (True, True) # main has data, assume side is worth poking at.
        fds = [self._main.fileno(), self._side.fileno()]
        try:
            ready = util.poll(fds)
        except NotImplementedError:
            # non supported yet case, assume all have data.
            ready = fds
        return (self._main.fileno() in ready, self._side.fileno() in ready)

    def write(self, data):
        return self._call('write', data)

    def read(self, size):
        data = self._call('read', size)
        if size != 0 and not data:
            # We've observed a condition that indicates the
            # stdout closed unexpectedly. Check stderr one
            # more time and snag anything that's there before
            # letting anyone know the main part of the pipe
            # closed prematurely.
            _forwardoutput(self._ui, self._side)
        return data

    def readline(self):
        return self._call('readline')

    def _call(self, methname, data=None):
        """call <methname> on "main", forward output of "side" while blocking
        """
        # data can be '' or 0
        if (data is not None and not data) or self._main.closed:
            _forwardoutput(self._ui, self._side)
            return ''
        while True:
            mainready, sideready = self._wait()
            if sideready:
                _forwardoutput(self._ui, self._side)
            if mainready:
                meth = getattr(self._main, methname)
                return meth() if data is None else meth(data)

    def close(self):
        return self._main.close()

    def flush(self):
        return self._main.flush()
116 116
117 117 class sshpeer(wireproto.wirepeer):
118 118 def __init__(self, ui, path, create=False):
119 119 self._url = path
120 120 self.ui = ui
121 121 self.pipeo = self.pipei = self.pipee = None
122 122
123 123 u = util.url(path, parsequery=False, parsefragment=False)
124 124 if u.scheme != 'ssh' or not u.host or u.path is None:
125 125 self._abort(error.RepoError(_("couldn't parse location %s") % path))
126 126
127 util.checksafessh(path)
128
127 129 self.user = u.user
128 130 if u.passwd is not None:
129 131 self._abort(error.RepoError(_("password in URL not supported")))
130 132 self.host = u.host
131 133 self.port = u.port
132 134 self.path = u.path or "."
133 135
134 136 sshcmd = self.ui.config("ui", "ssh")
135 137 remotecmd = self.ui.config("ui", "remotecmd")
136 138
137 args = util.sshargs(sshcmd,
138 _serverquote(self.host),
139 _serverquote(self.user),
140 _serverquote(self.port))
139 args = util.sshargs(sshcmd, self.host, self.user, self.port)
141 140
142 141 if create:
143 142 cmd = '%s %s %s' % (sshcmd, args,
144 143 util.shellquote("%s init %s" %
145 144 (_serverquote(remotecmd), _serverquote(self.path))))
146 145 ui.debug('running %s\n' % cmd)
147 146 res = ui.system(cmd, blockedtag='sshpeer')
148 147 if res != 0:
149 148 self._abort(error.RepoError(_("could not create remote repo")))
150 149
151 150 self._validaterepo(sshcmd, args, remotecmd)
152 151
153 152 def url(self):
154 153 return self._url
155 154
156 155 def _validaterepo(self, sshcmd, args, remotecmd):
157 156 # cleanup up previous run
158 157 self.cleanup()
159 158
160 159 cmd = '%s %s %s' % (sshcmd, args,
161 160 util.shellquote("%s -R %s serve --stdio" %
162 161 (_serverquote(remotecmd), _serverquote(self.path))))
163 162 self.ui.debug('running %s\n' % cmd)
164 163 cmd = util.quotecommand(cmd)
165 164
166 165 # while self.subprocess isn't used, having it allows the subprocess to
167 166 # to clean up correctly later
168 167 #
169 168 # no buffer allow the use of 'select'
170 169 # feel free to remove buffering and select usage when we ultimately
171 170 # move to threading.
172 171 sub = util.popen4(cmd, bufsize=0)
173 172 self.pipeo, self.pipei, self.pipee, self.subprocess = sub
174 173
175 174 self.pipei = util.bufferedinputpipe(self.pipei)
176 175 self.pipei = doublepipe(self.ui, self.pipei, self.pipee)
177 176 self.pipeo = doublepipe(self.ui, self.pipeo, self.pipee)
178 177
179 178 # skip any noise generated by remote shell
180 179 self._callstream("hello")
181 180 r = self._callstream("between", pairs=("%s-%s" % ("0"*40, "0"*40)))
182 181 lines = ["", "dummy"]
183 182 max_noise = 500
184 183 while lines[-1] and max_noise:
185 184 l = r.readline()
186 185 self.readerr()
187 186 if lines[-1] == "1\n" and l == "\n":
188 187 break
189 188 if l:
190 189 self.ui.debug("remote: ", l)
191 190 lines.append(l)
192 191 max_noise -= 1
193 192 else:
194 193 self._abort(error.RepoError(_('no suitable response from '
195 194 'remote hg')))
196 195
197 196 self._caps = set()
198 197 for l in reversed(lines):
199 198 if l.startswith("capabilities:"):
200 199 self._caps.update(l[:-1].split(":")[1].split())
201 200 break
202 201
203 202 def _capabilities(self):
204 203 return self._caps
205 204
206 205 def readerr(self):
207 206 _forwardoutput(self.ui, self.pipee)
208 207
209 208 def _abort(self, exception):
210 209 self.cleanup()
211 210 raise exception
212 211
213 212 def cleanup(self):
214 213 if self.pipeo is None:
215 214 return
216 215 self.pipeo.close()
217 216 self.pipei.close()
218 217 try:
219 218 # read the error descriptor until EOF
220 219 for l in self.pipee:
221 220 self.ui.status(_("remote: "), l)
222 221 except (IOError, ValueError):
223 222 pass
224 223 self.pipee.close()
225 224
226 225 __del__ = cleanup
227 226
228 227 def _submitbatch(self, req):
229 228 rsp = self._callstream("batch", cmds=wireproto.encodebatchcmds(req))
230 229 available = self._getamount()
231 230 # TODO this response parsing is probably suboptimal for large
232 231 # batches with large responses.
233 232 toread = min(available, 1024)
234 233 work = rsp.read(toread)
235 234 available -= toread
236 235 chunk = work
237 236 while chunk:
238 237 while ';' in work:
239 238 one, work = work.split(';', 1)
240 239 yield wireproto.unescapearg(one)
241 240 toread = min(available, 1024)
242 241 chunk = rsp.read(toread)
243 242 available -= toread
244 243 work += chunk
245 244 yield wireproto.unescapearg(work)
246 245
    def _callstream(self, cmd, **args):
        """Send ``cmd`` and its arguments over the wire and return the
        pipe the response will arrive on.

        Wire framing (write order matters): the command name on its own
        line, then each argument as a ``<key> <value-length>`` header line
        followed by the raw value bytes.  A dict-valued argument writes
        its pair count as the length, then each sub-pair in the same
        framed form.
        """
        args = pycompat.byteskwargs(args)
        self.ui.debug("sending %s command\n" % cmd)
        self.pipeo.write("%s\n" % cmd)
        _func, names = wireproto.commands[cmd]
        keys = names.split()
        wireargs = {}
        for k in keys:
            if k == '*':
                # '*' soaks up all remaining keyword arguments as one dict
                wireargs['*'] = args
                break
            else:
                wireargs[k] = args[k]
                del args[k]
        # sorted() keeps the argument order deterministic on the wire
        for k, v in sorted(wireargs.iteritems()):
            self.pipeo.write("%s %d\n" % (k, len(v)))
            if isinstance(v, dict):
                # for dicts, len(v) above was the pair count; now each pair
                for dk, dv in v.iteritems():
                    self.pipeo.write("%s %d\n" % (dk, len(dv)))
                    self.pipeo.write(dv)
            else:
                self.pipeo.write(v)
        self.pipeo.flush()

        return self.pipei
272 271
    def _callcompressable(self, cmd, **args):
        # ssh peers have no separate compressed-stream path; a
        # "compressable" call is just a plain stream call here.
        return self._callstream(cmd, **args)
275 274
    def _call(self, cmd, **args):
        """Send ``cmd`` and return its single length-prefixed response."""
        self._callstream(cmd, **args)
        return self._recv()
279 278
280 279 def _callpush(self, cmd, fp, **args):
281 280 r = self._call(cmd, **args)
282 281 if r:
283 282 return '', r
284 283 for d in iter(lambda: fp.read(4096), ''):
285 284 self._send(d)
286 285 self._send("", flush=True)
287 286 r = self._recv()
288 287 if r:
289 288 return '', r
290 289 return self._recv(), ''
291 290
292 291 def _calltwowaystream(self, cmd, fp, **args):
293 292 r = self._call(cmd, **args)
294 293 if r:
295 294 # XXX needs to be made better
296 295 raise error.Abort(_('unexpected remote reply: %s') % r)
297 296 for d in iter(lambda: fp.read(4096), ''):
298 297 self._send(d)
299 298 self._send("", flush=True)
300 299 return self.pipei
301 300
302 301 def _getamount(self):
303 302 l = self.pipei.readline()
304 303 if l == '\n':
305 304 self.readerr()
306 305 msg = _('check previous remote output')
307 306 self._abort(error.OutOfBandError(hint=msg))
308 307 self.readerr()
309 308 try:
310 309 return int(l)
311 310 except ValueError:
312 311 self._abort(error.ResponseError(_("unexpected response:"), l))
313 312
    def _recv(self):
        """Read one length-prefixed frame from the remote and return its
        payload."""
        return self.pipei.read(self._getamount())
316 315
    def _send(self, data, flush=False):
        """Write ``data`` to the remote as a length-prefixed frame.

        Empty ``data`` writes just the "0" length line, which the push
        helpers in this class use as an end-of-payload terminator.
        """
        self.pipeo.write("%d\n" % len(data))
        if data:
            self.pipeo.write(data)
        if flush:
            self.pipeo.flush()
        # surface any stderr the remote produced in response
        self.readerr()
324 323
# NOTE(review): module-level hook — peer-construction code elsewhere
# presumably looks up ``instance`` to find the peer class for this
# transport; verify against the caller (hg.py).
instance = sshpeer
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
General Comments 0
You need to be logged in to leave comments. Login now