merge with stable
Thomas Arendsen Hein
r16208:85db9917 merge default
.hgsigs
@@ -1,51 +1,52 @@
 35fb62a3a673d5322f6274a44ba6456e5e4b3b37 0 iD8DBQBEYmO2ywK+sNU5EO8RAnaYAKCO7x15xUn5mnhqWNXqk/ehlhRt2QCfRDfY0LrUq2q4oK/KypuJYPHgq1A=
 2be3001847cb18a23c403439d9e7d0ace30804e9 0 iD8DBQBExUbjywK+sNU5EO8RAhzxAKCtyHAQUzcTSZTqlfJ0by6vhREwWQCghaQFHfkfN0l9/40EowNhuMOKnJk=
 36a957364b1b89c150f2d0e60a99befe0ee08bd3 0 iD8DBQBFfL2QywK+sNU5EO8RAjYFAKCoGlaWRTeMsjdmxAjUYx6diZxOBwCfY6IpBYsKvPTwB3oktnPt5Rmrlys=
 27230c29bfec36d5540fbe1c976810aefecfd1d2 0 iD8DBQBFheweywK+sNU5EO8RAt7VAKCrqJQWT2/uo2RWf0ZI4bLp6v82jACgjrMdsaTbxRsypcmEsdPhlG6/8F4=
 fb4b6d5fe100b0886f8bc3d6731ec0e5ed5c4694 0 iD8DBQBGgHicywK+sNU5EO8RAgNxAJ0VG8ixAaeudx4sZbhngI1syu49HQCeNUJQfWBgA8bkJ2pvsFpNxwYaX3I=
 23889160905a1b09fffe1c07378e9fc1827606eb 0 iD8DBQBHGTzoywK+sNU5EO8RAr/UAJ0Y8s4jQtzgS+G9vM8z6CWBThZ8fwCcCT5XDj2XwxKkz/0s6UELwjsO3LU=
 bae2e9c838e90a393bae3973a7850280413e091a 0 iD8DBQBH6DO5ywK+sNU5EO8RAsfrAJ0e4r9c9GF/MJsM7Xjd3NesLRC3+ACffj6+6HXdZf8cswAoFPO+DY00oD0=
 d5cbbe2c49cee22a9fbeb9ea41daa0ac4e26b846 0 iD8DBQBINdwsywK+sNU5EO8RAjIUAKCPmlFJSpsPAAUKF+iNHAwVnwmzeQCdEXrL27CWclXuUKdbQC8De7LICtE=
 d2375bbee6d47e62ba8e415c86e83a465dc4dce9 0 iD8DBQBIo1wpywK+sNU5EO8RAmRNAJ94x3OFt6blbqu/yBoypm/AJ44fuACfUaldXcV5z9tht97hSp22DVTEPGc=
 2a67430f92f15ea5159c26b09ec4839a0c549a26 0 iEYEABECAAYFAkk1hykACgkQywK+sNU5EO85QACeNJNUanjc2tl4wUoPHNuv+lSj0ZMAoIm93wSTc/feyYnO2YCaQ1iyd9Nu
 3773e510d433969e277b1863c317b674cbee2065 0 iEYEABECAAYFAklNbbAACgkQywK+sNU5EO8o+gCfeb2/lfIJZMvyDA1m+G1CsBAxfFsAoIa6iAMG8SBY7hW1Q85Yf/LXEvaE
 11a4eb81fb4f4742451591489e2797dc47903277 0 iEYEABECAAYFAklcAnsACgkQywK+sNU5EO+uXwCbBVHNNsLy1g7BlAyQJwadYVyHOXoAoKvtAVO71+bv7EbVoukwTzT+P4Sx
 11efa41037e280d08cfb07c09ad485df30fb0ea8 0 iEYEABECAAYFAkmvJRQACgkQywK+sNU5EO9XZwCeLMgDgPSMWMm6vgjL4lDs2pEc5+0AnRxfiFbpbBfuEFTqKz9nbzeyoBlx
 02981000012e3adf40c4849bd7b3d5618f9ce82d 0 iEYEABECAAYFAknEH3wACgkQywK+sNU5EO+uXwCeI+LbLMmhjU1lKSfU3UWJHjjUC7oAoIZLvYDGOL/tNZFUuatc3RnZ2eje
 196d40e7c885fa6e95f89134809b3ec7bdbca34b 0 iEYEABECAAYFAkpL2X4ACgkQywK+sNU5EO9FOwCfXJycjyKJXsvQqKkHrglwOQhEKS4An36GfKzptfN8b1qNc3+ya/5c2WOM
 3ef6c14a1e8e83a31226f5881b7fe6095bbfa6f6 0 iEYEABECAAYFAkpopLIACgkQywK+sNU5EO8QSgCfZ0ztsd071rOa2lhmp9Fyue/WoI0AoLTei80/xrhRlB8L/rZEf2KBl8dA
 31ec469f9b556f11819937cf68ee53f2be927ebf 0 iEYEABECAAYFAksBuxAACgkQywK+sNU5EO+mBwCfagB+A0txzWZ6dRpug3LEoK7Z1QsAoKpbk8vsLjv6/oRDicSk/qBu33+m
 439d7ea6fe3aa4ab9ec274a68846779153789de9 0 iEYEABECAAYFAksVw0kACgkQywK+sNU5EO/oZwCfdfBEkgp38xq6wN2F4nj+SzofrJIAnjmxt04vaJSeOOeHylHvk6lzuQsw
 296a0b14a68621f6990c54fdba0083f6f20935bf 0 iEYEABECAAYFAks+jCoACgkQywK+sNU5EO9J8wCeMUGF9E/gS2UBsqIz56WS4HMPRPUAoI5J95mwEIK8Clrl7qFRidNI6APq
 4aa619c4c2c09907034d9824ebb1dd0e878206eb 0 iEYEABECAAYFAktm9IsACgkQywK+sNU5EO9XGgCgk4HclRQhexEtooPE5GcUCdB6M8EAn2ptOhMVbIoO+JncA+tNACPFXh0O
 ff2704a8ded37fbebd8b6eb5ec733731d725da8a 0 iEYEABECAAYFAkuRoSQACgkQywK+sNU5EO//3QCeJDc5r2uFyFCtAlpSA27DEE5rrxAAn2FSwTy9fhrB3QAdDQlwkEZcQzDh
 2b01dab594167bc0dd33331dbaa6dca3dca1b3aa 0 iEYEABECAAYFAku1IwIACgkQywK+sNU5EO9MjgCdHLVwkTZlNHxhcznZKBL1rjN+J7cAoLLWi9LTL6f/TgBaPSKOy1ublbaW
 39f725929f0c48c5fb3b90c071fc3066012456ca 0 iEYEABECAAYFAkvclvsACgkQywK+sNU5EO9FSwCeL9i5x8ALW/LE5+lCX6MFEAe4MhwAn1ev5o6SX6GrNdDfKweiemfO2VBk
 fdcf80f26604f233dc4d8f0a5ef9d7470e317e8a 0 iEYEABECAAYFAkvsKTkACgkQywK+sNU5EO9qEACgiSiRGvTG2vXGJ65tUSOIYihTuFAAnRzRIqEVSw8M8/RGeUXRps0IzaCO
 24fe2629c6fd0c74c90bd066e77387c2b02e8437 0 iEYEABECAAYFAkwFLRsACgkQywK+sNU5EO+pJACgp13tPI+pbwKZV+LeMjcQ4H6tCZYAoJebzhd6a8yYx6qiwpJxA9BXZNXy
 f786fc4b8764cd2a5526d259cf2f94d8a66924d9 0 iEYEABECAAYFAkwsyxcACgkQywK+sNU5EO+crACfUpNAF57PmClkSri9nJcBjb2goN4AniPCNaKvnki7TnUsi1u2oxltpKKL
 bf1774d95bde614af3956d92b20e2a0c68c5fec7 0 iEYEABECAAYFAkxVwccACgkQywK+sNU5EO+oFQCeJzwZ+we1fIIyBGCddHceOUAN++cAnjvT6A8ZWW0zV21NXIFF1qQmjxJd
 c00f03a4982e467fb6b6bd45908767db6df4771d 0 iEYEABECAAYFAkxXDqsACgkQywK+sNU5EO/GJACfT9Rz4hZOxPQEs91JwtmfjevO84gAmwSmtfo5mmWSm8gtTUebCcdTv0Kf
 ff5cec76b1c5b6be9c3bb923aae8c3c6d079d6b9 0 iD8DBQBMdo+qywK+sNU5EO8RAqQpAJ975BL2CCAiWMz9SXthNQ9xG181IwCgp4O+KViHPkufZVFn2aTKMNvcr1A=
 93d8bff78c96fe7e33237b257558ee97290048a4 0 iD8DBQBMpfvdywK+sNU5EO8RAsxVAJ0UaL1XB51C76JUBhafc9GBefuMxwCdEWkTOzwvE0SarJBe9i008jhbqW4=
 333421b9e0f96c7bc788e5667c146a58a9440a55 0 iD8DBQBMz0HOywK+sNU5EO8RAlsEAJ0USh6yOG7OrWkADGunVt9QimBQnwCbBqeMnKgSbwEw8jZwE3Iz1mdrYlo=
 4438875ec01bd0fc32be92b0872eb6daeed4d44f 0 iD8DBQBM4WYUywK+sNU5EO8RAhCVAJ0dJswachwFAHALmk1x0RJehxzqPQCbBNskP9n/X689jB+btNTZTyKU/fw=
 6aff4f144ad356311318b0011df0bb21f2c97429 0 iD8DBQBM9uxXywK+sNU5EO8RAv+4AKCDj4qKP16GdPaq1tP6BUwpM/M1OACfRyzLPp/qiiN8xJTWoWYSe/XjJug=
 e3bf16703e2601de99e563cdb3a5d50b64e6d320 0 iD8DBQBNH8WqywK+sNU5EO8RAiQTAJ9sBO+TeiGro4si77VVaQaA6jcRUgCfSA28dBbjj0oFoQwvPoZjANiZBH8=
 a6c855c32ea081da3c3b8ff628f1847ff271482f 0 iD8DBQBNSJJ+ywK+sNU5EO8RAoJaAKCweDEF70fu+r1Zn7pYDXdlk5RuSgCeO9gK/eit8Lin/1n3pO7aYguFLok=
 2b2155623ee2559caf288fd333f30475966c4525 0 iD8DBQBNSJeBywK+sNU5EO8RAm1KAJ4hW9Cm9nHaaGJguchBaPLlAr+O3wCgqgmMok8bdAS06N6PL60PSTM//Gg=
 2616325766e3504c8ae7c84bd15ee610901fe91d 0 iD8DBQBNbWy9ywK+sNU5EO8RAlWCAJ4mW8HbzjJj9GpK98muX7k+7EvEHwCfaTLbC/DH3QEsZBhEP+M8tzL6RU4=
 aa1f3be38ab127280761889d2dca906ca465b5f4 0 iD8DBQBNeQq7ywK+sNU5EO8RAlEOAJ4tlEDdetE9lKfjGgjbkcR8PrC3egCfXCfF3qNVvU/2YYjpgvRwevjvDy0=
 b032bec2c0a651ca0ddecb65714bfe6770f67d70 0 iD8DBQBNlg5kywK+sNU5EO8RAnGEAJ9gmEx6MfaR4XcG2m/93vwtfyzs3gCgltzx8/YdHPwqDwRX/WbpYgi33is=
 3cb1e95676ad089596bd81d0937cad37d6e3b7fb 0 iD8DBQBNvTy4ywK+sNU5EO8RAmp8AJ9QnxK4jTJ7G722MyeBxf0UXEdGwACgtlM7BKtNQfbEH/fOW5y+45W88VI=
 733af5d9f6b22387913e1d11350fb8cb7c1487dd 0 iD8DBQBN5q/8ywK+sNU5EO8RArRGAKCNGT94GKIYtSuwZ57z1sQbcw6uLACfffpbMV4NAPMl8womAwg+7ZPKnIU=
 de9eb6b1da4fc522b1cab16d86ca166204c24f25 0 iD8DBQBODhfhywK+sNU5EO8RAr2+AJ4ugbAj8ae8/K0bYZzx3sascIAg1QCeK3b+zbbVVqd3b7CDpwFnaX8kTd4=
 4a43e23b8c55b4566b8200bf69fe2158485a2634 0 iD8DBQBONzIMywK+sNU5EO8RAj5SAJ0aPS3+JHnyI6bHB2Fl0LImbDmagwCdGbDLp1S7TFobxXudOH49bX45Iik=
 d629f1e89021103f1753addcef6b310e4435b184 0 iD8DBQBOWAsBywK+sNU5EO8RAht4AJwJl9oNFopuGkj5m8aKuf7bqPkoAQCeNrEm7UhFsZKYT5iUOjnMV7s2LaM=
 351a9292e430e35766c552066ed3e87c557b803b 0 iD8DBQBOh3zUywK+sNU5EO8RApFMAKCD3Y/u3avDFndznwqfG5UeTHMlvACfUivPIVQZyDZnhZMq0UhC6zhCEQg=
 384082750f2c51dc917d85a7145748330fa6ef4d 0 iD8DBQBOmd+OywK+sNU5EO8RAgDgAJ9V/X+G7VLwhTpHrZNiOHabzSyzYQCdE2kKfIevJUYB9QLAWCWP6DPwrwI=
 41453d55b481ddfcc1dacb445179649e24ca861d 0 iD8DBQBOsFhpywK+sNU5EO8RAqM6AKCyfxUae3/zLuiLdQz+JR78690eMACfQ6JTBQib4AbE+rUDdkeFYg9K/+4=
 195dbd1cef0c2f9f8bcf4ea303238105f716bda3 0 iD8DBQBO1/fWywK+sNU5EO8RAmoPAKCR5lpv1D6JLURHD8KVLSV4GRVEBgCgnd0Sy78ligNfqAMafmACRDvj7vo=
 6344043924497cd06d781d9014c66802285072e4 0 iD8DBQBPALgmywK+sNU5EO8RAlfhAJ9nYOdWnhfVDHYtDTJAyJtXBAQS9wCgnefoSQt7QABkbGxM+Q85UYEBuD0=
 db33555eafeaf9df1e18950e29439eaa706d399b 0 iD8DBQBPGdzxywK+sNU5EO8RAppkAJ9jOXhUVE/97CPgiMA0pMGiIYnesQCfengAszcBiSiKGugiI8Okc9ghU+Y=
 2aa5b51f310fb3befd26bed99c02267f5c12c734 0 iD8DBQBPKZ9bywK+sNU5EO8RAt1TAJ45r1eJ0YqSkInzrrayg4TVCh0SnQCgm0GA/Ua74jnnDwVQ60lAwROuz1Q=
+53e2cd303ecf8ca7c7eeebd785c34e5ed6b0f4a4 0 iD8DBQBPT/fvywK+sNU5EO8RAnfYAKCn7d0vwqIb100YfWm1F7nFD5B+FACeM02YHpQLSNsztrBCObtqcnfod7Q=
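Each .hgsigs line above pairs a signed changeset node with a signature scheme version (0 throughout) and a base64-encoded detached GPG signature; the merge adds the entry for the 2.1.1 release. A minimal parsing sketch for this format, assuming plain-text input; parse_hgsigs is a hypothetical helper and not part of Mercurial's API, and verifying the decoded signature (which would need gpg) is not shown:

    import base64

    def parse_hgsigs(text):
        # Hypothetical helper: split each line into (node, version, raw
        # signature bytes). Blank lines are skipped; the third field is
        # base64 and may itself contain spaces-free '+', '/' and '='.
        entries = []
        for line in text.splitlines():
            line = line.strip()
            if not line:
                continue
            node, version, sig = line.split(' ', 2)
            entries.append((node, int(version), base64.b64decode(sig)))
        return entries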
.hgtags
@@ -1,63 +1,64 @@
 d40cc5aacc31ed673d9b5b24f98bee78c283062c 0.4f
 1c590d34bf61e2ea12c71738e5a746cd74586157 0.4e
 7eca4cfa8aad5fce9a04f7d8acadcd0452e2f34e 0.4d
 b4d0c3786ad3e47beacf8412157326a32b6d25a4 0.4c
 f40273b0ad7b3a6d3012fd37736d0611f41ecf54 0.5
 0a28dfe59f8fab54a5118c5be4f40da34a53cdb7 0.5b
 12e0fdbc57a0be78f0e817fd1d170a3615cd35da 0.6
 4ccf3de52989b14c3d84e1097f59e39a992e00bd 0.6b
 eac9c8efcd9bd8244e72fb6821f769f450457a32 0.6c
 979c049974485125e1f9357f6bbe9c1b548a64c3 0.7
 3a56574f329a368d645853e0f9e09472aee62349 0.8
 6a03cff2b0f5d30281e6addefe96b993582f2eac 0.8.1
 35fb62a3a673d5322f6274a44ba6456e5e4b3b37 0.9
 2be3001847cb18a23c403439d9e7d0ace30804e9 0.9.1
 36a957364b1b89c150f2d0e60a99befe0ee08bd3 0.9.2
 27230c29bfec36d5540fbe1c976810aefecfd1d2 0.9.3
 fb4b6d5fe100b0886f8bc3d6731ec0e5ed5c4694 0.9.4
 23889160905a1b09fffe1c07378e9fc1827606eb 0.9.5
 bae2e9c838e90a393bae3973a7850280413e091a 1.0
 d5cbbe2c49cee22a9fbeb9ea41daa0ac4e26b846 1.0.1
 d2375bbee6d47e62ba8e415c86e83a465dc4dce9 1.0.2
 2a67430f92f15ea5159c26b09ec4839a0c549a26 1.1
 3773e510d433969e277b1863c317b674cbee2065 1.1.1
 11a4eb81fb4f4742451591489e2797dc47903277 1.1.2
 11efa41037e280d08cfb07c09ad485df30fb0ea8 1.2
 02981000012e3adf40c4849bd7b3d5618f9ce82d 1.2.1
 196d40e7c885fa6e95f89134809b3ec7bdbca34b 1.3
 3ef6c14a1e8e83a31226f5881b7fe6095bbfa6f6 1.3.1
 31ec469f9b556f11819937cf68ee53f2be927ebf 1.4
 439d7ea6fe3aa4ab9ec274a68846779153789de9 1.4.1
 296a0b14a68621f6990c54fdba0083f6f20935bf 1.4.2
 4aa619c4c2c09907034d9824ebb1dd0e878206eb 1.4.3
 ff2704a8ded37fbebd8b6eb5ec733731d725da8a 1.5
 2b01dab594167bc0dd33331dbaa6dca3dca1b3aa 1.5.1
 39f725929f0c48c5fb3b90c071fc3066012456ca 1.5.2
 fdcf80f26604f233dc4d8f0a5ef9d7470e317e8a 1.5.3
 24fe2629c6fd0c74c90bd066e77387c2b02e8437 1.5.4
 f786fc4b8764cd2a5526d259cf2f94d8a66924d9 1.6
 bf1774d95bde614af3956d92b20e2a0c68c5fec7 1.6.1
 c00f03a4982e467fb6b6bd45908767db6df4771d 1.6.2
 ff5cec76b1c5b6be9c3bb923aae8c3c6d079d6b9 1.6.3
 93d8bff78c96fe7e33237b257558ee97290048a4 1.6.4
 333421b9e0f96c7bc788e5667c146a58a9440a55 1.7
 4438875ec01bd0fc32be92b0872eb6daeed4d44f 1.7.1
 6aff4f144ad356311318b0011df0bb21f2c97429 1.7.2
 e3bf16703e2601de99e563cdb3a5d50b64e6d320 1.7.3
 a6c855c32ea081da3c3b8ff628f1847ff271482f 1.7.4
 2b2155623ee2559caf288fd333f30475966c4525 1.7.5
 2616325766e3504c8ae7c84bd15ee610901fe91d 1.8
 aa1f3be38ab127280761889d2dca906ca465b5f4 1.8.1
 b032bec2c0a651ca0ddecb65714bfe6770f67d70 1.8.2
 3cb1e95676ad089596bd81d0937cad37d6e3b7fb 1.8.3
 733af5d9f6b22387913e1d11350fb8cb7c1487dd 1.8.4
 de9eb6b1da4fc522b1cab16d86ca166204c24f25 1.9
 4a43e23b8c55b4566b8200bf69fe2158485a2634 1.9.1
 d629f1e89021103f1753addcef6b310e4435b184 1.9.2
 351a9292e430e35766c552066ed3e87c557b803b 1.9.3
 384082750f2c51dc917d85a7145748330fa6ef4d 2.0-rc
 41453d55b481ddfcc1dacb445179649e24ca861d 2.0
 195dbd1cef0c2f9f8bcf4ea303238105f716bda3 2.0.1
 6344043924497cd06d781d9014c66802285072e4 2.0.2
 db33555eafeaf9df1e18950e29439eaa706d399b 2.1-rc
 2aa5b51f310fb3befd26bed99c02267f5c12c734 2.1
+53e2cd303ecf8ca7c7eeebd785c34e5ed6b0f4a4 2.1.1
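The dirstate.py hunk below is the substantive part of the merge: it introduces repocache and rootcache, two subclasses of scmutil.filecache, and switches the _branch and _ignore properties from @propertycache to them, so the cached values are invalidated when the backing file (.hg/branch, .hgignore) changes on disk rather than living until invalidate(). A simplified sketch of that caching pattern, not Mercurial's actual scmutil.filecache implementation (which also records stat stamps in obj._filecache, the dict the hunk adds to __init__); hgdir and the open() call here are stand-ins for illustration:

    import os

    class filecache(object):
        # Sketch of a file-backed property cache: keep the computed value
        # next to a stat stamp of the backing file and recompute whenever
        # the stamp changes.
        def __init__(self, path):
            self.path = path

        def join(self, obj, fname):
            raise NotImplementedError   # subclasses decide where the file lives

        def __call__(self, func):
            self.func = func
            self.name = func.__name__
            return self

        def __get__(self, obj, objtype=None):
            if obj is None:
                return self
            try:
                st = os.stat(self.join(obj, self.path))
                stamp = (st.st_mtime, st.st_size)
            except OSError:
                stamp = None            # backing file missing
            key = '_cached_' + self.name
            cached = obj.__dict__.get(key)
            if cached is None or cached[0] != stamp:
                cached = (stamp, self.func(obj))
                obj.__dict__[key] = cached
            return cached[1]

    class repocache(filecache):
        # Mirrors the hunk below: cache keyed on a file inside .hg/.
        def join(self, obj, fname):
            return os.path.join(obj.hgdir, fname)   # hgdir is a stand-in attribute

    class dirstate(object):
        def __init__(self, hgdir):
            self.hgdir = hgdir

        @repocache('branch')
        def _branch(self):
            try:
                with open(os.path.join(self.hgdir, 'branch')) as fp:
                    return fp.read().strip() or 'default'
            except IOError:
                return 'default'

With this descriptor, editing .hg/branch between two attribute reads changes the stat stamp, so the next access of d._branch rereads the file instead of returning the stale cached name.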
mercurial/dirstate.py
@@ -1,736 +1,748 @@
 # dirstate.py - working directory tracking for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 import errno
 
 from node import nullid
 from i18n import _
 import scmutil, util, ignore, osutil, parsers, encoding
 import struct, os, stat, errno
 import cStringIO
 
 _format = ">cllll"
 propertycache = util.propertycache
+filecache = scmutil.filecache
+
+class repocache(filecache):
+    """filecache for files in .hg/"""
+    def join(self, obj, fname):
+        return obj._opener.join(fname)
+
+class rootcache(filecache):
+    """filecache for files in the repository root"""
+    def join(self, obj, fname):
+        return obj._join(fname)
 
 def _finddirs(path):
     pos = path.rfind('/')
     while pos != -1:
         yield path[:pos]
         pos = path.rfind('/', 0, pos)
 
 def _incdirs(dirs, path):
     for base in _finddirs(path):
         if base in dirs:
             dirs[base] += 1
             return
         dirs[base] = 1
 
 def _decdirs(dirs, path):
     for base in _finddirs(path):
         if dirs[base] > 1:
             dirs[base] -= 1
             return
         del dirs[base]
 
 class dirstate(object):
 
     def __init__(self, opener, ui, root, validate):
         '''Create a new dirstate object.
 
         opener is an open()-like callable that can be used to open the
         dirstate file; root is the root of the directory tracked by
         the dirstate.
         '''
         self._opener = opener
         self._validate = validate
         self._root = root
         self._rootdir = os.path.join(root, '')
         self._dirty = False
         self._dirtypl = False
         self._lastnormaltime = 0
         self._ui = ui
+        self._filecache = {}
 
     @propertycache
     def _map(self):
         '''Return the dirstate contents as a map from filename to
         (state, mode, size, time).'''
         self._read()
         return self._map
 
     @propertycache
     def _copymap(self):
         self._read()
         return self._copymap
 
     @propertycache
     def _normroot(self):
         return util.normcase(self._root)
 
     @propertycache
     def _foldmap(self):
         f = {}
         for name in self._map:
             f[util.normcase(name)] = name
         f['.'] = '.' # prevents useless util.fspath() invocation
         return f
 
-    @propertycache
+    @repocache('branch')
     def _branch(self):
         try:
             return self._opener.read("branch").strip() or "default"
         except IOError, inst:
             if inst.errno != errno.ENOENT:
                 raise
             return "default"
 
     @propertycache
     def _pl(self):
         try:
             fp = self._opener("dirstate")
             st = fp.read(40)
             fp.close()
             l = len(st)
             if l == 40:
                 return st[:20], st[20:40]
             elif l > 0 and l < 40:
                 raise util.Abort(_('working directory state appears damaged!'))
         except IOError, err:
             if err.errno != errno.ENOENT:
                 raise
         return [nullid, nullid]
 
     @propertycache
     def _dirs(self):
         dirs = {}
         for f, s in self._map.iteritems():
             if s[0] != 'r':
                 _incdirs(dirs, f)
         return dirs
 
     def dirs(self):
         return self._dirs
 
-    @propertycache
+    @rootcache('.hgignore')
     def _ignore(self):
         files = [self._join('.hgignore')]
         for name, path in self._ui.configitems("ui"):
             if name == 'ignore' or name.startswith('ignore.'):
                 files.append(util.expandpath(path))
         return ignore.ignore(self._root, files, self._ui.warn)
 
     @propertycache
     def _slash(self):
         return self._ui.configbool('ui', 'slash') and os.sep != '/'
 
     @propertycache
     def _checklink(self):
         return util.checklink(self._root)
 
     @propertycache
     def _checkexec(self):
         return util.checkexec(self._root)
 
     @propertycache
     def _checkcase(self):
         return not util.checkcase(self._join('.hg'))
 
     def _join(self, f):
         # much faster than os.path.join()
         # it's safe because f is always a relative path
         return self._rootdir + f
 
     def flagfunc(self, buildfallback):
         if self._checklink and self._checkexec:
             def f(x):
                 p = self._join(x)
                 if os.path.islink(p):
                     return 'l'
                 if util.isexec(p):
                     return 'x'
                 return ''
             return f
 
         fallback = buildfallback()
         if self._checklink:
             def f(x):
                 if os.path.islink(self._join(x)):
                     return 'l'
                 if 'x' in fallback(x):
                     return 'x'
                 return ''
             return f
         if self._checkexec:
             def f(x):
                 if 'l' in fallback(x):
                     return 'l'
                 if util.isexec(self._join(x)):
                     return 'x'
                 return ''
             return f
         else:
             return fallback
 
     def getcwd(self):
         cwd = os.getcwd()
         if cwd == self._root:
             return ''
         # self._root ends with a path separator if self._root is '/' or 'C:\'
         rootsep = self._root
         if not util.endswithsep(rootsep):
             rootsep += os.sep
         if cwd.startswith(rootsep):
             return cwd[len(rootsep):]
         else:
             # we're outside the repo. return an absolute path.
             return cwd
 
     def pathto(self, f, cwd=None):
         if cwd is None:
             cwd = self.getcwd()
         path = util.pathto(self._root, cwd, f)
         if self._slash:
             return util.normpath(path)
         return path
 
     def __getitem__(self, key):
         '''Return the current state of key (a filename) in the dirstate.
 
         States are:
           n  normal
           m  needs merging
           r  marked for removal
           a  marked for addition
           ?  not tracked
         '''
         return self._map.get(key, ("?",))[0]
 
     def __contains__(self, key):
         return key in self._map
 
     def __iter__(self):
         for x in sorted(self._map):
             yield x
 
     def parents(self):
         return [self._validate(p) for p in self._pl]
 
     def p1(self):
         return self._validate(self._pl[0])
 
     def p2(self):
         return self._validate(self._pl[1])
 
     def branch(self):
         return encoding.tolocal(self._branch)
 
     def setparents(self, p1, p2=nullid):
         self._dirty = self._dirtypl = True
         self._pl = p1, p2
 
     def setbranch(self, branch):
         if branch in ['tip', '.', 'null']:
             raise util.Abort(_('the name \'%s\' is reserved') % branch)
         self._branch = encoding.fromlocal(branch)
         self._opener.write("branch", self._branch + '\n')
 
     def _read(self):
         self._map = {}
         self._copymap = {}
         try:
             st = self._opener.read("dirstate")
         except IOError, err:
             if err.errno != errno.ENOENT:
                 raise
             return
         if not st:
             return
 
         p = parsers.parse_dirstate(self._map, self._copymap, st)
         if not self._dirtypl:
             self._pl = p
 
     def invalidate(self):
         for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
                   "_ignore"):
             if a in self.__dict__:
                 delattr(self, a)
         self._lastnormaltime = 0
         self._dirty = False
 
     def copy(self, source, dest):
         """Mark dest as a copy of source. Unmark dest if source is None."""
         if source == dest:
             return
         self._dirty = True
         if source is not None:
             self._copymap[dest] = source
         elif dest in self._copymap:
             del self._copymap[dest]
 
     def copied(self, file):
         return self._copymap.get(file, None)
 
     def copies(self):
         return self._copymap
 
     def _droppath(self, f):
         if self[f] not in "?r" and "_dirs" in self.__dict__:
             _decdirs(self._dirs, f)
 
     def _addpath(self, f, check=False):
         oldstate = self[f]
         if check or oldstate == "r":
             scmutil.checkfilename(f)
             if f in self._dirs:
                 raise util.Abort(_('directory %r already in dirstate') % f)
             # shadows
             for d in _finddirs(f):
                 if d in self._dirs:
                     break
                 if d in self._map and self[d] != 'r':
                     raise util.Abort(
                         _('file %r in dirstate clashes with %r') % (d, f))
         if oldstate in "?r" and "_dirs" in self.__dict__:
             _incdirs(self._dirs, f)
 
     def normal(self, f):
         '''Mark a file normal and clean.'''
         self._dirty = True
         self._addpath(f)
         s = os.lstat(self._join(f))
         mtime = int(s.st_mtime)
         self._map[f] = ('n', s.st_mode, s.st_size, mtime)
         if f in self._copymap:
             del self._copymap[f]
         if mtime > self._lastnormaltime:
             # Remember the most recent modification timeslot for status(),
             # to make sure we won't miss future size-preserving file content
             # modifications that happen within the same timeslot.
             self._lastnormaltime = mtime
 
     def normallookup(self, f):
         '''Mark a file normal, but possibly dirty.'''
         if self._pl[1] != nullid and f in self._map:
             # if there is a merge going on and the file was either
             # in state 'm' (-1) or coming from other parent (-2) before
             # being removed, restore that state.
             entry = self._map[f]
             if entry[0] == 'r' and entry[2] in (-1, -2):
                 source = self._copymap.get(f)
                 if entry[2] == -1:
                     self.merge(f)
                 elif entry[2] == -2:
                     self.otherparent(f)
                 if source:
                     self.copy(source, f)
                 return
             if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
                 return
         self._dirty = True
         self._addpath(f)
         self._map[f] = ('n', 0, -1, -1)
         if f in self._copymap:
             del self._copymap[f]
 
     def otherparent(self, f):
         '''Mark as coming from the other parent, always dirty.'''
         if self._pl[1] == nullid:
             raise util.Abort(_("setting %r to other parent "
                                "only allowed in merges") % f)
         self._dirty = True
         self._addpath(f)
         self._map[f] = ('n', 0, -2, -1)
         if f in self._copymap:
             del self._copymap[f]
 
     def add(self, f):
         '''Mark a file added.'''
         self._dirty = True
         self._addpath(f, True)
         self._map[f] = ('a', 0, -1, -1)
         if f in self._copymap:
             del self._copymap[f]
 
     def remove(self, f):
         '''Mark a file removed.'''
         self._dirty = True
         self._droppath(f)
         size = 0
         if self._pl[1] != nullid and f in self._map:
             # backup the previous state
             entry = self._map[f]
             if entry[0] == 'm': # merge
                 size = -1
             elif entry[0] == 'n' and entry[2] == -2: # other parent
                 size = -2
         self._map[f] = ('r', 0, size, 0)
         if size == 0 and f in self._copymap:
             del self._copymap[f]
 
     def merge(self, f):
         '''Mark a file merged.'''
         self._dirty = True
         s = os.lstat(self._join(f))
         self._addpath(f)
         self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime))
         if f in self._copymap:
             del self._copymap[f]
 
     def drop(self, f):
         '''Drop a file from the dirstate'''
         if f in self._map:
             self._dirty = True
             self._droppath(f)
             del self._map[f]
 
     def _normalize(self, path, isknown):
         normed = util.normcase(path)
         folded = self._foldmap.get(normed, None)
         if folded is None:
             if isknown or not os.path.lexists(os.path.join(self._root, path)):
                 folded = path
             else:
                 folded = self._foldmap.setdefault(normed,
                                 util.fspath(normed, self._normroot))
         return folded
 
     def normalize(self, path, isknown=False):
         '''
         normalize the case of a pathname when on a casefolding filesystem
 
         isknown specifies whether the filename came from walking the
         disk, to avoid extra filesystem access
 
         The normalized case is determined based on the following precedence:
 
         - version of name already stored in the dirstate
         - version of name stored on disk
         - version provided via command arguments
         '''
 
         if self._checkcase:
             return self._normalize(path, isknown)
         return path
 
     def clear(self):
         self._map = {}
         if "_dirs" in self.__dict__:
             delattr(self, "_dirs")
         self._copymap = {}
         self._pl = [nullid, nullid]
         self._lastnormaltime = 0
         self._dirty = True
 
     def rebuild(self, parent, files):
         self.clear()
         for f in files:
             if 'x' in files.flags(f):
                 self._map[f] = ('n', 0777, -1, 0)
             else:
                 self._map[f] = ('n', 0666, -1, 0)
         self._pl = (parent, nullid)
         self._dirty = True
 
     def write(self):
         if not self._dirty:
             return
         st = self._opener("dirstate", "w", atomictemp=True)
 
         # use the modification time of the newly created temporary file as the
         # filesystem's notion of 'now'
         now = int(util.fstat(st).st_mtime)
 
         cs = cStringIO.StringIO()
         copymap = self._copymap
         pack = struct.pack
         write = cs.write
         write("".join(self._pl))
         for f, e in self._map.iteritems():
             if e[0] == 'n' and e[3] == now:
                 # The file was last modified "simultaneously" with the current
                 # write to dirstate (i.e. within the same second for file-
                 # systems with a granularity of 1 sec). This commonly happens
                 # for at least a couple of files on 'update'.
                 # The user could change the file without changing its size
                 # within the same second. Invalidate the file's stat data in
                 # dirstate, forcing future 'status' calls to compare the
                 # contents of the file. This prevents mistakenly treating such
                 # files as clean.
                 e = (e[0], 0, -1, -1) # mark entry as 'unset'
                 self._map[f] = e
 
             if f in copymap:
                 f = "%s\0%s" % (f, copymap[f])
             e = pack(_format, e[0], e[1], e[2], e[3], len(f))
             write(e)
             write(f)
         st.write(cs.getvalue())
         st.close()
         self._lastnormaltime = 0
         self._dirty = self._dirtypl = False
 
     def _dirignore(self, f):
         if f == '.':
             return False
         if self._ignore(f):
             return True
         for p in _finddirs(f):
             if self._ignore(p):
                 return True
         return False
 
     def walk(self, match, subrepos, unknown, ignored):
         '''
         Walk recursively through the directory tree, finding all files
         matched by match.
 
         Return a dict mapping filename to stat-like object (either
         mercurial.osutil.stat instance or return value of os.stat()).
         '''
 
         def fwarn(f, msg):
             self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
             return False
 
         def badtype(mode):
             kind = _('unknown')
             if stat.S_ISCHR(mode):
                 kind = _('character device')
             elif stat.S_ISBLK(mode):
                 kind = _('block device')
             elif stat.S_ISFIFO(mode):
                 kind = _('fifo')
             elif stat.S_ISSOCK(mode):
                 kind = _('socket')
             elif stat.S_ISDIR(mode):
                 kind = _('directory')
             return _('unsupported file type (type is %s)') % kind
 
         ignore = self._ignore
         dirignore = self._dirignore
         if ignored:
             ignore = util.never
             dirignore = util.never
         elif not unknown:
             # if unknown and ignored are False, skip step 2
             ignore = util.always
             dirignore = util.always
 
         matchfn = match.matchfn
         badfn = match.bad
         dmap = self._map
         normpath = util.normpath
         listdir = osutil.listdir
         lstat = os.lstat
         getkind = stat.S_IFMT
         dirkind = stat.S_IFDIR
         regkind = stat.S_IFREG
         lnkkind = stat.S_IFLNK
         join = self._join
         work = []
         wadd = work.append
 
         exact = skipstep3 = False
         if matchfn == match.exact: # match.exact
             exact = True
             dirignore = util.always # skip step 2
         elif match.files() and not match.anypats(): # match.match, no patterns
             skipstep3 = True
 
         if self._checkcase:
             normalize = self._normalize
             skipstep3 = False
         else:
             normalize = lambda x, y: x
 
         files = sorted(match.files())
         subrepos.sort()
         i, j = 0, 0
         while i < len(files) and j < len(subrepos):
             subpath = subrepos[j] + "/"
             if files[i] < subpath:
                 i += 1
                 continue
             while i < len(files) and files[i].startswith(subpath):
                 del files[i]
             j += 1
 
         if not files or '.' in files:
             files = ['']
         results = dict.fromkeys(subrepos)
         results['.hg'] = None
 
         # step 1: find all explicit files
         for ff in files:
             nf = normalize(normpath(ff), False)
             if nf in results:
                 continue
 
             try:
                 st = lstat(join(nf))
                 kind = getkind(st.st_mode)
                 if kind == dirkind:
                     skipstep3 = False
                     if nf in dmap:
                         #file deleted on disk but still in dirstate
                         results[nf] = None
                     match.dir(nf)
                     if not dirignore(nf):
                         wadd(nf)
                 elif kind == regkind or kind == lnkkind:
                     results[nf] = st
                 else:
                     badfn(ff, badtype(kind))
                     if nf in dmap:
                         results[nf] = None
             except OSError, inst:
                 if nf in dmap: # does it exactly match a file?
                     results[nf] = None
                 else: # does it match a directory?
                     prefix = nf + "/"
                     for fn in dmap:
                         if fn.startswith(prefix):
                             match.dir(nf)
                             skipstep3 = False
                             break
                     else:
                         badfn(ff, inst.strerror)
 
         # step 2: visit subdirectories
         while work:
             nd = work.pop()
             skip = None
             if nd == '.':
                 nd = ''
             else:
                 skip = '.hg'
             try:
                 entries = listdir(join(nd), stat=True, skip=skip)
             except OSError, inst:
                 if inst.errno == errno.EACCES:
                     fwarn(nd, inst.strerror)
                     continue
                 raise
             for f, kind, st in entries:
                 nf = normalize(nd and (nd + "/" + f) or f, True)
                 if nf not in results:
                     if kind == dirkind:
                         if not ignore(nf):
                             match.dir(nf)
                             wadd(nf)
                         if nf in dmap and matchfn(nf):
                             results[nf] = None
                     elif kind == regkind or kind == lnkkind:
                         if nf in dmap:
                             if matchfn(nf):
                                 results[nf] = st
                         elif matchfn(nf) and not ignore(nf):
                             results[nf] = st
                     elif nf in dmap and matchfn(nf):
                         results[nf] = None
 
         # step 3: report unseen items in the dmap hash
         if not skipstep3 and not exact:
             visit = sorted([f for f in dmap if f not in results and matchfn(f)])
             for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
                 if not st is None and not getkind(st.st_mode) in (regkind, lnkkind):
                     st = None
                 results[nf] = st
         for s in subrepos:
             del results[s]
         del results['.hg']
         return results
 
     def status(self, match, subrepos, ignored, clean, unknown):
         '''Determine the status of the working copy relative to the
         dirstate and return a tuple of lists (unsure, modified, added,
         removed, deleted, unknown, ignored, clean), where:
 
           unsure:
             files that might have been modified since the dirstate was
             written, but need to be read to be sure (size is the same
             but mtime differs)
           modified:
             files that have definitely been modified since the dirstate
             was written (different size or mode)
           added:
             files that have been explicitly added with hg add
           removed:
             files that have been explicitly removed with hg remove
           deleted:
             files that have been deleted through other means ("missing")
           unknown:
             files not in the dirstate that are not ignored
           ignored:
             files not in the dirstate that are ignored
             (by _dirignore())
           clean:
             files that have definitely not been modified since the
             dirstate was written
         '''
         listignored, listclean, listunknown = ignored, clean, unknown
         lookup, modified, added, unknown, ignored = [], [], [], [], []
         removed, deleted, clean = [], [], []
 
         dmap = self._map
         ladd = lookup.append # aka "unsure"
         madd = modified.append
         aadd = added.append
         uadd = unknown.append
         iadd = ignored.append
         radd = removed.append
         dadd = deleted.append
         cadd = clean.append
 
         lnkkind = stat.S_IFLNK
 
         for fn, st in self.walk(match, subrepos, listunknown,
                                 listignored).iteritems():
             if fn not in dmap:
                 if (listignored or match.exact(fn)) and self._dirignore(fn):
                     if listignored:
                         iadd(fn)
                 elif listunknown:
                     uadd(fn)
                 continue
 
             state, mode, size, time = dmap[fn]
 
             if not st and state in "nma":
                 dadd(fn)
             elif state == 'n':
                 # The "mode & lnkkind != lnkkind or self._checklink"
                 # lines are an expansion of "islink => checklink"
                 # where islink means "is this a link?" and checklink
                 # means "can we check links?".
                 mtime = int(st.st_mtime)
                 if (size >= 0 and
                     (size != st.st_size
                      or ((mode ^ st.st_mode) & 0100 and self._checkexec))
                     and (mode & lnkkind != lnkkind or self._checklink)
                     or size == -2 # other parent
                     or fn in self._copymap):
                     madd(fn)
                 elif (mtime != time
                       and (mode & lnkkind != lnkkind or self._checklink)):
                     ladd(fn)
                 elif mtime == self._lastnormaltime:
                     # fn may have been changed in the same timeslot without
                     # changing its size. This can happen if we quickly do
                     # multiple commits in a single transaction.
                     # Force lookup, so we don't miss such a racy file change.
                     ladd(fn)
                 elif listclean:
                     cadd(fn)
             elif state == 'm':
729 madd(fn)
741 madd(fn)
730 elif state == 'a':
742 elif state == 'a':
731 aadd(fn)
743 aadd(fn)
732 elif state == 'r':
744 elif state == 'r':
733 radd(fn)
745 radd(fn)
734
746
735 return (lookup, modified, added, removed, deleted, unknown, ignored,
747 return (lookup, modified, added, removed, deleted, unknown, ignored,
736 clean)
748 clean)
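# --- illustrative sketch, not part of this changeset ---
# How the eight-tuple returned by status() above is typically consumed.
# It assumes a repository in the current directory and uses the Mercurial
# 2.x internal API (hg.repository, match.always); treat it as a sketch,
# not canonical usage.
from mercurial import ui as uimod, hg
from mercurial import match as matchmod

repo = hg.repository(uimod.ui(), '.')
m = matchmod.always(repo.root, repo.getcwd())   # match every file
unsure, modified, added, removed, deleted, unknown, ignored, clean = \
    repo.dirstate.status(m, [], False, True, True)
# "unsure" files have the recorded size but a different mtime, so only a
# content comparison against the parent revision can classify them.
for f in unsure:
    print '%s: compare contents to decide clean vs. modified' % f
# --- end sketch ---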
@@ -1,2336 +1,2344 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

+class storecache(filecache):
+    """filecache for files in the store"""
+    def join(self, obj, fname):
+        return obj.sjoin(fname)
+
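# --- illustrative sketch, not part of this changeset ---
# Why the storecache subclass above exists: filecache resolves its name
# under .hg/ via repo.join, while store files live under .hg/store/ and
# must be resolved via repo.sjoin, or the stat-based invalidation would
# watch a path that never changes. The repository path is hypothetical.
from mercurial import ui as uimod, hg

repo = hg.repository(uimod.ui(), '.')
print repo.join('phaseroots')   # .hg/phaseroots - what plain filecache stats
print repo.sjoin('phaseroots')  # .hg/store/phaseroots - what storecache stats
# --- end sketch ---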
class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))

    def __init__(self, baseui, path=None, create=False):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()
        self._dirtyphases = False
        # A list of callbacks to shape the phase if no data is found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.opener, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

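# --- illustrative sketch, not part of this changeset ---
# __init__ above is normally reached through mercurial.hg.repository(),
# which selects the repository class from the path scheme. A minimal
# open-and-inspect sketch; the path is hypothetical.
from mercurial import ui as uimod, hg

repo = hg.repository(uimod.ui(), '/path/to/repo')
print repo.root          # working directory root
print len(repo)          # number of changesets (via __len__ below)
print repo.requirements  # e.g. set(['revlogv1', 'store', 'fncache', 'dotencode'])
# --- end sketch ---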
    def _applyrequirements(self, requirements):
        self.requirements = requirements
        openerreqs = set(('revlogv1', 'generaldelta'))
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

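# --- illustrative sketch, not part of this changeset ---
# _writerequirements() above emits one requirement token per line into
# .hg/requires; scmutil.readrequires() is its inverse and refuses tokens
# outside localrepository.supported. Reading the file by hand (the path
# is hypothetical):
fp = open('/path/to/repo/.hg/requires')
for line in fp:
    print line.rstrip('\n')  # e.g. revlogv1, store, fncache, dotencode
fp.close()
# --- end sketch ---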
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        bookmarks.write(self)

-    @filecache('phaseroots', True)
+    @storecache('phaseroots')
    def _phaseroots(self):
        self._dirtyphases = False
        phaseroots = phases.readroots(self)
        phases.filterunknown(self, phaseroots)
        return phaseroots

    @propertycache
    def _phaserev(self):
        cache = [phases.public] * len(self)
        for phase in phases.trackedphases:
            roots = map(self.changelog.rev, self._phaseroots[phase])
            if roots:
                for rev in roots:
                    cache[rev] = phase
                for rev in self.changelog.descendants(*roots):
                    cache[rev] = phase
        return cache

-    @filecache('00changelog.i', True)
+    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

-    @filecache('00manifest.i', True)
+    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, range(len(self)))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

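# --- illustrative sketch, not part of this changeset ---
# revs() and set() above both funnel through revset.formatspec, which
# safely interpolates arguments (%d int, %s string, %r raw revset) into
# the expression before parsing. The queries below are hypothetical.
from mercurial import ui as uimod, hg

repo = hg.repository(uimod.ui(), '.')
for rev in repo.revs('ancestors(%d) and not public()', 42):
    print rev                       # plain integer revisions
for ctx in repo.set('heads(branch(%s))', 'default'):
    print ctx.hex(), ctx.branch()   # full changectx objects
# --- end sketch ---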
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

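# --- illustrative sketch, not part of this changeset ---
# Driving tag() above: a global tag rewrites .hgtags and commits; a local
# one only appends to .hg/localtags. The tag name, user, and repository
# path here are hypothetical, and date=None lets commit pick the current
# date.
from mercurial import ui as uimod, hg

repo = hg.repository(uimod.ui(), '.')
node = repo['.'].node()  # tag the working directory's parent
repo.tag('v1.0', node, 'Added tag v1.0', False,
         'Example User <user@example.com>', None)
# --- end sketch ---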
    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        return self._tagscache.tags

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                try:
                    # ignore tags to unknown nodes
                    self.changelog.lookup(node)
                    tags[encoding.tolocal(name)] = node
                except error.LookupError:
                    pass
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self.tags().iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt

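# --- illustrative sketch, not part of this changeset ---
# The two query shapes above: branchmap() yields every head per branch
# (binary nodes, tip-most last), while branchtags() collapses that to a
# single tip-most open head per branch.
from mercurial import ui as uimod, hg
from mercurial.node import short

repo = hg.repository(uimod.ui(), '.')
for name, heads in repo.branchmap().iteritems():
    print name, [short(n) for n in heads]
for name, tip in repo.branchtags().iteritems():
    print name, short(tip)
# --- end sketch ---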
    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

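# --- illustrative sketch, not part of this changeset ---
# On-disk format handled by the two methods above: line one is
# '<tip hex> <tip rev>' (the validity stamp), every other line is
# '<head hex> <branch name>'. Parsing it by hand, mirroring
# _readbranchcache; the path is hypothetical.
f = open('/path/to/repo/.hg/cache/branchheads')
lines = f.read().split('\n')
f.close()
last, lrev = lines.pop(0).split(" ", 1)
print 'cache stamped at rev %s (%s)' % (lrev, last)
for l in lines:
    if l:
        node, label = l.split(" ", 1)
        print label.strip(), node
# --- end sketch ---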
    def _updatebranchcache(self, partial, ctxgen):
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                if reachable:
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.p1()
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)

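# --- illustrative sketch, not part of this changeset ---
# The key kinds lookup() above resolves, in order: integers, the
# '.'/'null'/'tip' specials, full hex nodes, bookmarks, tags, branch
# names, then unambiguous hex prefixes. 'v1.0' is a hypothetical tag.
from mercurial import ui as uimod, hg
from mercurial.node import short

repo = hg.repository(uimod.ui(), '.')
print short(repo.lookup(0))       # integer revision
print short(repo.lookup('.'))     # first parent of the working directory
print short(repo.lookup('tip'))   # tip changeset
print short(repo.lookup('v1.0'))  # tag (tried after bookmarks)
# --- end sketch ---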
    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or self._phaserev[r] >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

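# --- illustrative sketch, not part of this changeset ---
# _loadfilter()/_filter() above implement the [encode]/[decode] hgrc
# sections: each pattern maps to a shell command (optionally prefixed
# with 'pipe:' or 'tempfile:') or to a filter registered through
# adddatafilter() below. A typical line-ending setup in hgrc might be:
#
#     [encode]
#     **.txt = tempfile: dos2unix -n INFILE OUTFILE
#     [decode]
#     **.txt = tempfile: unix2dos -n INFILE OUTFILE
#
# wread() below then applies encode filters on the way into the store
# and wwrite() applies decode filters on the way back out; the filename
# is hypothetical.
from mercurial import ui as uimod, hg

repo = hg.repository(uimod.ui(), '.')
data = repo.wread('README.txt')      # encode filters applied
repo.wwrite('README.txt', data, '')  # decode filters applied
# --- end sketch ---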
    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        journalfiles = self._writejournal(desc)
        renames = [(x, undoname(x)) for x in journalfiles]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

    def _writejournal(self, desc):
        # save dirstate for rollback
        try:
            ds = self.opener.read("dirstate")
        except IOError:
            ds = ""
        self.opener.write("journal.dirstate", ds)
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))

        bkname = self.join('bookmarks')
        if os.path.exists(bkname):
            util.copyfile(bkname, self.join('journal.bookmarks'))
        else:
            self.opener.write('journal.bookmarks', '')
        phasesname = self.sjoin('phaseroots')
        if os.path.exists(phasesname):
            util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
        else:
            self.sopener.write('journal.phaseroots', '')

        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

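# --- illustrative sketch, not part of this changeset ---
# The standard calling pattern for transaction() above: close() commits
# the journal, release() aborts (replays it) if close() never ran. The
# journal files written by _writejournal() are what recover() and
# rollback() below consume.
from mercurial import ui as uimod, hg

repo = hg.repository(uimod.ui(), '.')
tr = repo.transaction('example')  # 'example' becomes the journal.desc text
try:
    # ... append revlog data through tr here ...
    tr.close()
finally:
    tr.release()
# --- end sketch ---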
786 def recover(self):
791 def recover(self):
787 lock = self.lock()
792 lock = self.lock()
788 try:
793 try:
789 if os.path.exists(self.sjoin("journal")):
794 if os.path.exists(self.sjoin("journal")):
790 self.ui.status(_("rolling back interrupted transaction\n"))
795 self.ui.status(_("rolling back interrupted transaction\n"))
791 transaction.rollback(self.sopener, self.sjoin("journal"),
796 transaction.rollback(self.sopener, self.sjoin("journal"),
792 self.ui.warn)
797 self.ui.warn)
793 self.invalidate()
798 self.invalidate()
794 return True
799 return True
795 else:
800 else:
796 self.ui.warn(_("no interrupted transaction available\n"))
801 self.ui.warn(_("no interrupted transaction available\n"))
797 return False
802 return False
798 finally:
803 finally:
799 lock.release()
804 lock.release()
800
805
801 def rollback(self, dryrun=False, force=False):
806 def rollback(self, dryrun=False, force=False):
802 wlock = lock = None
807 wlock = lock = None
803 try:
808 try:
804 wlock = self.wlock()
809 wlock = self.wlock()
805 lock = self.lock()
810 lock = self.lock()
806 if os.path.exists(self.sjoin("undo")):
811 if os.path.exists(self.sjoin("undo")):
807 return self._rollback(dryrun, force)
812 return self._rollback(dryrun, force)
808 else:
813 else:
809 self.ui.warn(_("no rollback information available\n"))
814 self.ui.warn(_("no rollback information available\n"))
810 return 1
815 return 1
811 finally:
816 finally:
812 release(lock, wlock)
817 release(lock, wlock)
813
818
814 def _rollback(self, dryrun, force):
819 def _rollback(self, dryrun, force):
815 ui = self.ui
820 ui = self.ui
816 try:
821 try:
817 args = self.opener.read('undo.desc').splitlines()
822 args = self.opener.read('undo.desc').splitlines()
818 (oldlen, desc, detail) = (int(args[0]), args[1], None)
823 (oldlen, desc, detail) = (int(args[0]), args[1], None)
819 if len(args) >= 3:
824 if len(args) >= 3:
820 detail = args[2]
825 detail = args[2]
821 oldtip = oldlen - 1
826 oldtip = oldlen - 1
822
827
823 if detail and ui.verbose:
828 if detail and ui.verbose:
824 msg = (_('repository tip rolled back to revision %s'
829 msg = (_('repository tip rolled back to revision %s'
825 ' (undo %s: %s)\n')
830 ' (undo %s: %s)\n')
826 % (oldtip, desc, detail))
831 % (oldtip, desc, detail))
827 else:
832 else:
828 msg = (_('repository tip rolled back to revision %s'
833 msg = (_('repository tip rolled back to revision %s'
829 ' (undo %s)\n')
834 ' (undo %s)\n')
830 % (oldtip, desc))
835 % (oldtip, desc))
831 except IOError:
836 except IOError:
832 msg = _('rolling back unknown transaction\n')
837 msg = _('rolling back unknown transaction\n')
833 desc = None
838 desc = None
834
839
        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        self.destroyed()
        return 0

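    # A hedged sketch of the 'undo.desc' layout parsed in _rollback() above
    # (the field meanings follow from the read() call; the sample values
    # here are hypothetical):
    #
    #     42        <- changelog length before the transaction (int(args[0]))
    #     commit    <- description of the undone operation (args[1])
    #     detail    <- optional third line, reported under ui.verbose
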
    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')
        delcache('_phaserev')

        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly reread the dirstate (i.e. restore it to a previously
        known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')

    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

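    # Sketch of the caching contract assumed by invalidate(): each key in
    # self._filecache names a property cached on this instance and backed
    # by a file; deleting the attribute forces a re-read on next access.
    # Illustrative use ('_bookmarks' being such a filecache'd property is
    # an assumption here):
    #
    #     repo.invalidate()
    #     repo._bookmarks   # re-read from disk, not served stale
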
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)

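    # Minimal usage sketch for _afterlock() (the callback name is
    # hypothetical): defer work until the store lock drops.
    #
    #     def _notify():
    #         repo.ui.status('lock released\n')
    #     repo._afterlock(_notify)
    #
    # The callback fires from the lock's postrelease list on release().
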
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if self._dirtyphases:
                phases.writeroots(self)
                self._dirtyphases = False
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

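    # Typical calling pattern for lock() (a sketch; pull() below uses the
    # same shape): pair acquisition with release in try/finally.
    #
    #     l = repo.lock()
    #     try:
    #         pass  # modify the store
    #     finally:
    #         l.release()
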
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 --/     rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

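    # Illustration of the copy metadata produced above: when a rename is
    # recorded, the new filelog revision carries a meta dict such as
    # (values hypothetical)
    #
    #     {'copy': 'foo', 'copyrev': '35fb62a3a673...40 hex digits'}
    #
    # and fparent1 is set to nullid so readers know to consult the copy data.
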
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

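    # Minimal usage sketch for commit() (assuming pending working-directory
    # changes; the message and user are illustrative):
    #
    #     node = repo.commit(text='fix widget', user='alice')
    #     if node is None:
    #         repo.ui.status('nothing changed\n')
    #
    # The None return mirrors the "nothing to commit" early exit above.
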
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

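    # Phase behavior sketch for commitctx(): newcommitphase() consults the
    # 'phases.new-commit' config, so with
    #
    #     [phases]
    #     new-commit = secret
    #
    # the retractboundary() call above records the fresh commit as secret
    # rather than draft (assuming the config name matches this release).
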
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

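    # The 7-tuple above unpacks in this fixed order (a usage sketch; the
    # names mirror the locals in status()):
    #
    #     modified, added, removed, deleted, unknown, ignored, clean = \
    #         repo.status(unknown=True, ignored=True, clean=True)
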
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

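    # Usage sketch for branchheads(): heads of the current branch including
    # closed ones, newest first per the docstring:
    #
    #     for h in repo.branchheads(closed=True):
    #         repo.ui.status('%s\n' % hex(h))
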
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

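    # Worked example of the spacing loop in between(): a node is sampled
    # whenever i == f, and f doubles after each sample, so samples sit at
    # distances 1, 2, 4, 8, ... from 'top' along first parents. On a linear
    # chain with top at rev 10 and bottom at rev 0, the returned list holds
    # the nodes of revs 9, 8, 6 and 2.
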
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)
        finally:
            lock.release()

        return result

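    # Usage sketch for pull() (obtaining 'remote' via hg.repository() is an
    # assumption; any object offering capable()/getbundle()/listkeys() works):
    #
    #     other = hg.repository(repo.ui, 'http://example.com/repo')
    #     repo.pull(other)            # everything, the heads=None path above
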
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

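    # Extension sketch for checkpush() (hypothetical reposetup code): veto
    # non-forced pushes by overriding the hook point documented above.
    #
    #     class vetorepo(repo.__class__):
    #         def checkpush(self, force, revs):
    #             if not force:
    #                 raise util.Abort('pushes disabled on this repository')
    #     repo.__class__ = vetorepo
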
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All-out push failed; synchronize all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changeset filtered out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote but public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly roots; we may want to ensure it is, but
                    # XXX that is costly.
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
1744 finally:
1752 finally:
1745 if lock is not None:
1753 if lock is not None:
1746 lock.release()
1754 lock.release()
1747 finally:
1755 finally:
1748 locallock.release()
1756 locallock.release()
1749
1757
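A minimal sketch of the phase exchange performed above: phases travel over the generic pushkey protocol, with a changeset hex as the key and stringified phase numbers as the old/new values (public = 0, draft = 1). The `remote` peer and the node are assumptions.

    def pushphase(remote, nodehex, oldphase, newphase):
        # remote.pushkey() returns a truthy value on success, as checked above
        return remote.pushkey('phases', nodehex, str(oldphase), str(newphase))

    # mirroring the loop above: turn a draft remote head public
    # pushphase(remote, newremotehead.hex(), 1, 0)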
1750 self.ui.debug("checking for updated bookmarks\n")
1758 self.ui.debug("checking for updated bookmarks\n")
1751 rb = remote.listkeys('bookmarks')
1759 rb = remote.listkeys('bookmarks')
1752 for k in rb.keys():
1760 for k in rb.keys():
1753 if k in self._bookmarks:
1761 if k in self._bookmarks:
1754 nr, nl = rb[k], hex(self._bookmarks[k])
1762 nr, nl = rb[k], hex(self._bookmarks[k])
1755 if nr in self:
1763 if nr in self:
1756 cr = self[nr]
1764 cr = self[nr]
1757 cl = self[nl]
1765 cl = self[nl]
1758 if cl in cr.descendants():
1766 if cl in cr.descendants():
1759 r = remote.pushkey('bookmarks', k, nr, nl)
1767 r = remote.pushkey('bookmarks', k, nr, nl)
1760 if r:
1768 if r:
1761 self.ui.status(_("updating bookmark %s\n") % k)
1769 self.ui.status(_("updating bookmark %s\n") % k)
1762 else:
1770 else:
1763 self.ui.warn(_('updating bookmark %s'
1771 self.ui.warn(_('updating bookmark %s'
1764 ' failed!\n') % k)
1772 ' failed!\n') % k)
1765
1773
1766 return ret
1774 return ret
1767
1775
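The bookmark loop above only pushes an update when the move is a fast-forward, i.e. the new local target descends from the current remote target. A hedged restatement of that test, using the same changectx API (`repo[x]`, `descendants()`) the loop relies on:

    def bookmarkisfastforward(repo, remotehex, localhex):
        # the new (local) target must descend from the current remote target
        return repo[localhex] in repo[remotehex].descendants()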
1768 def changegroupinfo(self, nodes, source):
1776 def changegroupinfo(self, nodes, source):
1769 if self.ui.verbose or source == 'bundle':
1777 if self.ui.verbose or source == 'bundle':
1770 self.ui.status(_("%d changesets found\n") % len(nodes))
1778 self.ui.status(_("%d changesets found\n") % len(nodes))
1771 if self.ui.debugflag:
1779 if self.ui.debugflag:
1772 self.ui.debug("list of changesets:\n")
1780 self.ui.debug("list of changesets:\n")
1773 for node in nodes:
1781 for node in nodes:
1774 self.ui.debug("%s\n" % hex(node))
1782 self.ui.debug("%s\n" % hex(node))
1775
1783
1776 def changegroupsubset(self, bases, heads, source):
1784 def changegroupsubset(self, bases, heads, source):
1777 """Compute a changegroup consisting of all the nodes that are
1785 """Compute a changegroup consisting of all the nodes that are
1778 descendants of any of the bases and ancestors of any of the heads.
1786 descendants of any of the bases and ancestors of any of the heads.
1779 Return a chunkbuffer object whose read() method will return
1787 Return a chunkbuffer object whose read() method will return
1780 successive changegroup chunks.
1788 successive changegroup chunks.
1781
1789
1782 It is fairly complex as determining which filenodes and which
1790 It is fairly complex as determining which filenodes and which
1783 manifest nodes need to be included for the changeset to be complete
1791 manifest nodes need to be included for the changeset to be complete
1784 is non-trivial.
1792 is non-trivial.
1785
1793
1786 Another wrinkle is doing the reverse, figuring out which changeset in
1794 Another wrinkle is doing the reverse, figuring out which changeset in
1787 the changegroup a particular filenode or manifestnode belongs to.
1795 the changegroup a particular filenode or manifestnode belongs to.
1788 """
1796 """
1789 cl = self.changelog
1797 cl = self.changelog
1790 if not bases:
1798 if not bases:
1791 bases = [nullid]
1799 bases = [nullid]
1792 csets, bases, heads = cl.nodesbetween(bases, heads)
1800 csets, bases, heads = cl.nodesbetween(bases, heads)
1793 # We assume that all ancestors of bases are known
1801 # We assume that all ancestors of bases are known
1794 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1802 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1795 return self._changegroupsubset(common, csets, heads, source)
1803 return self._changegroupsubset(common, csets, heads, source)
1796
1804
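Illustrative only: the node set bundled by changegroupsubset() is "descendants of any base that are also ancestors of any head". The same set can be written as a revset DAG range, reusing the repo.set() helper seen earlier in this file:

    def subsetnodes(repo, bases, heads):
        # DAG range: nodes reachable from bases and reaching heads,
        # endpoints included, matching cl.nodesbetween(bases, heads)
        return [c.node() for c in repo.set('%ln::%ln', bases, heads)]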
1797 def getlocalbundle(self, source, outgoing):
1805 def getlocalbundle(self, source, outgoing):
1798 """Like getbundle, but taking a discovery.outgoing as an argument.
1806 """Like getbundle, but taking a discovery.outgoing as an argument.
1799
1807
1800 This is only implemented for local repos and reuses potentially
1808 This is only implemented for local repos and reuses potentially
1801 precomputed sets in outgoing."""
1809 precomputed sets in outgoing."""
1802 if not outgoing.missing:
1810 if not outgoing.missing:
1803 return None
1811 return None
1804 return self._changegroupsubset(outgoing.common,
1812 return self._changegroupsubset(outgoing.common,
1805 outgoing.missing,
1813 outgoing.missing,
1806 outgoing.missingheads,
1814 outgoing.missingheads,
1807 source)
1815 source)
1808
1816
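For orientation, a sketch of the fields getlocalbundle() and the push code above read from the discovery.outgoing container (attribute names as used in this file; the real class lives in discovery.py):

    class outgoingsketch(object):
        # minimal stand-in for discovery.outgoing
        def __init__(self, common, missing, missingheads, commonheads):
            self.common = common              # nodes both sides already have
            self.missing = missing            # nodes only the local side has
            self.missingheads = missingheads  # heads of the missing set
            self.commonheads = commonheads    # heads of the common set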
1809 def getbundle(self, source, heads=None, common=None):
1817 def getbundle(self, source, heads=None, common=None):
1810 """Like changegroupsubset, but returns the set difference between the
1818 """Like changegroupsubset, but returns the set difference between the
1811 ancestors of heads and the ancestors common.
1819 ancestors of heads and the ancestors common.
1812
1820
1813 If heads is None, use the local heads. If common is None, use [nullid].
1821 If heads is None, use the local heads. If common is None, use [nullid].
1814
1822
1815 The nodes in common might not all be known locally due to the way the
1823 The nodes in common might not all be known locally due to the way the
1816 current discovery protocol works.
1824 current discovery protocol works.
1817 """
1825 """
1818 cl = self.changelog
1826 cl = self.changelog
1819 if common:
1827 if common:
1820 nm = cl.nodemap
1828 nm = cl.nodemap
1821 common = [n for n in common if n in nm]
1829 common = [n for n in common if n in nm]
1822 else:
1830 else:
1823 common = [nullid]
1831 common = [nullid]
1824 if not heads:
1832 if not heads:
1825 heads = cl.heads()
1833 heads = cl.heads()
1826 return self.getlocalbundle(source,
1834 return self.getlocalbundle(source,
1827 discovery.outgoing(cl, common, heads))
1835 discovery.outgoing(cl, common, heads))
1828
1836
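The docstring above amounts to a set difference over revision numbers. A sketch, assuming (as the code at the top of changegroupsubset() does) that cl.ancestors(*revs) yields ancestors excluding the given revs themselves, hence the explicit updates:

    def bundlerevs(cl, heads, common):
        # ancestors(heads) - ancestors(common): what the remote still lacks
        want = set(cl.ancestors(*[cl.rev(h) for h in heads]))
        want.update(cl.rev(h) for h in heads)
        have = set(cl.ancestors(*[cl.rev(c) for c in common]))
        have.update(cl.rev(c) for c in common)
        return sorted(want - have)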
1829 def _changegroupsubset(self, commonrevs, csets, heads, source):
1837 def _changegroupsubset(self, commonrevs, csets, heads, source):
1830
1838
1831 cl = self.changelog
1839 cl = self.changelog
1832 mf = self.manifest
1840 mf = self.manifest
1833 mfs = {} # needed manifests
1841 mfs = {} # needed manifests
1834 fnodes = {} # needed file nodes
1842 fnodes = {} # needed file nodes
1835 changedfiles = set()
1843 changedfiles = set()
1836 fstate = ['', {}]
1844 fstate = ['', {}]
1837 count = [0]
1845 count = [0]
1838
1846
1839 # can we go through the fast path?
1847 # can we go through the fast path?
1840 heads.sort()
1848 heads.sort()
1841 if heads == sorted(self.heads()):
1849 if heads == sorted(self.heads()):
1842 return self._changegroup(csets, source)
1850 return self._changegroup(csets, source)
1843
1851
1844 # slow path
1852 # slow path
1845 self.hook('preoutgoing', throw=True, source=source)
1853 self.hook('preoutgoing', throw=True, source=source)
1846 self.changegroupinfo(csets, source)
1854 self.changegroupinfo(csets, source)
1847
1855
1848 # filter any nodes that claim to be part of the known set
1856 # filter any nodes that claim to be part of the known set
1849 def prune(revlog, missing):
1857 def prune(revlog, missing):
1850 return [n for n in missing
1858 return [n for n in missing
1851 if revlog.linkrev(revlog.rev(n)) not in commonrevs]
1859 if revlog.linkrev(revlog.rev(n)) not in commonrevs]
1852
1860
1853 def lookup(revlog, x):
1861 def lookup(revlog, x):
1854 if revlog == cl:
1862 if revlog == cl:
1855 c = cl.read(x)
1863 c = cl.read(x)
1856 changedfiles.update(c[3])
1864 changedfiles.update(c[3])
1857 mfs.setdefault(c[0], x)
1865 mfs.setdefault(c[0], x)
1858 count[0] += 1
1866 count[0] += 1
1859 self.ui.progress(_('bundling'), count[0],
1867 self.ui.progress(_('bundling'), count[0],
1860 unit=_('changesets'), total=len(csets))
1868 unit=_('changesets'), total=len(csets))
1861 return x
1869 return x
1862 elif revlog == mf:
1870 elif revlog == mf:
1863 clnode = mfs[x]
1871 clnode = mfs[x]
1864 mdata = mf.readfast(x)
1872 mdata = mf.readfast(x)
1865 for f in changedfiles:
1873 for f in changedfiles:
1866 if f in mdata:
1874 if f in mdata:
1867 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1875 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1868 count[0] += 1
1876 count[0] += 1
1869 self.ui.progress(_('bundling'), count[0],
1877 self.ui.progress(_('bundling'), count[0],
1870 unit=_('manifests'), total=len(mfs))
1878 unit=_('manifests'), total=len(mfs))
1871 return mfs[x]
1879 return mfs[x]
1872 else:
1880 else:
1873 self.ui.progress(
1881 self.ui.progress(
1874 _('bundling'), count[0], item=fstate[0],
1882 _('bundling'), count[0], item=fstate[0],
1875 unit=_('files'), total=len(changedfiles))
1883 unit=_('files'), total=len(changedfiles))
1876 return fstate[1][x]
1884 return fstate[1][x]
1877
1885
1878 bundler = changegroup.bundle10(lookup)
1886 bundler = changegroup.bundle10(lookup)
1879 reorder = self.ui.config('bundle', 'reorder', 'auto')
1887 reorder = self.ui.config('bundle', 'reorder', 'auto')
1880 if reorder == 'auto':
1888 if reorder == 'auto':
1881 reorder = None
1889 reorder = None
1882 else:
1890 else:
1883 reorder = util.parsebool(reorder)
1891 reorder = util.parsebool(reorder)
1884
1892
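The bundle.reorder knob parsed above is a tri-state: 'auto' maps to None, which lets the revlog layer decide whether to reorder; any other value goes through util.parsebool. A condensed restatement:

    # reorder = None   the revlog decides (e.g. based on its delta layout)
    # reorder = True   always reorder revisions before bundling
    # reorder = False  always keep the existing revlog order
    reorder = ui.config('bundle', 'reorder', 'auto')
    reorder = None if reorder == 'auto' else util.parsebool(reorder)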
1885 def gengroup():
1893 def gengroup():
1886 # Create a changenode group generator that will call our functions
1894 # Create a changenode group generator that will call our functions
1887 # back to lookup the owning changenode and collect information.
1895 # back to lookup the owning changenode and collect information.
1888 for chunk in cl.group(csets, bundler, reorder=reorder):
1896 for chunk in cl.group(csets, bundler, reorder=reorder):
1889 yield chunk
1897 yield chunk
1890 self.ui.progress(_('bundling'), None)
1898 self.ui.progress(_('bundling'), None)
1891
1899
1892 # Create a generator for the manifestnodes that calls our lookup
1900 # Create a generator for the manifestnodes that calls our lookup
1893 # and data collection functions back.
1901 # and data collection functions back.
1894 count[0] = 0
1902 count[0] = 0
1895 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1903 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1896 yield chunk
1904 yield chunk
1897 self.ui.progress(_('bundling'), None)
1905 self.ui.progress(_('bundling'), None)
1898
1906
1899 mfs.clear()
1907 mfs.clear()
1900
1908
1901 # Go through all our files in order sorted by name.
1909 # Go through all our files in order sorted by name.
1902 count[0] = 0
1910 count[0] = 0
1903 for fname in sorted(changedfiles):
1911 for fname in sorted(changedfiles):
1904 filerevlog = self.file(fname)
1912 filerevlog = self.file(fname)
1905 if not len(filerevlog):
1913 if not len(filerevlog):
1906 raise util.Abort(_("empty or missing revlog for %s") % fname)
1914 raise util.Abort(_("empty or missing revlog for %s") % fname)
1907 fstate[0] = fname
1915 fstate[0] = fname
1908 fstate[1] = fnodes.pop(fname, {})
1916 fstate[1] = fnodes.pop(fname, {})
1909
1917
1910 nodelist = prune(filerevlog, fstate[1])
1918 nodelist = prune(filerevlog, fstate[1])
1911 if nodelist:
1919 if nodelist:
1912 count[0] += 1
1920 count[0] += 1
1913 yield bundler.fileheader(fname)
1921 yield bundler.fileheader(fname)
1914 for chunk in filerevlog.group(nodelist, bundler, reorder):
1922 for chunk in filerevlog.group(nodelist, bundler, reorder):
1915 yield chunk
1923 yield chunk
1916
1924
1917 # Signal that no more groups are left.
1925 # Signal that no more groups are left.
1918 yield bundler.close()
1926 yield bundler.close()
1919 self.ui.progress(_('bundling'), None)
1927 self.ui.progress(_('bundling'), None)
1920
1928
1921 if csets:
1929 if csets:
1922 self.hook('outgoing', node=hex(csets[0]), source=source)
1930 self.hook('outgoing', node=hex(csets[0]), source=source)
1923
1931
1924 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1932 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1925
1933
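A hedged end-to-end sketch of consuming what gengroup() yields: one changelog group, one manifest group, then a file header plus group per changed file, terminated by bundler.close(). Per the docstrings above, the returned object exposes read(); base_node and head_node are placeholders:

    cg = repo.changegroupsubset([base_node], [head_node], 'bundle')
    out = open('subset.cg', 'wb')
    while True:
        data = cg.read(65536)   # '' once the closing chunk has been consumed
        if not data:
            break
        out.write(data)
    out.close()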
1926 def changegroup(self, basenodes, source):
1934 def changegroup(self, basenodes, source):
1927 # to avoid a race we use changegroupsubset() (issue1320)
1935 # to avoid a race we use changegroupsubset() (issue1320)
1928 return self.changegroupsubset(basenodes, self.heads(), source)
1936 return self.changegroupsubset(basenodes, self.heads(), source)
1929
1937
1930 def _changegroup(self, nodes, source):
1938 def _changegroup(self, nodes, source):
1931 """Compute the changegroup of all nodes that we have that a recipient
1939 """Compute the changegroup of all nodes that we have that a recipient
1932 doesn't. Return a chunkbuffer object whose read() method will return
1940 doesn't. Return a chunkbuffer object whose read() method will return
1933 successive changegroup chunks.
1941 successive changegroup chunks.
1934
1942
1935 This is much easier than the previous function as we can assume that
1943 This is much easier than the previous function as we can assume that
1936 the recipient has any changenode we aren't sending them.
1944 the recipient has any changenode we aren't sending them.
1937
1945
1938 nodes is the set of nodes to send"""
1946 nodes is the set of nodes to send"""
1939
1947
1940 cl = self.changelog
1948 cl = self.changelog
1941 mf = self.manifest
1949 mf = self.manifest
1942 mfs = {}
1950 mfs = {}
1943 changedfiles = set()
1951 changedfiles = set()
1944 fstate = ['']
1952 fstate = ['']
1945 count = [0]
1953 count = [0]
1946
1954
1947 self.hook('preoutgoing', throw=True, source=source)
1955 self.hook('preoutgoing', throw=True, source=source)
1948 self.changegroupinfo(nodes, source)
1956 self.changegroupinfo(nodes, source)
1949
1957
1950 revset = set([cl.rev(n) for n in nodes])
1958 revset = set([cl.rev(n) for n in nodes])
1951
1959
1952 def gennodelst(log):
1960 def gennodelst(log):
1953 return [log.node(r) for r in log if log.linkrev(r) in revset]
1961 return [log.node(r) for r in log if log.linkrev(r) in revset]
1954
1962
1955 def lookup(revlog, x):
1963 def lookup(revlog, x):
1956 if revlog == cl:
1964 if revlog == cl:
1957 c = cl.read(x)
1965 c = cl.read(x)
1958 changedfiles.update(c[3])
1966 changedfiles.update(c[3])
1959 mfs.setdefault(c[0], x)
1967 mfs.setdefault(c[0], x)
1960 count[0] += 1
1968 count[0] += 1
1961 self.ui.progress(_('bundling'), count[0],
1969 self.ui.progress(_('bundling'), count[0],
1962 unit=_('changesets'), total=len(nodes))
1970 unit=_('changesets'), total=len(nodes))
1963 return x
1971 return x
1964 elif revlog == mf:
1972 elif revlog == mf:
1965 count[0] += 1
1973 count[0] += 1
1966 self.ui.progress(_('bundling'), count[0],
1974 self.ui.progress(_('bundling'), count[0],
1967 unit=_('manifests'), total=len(mfs))
1975 unit=_('manifests'), total=len(mfs))
1968 return cl.node(revlog.linkrev(revlog.rev(x)))
1976 return cl.node(revlog.linkrev(revlog.rev(x)))
1969 else:
1977 else:
1970 self.ui.progress(
1978 self.ui.progress(
1971 _('bundling'), count[0], item=fstate[0],
1979 _('bundling'), count[0], item=fstate[0],
1972 total=len(changedfiles), unit=_('files'))
1980 total=len(changedfiles), unit=_('files'))
1973 return cl.node(revlog.linkrev(revlog.rev(x)))
1981 return cl.node(revlog.linkrev(revlog.rev(x)))
1974
1982
1975 bundler = changegroup.bundle10(lookup)
1983 bundler = changegroup.bundle10(lookup)
1976 reorder = self.ui.config('bundle', 'reorder', 'auto')
1984 reorder = self.ui.config('bundle', 'reorder', 'auto')
1977 if reorder == 'auto':
1985 if reorder == 'auto':
1978 reorder = None
1986 reorder = None
1979 else:
1987 else:
1980 reorder = util.parsebool(reorder)
1988 reorder = util.parsebool(reorder)
1981
1989
1982 def gengroup():
1990 def gengroup():
1983 '''yield a sequence of changegroup chunks (strings)'''
1991 '''yield a sequence of changegroup chunks (strings)'''
1984 # construct a list of all changed files
1992 # construct a list of all changed files
1985
1993
1986 for chunk in cl.group(nodes, bundler, reorder=reorder):
1994 for chunk in cl.group(nodes, bundler, reorder=reorder):
1987 yield chunk
1995 yield chunk
1988 self.ui.progress(_('bundling'), None)
1996 self.ui.progress(_('bundling'), None)
1989
1997
1990 count[0] = 0
1998 count[0] = 0
1991 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1999 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1992 yield chunk
2000 yield chunk
1993 self.ui.progress(_('bundling'), None)
2001 self.ui.progress(_('bundling'), None)
1994
2002
1995 count[0] = 0
2003 count[0] = 0
1996 for fname in sorted(changedfiles):
2004 for fname in sorted(changedfiles):
1997 filerevlog = self.file(fname)
2005 filerevlog = self.file(fname)
1998 if not len(filerevlog):
2006 if not len(filerevlog):
1999 raise util.Abort(_("empty or missing revlog for %s") % fname)
2007 raise util.Abort(_("empty or missing revlog for %s") % fname)
2000 fstate[0] = fname
2008 fstate[0] = fname
2001 nodelist = gennodelst(filerevlog)
2009 nodelist = gennodelst(filerevlog)
2002 if nodelist:
2010 if nodelist:
2003 count[0] += 1
2011 count[0] += 1
2004 yield bundler.fileheader(fname)
2012 yield bundler.fileheader(fname)
2005 for chunk in filerevlog.group(nodelist, bundler, reorder):
2013 for chunk in filerevlog.group(nodelist, bundler, reorder):
2006 yield chunk
2014 yield chunk
2007 yield bundler.close()
2015 yield bundler.close()
2008 self.ui.progress(_('bundling'), None)
2016 self.ui.progress(_('bundling'), None)
2009
2017
2010 if nodes:
2018 if nodes:
2011 self.hook('outgoing', node=hex(nodes[0]), source=source)
2019 self.hook('outgoing', node=hex(nodes[0]), source=source)
2012
2020
2013 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2021 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2014
2022
2015 def addchangegroup(self, source, srctype, url, emptyok=False):
2023 def addchangegroup(self, source, srctype, url, emptyok=False):
2016 """Add the changegroup returned by source.read() to this repo.
2024 """Add the changegroup returned by source.read() to this repo.
2017 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2025 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2018 the URL of the repo where this changegroup is coming from.
2026 the URL of the repo where this changegroup is coming from.
2019
2027
2020 Return an integer summarizing the change to this repo:
2028 Return an integer summarizing the change to this repo:
2021 - nothing changed or no source: 0
2029 - nothing changed or no source: 0
2022 - more heads than before: 1+added heads (2..n)
2030 - more heads than before: 1+added heads (2..n)
2023 - fewer heads than before: -1-removed heads (-2..-n)
2031 - fewer heads than before: -1-removed heads (-2..-n)
2024 - number of heads stays the same: 1
2032 - number of heads stays the same: 1
2025 """
2033 """
2026 def csmap(x):
2034 def csmap(x):
2027 self.ui.debug("add changeset %s\n" % short(x))
2035 self.ui.debug("add changeset %s\n" % short(x))
2028 return len(cl)
2036 return len(cl)
2029
2037
2030 def revmap(x):
2038 def revmap(x):
2031 return cl.rev(x)
2039 return cl.rev(x)
2032
2040
2033 if not source:
2041 if not source:
2034 return 0
2042 return 0
2035
2043
2036 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2044 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2037
2045
2038 changesets = files = revisions = 0
2046 changesets = files = revisions = 0
2039 efiles = set()
2047 efiles = set()
2040
2048
2041 # write changelog data to temp files so concurrent readers will not see
2049 # write changelog data to temp files so concurrent readers will not see
2042 # inconsistent view
2050 # inconsistent view
2043 cl = self.changelog
2051 cl = self.changelog
2044 cl.delayupdate()
2052 cl.delayupdate()
2045 oldheads = cl.heads()
2053 oldheads = cl.heads()
2046
2054
2047 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2055 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2048 try:
2056 try:
2049 trp = weakref.proxy(tr)
2057 trp = weakref.proxy(tr)
2050 # pull off the changeset group
2058 # pull off the changeset group
2051 self.ui.status(_("adding changesets\n"))
2059 self.ui.status(_("adding changesets\n"))
2052 clstart = len(cl)
2060 clstart = len(cl)
2053 class prog(object):
2061 class prog(object):
2054 step = _('changesets')
2062 step = _('changesets')
2055 count = 1
2063 count = 1
2056 ui = self.ui
2064 ui = self.ui
2057 total = None
2065 total = None
2058 def __call__(self):
2066 def __call__(self):
2059 self.ui.progress(self.step, self.count, unit=_('chunks'),
2067 self.ui.progress(self.step, self.count, unit=_('chunks'),
2060 total=self.total)
2068 total=self.total)
2061 self.count += 1
2069 self.count += 1
2062 pr = prog()
2070 pr = prog()
2063 source.callback = pr
2071 source.callback = pr
2064
2072
2065 source.changelogheader()
2073 source.changelogheader()
2066 srccontent = cl.addgroup(source, csmap, trp)
2074 srccontent = cl.addgroup(source, csmap, trp)
2067 if not (srccontent or emptyok):
2075 if not (srccontent or emptyok):
2068 raise util.Abort(_("received changelog group is empty"))
2076 raise util.Abort(_("received changelog group is empty"))
2069 clend = len(cl)
2077 clend = len(cl)
2070 changesets = clend - clstart
2078 changesets = clend - clstart
2071 for c in xrange(clstart, clend):
2079 for c in xrange(clstart, clend):
2072 efiles.update(self[c].files())
2080 efiles.update(self[c].files())
2073 efiles = len(efiles)
2081 efiles = len(efiles)
2074 self.ui.progress(_('changesets'), None)
2082 self.ui.progress(_('changesets'), None)
2075
2083
2076 # pull off the manifest group
2084 # pull off the manifest group
2077 self.ui.status(_("adding manifests\n"))
2085 self.ui.status(_("adding manifests\n"))
2078 pr.step = _('manifests')
2086 pr.step = _('manifests')
2079 pr.count = 1
2087 pr.count = 1
2080 pr.total = changesets # manifests <= changesets
2088 pr.total = changesets # manifests <= changesets
2081 # no need to check for empty manifest group here:
2089 # no need to check for empty manifest group here:
2082 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2090 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2083 # no new manifest will be created and the manifest group will
2091 # no new manifest will be created and the manifest group will
2084 # be empty during the pull
2092 # be empty during the pull
2085 source.manifestheader()
2093 source.manifestheader()
2086 self.manifest.addgroup(source, revmap, trp)
2094 self.manifest.addgroup(source, revmap, trp)
2087 self.ui.progress(_('manifests'), None)
2095 self.ui.progress(_('manifests'), None)
2088
2096
2089 needfiles = {}
2097 needfiles = {}
2090 if self.ui.configbool('server', 'validate', default=False):
2098 if self.ui.configbool('server', 'validate', default=False):
2091 # validate incoming csets have their manifests
2099 # validate incoming csets have their manifests
2092 for cset in xrange(clstart, clend):
2100 for cset in xrange(clstart, clend):
2093 mfest = self.changelog.read(self.changelog.node(cset))[0]
2101 mfest = self.changelog.read(self.changelog.node(cset))[0]
2094 mfest = self.manifest.readdelta(mfest)
2102 mfest = self.manifest.readdelta(mfest)
2095 # store file nodes we must see
2103 # store file nodes we must see
2096 for f, n in mfest.iteritems():
2104 for f, n in mfest.iteritems():
2097 needfiles.setdefault(f, set()).add(n)
2105 needfiles.setdefault(f, set()).add(n)
2098
2106
2099 # process the files
2107 # process the files
2100 self.ui.status(_("adding file changes\n"))
2108 self.ui.status(_("adding file changes\n"))
2101 pr.step = _('files')
2109 pr.step = _('files')
2102 pr.count = 1
2110 pr.count = 1
2103 pr.total = efiles
2111 pr.total = efiles
2104 source.callback = None
2112 source.callback = None
2105
2113
2106 while True:
2114 while True:
2107 chunkdata = source.filelogheader()
2115 chunkdata = source.filelogheader()
2108 if not chunkdata:
2116 if not chunkdata:
2109 break
2117 break
2110 f = chunkdata["filename"]
2118 f = chunkdata["filename"]
2111 self.ui.debug("adding %s revisions\n" % f)
2119 self.ui.debug("adding %s revisions\n" % f)
2112 pr()
2120 pr()
2113 fl = self.file(f)
2121 fl = self.file(f)
2114 o = len(fl)
2122 o = len(fl)
2115 if not fl.addgroup(source, revmap, trp):
2123 if not fl.addgroup(source, revmap, trp):
2116 raise util.Abort(_("received file revlog group is empty"))
2124 raise util.Abort(_("received file revlog group is empty"))
2117 revisions += len(fl) - o
2125 revisions += len(fl) - o
2118 files += 1
2126 files += 1
2119 if f in needfiles:
2127 if f in needfiles:
2120 needs = needfiles[f]
2128 needs = needfiles[f]
2121 for new in xrange(o, len(fl)):
2129 for new in xrange(o, len(fl)):
2122 n = fl.node(new)
2130 n = fl.node(new)
2123 if n in needs:
2131 if n in needs:
2124 needs.remove(n)
2132 needs.remove(n)
2125 if not needs:
2133 if not needs:
2126 del needfiles[f]
2134 del needfiles[f]
2127 self.ui.progress(_('files'), None)
2135 self.ui.progress(_('files'), None)
2128
2136
2129 for f, needs in needfiles.iteritems():
2137 for f, needs in needfiles.iteritems():
2130 fl = self.file(f)
2138 fl = self.file(f)
2131 for n in needs:
2139 for n in needs:
2132 try:
2140 try:
2133 fl.rev(n)
2141 fl.rev(n)
2134 except error.LookupError:
2142 except error.LookupError:
2135 raise util.Abort(
2143 raise util.Abort(
2136 _('missing file data for %s:%s - run hg verify') %
2144 _('missing file data for %s:%s - run hg verify') %
2137 (f, hex(n)))
2145 (f, hex(n)))
2138
2146
2139 dh = 0
2147 dh = 0
2140 if oldheads:
2148 if oldheads:
2141 heads = cl.heads()
2149 heads = cl.heads()
2142 dh = len(heads) - len(oldheads)
2150 dh = len(heads) - len(oldheads)
2143 for h in heads:
2151 for h in heads:
2144 if h not in oldheads and 'close' in self[h].extra():
2152 if h not in oldheads and 'close' in self[h].extra():
2145 dh -= 1
2153 dh -= 1
2146 htext = ""
2154 htext = ""
2147 if dh:
2155 if dh:
2148 htext = _(" (%+d heads)") % dh
2156 htext = _(" (%+d heads)") % dh
2149
2157
2150 self.ui.status(_("added %d changesets"
2158 self.ui.status(_("added %d changesets"
2151 " with %d changes to %d files%s\n")
2159 " with %d changes to %d files%s\n")
2152 % (changesets, revisions, files, htext))
2160 % (changesets, revisions, files, htext))
2153
2161
2154 if changesets > 0:
2162 if changesets > 0:
2155 p = lambda: cl.writepending() and self.root or ""
2163 p = lambda: cl.writepending() and self.root or ""
2156 self.hook('pretxnchangegroup', throw=True,
2164 self.hook('pretxnchangegroup', throw=True,
2157 node=hex(cl.node(clstart)), source=srctype,
2165 node=hex(cl.node(clstart)), source=srctype,
2158 url=url, pending=p)
2166 url=url, pending=p)
2159
2167
2160 added = [cl.node(r) for r in xrange(clstart, clend)]
2168 added = [cl.node(r) for r in xrange(clstart, clend)]
2161 publishing = self.ui.configbool('phases', 'publish', True)
2169 publishing = self.ui.configbool('phases', 'publish', True)
2162 if srctype == 'push':
2170 if srctype == 'push':
2163 # Old servers can not push the boundary themselves.
2171 # Old servers can not push the boundary themselves.
2164 # New servers won't push the boundary if the changeset already
2172 # New servers won't push the boundary if the changeset already
2165 # existed locally as secret
2173 # existed locally as secret
2166 #
2174 #
2167 # We should not use 'added' here but the list of all changes in
2175 # We should not use 'added' here but the list of all changes in
2168 # the bundle
2176 # the bundle
2169 if publishing:
2177 if publishing:
2170 phases.advanceboundary(self, phases.public, srccontent)
2178 phases.advanceboundary(self, phases.public, srccontent)
2171 else:
2179 else:
2172 phases.advanceboundary(self, phases.draft, srccontent)
2180 phases.advanceboundary(self, phases.draft, srccontent)
2173 phases.retractboundary(self, phases.draft, added)
2181 phases.retractboundary(self, phases.draft, added)
2174 elif srctype != 'strip':
2182 elif srctype != 'strip':
2175 # publishing only alters behavior during push
2183 # publishing only alters behavior during push
2176 #
2184 #
2177 # strip should not touch boundary at all
2185 # strip should not touch boundary at all
2178 phases.retractboundary(self, phases.draft, added)
2186 phases.retractboundary(self, phases.draft, added)
2179
2187
2180 # make changelog see real files again
2188 # make changelog see real files again
2181 cl.finalize(trp)
2189 cl.finalize(trp)
2182
2190
2183 tr.close()
2191 tr.close()
2184
2192
2185 if changesets > 0:
2193 if changesets > 0:
2186 def runhooks():
2194 def runhooks():
2187 # forcefully update the on-disk branch cache
2195 # forcefully update the on-disk branch cache
2188 self.ui.debug("updating the branch cache\n")
2196 self.ui.debug("updating the branch cache\n")
2189 self.updatebranchcache()
2197 self.updatebranchcache()
2190 self.hook("changegroup", node=hex(cl.node(clstart)),
2198 self.hook("changegroup", node=hex(cl.node(clstart)),
2191 source=srctype, url=url)
2199 source=srctype, url=url)
2192
2200
2193 for n in added:
2201 for n in added:
2194 self.hook("incoming", node=hex(n), source=srctype,
2202 self.hook("incoming", node=hex(n), source=srctype,
2195 url=url)
2203 url=url)
2196 self._afterlock(runhooks)
2204 self._afterlock(runhooks)
2197
2205
2198 finally:
2206 finally:
2199 tr.release()
2207 tr.release()
2200 # never return 0 here:
2208 # never return 0 here:
2201 if dh < 0:
2209 if dh < 0:
2202 return dh - 1
2210 return dh - 1
2203 else:
2211 else:
2204 return dh + 1
2212 return dh + 1
2205
2213
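The integer contract documented in the addchangegroup() docstring can be decoded mechanically; a small helper sketch (name hypothetical):

    def describechange(ret):
        # 0: nothing changed; 1: same number of heads;
        # 1+n: n heads added; -1-n: n heads removed (never 0 after real work)
        if ret == 0:
            return 'nothing changed'
        if ret > 0:
            return '%d heads added' % (ret - 1)
        return '%d heads removed' % (-ret - 1)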
2206 def stream_in(self, remote, requirements):
2214 def stream_in(self, remote, requirements):
2207 lock = self.lock()
2215 lock = self.lock()
2208 try:
2216 try:
2209 fp = remote.stream_out()
2217 fp = remote.stream_out()
2210 l = fp.readline()
2218 l = fp.readline()
2211 try:
2219 try:
2212 resp = int(l)
2220 resp = int(l)
2213 except ValueError:
2221 except ValueError:
2214 raise error.ResponseError(
2222 raise error.ResponseError(
2215 _('Unexpected response from remote server:'), l)
2223 _('Unexpected response from remote server:'), l)
2216 if resp == 1:
2224 if resp == 1:
2217 raise util.Abort(_('operation forbidden by server'))
2225 raise util.Abort(_('operation forbidden by server'))
2218 elif resp == 2:
2226 elif resp == 2:
2219 raise util.Abort(_('locking the remote repository failed'))
2227 raise util.Abort(_('locking the remote repository failed'))
2220 elif resp != 0:
2228 elif resp != 0:
2221 raise util.Abort(_('the server sent an unknown error code'))
2229 raise util.Abort(_('the server sent an unknown error code'))
2222 self.ui.status(_('streaming all changes\n'))
2230 self.ui.status(_('streaming all changes\n'))
2223 l = fp.readline()
2231 l = fp.readline()
2224 try:
2232 try:
2225 total_files, total_bytes = map(int, l.split(' ', 1))
2233 total_files, total_bytes = map(int, l.split(' ', 1))
2226 except (ValueError, TypeError):
2234 except (ValueError, TypeError):
2227 raise error.ResponseError(
2235 raise error.ResponseError(
2228 _('Unexpected response from remote server:'), l)
2236 _('Unexpected response from remote server:'), l)
2229 self.ui.status(_('%d files to transfer, %s of data\n') %
2237 self.ui.status(_('%d files to transfer, %s of data\n') %
2230 (total_files, util.bytecount(total_bytes)))
2238 (total_files, util.bytecount(total_bytes)))
2231 start = time.time()
2239 start = time.time()
2232 for i in xrange(total_files):
2240 for i in xrange(total_files):
2233 # XXX doesn't support '\n' or '\r' in filenames
2241 # XXX doesn't support '\n' or '\r' in filenames
2234 l = fp.readline()
2242 l = fp.readline()
2235 try:
2243 try:
2236 name, size = l.split('\0', 1)
2244 name, size = l.split('\0', 1)
2237 size = int(size)
2245 size = int(size)
2238 except (ValueError, TypeError):
2246 except (ValueError, TypeError):
2239 raise error.ResponseError(
2247 raise error.ResponseError(
2240 _('Unexpected response from remote server:'), l)
2248 _('Unexpected response from remote server:'), l)
2241 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2249 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2242 # for backwards compat, name was partially encoded
2250 # for backwards compat, name was partially encoded
2243 ofp = self.sopener(store.decodedir(name), 'w')
2251 ofp = self.sopener(store.decodedir(name), 'w')
2244 for chunk in util.filechunkiter(fp, limit=size):
2252 for chunk in util.filechunkiter(fp, limit=size):
2245 ofp.write(chunk)
2253 ofp.write(chunk)
2246 ofp.close()
2254 ofp.close()
2247 elapsed = time.time() - start
2255 elapsed = time.time() - start
2248 if elapsed <= 0:
2256 if elapsed <= 0:
2249 elapsed = 0.001
2257 elapsed = 0.001
2250 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2258 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2251 (util.bytecount(total_bytes), elapsed,
2259 (util.bytecount(total_bytes), elapsed,
2252 util.bytecount(total_bytes / elapsed)))
2260 util.bytecount(total_bytes / elapsed)))
2253
2261
2254 # new requirements = old non-format requirements + new format-related
2262 # new requirements = old non-format requirements + new format-related
2255 # requirements from the streamed-in repository
2263 # requirements from the streamed-in repository
2256 requirements.update(set(self.requirements) - self.supportedformats)
2264 requirements.update(set(self.requirements) - self.supportedformats)
2257 self._applyrequirements(requirements)
2265 self._applyrequirements(requirements)
2258 self._writerequirements()
2266 self._writerequirements()
2259
2267
2260 self.invalidate()
2268 self.invalidate()
2261 return len(self.heads()) + 1
2269 return len(self.heads()) + 1
2262 finally:
2270 finally:
2263 lock.release()
2271 lock.release()
2264
2272
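The stream_out payload parsed above is line-framed: a status line (0 ok, 1 forbidden, 2 remote locking failed), a "total_files total_bytes" line, then per file a "name\0size" line followed by exactly size raw bytes. A sketch of the header parse:

    def readstreamheader(fp):
        # status line: anything but 0 is an error, as handled above
        resp = int(fp.readline())
        if resp != 0:
            raise ValueError('streaming refused, server code %d' % resp)
        total_files, total_bytes = map(int, fp.readline().split(' ', 1))
        return total_files, total_bytes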
2265 def clone(self, remote, heads=[], stream=False):
2273 def clone(self, remote, heads=[], stream=False):
2266 '''clone remote repository.
2274 '''clone remote repository.
2267
2275
2268 keyword arguments:
2276 keyword arguments:
2269 heads: list of revs to clone (forces use of pull)
2277 heads: list of revs to clone (forces use of pull)
2270 stream: use streaming clone if possible'''
2278 stream: use streaming clone if possible'''
2271
2279
2272 # now, all clients that can request uncompressed clones can
2280 # now, all clients that can request uncompressed clones can
2273 # read repo formats supported by all servers that can serve
2281 # read repo formats supported by all servers that can serve
2274 # them.
2282 # them.
2275
2283
2276 # if revlog format changes, client will have to check version
2284 # if revlog format changes, client will have to check version
2277 # and format flags on "stream" capability, and use
2285 # and format flags on "stream" capability, and use
2278 # uncompressed only if compatible.
2286 # uncompressed only if compatible.
2279
2287
2280 if stream and not heads:
2288 if stream and not heads:
2281 # 'stream' means remote revlog format is revlogv1 only
2289 # 'stream' means remote revlog format is revlogv1 only
2282 if remote.capable('stream'):
2290 if remote.capable('stream'):
2283 return self.stream_in(remote, set(('revlogv1',)))
2291 return self.stream_in(remote, set(('revlogv1',)))
2284 # otherwise, 'streamreqs' contains the remote revlog format
2292 # otherwise, 'streamreqs' contains the remote revlog format
2285 streamreqs = remote.capable('streamreqs')
2293 streamreqs = remote.capable('streamreqs')
2286 if streamreqs:
2294 if streamreqs:
2287 streamreqs = set(streamreqs.split(','))
2295 streamreqs = set(streamreqs.split(','))
2288 # if we support it, stream in and adjust our requirements
2296 # if we support it, stream in and adjust our requirements
2289 if not streamreqs - self.supportedformats:
2297 if not streamreqs - self.supportedformats:
2290 return self.stream_in(remote, streamreqs)
2298 return self.stream_in(remote, streamreqs)
2291 return self.pull(remote, heads)
2299 return self.pull(remote, heads)
2292
2300
2293 def pushkey(self, namespace, key, old, new):
2301 def pushkey(self, namespace, key, old, new):
2294 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2302 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2295 old=old, new=new)
2303 old=old, new=new)
2296 ret = pushkey.push(self, namespace, key, old, new)
2304 ret = pushkey.push(self, namespace, key, old, new)
2297 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2305 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2298 ret=ret)
2306 ret=ret)
2299 return ret
2307 return ret
2300
2308
2301 def listkeys(self, namespace):
2309 def listkeys(self, namespace):
2302 self.hook('prelistkeys', throw=True, namespace=namespace)
2310 self.hook('prelistkeys', throw=True, namespace=namespace)
2303 values = pushkey.list(self, namespace)
2311 values = pushkey.list(self, namespace)
2304 self.hook('listkeys', namespace=namespace, values=values)
2312 self.hook('listkeys', namespace=namespace, values=values)
2305 return values
2313 return values
2306
2314
2307 def debugwireargs(self, one, two, three=None, four=None, five=None):
2315 def debugwireargs(self, one, two, three=None, four=None, five=None):
2308 '''used to test argument passing over the wire'''
2316 '''used to test argument passing over the wire'''
2309 return "%s %s %s %s %s" % (one, two, three, four, five)
2317 return "%s %s %s %s %s" % (one, two, three, four, five)
2310
2318
2311 def savecommitmessage(self, text):
2319 def savecommitmessage(self, text):
2312 fp = self.opener('last-message.txt', 'wb')
2320 fp = self.opener('last-message.txt', 'wb')
2313 try:
2321 try:
2314 fp.write(text)
2322 fp.write(text)
2315 finally:
2323 finally:
2316 fp.close()
2324 fp.close()
2317 return self.pathto(fp.name[len(self.root)+1:])
2325 return self.pathto(fp.name[len(self.root)+1:])
2318
2326
2319 # used to avoid circular references so destructors work
2327 # used to avoid circular references so destructors work
2320 def aftertrans(files):
2328 def aftertrans(files):
2321 renamefiles = [tuple(t) for t in files]
2329 renamefiles = [tuple(t) for t in files]
2322 def a():
2330 def a():
2323 for src, dest in renamefiles:
2331 for src, dest in renamefiles:
2324 util.rename(src, dest)
2332 util.rename(src, dest)
2325 return a
2333 return a
2326
2334
2327 def undoname(fn):
2335 def undoname(fn):
2328 base, name = os.path.split(fn)
2336 base, name = os.path.split(fn)
2329 assert name.startswith('journal')
2337 assert name.startswith('journal')
2330 return os.path.join(base, name.replace('journal', 'undo', 1))
2338 return os.path.join(base, name.replace('journal', 'undo', 1))
2331
2339
2332 def instance(ui, path, create):
2340 def instance(ui, path, create):
2333 return localrepository(ui, util.urllocalpath(path), create)
2341 return localrepository(ui, util.urllocalpath(path), create)
2334
2342
2335 def islocal(path):
2343 def islocal(path):
2336 return True
2344 return True
@@ -1,848 +1,859 b''
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 import util, error, osutil, revset, similar, encoding
9 import util, error, osutil, revset, similar, encoding
10 import match as matchmod
10 import match as matchmod
11 import os, errno, re, stat, sys, glob
11 import os, errno, re, stat, sys, glob
12
12
13 def nochangesfound(ui, secretlist=None):
13 def nochangesfound(ui, secretlist=None):
14 '''report no changes for push/pull'''
14 '''report no changes for push/pull'''
15 if secretlist:
15 if secretlist:
16 ui.status(_("no changes found (ignored %d secret changesets)\n")
16 ui.status(_("no changes found (ignored %d secret changesets)\n")
17 % len(secretlist))
17 % len(secretlist))
18 else:
18 else:
19 ui.status(_("no changes found\n"))
19 ui.status(_("no changes found\n"))
20
20
21 def checkfilename(f):
21 def checkfilename(f):
22 '''Check that the filename f is an acceptable filename for a tracked file'''
22 '''Check that the filename f is an acceptable filename for a tracked file'''
23 if '\r' in f or '\n' in f:
23 if '\r' in f or '\n' in f:
24 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
24 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
25
25
26 def checkportable(ui, f):
26 def checkportable(ui, f):
27 '''Check if filename f is portable and warn or abort depending on config'''
27 '''Check if filename f is portable and warn or abort depending on config'''
28 checkfilename(f)
28 checkfilename(f)
29 abort, warn = checkportabilityalert(ui)
29 abort, warn = checkportabilityalert(ui)
30 if abort or warn:
30 if abort or warn:
31 msg = util.checkwinfilename(f)
31 msg = util.checkwinfilename(f)
32 if msg:
32 if msg:
33 msg = "%s: %r" % (msg, f)
33 msg = "%s: %r" % (msg, f)
34 if abort:
34 if abort:
35 raise util.Abort(msg)
35 raise util.Abort(msg)
36 ui.warn(_("warning: %s\n") % msg)
36 ui.warn(_("warning: %s\n") % msg)
37
37
38 def checkportabilityalert(ui):
38 def checkportabilityalert(ui):
39 '''check if the user's config requests nothing, a warning, or abort for
39 '''check if the user's config requests nothing, a warning, or abort for
40 non-portable filenames'''
40 non-portable filenames'''
41 val = ui.config('ui', 'portablefilenames', 'warn')
41 val = ui.config('ui', 'portablefilenames', 'warn')
42 lval = val.lower()
42 lval = val.lower()
43 bval = util.parsebool(val)
43 bval = util.parsebool(val)
44 abort = os.name == 'nt' or lval == 'abort'
44 abort = os.name == 'nt' or lval == 'abort'
45 warn = bval or lval == 'warn'
45 warn = bval or lval == 'warn'
46 if bval is None and not (warn or abort or lval == 'ignore'):
46 if bval is None and not (warn or abort or lval == 'ignore'):
47 raise error.ConfigError(
47 raise error.ConfigError(
48 _("ui.portablefilenames value is invalid ('%s')") % val)
48 _("ui.portablefilenames value is invalid ('%s')") % val)
49 return abort, warn
49 return abort, warn
50
50
51 class casecollisionauditor(object):
51 class casecollisionauditor(object):
52 def __init__(self, ui, abort, existingiter):
52 def __init__(self, ui, abort, existingiter):
53 self._ui = ui
53 self._ui = ui
54 self._abort = abort
54 self._abort = abort
55 self._map = {}
55 self._map = {}
56 for f in existingiter:
56 for f in existingiter:
57 self._map[encoding.lower(f)] = f
57 self._map[encoding.lower(f)] = f
58
58
59 def __call__(self, f):
59 def __call__(self, f):
60 fl = encoding.lower(f)
60 fl = encoding.lower(f)
61 map = self._map
61 map = self._map
62 if fl in map and map[fl] != f:
62 if fl in map and map[fl] != f:
63 msg = _('possible case-folding collision for %s') % f
63 msg = _('possible case-folding collision for %s') % f
64 if self._abort:
64 if self._abort:
65 raise util.Abort(msg)
65 raise util.Abort(msg)
66 self._ui.warn(_("warning: %s\n") % msg)
66 self._ui.warn(_("warning: %s\n") % msg)
67 map[fl] = f
67 map[fl] = f
68
68
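Usage sketch for the auditor above: seeded with the files already tracked, it flags any later name that differs only by case (ui is assumed to be a ui object, as elsewhere in this file):

    auditor = casecollisionauditor(ui, False, ['README'])
    auditor('readme')   # lowercases to a seeded key -> collision warning
    auditor('README')   # exact match with the seeded name -> no warning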
69 class pathauditor(object):
69 class pathauditor(object):
70 '''ensure that a filesystem path contains no banned components.
70 '''ensure that a filesystem path contains no banned components.
71 the following properties of a path are checked:
71 the following properties of a path are checked:
72
72
73 - ends with a directory separator
73 - ends with a directory separator
74 - under top-level .hg
74 - under top-level .hg
75 - starts at the root of a windows drive
75 - starts at the root of a windows drive
76 - contains ".."
76 - contains ".."
77 - traverses a symlink (e.g. a/symlink_here/b)
77 - traverses a symlink (e.g. a/symlink_here/b)
78 - inside a nested repository (a callback can be used to approve
78 - inside a nested repository (a callback can be used to approve
79 some nested repositories, e.g., subrepositories)
79 some nested repositories, e.g., subrepositories)
80 '''
80 '''
81
81
82 def __init__(self, root, callback=None):
82 def __init__(self, root, callback=None):
83 self.audited = set()
83 self.audited = set()
84 self.auditeddir = set()
84 self.auditeddir = set()
85 self.root = root
85 self.root = root
86 self.callback = callback
86 self.callback = callback
87 if os.path.lexists(root) and not util.checkcase(root):
87 if os.path.lexists(root) and not util.checkcase(root):
88 self.normcase = util.normcase
88 self.normcase = util.normcase
89 else:
89 else:
90 self.normcase = lambda x: x
90 self.normcase = lambda x: x
91
91
92 def __call__(self, path):
92 def __call__(self, path):
93 '''Check the relative path.
93 '''Check the relative path.
94 path may contain a pattern (e.g. foodir/**.txt)'''
94 path may contain a pattern (e.g. foodir/**.txt)'''
95
95
96 path = util.localpath(path)
96 path = util.localpath(path)
97 normpath = self.normcase(path)
97 normpath = self.normcase(path)
98 if normpath in self.audited:
98 if normpath in self.audited:
99 return
99 return
100 # AIX ignores "/" at end of path, others raise EISDIR.
100 # AIX ignores "/" at end of path, others raise EISDIR.
101 if util.endswithsep(path):
101 if util.endswithsep(path):
102 raise util.Abort(_("path ends in directory separator: %s") % path)
102 raise util.Abort(_("path ends in directory separator: %s") % path)
103 parts = util.splitpath(path)
103 parts = util.splitpath(path)
104 if (os.path.splitdrive(path)[0]
104 if (os.path.splitdrive(path)[0]
105 or parts[0].lower() in ('.hg', '.hg.', '')
105 or parts[0].lower() in ('.hg', '.hg.', '')
106 or os.pardir in parts):
106 or os.pardir in parts):
107 raise util.Abort(_("path contains illegal component: %s") % path)
107 raise util.Abort(_("path contains illegal component: %s") % path)
108 if '.hg' in path.lower():
108 if '.hg' in path.lower():
109 lparts = [p.lower() for p in parts]
109 lparts = [p.lower() for p in parts]
110 for p in '.hg', '.hg.':
110 for p in '.hg', '.hg.':
111 if p in lparts[1:]:
111 if p in lparts[1:]:
112 pos = lparts.index(p)
112 pos = lparts.index(p)
113 base = os.path.join(*parts[:pos])
113 base = os.path.join(*parts[:pos])
114 raise util.Abort(_("path '%s' is inside nested repo %r")
114 raise util.Abort(_("path '%s' is inside nested repo %r")
115 % (path, base))
115 % (path, base))
116
116
117 normparts = util.splitpath(normpath)
117 normparts = util.splitpath(normpath)
118 assert len(parts) == len(normparts)
118 assert len(parts) == len(normparts)
119
119
120 parts.pop()
120 parts.pop()
121 normparts.pop()
121 normparts.pop()
122 prefixes = []
122 prefixes = []
123 while parts:
123 while parts:
124 prefix = os.sep.join(parts)
124 prefix = os.sep.join(parts)
125 normprefix = os.sep.join(normparts)
125 normprefix = os.sep.join(normparts)
126 if normprefix in self.auditeddir:
126 if normprefix in self.auditeddir:
127 break
127 break
128 curpath = os.path.join(self.root, prefix)
128 curpath = os.path.join(self.root, prefix)
129 try:
129 try:
130 st = os.lstat(curpath)
130 st = os.lstat(curpath)
131 except OSError, err:
131 except OSError, err:
132 # EINVAL can be raised as invalid path syntax under win32.
132 # EINVAL can be raised as invalid path syntax under win32.
133 # They must be ignored so that patterns can still be checked.
133 # They must be ignored so that patterns can still be checked.
134 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
134 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
135 raise
135 raise
136 else:
136 else:
137 if stat.S_ISLNK(st.st_mode):
137 if stat.S_ISLNK(st.st_mode):
138 raise util.Abort(
138 raise util.Abort(
139 _('path %r traverses symbolic link %r')
139 _('path %r traverses symbolic link %r')
140 % (path, prefix))
140 % (path, prefix))
141 elif (stat.S_ISDIR(st.st_mode) and
141 elif (stat.S_ISDIR(st.st_mode) and
142 os.path.isdir(os.path.join(curpath, '.hg'))):
142 os.path.isdir(os.path.join(curpath, '.hg'))):
143 if not self.callback or not self.callback(curpath):
143 if not self.callback or not self.callback(curpath):
144 raise util.Abort(_("path '%s' is inside nested repo %r") %
144 raise util.Abort(_("path '%s' is inside nested repo %r") %
145 (path, prefix))
145 (path, prefix))
146 prefixes.append(normprefix)
146 prefixes.append(normprefix)
147 parts.pop()
147 parts.pop()
148 normparts.pop()
148 normparts.pop()
149
149
150 self.audited.add(normpath)
150 self.audited.add(normpath)
151 # only add prefixes to the cache after checking everything: we don't
151 # only add prefixes to the cache after checking everything: we don't
152 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
152 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
153 self.auditeddir.update(prefixes)
153 self.auditeddir.update(prefixes)
154
154
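Behaviour sketch for pathauditor, assuming a repository rooted at /repo; the banned components listed in the docstring above each map to a util.Abort:

    audit = pathauditor('/repo')
    audit('src/module.py')    # ok; cached in audit.audited afterwards
    audit('.hg/store/data')   # util.Abort: path contains illegal component
    audit('a/../../etc')      # util.Abort: '..' components are banned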
155 class abstractopener(object):
155 class abstractopener(object):
156 """Abstract base class; cannot be instantiated"""
156 """Abstract base class; cannot be instantiated"""
157
157
158 def __init__(self, *args, **kwargs):
158 def __init__(self, *args, **kwargs):
159 '''Prevent instantiation; don't call this from subclasses.'''
159 '''Prevent instantiation; don't call this from subclasses.'''
160 raise NotImplementedError('attempted instantiating ' + str(type(self)))
160 raise NotImplementedError('attempted instantiating ' + str(type(self)))
161
161
162 def read(self, path):
162 def read(self, path):
163 fp = self(path, 'rb')
163 fp = self(path, 'rb')
164 try:
164 try:
165 return fp.read()
165 return fp.read()
166 finally:
166 finally:
167 fp.close()
167 fp.close()
168
168
169 def write(self, path, data):
169 def write(self, path, data):
170 fp = self(path, 'wb')
170 fp = self(path, 'wb')
171 try:
171 try:
172 return fp.write(data)
172 return fp.write(data)
173 finally:
173 finally:
174 fp.close()
174 fp.close()
175
175
176 def append(self, path, data):
176 def append(self, path, data):
177 fp = self(path, 'ab')
177 fp = self(path, 'ab')
178 try:
178 try:
179 return fp.write(data)
179 return fp.write(data)
180 finally:
180 finally:
181 fp.close()
181 fp.close()
182
182
183 class opener(abstractopener):
183 class opener(abstractopener):
184 '''Open files relative to a base directory
184 '''Open files relative to a base directory
185
185
186 This class is used to hide the details of COW semantics and
186 This class is used to hide the details of COW semantics and
187 remote file access from higher level code.
187 remote file access from higher level code.
188 '''
188 '''
189 def __init__(self, base, audit=True):
189 def __init__(self, base, audit=True):
190 self.base = base
190 self.base = base
191 self._audit = audit
191 self._audit = audit
192 if audit:
192 if audit:
193 self.auditor = pathauditor(base)
193 self.auditor = pathauditor(base)
194 else:
194 else:
195 self.auditor = util.always
195 self.auditor = util.always
196 self.createmode = None
196 self.createmode = None
197 self._trustnlink = None
197 self._trustnlink = None
198
198
199 @util.propertycache
199 @util.propertycache
200 def _cansymlink(self):
200 def _cansymlink(self):
201 return util.checklink(self.base)
201 return util.checklink(self.base)
202
202
203 def _fixfilemode(self, name):
203 def _fixfilemode(self, name):
204 if self.createmode is None:
204 if self.createmode is None:
205 return
205 return
206 os.chmod(name, self.createmode & 0666)
206 os.chmod(name, self.createmode & 0666)
207
207
208 def __call__(self, path, mode="r", text=False, atomictemp=False):
208 def __call__(self, path, mode="r", text=False, atomictemp=False):
209 if self._audit:
209 if self._audit:
210 r = util.checkosfilename(path)
210 r = util.checkosfilename(path)
211 if r:
211 if r:
212 raise util.Abort("%s: %r" % (r, path))
212 raise util.Abort("%s: %r" % (r, path))
213 self.auditor(path)
213 self.auditor(path)
214 f = os.path.join(self.base, path)
214 f = self.join(path)
215
215
216 if not text and "b" not in mode:
216 if not text and "b" not in mode:
217 mode += "b" # for that other OS
217 mode += "b" # for that other OS
218
218
219 nlink = -1
219 nlink = -1
220 dirname, basename = os.path.split(f)
220 dirname, basename = os.path.split(f)
221 # If basename is empty, then the path is malformed because it points
221 # If basename is empty, then the path is malformed because it points
222 # to a directory. Let the posixfile() call below raise IOError.
222 # to a directory. Let the posixfile() call below raise IOError.
223 if basename and mode not in ('r', 'rb'):
223 if basename and mode not in ('r', 'rb'):
224 if atomictemp:
224 if atomictemp:
225 if not os.path.isdir(dirname):
225 if not os.path.isdir(dirname):
226 util.makedirs(dirname, self.createmode)
226 util.makedirs(dirname, self.createmode)
227 return util.atomictempfile(f, mode, self.createmode)
227 return util.atomictempfile(f, mode, self.createmode)
228 try:
228 try:
229 if 'w' in mode:
229 if 'w' in mode:
230 util.unlink(f)
230 util.unlink(f)
231 nlink = 0
231 nlink = 0
232 else:
232 else:
233 # nlinks() may behave differently for files on Windows
233 # nlinks() may behave differently for files on Windows
234 # shares if the file is open.
234 # shares if the file is open.
235 fd = util.posixfile(f)
235 fd = util.posixfile(f)
236 nlink = util.nlinks(f)
236 nlink = util.nlinks(f)
237 if nlink < 1:
237 if nlink < 1:
238 nlink = 2 # force mktempcopy (issue1922)
238 nlink = 2 # force mktempcopy (issue1922)
239 fd.close()
239 fd.close()
240 except (OSError, IOError), e:
240 except (OSError, IOError), e:
241 if e.errno != errno.ENOENT:
241 if e.errno != errno.ENOENT:
242 raise
242 raise
243 nlink = 0
243 nlink = 0
244 if not os.path.isdir(dirname):
244 if not os.path.isdir(dirname):
245 util.makedirs(dirname, self.createmode)
245 util.makedirs(dirname, self.createmode)
246 if nlink > 0:
246 if nlink > 0:
247 if self._trustnlink is None:
247 if self._trustnlink is None:
248 self._trustnlink = nlink > 1 or util.checknlink(f)
248 self._trustnlink = nlink > 1 or util.checknlink(f)
249 if nlink > 1 or not self._trustnlink:
249 if nlink > 1 or not self._trustnlink:
250 util.rename(util.mktempcopy(f), f)
250 util.rename(util.mktempcopy(f), f)
251 fp = util.posixfile(f, mode)
251 fp = util.posixfile(f, mode)
252 if nlink == 0:
252 if nlink == 0:
253 self._fixfilemode(f)
253 self._fixfilemode(f)
254 return fp
254 return fp
255
255
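The nlink bookkeeping above implements copy-on-write for hardlinked stores (e.g. after a local clone): writing through a shared hardlink would modify the other repository too, so the link must be broken first. A condensed sketch of the decision:

    def needsprivatecopy(f, trustnlink):
        # more than one link, or an untrustworthy link count (checknlink
        # failed), means copy via util.mktempcopy() + rename before writing
        return util.nlinks(f) > 1 or not trustnlink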
256 def symlink(self, src, dst):
256 def symlink(self, src, dst):
257 self.auditor(dst)
257 self.auditor(dst)
258 linkname = os.path.join(self.base, dst)
258 linkname = self.join(dst)
259 try:
259 try:
260 os.unlink(linkname)
260 os.unlink(linkname)
261 except OSError:
261 except OSError:
262 pass
262 pass
263
263
264 dirname = os.path.dirname(linkname)
264 dirname = os.path.dirname(linkname)
265 if not os.path.exists(dirname):
265 if not os.path.exists(dirname):
266 util.makedirs(dirname, self.createmode)
266 util.makedirs(dirname, self.createmode)
267
267
268 if self._cansymlink:
268 if self._cansymlink:
269 try:
269 try:
270 os.symlink(src, linkname)
270 os.symlink(src, linkname)
271 except OSError, err:
271 except OSError, err:
272 raise OSError(err.errno, _('could not symlink to %r: %s') %
272 raise OSError(err.errno, _('could not symlink to %r: %s') %
273 (src, err.strerror), linkname)
273 (src, err.strerror), linkname)
274 else:
274 else:
275 f = self(dst, "w")
275 f = self(dst, "w")
276 f.write(src)
276 f.write(src)
277 f.close()
277 f.close()
278 self._fixfilemode(dst)
278 self._fixfilemode(dst)
279
279
280 def audit(self, path):
280 def audit(self, path):
281 self.auditor(path)
281 self.auditor(path)
282
282
283 def join(self, path):
284 return os.path.join(self.base, path)
285
283 class filteropener(abstractopener):
286 class filteropener(abstractopener):
284 '''Wrapper opener for filtering filenames with a function.'''
287 '''Wrapper opener for filtering filenames with a function.'''
285
288
286 def __init__(self, opener, filter):
289 def __init__(self, opener, filter):
287 self._filter = filter
290 self._filter = filter
288 self._orig = opener
291 self._orig = opener
289
292
290 def __call__(self, path, *args, **kwargs):
293 def __call__(self, path, *args, **kwargs):
291 return self._orig(self._filter(path), *args, **kwargs)
294 return self._orig(self._filter(path), *args, **kwargs)
292
295
def canonpath(root, cwd, myname, auditor=None):
    '''return the canonical path of myname, given cwd and root'''
    if util.endswithsep(root):
        rootsep = root
    else:
        rootsep = root + os.sep
    name = myname
    if not os.path.isabs(name):
        name = os.path.join(root, cwd, name)
    name = os.path.normpath(name)
    if auditor is None:
        auditor = pathauditor(root)
    if name != rootsep and name.startswith(rootsep):
        name = name[len(rootsep):]
        auditor(name)
        return util.pconvert(name)
    elif name == root:
        return ''
    else:
        # Determine whether `name' is in the hierarchy at or beneath `root',
        # by iterating name=dirname(name) until that causes no change (can't
        # check name == '/', because that doesn't work on windows). For each
        # `name', compare dev/inode numbers. If they match, the list `rel'
        # holds the reversed list of components making up the relative file
        # name we want.
        root_st = os.stat(root)
        rel = []
        while True:
            try:
                name_st = os.stat(name)
            except OSError:
                name_st = None
            if name_st and util.samestat(name_st, root_st):
                if not rel:
                    # name was actually the same as root (maybe a symlink)
                    return ''
                rel.reverse()
                name = os.path.join(*rel)
                auditor(name)
                return util.pconvert(name)
            dirname, basename = os.path.split(name)
            rel.append(basename)
            if dirname == name:
                break
            name = dirname

        raise util.Abort('%s not under root' % myname)

def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, recursively.'''
    def errhandler(err):
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs

def osrcpath():
    '''return default os-specific hgrc search path'''
    path = systemrcpath()
    path.extend(userrcpath())
    path = [os.path.normpath(f) for f in path]
    return path

_rcpath = None

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                if not p:
                    continue
                p = util.expandpath(p)
                if os.path.isdir(p):
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = osrcpath()
    return _rcpath

if os.name != 'nt':

    def rcfiles(path):
        rcs = [os.path.join(path, 'hgrc')]
        rcdir = os.path.join(path, 'hgrc.d')
        try:
            rcs.extend([os.path.join(rcdir, f)
                        for f, kind in osutil.listdir(rcdir)
                        if f.endswith(".rc")])
        except OSError:
            pass
        return rcs

    def systemrcpath():
        path = []
        # old mod_python does not set sys.argv
        if len(getattr(sys, 'argv', [])) > 0:
            p = os.path.dirname(os.path.dirname(sys.argv[0]))
            path.extend(rcfiles(os.path.join(p, 'etc/mercurial')))
        path.extend(rcfiles('/etc/mercurial'))
        return path

    def userrcpath():
        return [os.path.expanduser('~/.hgrc')]

else:

    _HKEY_LOCAL_MACHINE = 0x80000002L

    def systemrcpath():
        '''return default os-specific hgrc search path'''
        rcpath = []
        filename = util.executablepath()
        # Use mercurial.ini found in directory with hg.exe
        progrc = os.path.join(os.path.dirname(filename), 'mercurial.ini')
        if os.path.isfile(progrc):
            rcpath.append(progrc)
            return rcpath
        # Use hgrc.d found in directory with hg.exe
        progrcd = os.path.join(os.path.dirname(filename), 'hgrc.d')
        if os.path.isdir(progrcd):
            for f, kind in osutil.listdir(progrcd):
                if f.endswith('.rc'):
                    rcpath.append(os.path.join(progrcd, f))
            return rcpath
        # else look for a system rcpath in the registry
        value = util.lookupreg('SOFTWARE\\Mercurial', None,
                               _HKEY_LOCAL_MACHINE)
        if not isinstance(value, str) or not value:
            return rcpath
        value = util.localpath(value)
        for p in value.split(os.pathsep):
            if p.lower().endswith('mercurial.ini'):
                rcpath.append(p)
            elif os.path.isdir(p):
                for f, kind in osutil.listdir(p):
                    if f.endswith('.rc'):
                        rcpath.append(os.path.join(p, f))
        return rcpath

    def userrcpath():
        '''return os-specific hgrc search path to the user dir'''
        home = os.path.expanduser('~')
        path = [os.path.join(home, 'mercurial.ini'),
                os.path.join(home, '.hgrc')]
        userprofile = os.environ.get('USERPROFILE')
        if userprofile:
            path.append(os.path.join(userprofile, 'mercurial.ini'))
            path.append(os.path.join(userprofile, '.hgrc'))
        return path

def revsingle(repo, revspec, default='.'):
    if not revspec:
        return repo[default]

    l = revrange(repo, [revspec])
    if len(l) < 1:
        raise util.Abort(_('empty revision set'))
    return repo[l[-1]]

def revpair(repo, revs):
    if not revs:
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    if len(l) == 0:
        return repo.dirstate.p1(), None

    if len(l) == 1:
        return repo.lookup(l[0]), None

    return repo.lookup(l[0]), repo.lookup(l[-1])

_revrangesep = ':'

def revrange(repo, revs):
    """Yield revision as strings from a list of revision specifications."""

    def revfix(repo, val, defval):
        if not val and val != 0 and defval is not None:
            return defval
        return repo.changelog.rev(repo.lookup(val))

    seen, l = set(), []
    for spec in revs:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        try:
            if isinstance(spec, int):
                seen.add(spec)
                l.append(spec)
                continue

            if _revrangesep in spec:
                start, end = spec.split(_revrangesep, 1)
                start = revfix(repo, start, 0)
                end = revfix(repo, end, len(repo) - 1)
                step = start > end and -1 or 1
                for rev in xrange(start, end + step, step):
                    if rev in seen:
                        continue
                    seen.add(rev)
                    l.append(rev)
                continue
            elif spec and spec in repo: # single unquoted rev
                rev = revfix(repo, spec, None)
                if rev in seen:
                    continue
                seen.add(rev)
                l.append(rev)
                continue
        except error.RepoLookupError:
            pass

        # fall through to new-style queries if old-style fails
        m = revset.match(repo.ui, spec)
        for r in m(repo, range(len(repo))):
            if r not in seen:
                l.append(r)
        seen.update(l)

    return l

def expandpats(pats):
    if not util.expandglobs:
        return list(pats)
    ret = []
    for p in pats:
        kind, name = matchmod._patsplit(p, None)
        if kind is None:
            try:
                globbed = glob.glob(name)
            except re.error:
                globbed = [name]
            if globbed:
                ret.extend(globbed)
                continue
        ret.append(p)
    return ret

def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    if pats == ("",):
        pats = []
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default)
    def badfn(f, msg):
        ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
    m.bad = badfn
    return m, pats

def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    return matchandpats(ctx, pats, opts, globbed, default)[0]

def matchall(repo):
    return matchmod.always(repo.root, repo.getcwd())

def matchfiles(repo, files):
    return matchmod.exact(repo.root, repo.getcwd(), files)

def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)
    # we'd use status here, except handling of symlinks and ignore is tricky
    added, unknown, deleted, removed = [], [], [], []
    audit_path = pathauditor(repo.root)
    m = match(repo[None], pats, opts)
    rejected = []
    m.bad = lambda x, y: rejected.append(x)

    for abs in repo.walk(m):
        target = repo.wjoin(abs)
        good = True
        try:
            audit_path(abs)
        except (OSError, util.Abort):
            good = False
        rel = m.rel(abs)
        exact = m.exact(abs)
        if good and abs not in repo.dirstate:
            unknown.append(abs)
            if repo.ui.verbose or not exact:
                repo.ui.status(_('adding %s\n') % ((pats and rel) or abs))
        elif repo.dirstate[abs] != 'r' and (not good or not os.path.lexists(target)
            or (os.path.isdir(target) and not os.path.islink(target))):
            deleted.append(abs)
            if repo.ui.verbose or not exact:
                repo.ui.status(_('removing %s\n') % ((pats and rel) or abs))
        # for finding renames
        elif repo.dirstate[abs] == 'r':
            removed.append(abs)
        elif repo.dirstate[abs] == 'a':
            added.append(abs)
    copies = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo,
                added + unknown, removed + deleted, similarity):
            if repo.ui.verbose or not m.exact(old) or not m.exact(new):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (m.rel(old), m.rel(new), score * 100))
            copies[new] = old

    if not dry_run:
        wctx = repo[None]
        wlock = repo.wlock()
        try:
            wctx.forget(deleted)
            wctx.add(unknown)
            for new, old in copies.iteritems():
                wctx.copy(old, new)
        finally:
            wlock.release()

    for f in rejected:
        if f in m.files():
            return 1
    return 0

def updatedir(ui, repo, patches, similarity=0):
    '''Update dirstate after patch application according to metadata'''
    if not patches:
        return []
    copies = []
    removes = set()
    cfiles = patches.keys()
    cwd = repo.getcwd()
    if cwd:
        cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()]
    for f in patches:
        gp = patches[f]
        if not gp:
            continue
        if gp.op == 'RENAME':
            copies.append((gp.oldpath, gp.path))
            removes.add(gp.oldpath)
        elif gp.op == 'COPY':
            copies.append((gp.oldpath, gp.path))
        elif gp.op == 'DELETE':
            removes.add(gp.path)

    wctx = repo[None]
    for src, dst in copies:
        dirstatecopy(ui, repo, wctx, src, dst, cwd=cwd)
    if (not similarity) and removes:
        wctx.remove(sorted(removes), True)

    for f in patches:
        gp = patches[f]
        if gp and gp.mode:
            islink, isexec = gp.mode
            dst = repo.wjoin(gp.path)
            # patch won't create empty files
            if gp.op == 'ADD' and not os.path.lexists(dst):
                flags = (isexec and 'x' or '') + (islink and 'l' or '')
                repo.wwrite(gp.path, '', flags)
            util.setflags(dst, islink, isexec)
    addremove(repo, cfiles, similarity=similarity)
    files = patches.keys()
    files.extend([r for r in removes if r not in files])
    return sorted(files)

def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)

def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r not in supported:
            if not r or not r[0].isalnum():
                raise error.RequirementError(_(".hg/requires file is corrupt"))
            missings.append(r)
    missings.sort()
    if missings:
        raise error.RequirementError(_("unknown repository format: "
            "requires features '%s' (upgrade Mercurial)") % "', '".join(missings))
    return requirements

class filecacheentry(object):
    def __init__(self, path):
        self.path = path
        self.cachestat = filecacheentry.stat(self.path)

        if self.cachestat:
            self._cacheable = self.cachestat.cacheable()
        else:
            # None means we don't know yet
            self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecacheentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecacheentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

            # check again
            if not self._cacheable:
                return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise

class filecache(object):
    '''A property like decorator that tracks a file under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates
    the object when needed, updating the new stat info in _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behaviour as
    propertycache).'''
-    def __init__(self, path, instore=False):
+    def __init__(self, path):
        self.path = path
-        self.instore = instore
+
+    def join(self, obj, fname):
+        """Used to compute the runtime path of the cached file.
+
+        Users should subclass filecache and provide their own version of this
+        function to call the appropriate join function on 'obj' (an instance
+        of the class that its member function was decorated).
+        """
+        return obj.join(fname)

    def __call__(self, func):
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
-            path = self.instore and obj.sjoin(self.path) or obj.join(self.path)
+            path = self.join(obj, self.path)

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(path)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name in obj._filecache:
            obj._filecache[self.name].obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError, self.name
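
The change above replaces filecache's boolean `instore` flag with an overridable `join` hook: instead of hardcoding the .hg vs. .hg/store decision in the decorator, the decorated object (or a filecache subclass) now supplies the path policy. A minimal sketch of the intended subclassing pattern follows; the `storecache` name and the `sjoin` helper are illustrative assumptions, not part of this diff:

class storecache(filecache):
    '''Sketch: a filecache variant for files kept under .hg/store.
    Only the join override differs from the base class.'''
    def join(self, obj, fname):
        # delegate to the decorated object's store-relative join
        return obj.sjoin(fname)

With the old flag, the decorator itself had to know about the store layout (obj.sjoin vs. obj.join); with the hook, that knowledge stays with the class whose method is being decorated.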
@@ -1,234 +1,257 @@
import sys, os, struct, subprocess, cStringIO, re, shutil

def connect(path=None):
    cmdline = ['hg', 'serve', '--cmdserver', 'pipe']
    if path:
        cmdline += ['-R', path]

    server = subprocess.Popen(cmdline, stdin=subprocess.PIPE,
                              stdout=subprocess.PIPE)

    return server

def writeblock(server, data):
    server.stdin.write(struct.pack('>I', len(data)))
    server.stdin.write(data)
    server.stdin.flush()

def readchannel(server):
    data = server.stdout.read(5)
    if not data:
        raise EOFError()
    channel, length = struct.unpack('>cI', data)
    if channel in 'IL':
        return channel, length
    else:
        return channel, server.stdout.read(length)

def runcommand(server, args, output=sys.stdout, error=sys.stderr, input=None):
    print ' runcommand', ' '.join(args)
    sys.stdout.flush()
    server.stdin.write('runcommand\n')
    writeblock(server, '\0'.join(args))

    if not input:
        input = cStringIO.StringIO()

    while True:
        ch, data = readchannel(server)
        if ch == 'o':
            output.write(data)
            output.flush()
        elif ch == 'e':
            error.write(data)
            error.flush()
        elif ch == 'I':
            writeblock(server, input.read(data))
        elif ch == 'L':
            writeblock(server, input.readline(data))
        elif ch == 'r':
            return struct.unpack('>i', data)[0]
        else:
            print "unexpected channel %c: %r" % (ch, data)
            if ch.isupper():
                return

def check(func, repopath=None):
    print
    print 'testing %s:' % func.__name__
    print
    sys.stdout.flush()
    server = connect(repopath)
    try:
        return func(server)
    finally:
        server.stdin.close()
        server.wait()

def unknowncommand(server):
    server.stdin.write('unknowncommand\n')

def hellomessage(server):
    ch, data = readchannel(server)
    # escaping python tests output not supported
    print '%c, %r' % (ch, re.sub('encoding: [a-zA-Z0-9-]+', 'encoding: ***', data))

    # run an arbitrary command to make sure the next thing the server sends
    # isn't part of the hello message
    runcommand(server, ['id'])

def checkruncommand(server):
    # hello block
    readchannel(server)

    # no args
    runcommand(server, [])

    # global options
    runcommand(server, ['id', '--quiet'])

    # make sure global options don't stick through requests
    runcommand(server, ['id'])

    # --config
    runcommand(server, ['id', '--config', 'ui.quiet=True'])

    # make sure --config doesn't stick
    runcommand(server, ['id'])

def inputeof(server):
    readchannel(server)
    server.stdin.write('runcommand\n')
    # close stdin while server is waiting for input
    server.stdin.close()

    # server exits with 1 if the pipe closed while reading the command
    print 'server exit code =', server.wait()

def serverinput(server):
    readchannel(server)

    patch = """
# HG changeset patch
# User test
# Date 0 0
# Node ID c103a3dec114d882c98382d684d8af798d09d857
# Parent 0000000000000000000000000000000000000000
1

diff -r 000000000000 -r c103a3dec114 a
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/a Thu Jan 01 00:00:00 1970 +0000
@@ -0,0 +1,1 @@
+1
"""

    runcommand(server, ['import', '-'], input=cStringIO.StringIO(patch))
    runcommand(server, ['log'])

def cwd(server):
    """ check that --cwd doesn't persist between requests """
    readchannel(server)
    os.mkdir('foo')
    f = open('foo/bar', 'wb')
    f.write('a')
    f.close()
    runcommand(server, ['--cwd', 'foo', 'st', 'bar'])
    runcommand(server, ['st', 'foo/bar'])
    os.remove('foo/bar')

def localhgrc(server):
    """ check that local configs for the cached repo aren't inherited when -R
    is used """
    readchannel(server)

    # the cached repo local hgrc contains ui.foo=bar, so showconfig should show it
    runcommand(server, ['showconfig'])

    # but not for this repo
    runcommand(server, ['init', 'foo'])
    runcommand(server, ['-R', 'foo', 'showconfig', 'ui', 'defaults'])
    shutil.rmtree('foo')

def hook(**args):
    print 'hook talking'
    print 'now try to read something: %r' % sys.stdin.read()

def hookoutput(server):
    readchannel(server)
    runcommand(server, ['--config',
                        'hooks.pre-identify=python:test-commandserver.hook', 'id'],
               input=cStringIO.StringIO('some input'))

def outsidechanges(server):
    readchannel(server)
    f = open('a', 'ab')
    f.write('a\n')
    f.close()
    runcommand(server, ['status'])
    os.system('hg ci -Am2')
    runcommand(server, ['tip'])
    runcommand(server, ['status'])

def bookmarks(server):
    readchannel(server)
    runcommand(server, ['bookmarks'])

    # changes .hg/bookmarks
    os.system('hg bookmark -i bm1')
    os.system('hg bookmark -i bm2')
    runcommand(server, ['bookmarks'])

    # changes .hg/bookmarks.current
    os.system('hg upd bm1 -q')
    runcommand(server, ['bookmarks'])

    runcommand(server, ['bookmarks', 'bm3'])
    f = open('a', 'ab')
    f.write('a\n')
    f.close()
    runcommand(server, ['commit', '-Amm'])
    runcommand(server, ['bookmarks'])

def tagscache(server):
    readchannel(server)
    runcommand(server, ['id', '-t', '-r', '0'])
    os.system('hg tag -r 0 foo')
    runcommand(server, ['id', '-t', '-r', '0'])

def setphase(server):
    readchannel(server)
    runcommand(server, ['phase', '-r', '.'])
    os.system('hg phase -r . -p')
    runcommand(server, ['phase', '-r', '.'])

def rollback(server):
    readchannel(server)
    runcommand(server, ['phase', '-r', '.', '-p'])
    f = open('a', 'ab')
    f.write('a\n')
    f.close()
    runcommand(server, ['commit', '-Am.'])
    runcommand(server, ['rollback'])
    runcommand(server, ['phase', '-r', '.'])

+def branch(server):
+    readchannel(server)
+    runcommand(server, ['branch'])
+    os.system('hg branch foo')
+    runcommand(server, ['branch'])
+    os.system('hg branch default')
+
+def hgignore(server):
+    readchannel(server)
+    f = open('.hgignore', 'ab')
+    f.write('')
+    f.close()
+    runcommand(server, ['commit', '-Am.'])
+    f = open('ignored-file', 'ab')
+    f.write('')
+    f.close()
+    f = open('.hgignore', 'ab')
+    f.write('ignored-file')
+    f.close()
+    runcommand(server, ['status', '-i', '-u'])
+
if __name__ == '__main__':
    os.system('hg init')

    check(hellomessage)
    check(unknowncommand)
    check(checkruncommand)
    check(inputeof)
    check(serverinput)
    check(cwd)

    hgrc = open('.hg/hgrc', 'a')
    hgrc.write('[ui]\nfoo=bar\n')
    hgrc.close()
    check(localhgrc)
    check(hookoutput)
    check(outsidechanges)
    check(bookmarks)
    check(tagscache)
    check(setphase)
    check(rollback)
+    check(branch)
+    check(hgignore)
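
The two new checks exercise the same driver as the rest of the file. For reference, a minimal session with the helpers above — a sketch assuming an existing repository in the current directory and hg on the search path, mirroring what check() does for each test:

server = connect()                    # spawns 'hg serve --cmdserver pipe'
channel, hello = readchannel(server)  # the hello block arrives on channel 'o'
runcommand(server, ['id'])            # prints ' runcommand id' and the output
server.stdin.close()                  # EOF tells the server to exit
server.wait()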
@@ -1,147 +1,165 @@

testing hellomessage:

o, 'capabilities: getencoding runcommand\nencoding: ***'
 runcommand id
000000000000 tip

testing unknowncommand:

abort: unknown command unknowncommand

testing checkruncommand:

 runcommand
Mercurial Distributed SCM

basic commands:

 add           add the specified files on the next commit
 annotate      show changeset information by line for each file
 clone         make a copy of an existing repository
 commit        commit the specified files or all outstanding changes
 diff          diff repository (or selected files)
 export        dump the header and diffs for one or more changesets
 forget        forget the specified files on the next commit
 init          create a new repository in the given directory
 log           show revision history of entire repository or files
 merge         merge working directory with another revision
 phase         set or show the current phase name
 pull          pull changes from the specified source
 push          push changes to the specified destination
 remove        remove the specified files on the next commit
 serve         start stand-alone webserver
 status        show changed files in the working directory
 summary       summarize working directory state
 update        update working directory (or switch revisions)

use "hg help" for the full list of commands or "hg -v" for details
 runcommand id --quiet
000000000000
 runcommand id
000000000000 tip
 runcommand id --config ui.quiet=True
000000000000
 runcommand id
000000000000 tip

testing inputeof:

server exit code = 1

testing serverinput:

 runcommand import -
applying patch from stdin
 runcommand log
changeset:   0:eff892de26ec
tag:         tip
user:        test
date:        Thu Jan 01 00:00:00 1970 +0000
summary:     1


testing cwd:

 runcommand --cwd foo st bar
? bar
 runcommand st foo/bar
? foo/bar

testing localhgrc:

 runcommand showconfig
bundle.mainreporoot=$TESTTMP
defaults.backout=-d "0 0"
defaults.commit=-d "0 0"
defaults.tag=-d "0 0"
ui.slash=True
ui.foo=bar
 runcommand init foo
 runcommand -R foo showconfig ui defaults
defaults.backout=-d "0 0"
defaults.commit=-d "0 0"
defaults.tag=-d "0 0"
ui.slash=True

testing hookoutput:

 runcommand --config hooks.pre-identify=python:test-commandserver.hook id
hook talking
now try to read something: 'some input'
eff892de26ec tip

testing outsidechanges:

 runcommand status
M a
 runcommand tip
changeset:   1:d3a0a68be6de
tag:         tip
user:        test
date:        Thu Jan 01 00:00:00 1970 +0000
summary:     2

 runcommand status

testing bookmarks:

 runcommand bookmarks
no bookmarks set
 runcommand bookmarks
   bm1                       1:d3a0a68be6de
   bm2                       1:d3a0a68be6de
 runcommand bookmarks
 * bm1                       1:d3a0a68be6de
   bm2                       1:d3a0a68be6de
 runcommand bookmarks bm3
 runcommand commit -Amm
 runcommand bookmarks
   bm1                       1:d3a0a68be6de
   bm2                       1:d3a0a68be6de
 * bm3                       2:aef17e88f5f0

testing tagscache:

 runcommand id -t -r 0

 runcommand id -t -r 0
foo

testing setphase:

 runcommand phase -r .
3: draft
 runcommand phase -r .
3: public

testing rollback:

 runcommand phase -r . -p
no phases changed
 runcommand commit -Am.
 runcommand rollback
repository tip rolled back to revision 3 (undo commit)
working directory now based on revision 3
 runcommand phase -r .
3: public
+
+testing branch:
+
+ runcommand branch
+default
+marked working directory as branch foo
+(branches are permanent and global, did you want a bookmark?)
+ runcommand branch
+foo
+marked working directory as branch default
+(branches are permanent and global, did you want a bookmark?)
+
+testing hgignore:
+
+ runcommand commit -Am.
+adding .hgignore
+ runcommand status -i -u
+I ignored-file
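
The expected output above is produced over the framing implemented by writeblock/readchannel: each server block is a one-byte channel name plus a big-endian 32-bit payload length (struct format '>cI'), followed by that many payload bytes on lowercase channels. A hand-decoded example of one 'o'-channel block (a sketch, not part of the test suite):

import struct

raw = 'o' + struct.pack('>I', 5) + 'hello'       # channel 'o', 5-byte payload
channel, length = struct.unpack('>cI', raw[:5])  # parse the 5-byte header
assert (channel, length) == ('o', 5)
assert raw[5:5 + length] == 'hello'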