##// END OF EJS Templates
merge with i18n
Matt Mackall -
r16215:8412d5a0 merge stable
parent child Browse files
Show More
@@ -1,51 +1,52 b''
1 1 35fb62a3a673d5322f6274a44ba6456e5e4b3b37 0 iD8DBQBEYmO2ywK+sNU5EO8RAnaYAKCO7x15xUn5mnhqWNXqk/ehlhRt2QCfRDfY0LrUq2q4oK/KypuJYPHgq1A=
2 2 2be3001847cb18a23c403439d9e7d0ace30804e9 0 iD8DBQBExUbjywK+sNU5EO8RAhzxAKCtyHAQUzcTSZTqlfJ0by6vhREwWQCghaQFHfkfN0l9/40EowNhuMOKnJk=
3 3 36a957364b1b89c150f2d0e60a99befe0ee08bd3 0 iD8DBQBFfL2QywK+sNU5EO8RAjYFAKCoGlaWRTeMsjdmxAjUYx6diZxOBwCfY6IpBYsKvPTwB3oktnPt5Rmrlys=
4 4 27230c29bfec36d5540fbe1c976810aefecfd1d2 0 iD8DBQBFheweywK+sNU5EO8RAt7VAKCrqJQWT2/uo2RWf0ZI4bLp6v82jACgjrMdsaTbxRsypcmEsdPhlG6/8F4=
5 5 fb4b6d5fe100b0886f8bc3d6731ec0e5ed5c4694 0 iD8DBQBGgHicywK+sNU5EO8RAgNxAJ0VG8ixAaeudx4sZbhngI1syu49HQCeNUJQfWBgA8bkJ2pvsFpNxwYaX3I=
6 6 23889160905a1b09fffe1c07378e9fc1827606eb 0 iD8DBQBHGTzoywK+sNU5EO8RAr/UAJ0Y8s4jQtzgS+G9vM8z6CWBThZ8fwCcCT5XDj2XwxKkz/0s6UELwjsO3LU=
7 7 bae2e9c838e90a393bae3973a7850280413e091a 0 iD8DBQBH6DO5ywK+sNU5EO8RAsfrAJ0e4r9c9GF/MJsM7Xjd3NesLRC3+ACffj6+6HXdZf8cswAoFPO+DY00oD0=
8 8 d5cbbe2c49cee22a9fbeb9ea41daa0ac4e26b846 0 iD8DBQBINdwsywK+sNU5EO8RAjIUAKCPmlFJSpsPAAUKF+iNHAwVnwmzeQCdEXrL27CWclXuUKdbQC8De7LICtE=
9 9 d2375bbee6d47e62ba8e415c86e83a465dc4dce9 0 iD8DBQBIo1wpywK+sNU5EO8RAmRNAJ94x3OFt6blbqu/yBoypm/AJ44fuACfUaldXcV5z9tht97hSp22DVTEPGc=
10 10 2a67430f92f15ea5159c26b09ec4839a0c549a26 0 iEYEABECAAYFAkk1hykACgkQywK+sNU5EO85QACeNJNUanjc2tl4wUoPHNuv+lSj0ZMAoIm93wSTc/feyYnO2YCaQ1iyd9Nu
11 11 3773e510d433969e277b1863c317b674cbee2065 0 iEYEABECAAYFAklNbbAACgkQywK+sNU5EO8o+gCfeb2/lfIJZMvyDA1m+G1CsBAxfFsAoIa6iAMG8SBY7hW1Q85Yf/LXEvaE
12 12 11a4eb81fb4f4742451591489e2797dc47903277 0 iEYEABECAAYFAklcAnsACgkQywK+sNU5EO+uXwCbBVHNNsLy1g7BlAyQJwadYVyHOXoAoKvtAVO71+bv7EbVoukwTzT+P4Sx
13 13 11efa41037e280d08cfb07c09ad485df30fb0ea8 0 iEYEABECAAYFAkmvJRQACgkQywK+sNU5EO9XZwCeLMgDgPSMWMm6vgjL4lDs2pEc5+0AnRxfiFbpbBfuEFTqKz9nbzeyoBlx
14 14 02981000012e3adf40c4849bd7b3d5618f9ce82d 0 iEYEABECAAYFAknEH3wACgkQywK+sNU5EO+uXwCeI+LbLMmhjU1lKSfU3UWJHjjUC7oAoIZLvYDGOL/tNZFUuatc3RnZ2eje
15 15 196d40e7c885fa6e95f89134809b3ec7bdbca34b 0 iEYEABECAAYFAkpL2X4ACgkQywK+sNU5EO9FOwCfXJycjyKJXsvQqKkHrglwOQhEKS4An36GfKzptfN8b1qNc3+ya/5c2WOM
16 16 3ef6c14a1e8e83a31226f5881b7fe6095bbfa6f6 0 iEYEABECAAYFAkpopLIACgkQywK+sNU5EO8QSgCfZ0ztsd071rOa2lhmp9Fyue/WoI0AoLTei80/xrhRlB8L/rZEf2KBl8dA
17 17 31ec469f9b556f11819937cf68ee53f2be927ebf 0 iEYEABECAAYFAksBuxAACgkQywK+sNU5EO+mBwCfagB+A0txzWZ6dRpug3LEoK7Z1QsAoKpbk8vsLjv6/oRDicSk/qBu33+m
18 18 439d7ea6fe3aa4ab9ec274a68846779153789de9 0 iEYEABECAAYFAksVw0kACgkQywK+sNU5EO/oZwCfdfBEkgp38xq6wN2F4nj+SzofrJIAnjmxt04vaJSeOOeHylHvk6lzuQsw
19 19 296a0b14a68621f6990c54fdba0083f6f20935bf 0 iEYEABECAAYFAks+jCoACgkQywK+sNU5EO9J8wCeMUGF9E/gS2UBsqIz56WS4HMPRPUAoI5J95mwEIK8Clrl7qFRidNI6APq
20 20 4aa619c4c2c09907034d9824ebb1dd0e878206eb 0 iEYEABECAAYFAktm9IsACgkQywK+sNU5EO9XGgCgk4HclRQhexEtooPE5GcUCdB6M8EAn2ptOhMVbIoO+JncA+tNACPFXh0O
21 21 ff2704a8ded37fbebd8b6eb5ec733731d725da8a 0 iEYEABECAAYFAkuRoSQACgkQywK+sNU5EO//3QCeJDc5r2uFyFCtAlpSA27DEE5rrxAAn2FSwTy9fhrB3QAdDQlwkEZcQzDh
22 22 2b01dab594167bc0dd33331dbaa6dca3dca1b3aa 0 iEYEABECAAYFAku1IwIACgkQywK+sNU5EO9MjgCdHLVwkTZlNHxhcznZKBL1rjN+J7cAoLLWi9LTL6f/TgBaPSKOy1ublbaW
23 23 39f725929f0c48c5fb3b90c071fc3066012456ca 0 iEYEABECAAYFAkvclvsACgkQywK+sNU5EO9FSwCeL9i5x8ALW/LE5+lCX6MFEAe4MhwAn1ev5o6SX6GrNdDfKweiemfO2VBk
24 24 fdcf80f26604f233dc4d8f0a5ef9d7470e317e8a 0 iEYEABECAAYFAkvsKTkACgkQywK+sNU5EO9qEACgiSiRGvTG2vXGJ65tUSOIYihTuFAAnRzRIqEVSw8M8/RGeUXRps0IzaCO
25 25 24fe2629c6fd0c74c90bd066e77387c2b02e8437 0 iEYEABECAAYFAkwFLRsACgkQywK+sNU5EO+pJACgp13tPI+pbwKZV+LeMjcQ4H6tCZYAoJebzhd6a8yYx6qiwpJxA9BXZNXy
26 26 f786fc4b8764cd2a5526d259cf2f94d8a66924d9 0 iEYEABECAAYFAkwsyxcACgkQywK+sNU5EO+crACfUpNAF57PmClkSri9nJcBjb2goN4AniPCNaKvnki7TnUsi1u2oxltpKKL
27 27 bf1774d95bde614af3956d92b20e2a0c68c5fec7 0 iEYEABECAAYFAkxVwccACgkQywK+sNU5EO+oFQCeJzwZ+we1fIIyBGCddHceOUAN++cAnjvT6A8ZWW0zV21NXIFF1qQmjxJd
28 28 c00f03a4982e467fb6b6bd45908767db6df4771d 0 iEYEABECAAYFAkxXDqsACgkQywK+sNU5EO/GJACfT9Rz4hZOxPQEs91JwtmfjevO84gAmwSmtfo5mmWSm8gtTUebCcdTv0Kf
29 29 ff5cec76b1c5b6be9c3bb923aae8c3c6d079d6b9 0 iD8DBQBMdo+qywK+sNU5EO8RAqQpAJ975BL2CCAiWMz9SXthNQ9xG181IwCgp4O+KViHPkufZVFn2aTKMNvcr1A=
30 30 93d8bff78c96fe7e33237b257558ee97290048a4 0 iD8DBQBMpfvdywK+sNU5EO8RAsxVAJ0UaL1XB51C76JUBhafc9GBefuMxwCdEWkTOzwvE0SarJBe9i008jhbqW4=
31 31 333421b9e0f96c7bc788e5667c146a58a9440a55 0 iD8DBQBMz0HOywK+sNU5EO8RAlsEAJ0USh6yOG7OrWkADGunVt9QimBQnwCbBqeMnKgSbwEw8jZwE3Iz1mdrYlo=
32 32 4438875ec01bd0fc32be92b0872eb6daeed4d44f 0 iD8DBQBM4WYUywK+sNU5EO8RAhCVAJ0dJswachwFAHALmk1x0RJehxzqPQCbBNskP9n/X689jB+btNTZTyKU/fw=
33 33 6aff4f144ad356311318b0011df0bb21f2c97429 0 iD8DBQBM9uxXywK+sNU5EO8RAv+4AKCDj4qKP16GdPaq1tP6BUwpM/M1OACfRyzLPp/qiiN8xJTWoWYSe/XjJug=
34 34 e3bf16703e2601de99e563cdb3a5d50b64e6d320 0 iD8DBQBNH8WqywK+sNU5EO8RAiQTAJ9sBO+TeiGro4si77VVaQaA6jcRUgCfSA28dBbjj0oFoQwvPoZjANiZBH8=
35 35 a6c855c32ea081da3c3b8ff628f1847ff271482f 0 iD8DBQBNSJJ+ywK+sNU5EO8RAoJaAKCweDEF70fu+r1Zn7pYDXdlk5RuSgCeO9gK/eit8Lin/1n3pO7aYguFLok=
36 36 2b2155623ee2559caf288fd333f30475966c4525 0 iD8DBQBNSJeBywK+sNU5EO8RAm1KAJ4hW9Cm9nHaaGJguchBaPLlAr+O3wCgqgmMok8bdAS06N6PL60PSTM//Gg=
37 37 2616325766e3504c8ae7c84bd15ee610901fe91d 0 iD8DBQBNbWy9ywK+sNU5EO8RAlWCAJ4mW8HbzjJj9GpK98muX7k+7EvEHwCfaTLbC/DH3QEsZBhEP+M8tzL6RU4=
38 38 aa1f3be38ab127280761889d2dca906ca465b5f4 0 iD8DBQBNeQq7ywK+sNU5EO8RAlEOAJ4tlEDdetE9lKfjGgjbkcR8PrC3egCfXCfF3qNVvU/2YYjpgvRwevjvDy0=
39 39 b032bec2c0a651ca0ddecb65714bfe6770f67d70 0 iD8DBQBNlg5kywK+sNU5EO8RAnGEAJ9gmEx6MfaR4XcG2m/93vwtfyzs3gCgltzx8/YdHPwqDwRX/WbpYgi33is=
40 40 3cb1e95676ad089596bd81d0937cad37d6e3b7fb 0 iD8DBQBNvTy4ywK+sNU5EO8RAmp8AJ9QnxK4jTJ7G722MyeBxf0UXEdGwACgtlM7BKtNQfbEH/fOW5y+45W88VI=
41 41 733af5d9f6b22387913e1d11350fb8cb7c1487dd 0 iD8DBQBN5q/8ywK+sNU5EO8RArRGAKCNGT94GKIYtSuwZ57z1sQbcw6uLACfffpbMV4NAPMl8womAwg+7ZPKnIU=
42 42 de9eb6b1da4fc522b1cab16d86ca166204c24f25 0 iD8DBQBODhfhywK+sNU5EO8RAr2+AJ4ugbAj8ae8/K0bYZzx3sascIAg1QCeK3b+zbbVVqd3b7CDpwFnaX8kTd4=
43 43 4a43e23b8c55b4566b8200bf69fe2158485a2634 0 iD8DBQBONzIMywK+sNU5EO8RAj5SAJ0aPS3+JHnyI6bHB2Fl0LImbDmagwCdGbDLp1S7TFobxXudOH49bX45Iik=
44 44 d629f1e89021103f1753addcef6b310e4435b184 0 iD8DBQBOWAsBywK+sNU5EO8RAht4AJwJl9oNFopuGkj5m8aKuf7bqPkoAQCeNrEm7UhFsZKYT5iUOjnMV7s2LaM=
45 45 351a9292e430e35766c552066ed3e87c557b803b 0 iD8DBQBOh3zUywK+sNU5EO8RApFMAKCD3Y/u3avDFndznwqfG5UeTHMlvACfUivPIVQZyDZnhZMq0UhC6zhCEQg=
46 46 384082750f2c51dc917d85a7145748330fa6ef4d 0 iD8DBQBOmd+OywK+sNU5EO8RAgDgAJ9V/X+G7VLwhTpHrZNiOHabzSyzYQCdE2kKfIevJUYB9QLAWCWP6DPwrwI=
47 47 41453d55b481ddfcc1dacb445179649e24ca861d 0 iD8DBQBOsFhpywK+sNU5EO8RAqM6AKCyfxUae3/zLuiLdQz+JR78690eMACfQ6JTBQib4AbE+rUDdkeFYg9K/+4=
48 48 195dbd1cef0c2f9f8bcf4ea303238105f716bda3 0 iD8DBQBO1/fWywK+sNU5EO8RAmoPAKCR5lpv1D6JLURHD8KVLSV4GRVEBgCgnd0Sy78ligNfqAMafmACRDvj7vo=
49 49 6344043924497cd06d781d9014c66802285072e4 0 iD8DBQBPALgmywK+sNU5EO8RAlfhAJ9nYOdWnhfVDHYtDTJAyJtXBAQS9wCgnefoSQt7QABkbGxM+Q85UYEBuD0=
50 50 db33555eafeaf9df1e18950e29439eaa706d399b 0 iD8DBQBPGdzxywK+sNU5EO8RAppkAJ9jOXhUVE/97CPgiMA0pMGiIYnesQCfengAszcBiSiKGugiI8Okc9ghU+Y=
51 51 2aa5b51f310fb3befd26bed99c02267f5c12c734 0 iD8DBQBPKZ9bywK+sNU5EO8RAt1TAJ45r1eJ0YqSkInzrrayg4TVCh0SnQCgm0GA/Ua74jnnDwVQ60lAwROuz1Q=
52 53e2cd303ecf8ca7c7eeebd785c34e5ed6b0f4a4 0 iD8DBQBPT/fvywK+sNU5EO8RAnfYAKCn7d0vwqIb100YfWm1F7nFD5B+FACeM02YHpQLSNsztrBCObtqcnfod7Q=
@@ -1,63 +1,64 b''
1 1 d40cc5aacc31ed673d9b5b24f98bee78c283062c 0.4f
2 2 1c590d34bf61e2ea12c71738e5a746cd74586157 0.4e
3 3 7eca4cfa8aad5fce9a04f7d8acadcd0452e2f34e 0.4d
4 4 b4d0c3786ad3e47beacf8412157326a32b6d25a4 0.4c
5 5 f40273b0ad7b3a6d3012fd37736d0611f41ecf54 0.5
6 6 0a28dfe59f8fab54a5118c5be4f40da34a53cdb7 0.5b
7 7 12e0fdbc57a0be78f0e817fd1d170a3615cd35da 0.6
8 8 4ccf3de52989b14c3d84e1097f59e39a992e00bd 0.6b
9 9 eac9c8efcd9bd8244e72fb6821f769f450457a32 0.6c
10 10 979c049974485125e1f9357f6bbe9c1b548a64c3 0.7
11 11 3a56574f329a368d645853e0f9e09472aee62349 0.8
12 12 6a03cff2b0f5d30281e6addefe96b993582f2eac 0.8.1
13 13 35fb62a3a673d5322f6274a44ba6456e5e4b3b37 0.9
14 14 2be3001847cb18a23c403439d9e7d0ace30804e9 0.9.1
15 15 36a957364b1b89c150f2d0e60a99befe0ee08bd3 0.9.2
16 16 27230c29bfec36d5540fbe1c976810aefecfd1d2 0.9.3
17 17 fb4b6d5fe100b0886f8bc3d6731ec0e5ed5c4694 0.9.4
18 18 23889160905a1b09fffe1c07378e9fc1827606eb 0.9.5
19 19 bae2e9c838e90a393bae3973a7850280413e091a 1.0
20 20 d5cbbe2c49cee22a9fbeb9ea41daa0ac4e26b846 1.0.1
21 21 d2375bbee6d47e62ba8e415c86e83a465dc4dce9 1.0.2
22 22 2a67430f92f15ea5159c26b09ec4839a0c549a26 1.1
23 23 3773e510d433969e277b1863c317b674cbee2065 1.1.1
24 24 11a4eb81fb4f4742451591489e2797dc47903277 1.1.2
25 25 11efa41037e280d08cfb07c09ad485df30fb0ea8 1.2
26 26 02981000012e3adf40c4849bd7b3d5618f9ce82d 1.2.1
27 27 196d40e7c885fa6e95f89134809b3ec7bdbca34b 1.3
28 28 3ef6c14a1e8e83a31226f5881b7fe6095bbfa6f6 1.3.1
29 29 31ec469f9b556f11819937cf68ee53f2be927ebf 1.4
30 30 439d7ea6fe3aa4ab9ec274a68846779153789de9 1.4.1
31 31 296a0b14a68621f6990c54fdba0083f6f20935bf 1.4.2
32 32 4aa619c4c2c09907034d9824ebb1dd0e878206eb 1.4.3
33 33 ff2704a8ded37fbebd8b6eb5ec733731d725da8a 1.5
34 34 2b01dab594167bc0dd33331dbaa6dca3dca1b3aa 1.5.1
35 35 39f725929f0c48c5fb3b90c071fc3066012456ca 1.5.2
36 36 fdcf80f26604f233dc4d8f0a5ef9d7470e317e8a 1.5.3
37 37 24fe2629c6fd0c74c90bd066e77387c2b02e8437 1.5.4
38 38 f786fc4b8764cd2a5526d259cf2f94d8a66924d9 1.6
39 39 bf1774d95bde614af3956d92b20e2a0c68c5fec7 1.6.1
40 40 c00f03a4982e467fb6b6bd45908767db6df4771d 1.6.2
41 41 ff5cec76b1c5b6be9c3bb923aae8c3c6d079d6b9 1.6.3
42 42 93d8bff78c96fe7e33237b257558ee97290048a4 1.6.4
43 43 333421b9e0f96c7bc788e5667c146a58a9440a55 1.7
44 44 4438875ec01bd0fc32be92b0872eb6daeed4d44f 1.7.1
45 45 6aff4f144ad356311318b0011df0bb21f2c97429 1.7.2
46 46 e3bf16703e2601de99e563cdb3a5d50b64e6d320 1.7.3
47 47 a6c855c32ea081da3c3b8ff628f1847ff271482f 1.7.4
48 48 2b2155623ee2559caf288fd333f30475966c4525 1.7.5
49 49 2616325766e3504c8ae7c84bd15ee610901fe91d 1.8
50 50 aa1f3be38ab127280761889d2dca906ca465b5f4 1.8.1
51 51 b032bec2c0a651ca0ddecb65714bfe6770f67d70 1.8.2
52 52 3cb1e95676ad089596bd81d0937cad37d6e3b7fb 1.8.3
53 53 733af5d9f6b22387913e1d11350fb8cb7c1487dd 1.8.4
54 54 de9eb6b1da4fc522b1cab16d86ca166204c24f25 1.9
55 55 4a43e23b8c55b4566b8200bf69fe2158485a2634 1.9.1
56 56 d629f1e89021103f1753addcef6b310e4435b184 1.9.2
57 57 351a9292e430e35766c552066ed3e87c557b803b 1.9.3
58 58 384082750f2c51dc917d85a7145748330fa6ef4d 2.0-rc
59 59 41453d55b481ddfcc1dacb445179649e24ca861d 2.0
60 60 195dbd1cef0c2f9f8bcf4ea303238105f716bda3 2.0.1
61 61 6344043924497cd06d781d9014c66802285072e4 2.0.2
62 62 db33555eafeaf9df1e18950e29439eaa706d399b 2.1-rc
63 63 2aa5b51f310fb3befd26bed99c02267f5c12c734 2.1
64 53e2cd303ecf8ca7c7eeebd785c34e5ed6b0f4a4 2.1.1
@@ -1,773 +1,774 b''
1 1 # bugzilla.py - bugzilla integration for mercurial
2 2 #
3 3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 4 # Copyright 2011 Jim Hague <jim.hague@acm.org>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''hooks for integrating with the Bugzilla bug tracker
10 10
11 11 This hook extension adds comments on bugs in Bugzilla when changesets
12 12 that refer to bugs by Bugzilla ID are seen. The comment is formatted using
13 13 the Mercurial template mechanism.
14 14
15 15 The hook does not change bug status.
16 16
17 17 Three basic modes of access to Bugzilla are provided:
18 18
19 19 1. Access via the Bugzilla XMLRPC interface. Requires Bugzilla 3.4 or later.
20 20
21 21 2. Check data via the Bugzilla XMLRPC interface and submit bug change
22 22 via email to Bugzilla email interface. Requires Bugzilla 3.4 or later.
23 23
24 24 3. Writing directly to the Bugzilla database. Only Bugzilla installations
25 25 using MySQL are supported. Requires Python MySQLdb.
26 26
27 27 Writing directly to the database is susceptible to schema changes, and
28 28 relies on a Bugzilla contrib script to send out bug change
29 29 notification emails. This script runs as the user running Mercurial,
30 30 must be run on the host with the Bugzilla install, and requires
31 31 permission to read Bugzilla configuration details and the necessary
32 32 MySQL user and password to have full access rights to the Bugzilla
33 33 database. For these reasons this access mode is now considered
34 34 deprecated, and will not be updated for new Bugzilla versions going
35 35 forward.
36 36
37 37 Access via XMLRPC needs a Bugzilla username and password to be specified
38 38 in the configuration. Comments are added under that username. Since the
39 39 configuration must be readable by all Mercurial users, it is recommended
40 40 that the rights of that user are restricted in Bugzilla to the minimum
41 41 necessary to add comments.
42 42
43 43 Access via XMLRPC/email uses XMLRPC to query Bugzilla, but sends
44 44 email to the Bugzilla email interface to submit comments to bugs.
45 45 The From: address in the email is set to the email address of the Mercurial
46 46 user, so the comment appears to come from the Mercurial user. In the event
47 47 that the Mercurial user email is not recognised by Bugzilla as a Bugzilla
48 48 user, the email associated with the Bugzilla username used to log into
49 49 Bugzilla is used instead as the source of the comment.
50 50
51 51 Configuration items common to all access modes:
52 52
53 53 bugzilla.version
54 54 This access type to use. Values recognised are:
55 55
56 56 :``xmlrpc``: Bugzilla XMLRPC interface.
57 57 :``xmlrpc+email``: Bugzilla XMLRPC and email interfaces.
58 58 :``3.0``: MySQL access, Bugzilla 3.0 and later.
59 59 :``2.18``: MySQL access, Bugzilla 2.18 and up to but not
60 60 including 3.0.
61 61 :``2.16``: MySQL access, Bugzilla 2.16 and up to but not
62 62 including 2.18.
63 63
64 64 bugzilla.regexp
65 65 Regular expression to match bug IDs in changeset commit message.
66 66 Must contain one "()" group. The default expression matches ``Bug
67 67 1234``, ``Bug no. 1234``, ``Bug number 1234``, ``Bugs 1234,5678``,
68 68 ``Bug 1234 and 5678`` and variations thereof. Matching is case
69 69 insensitive.
70 70
71 71 bugzilla.style
72 72 The style file to use when formatting comments.
73 73
74 74 bugzilla.template
75 75 Template to use when formatting comments. Overrides style if
76 76 specified. In addition to the usual Mercurial keywords, the
77 77 extension specifies:
78 78
79 79 :``{bug}``: The Bugzilla bug ID.
80 80 :``{root}``: The full pathname of the Mercurial repository.
81 81 :``{webroot}``: Stripped pathname of the Mercurial repository.
82 82 :``{hgweb}``: Base URL for browsing Mercurial repositories.
83 83
84 84 Default ``changeset {node|short} in repo {root} refers to bug
85 85 {bug}.\\ndetails:\\n\\t{desc|tabindent}``
86 86
87 87 bugzilla.strip
88 88 The number of path separator characters to strip from the front of
89 89 the Mercurial repository path (``{root}`` in templates) to produce
90 90 ``{webroot}``. For example, a repository with ``{root}``
91 91 ``/var/local/my-project`` with a strip of 2 gives a value for
92 92 ``{webroot}`` of ``my-project``. Default 0.
93 93
94 94 web.baseurl
95 95 Base URL for browsing Mercurial repositories. Referenced from
96 96 templates as ``{hgweb}``.
97 97
98 98 Configuration items common to XMLRPC+email and MySQL access modes:
99 99
100 100 bugzilla.usermap
101 101 Path of file containing Mercurial committer email to Bugzilla user email
102 102 mappings. If specified, the file should contain one mapping per
103 103 line::
104 104
105 105 committer = Bugzilla user
106 106
107 107 See also the ``[usermap]`` section.
108 108
109 109 The ``[usermap]`` section is used to specify mappings of Mercurial
110 110 committer email to Bugzilla user email. See also ``bugzilla.usermap``.
111 111 Contains entries of the form ``committer = Bugzilla user``.
112 112
113 113 XMLRPC access mode configuration:
114 114
115 115 bugzilla.bzurl
116 116 The base URL for the Bugzilla installation.
117 117 Default ``http://localhost/bugzilla``.
118 118
119 119 bugzilla.user
120 120 The username to use to log into Bugzilla via XMLRPC. Default
121 121 ``bugs``.
122 122
123 123 bugzilla.password
124 124 The password for Bugzilla login.
125 125
126 126 XMLRPC+email access mode uses the XMLRPC access mode configuration items,
127 127 and also:
128 128
129 129 bugzilla.bzemail
130 130 The Bugzilla email address.
131 131
132 132 In addition, the Mercurial email settings must be configured. See the
133 133 documentation in hgrc(5), sections ``[email]`` and ``[smtp]``.
134 134
135 135 MySQL access mode configuration:
136 136
137 137 bugzilla.host
138 138 Hostname of the MySQL server holding the Bugzilla database.
139 139 Default ``localhost``.
140 140
141 141 bugzilla.db
142 142 Name of the Bugzilla database in MySQL. Default ``bugs``.
143 143
144 144 bugzilla.user
145 145 Username to use to access MySQL server. Default ``bugs``.
146 146
147 147 bugzilla.password
148 148 Password to use to access MySQL server.
149 149
150 150 bugzilla.timeout
151 151 Database connection timeout (seconds). Default 5.
152 152
153 153 bugzilla.bzuser
154 154 Fallback Bugzilla user name to record comments with, if changeset
155 155 committer cannot be found as a Bugzilla user.
156 156
157 157 bugzilla.bzdir
158 158 Bugzilla install directory. Used by default notify. Default
159 159 ``/var/www/html/bugzilla``.
160 160
161 161 bugzilla.notify
162 162 The command to run to get Bugzilla to send bug change notification
163 163 emails. Substitutes from a map with 3 keys, ``bzdir``, ``id`` (bug
164 164 id) and ``user`` (committer bugzilla email). Default depends on
165 165 version; from 2.18 it is "cd %(bzdir)s && perl -T
166 166 contrib/sendbugmail.pl %(id)s %(user)s".
167 167
168 168 Activating the extension::
169 169
170 170 [extensions]
171 171 bugzilla =
172 172
173 173 [hooks]
174 174 # run bugzilla hook on every change pulled or pushed in here
175 175 incoming.bugzilla = python:hgext.bugzilla.hook
176 176
177 177 Example configurations:
178 178
179 179 XMLRPC example configuration. This uses the Bugzilla at
180 180 ``http://my-project.org/bugzilla``, logging in as user
181 181 ``bugmail@my-project.org`` with password ``plugh``. It is used with a
182 182 collection of Mercurial repositories in ``/var/local/hg/repos/``,
183 183 with a web interface at ``http://my-project.org/hg``. ::
184 184
185 185 [bugzilla]
186 186 bzurl=http://my-project.org/bugzilla
187 187 user=bugmail@my-project.org
188 188 password=plugh
189 189 version=xmlrpc
190 190 template=Changeset {node|short} in {root|basename}.
191 191 {hgweb}/{webroot}/rev/{node|short}\\n
192 192 {desc}\\n
193 193 strip=5
194 194
195 195 [web]
196 196 baseurl=http://my-project.org/hg
197 197
198 198 XMLRPC+email example configuration. This uses the Bugzilla at
199 199 ``http://my-project.org/bugzilla``, logging in as user
200 200 ``bugmail@my-project.org`` with password ``plugh``. It is used with a
201 201 collection of Mercurial repositories in ``/var/local/hg/repos/``,
202 202 with a web interface at ``http://my-project.org/hg``. Bug comments
203 203 are sent to the Bugzilla email address
204 204 ``bugzilla@my-project.org``. ::
205 205
206 206 [bugzilla]
207 207 bzurl=http://my-project.org/bugzilla
208 208 user=bugmail@my-project.org
209 209 password=plugh
210 210 version=xmlrpc
211 211 bzemail=bugzilla@my-project.org
212 212 template=Changeset {node|short} in {root|basename}.
213 213 {hgweb}/{webroot}/rev/{node|short}\\n
214 214 {desc}\\n
215 215 strip=5
216 216
217 217 [web]
218 218 baseurl=http://my-project.org/hg
219 219
220 220 [usermap]
221 221 user@emaildomain.com=user.name@bugzilladomain.com
222 222
223 223 MySQL example configuration. This has a local Bugzilla 3.2 installation
224 224 in ``/opt/bugzilla-3.2``. The MySQL database is on ``localhost``,
225 225 the Bugzilla database name is ``bugs`` and MySQL is
226 226 accessed with MySQL username ``bugs`` password ``XYZZY``. It is used
227 227 with a collection of Mercurial repositories in ``/var/local/hg/repos/``,
228 228 with a web interface at ``http://my-project.org/hg``. ::
229 229
230 230 [bugzilla]
231 231 host=localhost
232 232 password=XYZZY
233 233 version=3.0
234 234 bzuser=unknown@domain.com
235 235 bzdir=/opt/bugzilla-3.2
236 236 template=Changeset {node|short} in {root|basename}.
237 237 {hgweb}/{webroot}/rev/{node|short}\\n
238 238 {desc}\\n
239 239 strip=5
240 240
241 241 [web]
242 242 baseurl=http://my-project.org/hg
243 243
244 244 [usermap]
245 245 user@emaildomain.com=user.name@bugzilladomain.com
246 246
247 247 All the above add a comment to the Bugzilla bug record of the form::
248 248
249 249 Changeset 3b16791d6642 in repository-name.
250 250 http://my-project.org/hg/repository-name/rev/3b16791d6642
251 251
252 252 Changeset commit comment. Bug 1234.
253 253 '''
254 254
255 255 from mercurial.i18n import _
256 256 from mercurial.node import short
257 257 from mercurial import cmdutil, mail, templater, util
258 258 import re, time, urlparse, xmlrpclib
259 259
260 260 class bzaccess(object):
261 261 '''Base class for access to Bugzilla.'''
262 262
263 263 def __init__(self, ui):
264 264 self.ui = ui
265 265 usermap = self.ui.config('bugzilla', 'usermap')
266 266 if usermap:
267 267 self.ui.readconfig(usermap, sections=['usermap'])
268 268
269 269 def map_committer(self, user):
270 270 '''map name of committer to Bugzilla user name.'''
271 271 for committer, bzuser in self.ui.configitems('usermap'):
272 272 if committer.lower() == user.lower():
273 273 return bzuser
274 274 return user
275 275
276 276 # Methods to be implemented by access classes.
277 277 def filter_real_bug_ids(self, ids):
278 278 '''remove bug IDs that do not exist in Bugzilla from set.'''
279 279 pass
280 280
281 281 def filter_cset_known_bug_ids(self, node, ids):
282 282 '''remove bug IDs where node occurs in comment text from set.'''
283 283 pass
284 284
285 285 def add_comment(self, bugid, text, committer):
286 286 '''add comment to bug.
287 287
288 288 If possible add the comment as being from the committer of
289 289 the changeset. Otherwise use the default Bugzilla user.
290 290 '''
291 291 pass
292 292
293 293 def notify(self, ids, committer):
294 294 '''Force sending of Bugzilla notification emails.'''
295 295 pass
296 296
297 297 # Bugzilla via direct access to MySQL database.
298 298 class bzmysql(bzaccess):
299 299 '''Support for direct MySQL access to Bugzilla.
300 300
301 301 The earliest Bugzilla version this is tested with is version 2.16.
302 302
303 303 If your Bugzilla is version 3.2 or above, you are strongly
304 304 recommended to use the XMLRPC access method instead.
305 305 '''
306 306
307 307 @staticmethod
308 308 def sql_buglist(ids):
309 309 '''return SQL-friendly list of bug ids'''
310 310 return '(' + ','.join(map(str, ids)) + ')'
311 311
312 312 _MySQLdb = None
313 313
314 314 def __init__(self, ui):
315 315 try:
316 316 import MySQLdb as mysql
317 317 bzmysql._MySQLdb = mysql
318 318 except ImportError, err:
319 319 raise util.Abort(_('python mysql support not available: %s') % err)
320 320
321 321 bzaccess.__init__(self, ui)
322 322
323 323 host = self.ui.config('bugzilla', 'host', 'localhost')
324 324 user = self.ui.config('bugzilla', 'user', 'bugs')
325 325 passwd = self.ui.config('bugzilla', 'password')
326 326 db = self.ui.config('bugzilla', 'db', 'bugs')
327 327 timeout = int(self.ui.config('bugzilla', 'timeout', 5))
328 328 self.ui.note(_('connecting to %s:%s as %s, password %s\n') %
329 329 (host, db, user, '*' * len(passwd)))
330 330 self.conn = bzmysql._MySQLdb.connect(host=host,
331 331 user=user, passwd=passwd,
332 332 db=db,
333 333 connect_timeout=timeout)
334 334 self.cursor = self.conn.cursor()
335 335 self.longdesc_id = self.get_longdesc_id()
336 336 self.user_ids = {}
337 337 self.default_notify = "cd %(bzdir)s && ./processmail %(id)s %(user)s"
338 338
339 339 def run(self, *args, **kwargs):
340 340 '''run a query.'''
341 341 self.ui.note(_('query: %s %s\n') % (args, kwargs))
342 342 try:
343 343 self.cursor.execute(*args, **kwargs)
344 344 except bzmysql._MySQLdb.MySQLError:
345 345 self.ui.note(_('failed query: %s %s\n') % (args, kwargs))
346 346 raise
347 347
348 348 def get_longdesc_id(self):
349 349 '''get identity of longdesc field'''
350 350 self.run('select fieldid from fielddefs where name = "longdesc"')
351 351 ids = self.cursor.fetchall()
352 352 if len(ids) != 1:
353 353 raise util.Abort(_('unknown database schema'))
354 354 return ids[0][0]
355 355
356 356 def filter_real_bug_ids(self, ids):
357 357 '''filter not-existing bug ids from set.'''
358 358 self.run('select bug_id from bugs where bug_id in %s' %
359 359 bzmysql.sql_buglist(ids))
360 360 return set([c[0] for c in self.cursor.fetchall()])
361 361
362 362 def filter_cset_known_bug_ids(self, node, ids):
363 363 '''filter bug ids that already refer to this changeset from set.'''
364 364
365 365 self.run('''select bug_id from longdescs where
366 366 bug_id in %s and thetext like "%%%s%%"''' %
367 367 (bzmysql.sql_buglist(ids), short(node)))
368 368 for (id,) in self.cursor.fetchall():
369 369 self.ui.status(_('bug %d already knows about changeset %s\n') %
370 370 (id, short(node)))
371 371 ids.discard(id)
372 372 return ids
373 373
374 374 def notify(self, ids, committer):
375 375 '''tell bugzilla to send mail.'''
376 376
377 377 self.ui.status(_('telling bugzilla to send mail:\n'))
378 378 (user, userid) = self.get_bugzilla_user(committer)
379 379 for id in ids:
380 380 self.ui.status(_(' bug %s\n') % id)
381 381 cmdfmt = self.ui.config('bugzilla', 'notify', self.default_notify)
382 382 bzdir = self.ui.config('bugzilla', 'bzdir', '/var/www/html/bugzilla')
383 383 try:
384 384 # Backwards-compatible with old notify string, which
385 385 # took one string. This will throw with a new format
386 386 # string.
387 387 cmd = cmdfmt % id
388 388 except TypeError:
389 389 cmd = cmdfmt % {'bzdir': bzdir, 'id': id, 'user': user}
390 390 self.ui.note(_('running notify command %s\n') % cmd)
391 391 fp = util.popen('(%s) 2>&1' % cmd)
392 392 out = fp.read()
393 393 ret = fp.close()
394 394 if ret:
395 395 self.ui.warn(out)
396 396 raise util.Abort(_('bugzilla notify command %s') %
397 397 util.explainexit(ret)[0])
398 398 self.ui.status(_('done\n'))
399 399
400 400 def get_user_id(self, user):
401 401 '''look up numeric bugzilla user id.'''
402 402 try:
403 403 return self.user_ids[user]
404 404 except KeyError:
405 405 try:
406 406 userid = int(user)
407 407 except ValueError:
408 408 self.ui.note(_('looking up user %s\n') % user)
409 409 self.run('''select userid from profiles
410 410 where login_name like %s''', user)
411 411 all = self.cursor.fetchall()
412 412 if len(all) != 1:
413 413 raise KeyError(user)
414 414 userid = int(all[0][0])
415 415 self.user_ids[user] = userid
416 416 return userid
417 417
418 418 def get_bugzilla_user(self, committer):
419 419 '''See if committer is a registered bugzilla user. Return
420 420 bugzilla username and userid if so. If not, return default
421 421 bugzilla username and userid.'''
422 422 user = self.map_committer(committer)
423 423 try:
424 424 userid = self.get_user_id(user)
425 425 except KeyError:
426 426 try:
427 427 defaultuser = self.ui.config('bugzilla', 'bzuser')
428 428 if not defaultuser:
429 429 raise util.Abort(_('cannot find bugzilla user id for %s') %
430 430 user)
431 431 userid = self.get_user_id(defaultuser)
432 432 user = defaultuser
433 433 except KeyError:
434 434 raise util.Abort(_('cannot find bugzilla user id for %s or %s') %
435 435 (user, defaultuser))
436 436 return (user, userid)
437 437
438 438 def add_comment(self, bugid, text, committer):
439 439 '''add comment to bug. try adding comment as committer of
440 440 changeset, otherwise as default bugzilla user.'''
441 441 (user, userid) = self.get_bugzilla_user(committer)
442 442 now = time.strftime('%Y-%m-%d %H:%M:%S')
443 443 self.run('''insert into longdescs
444 444 (bug_id, who, bug_when, thetext)
445 445 values (%s, %s, %s, %s)''',
446 446 (bugid, userid, now, text))
447 447 self.run('''insert into bugs_activity (bug_id, who, bug_when, fieldid)
448 448 values (%s, %s, %s, %s)''',
449 449 (bugid, userid, now, self.longdesc_id))
450 450 self.conn.commit()
451 451
452 452 class bzmysql_2_18(bzmysql):
453 453 '''support for bugzilla 2.18 series.'''
454 454
455 455 def __init__(self, ui):
456 456 bzmysql.__init__(self, ui)
457 457 self.default_notify = \
458 458 "cd %(bzdir)s && perl -T contrib/sendbugmail.pl %(id)s %(user)s"
459 459
460 460 class bzmysql_3_0(bzmysql_2_18):
461 461 '''support for bugzilla 3.0 series.'''
462 462
463 463 def __init__(self, ui):
464 464 bzmysql_2_18.__init__(self, ui)
465 465
466 466 def get_longdesc_id(self):
467 467 '''get identity of longdesc field'''
468 468 self.run('select id from fielddefs where name = "longdesc"')
469 469 ids = self.cursor.fetchall()
470 470 if len(ids) != 1:
471 471 raise util.Abort(_('unknown database schema'))
472 472 return ids[0][0]
473 473
474 474 # Buzgilla via XMLRPC interface.
475 475
476 476 class cookietransportrequest(object):
477 477 """A Transport request method that retains cookies over its lifetime.
478 478
479 479 The regular xmlrpclib transports ignore cookies. Which causes
480 480 a bit of a problem when you need a cookie-based login, as with
481 481 the Bugzilla XMLRPC interface.
482 482
483 483 So this is a helper for defining a Transport which looks for
484 484 cookies being set in responses and saves them to add to all future
485 485 requests.
486 486 """
487 487
488 488 # Inspiration drawn from
489 489 # http://blog.godson.in/2010/09/how-to-make-python-xmlrpclib-client.html
490 490 # http://www.itkovian.net/base/transport-class-for-pythons-xml-rpc-lib/
491 491
492 492 cookies = []
493 493 def send_cookies(self, connection):
494 494 if self.cookies:
495 495 for cookie in self.cookies:
496 496 connection.putheader("Cookie", cookie)
497 497
498 498 def request(self, host, handler, request_body, verbose=0):
499 499 self.verbose = verbose
500 self.accept_gzip_encoding = False
500 501
501 502 # issue XML-RPC request
502 503 h = self.make_connection(host)
503 504 if verbose:
504 505 h.set_debuglevel(1)
505 506
506 507 self.send_request(h, handler, request_body)
507 508 self.send_host(h, host)
508 509 self.send_cookies(h)
509 510 self.send_user_agent(h)
510 511 self.send_content(h, request_body)
511 512
512 513 # Deal with differences between Python 2.4-2.6 and 2.7.
513 514 # In the former h is a HTTP(S). In the latter it's a
514 515 # HTTP(S)Connection. Luckily, the 2.4-2.6 implementation of
515 516 # HTTP(S) has an underlying HTTP(S)Connection, so extract
516 517 # that and use it.
517 518 try:
518 519 response = h.getresponse()
519 520 except AttributeError:
520 521 response = h._conn.getresponse()
521 522
522 523 # Add any cookie definitions to our list.
523 524 for header in response.msg.getallmatchingheaders("Set-Cookie"):
524 525 val = header.split(": ", 1)[1]
525 526 cookie = val.split(";", 1)[0]
526 527 self.cookies.append(cookie)
527 528
528 529 if response.status != 200:
529 530 raise xmlrpclib.ProtocolError(host + handler, response.status,
530 531 response.reason, response.msg.headers)
531 532
532 533 payload = response.read()
533 534 parser, unmarshaller = self.getparser()
534 535 parser.feed(payload)
535 536 parser.close()
536 537
537 538 return unmarshaller.close()
538 539
539 540 # The explicit calls to the underlying xmlrpclib __init__() methods are
540 541 # necessary. The xmlrpclib.Transport classes are old-style classes, and
541 542 # it turns out their __init__() doesn't get called when doing multiple
542 543 # inheritance with a new-style class.
543 544 class cookietransport(cookietransportrequest, xmlrpclib.Transport):
544 545 def __init__(self, use_datetime=0):
545 546 xmlrpclib.Transport.__init__(self, use_datetime)
546 547
547 548 class cookiesafetransport(cookietransportrequest, xmlrpclib.SafeTransport):
548 549 def __init__(self, use_datetime=0):
549 550 xmlrpclib.SafeTransport.__init__(self, use_datetime)
550 551
551 552 class bzxmlrpc(bzaccess):
552 553 """Support for access to Bugzilla via the Bugzilla XMLRPC API.
553 554
554 555 Requires a minimum Bugzilla version 3.4.
555 556 """
556 557
557 558 def __init__(self, ui):
558 559 bzaccess.__init__(self, ui)
559 560
560 561 bzweb = self.ui.config('bugzilla', 'bzurl',
561 562 'http://localhost/bugzilla/')
562 563 bzweb = bzweb.rstrip("/") + "/xmlrpc.cgi"
563 564
564 565 user = self.ui.config('bugzilla', 'user', 'bugs')
565 566 passwd = self.ui.config('bugzilla', 'password')
566 567
567 568 self.bzproxy = xmlrpclib.ServerProxy(bzweb, self.transport(bzweb))
568 569 self.bzproxy.User.login(dict(login=user, password=passwd))
569 570
570 571 def transport(self, uri):
571 572 if urlparse.urlparse(uri, "http")[0] == "https":
572 573 return cookiesafetransport()
573 574 else:
574 575 return cookietransport()
575 576
576 577 def get_bug_comments(self, id):
577 578 """Return a string with all comment text for a bug."""
578 579 c = self.bzproxy.Bug.comments(dict(ids=[id]))
579 580 return ''.join([t['text'] for t in c['bugs'][str(id)]['comments']])
580 581
581 582 def filter_real_bug_ids(self, ids):
582 583 res = set()
583 584 bugs = self.bzproxy.Bug.get(dict(ids=sorted(ids), permissive=True))
584 585 for bug in bugs['bugs']:
585 586 res.add(bug['id'])
586 587 return res
587 588
588 589 def filter_cset_known_bug_ids(self, node, ids):
589 590 for id in sorted(ids):
590 591 if self.get_bug_comments(id).find(short(node)) != -1:
591 592 self.ui.status(_('bug %d already knows about changeset %s\n') %
592 593 (id, short(node)))
593 594 ids.discard(id)
594 595 return ids
595 596
596 597 def add_comment(self, bugid, text, committer):
597 598 self.bzproxy.Bug.add_comment(dict(id=bugid, comment=text))
598 599
599 600 class bzxmlrpcemail(bzxmlrpc):
600 601 """Read data from Bugzilla via XMLRPC, send updates via email.
601 602
602 603 Advantages of sending updates via email:
603 604 1. Comments can be added as any user, not just logged in user.
604 605 2. Bug statuses and other fields not accessible via XMLRPC can
605 606 be updated. This is not currently used.
606 607 """
607 608
608 609 def __init__(self, ui):
609 610 bzxmlrpc.__init__(self, ui)
610 611
611 612 self.bzemail = self.ui.config('bugzilla', 'bzemail')
612 613 if not self.bzemail:
613 614 raise util.Abort(_("configuration 'bzemail' missing"))
614 615 mail.validateconfig(self.ui)
615 616
616 617 def send_bug_modify_email(self, bugid, commands, comment, committer):
617 618 '''send modification message to Bugzilla bug via email.
618 619
619 620 The message format is documented in the Bugzilla email_in.pl
620 621 specification. commands is a list of command lines, comment is the
621 622 comment text.
622 623
623 624 To stop users from crafting commit comments with
624 625 Bugzilla commands, specify the bug ID via the message body, rather
625 626 than the subject line, and leave a blank line after it.
626 627 '''
627 628 user = self.map_committer(committer)
628 629 matches = self.bzproxy.User.get(dict(match=[user]))
629 630 if not matches['users']:
630 631 user = self.ui.config('bugzilla', 'user', 'bugs')
631 632 matches = self.bzproxy.User.get(dict(match=[user]))
632 633 if not matches['users']:
633 634 raise util.Abort(_("default bugzilla user %s email not found") %
634 635 user)
635 636 user = matches['users'][0]['email']
636 637
637 638 text = "\n".join(commands) + "\n@bug_id = %d\n\n" % bugid + comment
638 639
639 640 _charsets = mail._charsets(self.ui)
640 641 user = mail.addressencode(self.ui, user, _charsets)
641 642 bzemail = mail.addressencode(self.ui, self.bzemail, _charsets)
642 643 msg = mail.mimeencode(self.ui, text, _charsets)
643 644 msg['From'] = user
644 645 msg['To'] = bzemail
645 646 msg['Subject'] = mail.headencode(self.ui, "Bug modification", _charsets)
646 647 sendmail = mail.connect(self.ui)
647 648 sendmail(user, bzemail, msg.as_string())
648 649
649 650 def add_comment(self, bugid, text, committer):
650 651 self.send_bug_modify_email(bugid, [], text, committer)
651 652
652 653 class bugzilla(object):
653 654 # supported versions of bugzilla. different versions have
654 655 # different schemas.
655 656 _versions = {
656 657 '2.16': bzmysql,
657 658 '2.18': bzmysql_2_18,
658 659 '3.0': bzmysql_3_0,
659 660 'xmlrpc': bzxmlrpc,
660 661 'xmlrpc+email': bzxmlrpcemail
661 662 }
662 663
663 664 _default_bug_re = (r'bugs?\s*,?\s*(?:#|nos?\.?|num(?:ber)?s?)?\s*'
664 665 r'((?:\d+\s*(?:,?\s*(?:and)?)?\s*)+)')
665 666
666 667 _bz = None
667 668
668 669 def __init__(self, ui, repo):
669 670 self.ui = ui
670 671 self.repo = repo
671 672
672 673 def bz(self):
673 674 '''return object that knows how to talk to bugzilla version in
674 675 use.'''
675 676
676 677 if bugzilla._bz is None:
677 678 bzversion = self.ui.config('bugzilla', 'version')
678 679 try:
679 680 bzclass = bugzilla._versions[bzversion]
680 681 except KeyError:
681 682 raise util.Abort(_('bugzilla version %s not supported') %
682 683 bzversion)
683 684 bugzilla._bz = bzclass(self.ui)
684 685 return bugzilla._bz
685 686
686 687 def __getattr__(self, key):
687 688 return getattr(self.bz(), key)
688 689
689 690 _bug_re = None
690 691 _split_re = None
691 692
692 693 def find_bug_ids(self, ctx):
693 694 '''return set of integer bug IDs from commit comment.
694 695
695 696 Extract bug IDs from changeset comments. Filter out any that are
696 697 not known to Bugzilla, and any that already have a reference to
697 698 the given changeset in their comments.
698 699 '''
699 700 if bugzilla._bug_re is None:
700 701 bugzilla._bug_re = re.compile(
701 702 self.ui.config('bugzilla', 'regexp', bugzilla._default_bug_re),
702 703 re.IGNORECASE)
703 704 bugzilla._split_re = re.compile(r'\D+')
704 705 start = 0
705 706 ids = set()
706 707 while True:
707 708 m = bugzilla._bug_re.search(ctx.description(), start)
708 709 if not m:
709 710 break
710 711 start = m.end()
711 712 for id in bugzilla._split_re.split(m.group(1)):
712 713 if not id:
713 714 continue
714 715 ids.add(int(id))
715 716 if ids:
716 717 ids = self.filter_real_bug_ids(ids)
717 718 if ids:
718 719 ids = self.filter_cset_known_bug_ids(ctx.node(), ids)
719 720 return ids
720 721
721 722 def update(self, bugid, ctx):
722 723 '''update bugzilla bug with reference to changeset.'''
723 724
724 725 def webroot(root):
725 726 '''strip leading prefix of repo root and turn into
726 727 url-safe path.'''
727 728 count = int(self.ui.config('bugzilla', 'strip', 0))
728 729 root = util.pconvert(root)
729 730 while count > 0:
730 731 c = root.find('/')
731 732 if c == -1:
732 733 break
733 734 root = root[c + 1:]
734 735 count -= 1
735 736 return root
736 737
737 738 mapfile = self.ui.config('bugzilla', 'style')
738 739 tmpl = self.ui.config('bugzilla', 'template')
739 740 t = cmdutil.changeset_templater(self.ui, self.repo,
740 741 False, None, mapfile, False)
741 742 if not mapfile and not tmpl:
742 743 tmpl = _('changeset {node|short} in repo {root} refers '
743 744 'to bug {bug}.\ndetails:\n\t{desc|tabindent}')
744 745 if tmpl:
745 746 tmpl = templater.parsestring(tmpl, quoted=False)
746 747 t.use_template(tmpl)
747 748 self.ui.pushbuffer()
748 749 t.show(ctx, changes=ctx.changeset(),
749 750 bug=str(bugid),
750 751 hgweb=self.ui.config('web', 'baseurl'),
751 752 root=self.repo.root,
752 753 webroot=webroot(self.repo.root))
753 754 data = self.ui.popbuffer()
754 755 self.add_comment(bugid, data, util.email(ctx.user()))
755 756
756 757 def hook(ui, repo, hooktype, node=None, **kwargs):
757 758 '''add comment to bugzilla for each changeset that refers to a
758 759 bugzilla bug id. only add a comment once per bug, so same change
759 760 seen multiple times does not fill bug with duplicate data.'''
760 761 if node is None:
761 762 raise util.Abort(_('hook type %s does not pass a changeset id') %
762 763 hooktype)
763 764 try:
764 765 bz = bugzilla(ui, repo)
765 766 ctx = repo[node]
766 767 ids = bz.find_bug_ids(ctx)
767 768 if ids:
768 769 for id in ids:
769 770 bz.update(id, ctx)
770 771 bz.notify(ids, util.email(ctx.user()))
771 772 except Exception, e:
772 773 raise util.Abort(_('Bugzilla error: %s') % e)
773 774
@@ -1,241 +1,242 b''
1 1 # Mercurial bookmark support code
2 2 #
3 3 # Copyright 2008 David Soria Parra <dsp@php.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from mercurial.i18n import _
9 9 from mercurial.node import hex
10 10 from mercurial import encoding, error, util
11 11 import errno, os
12 12
13 13 def valid(mark):
14 14 for c in (':', '\0', '\n', '\r'):
15 15 if c in mark:
16 16 return False
17 17 return True
18 18
19 19 def read(repo):
20 20 '''Parse .hg/bookmarks file and return a dictionary
21 21
22 22 Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values
23 23 in the .hg/bookmarks file.
24 24 Read the file and return a (name=>nodeid) dictionary
25 25 '''
26 26 bookmarks = {}
27 27 try:
28 28 for line in repo.opener('bookmarks'):
29 29 line = line.strip()
30 30 if not line:
31 31 continue
32 32 if ' ' not in line:
33 33 repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n') % line)
34 34 continue
35 35 sha, refspec = line.split(' ', 1)
36 36 refspec = encoding.tolocal(refspec)
37 37 try:
38 38 bookmarks[refspec] = repo.changelog.lookup(sha)
39 39 except error.RepoLookupError:
40 40 pass
41 41 except IOError, inst:
42 42 if inst.errno != errno.ENOENT:
43 43 raise
44 44 return bookmarks
45 45
46 46 def readcurrent(repo):
47 47 '''Get the current bookmark
48 48
49 49 If we use gittishsh branches we have a current bookmark that
50 50 we are on. This function returns the name of the bookmark. It
51 51 is stored in .hg/bookmarks.current
52 52 '''
53 53 mark = None
54 54 try:
55 55 file = repo.opener('bookmarks.current')
56 56 except IOError, inst:
57 57 if inst.errno != errno.ENOENT:
58 58 raise
59 59 return None
60 60 try:
61 61 # No readline() in posixfile_nt, reading everything is cheap
62 62 mark = encoding.tolocal((file.readlines() or [''])[0])
63 63 if mark == '' or mark not in repo._bookmarks:
64 64 mark = None
65 65 finally:
66 66 file.close()
67 67 return mark
68 68
69 69 def write(repo):
70 70 '''Write bookmarks
71 71
72 72 Write the given bookmark => hash dictionary to the .hg/bookmarks file
73 73 in a format equal to those of localtags.
74 74
75 75 We also store a backup of the previous state in undo.bookmarks that
76 76 can be copied back on rollback.
77 77 '''
78 78 refs = repo._bookmarks
79 79
80 80 if repo._bookmarkcurrent not in refs:
81 81 setcurrent(repo, None)
82 82 for mark in refs.keys():
83 83 if not valid(mark):
84 84 raise util.Abort(_("bookmark '%s' contains illegal "
85 85 "character" % mark))
86 86
87 87 wlock = repo.wlock()
88 88 try:
89 89
90 90 file = repo.opener('bookmarks', 'w', atomictemp=True)
91 91 for refspec, node in refs.iteritems():
92 92 file.write("%s %s\n" % (hex(node), encoding.fromlocal(refspec)))
93 93 file.close()
94 94
95 95 # touch 00changelog.i so hgweb reloads bookmarks (no lock needed)
96 96 try:
97 97 os.utime(repo.sjoin('00changelog.i'), None)
98 98 except OSError:
99 99 pass
100 100
101 101 finally:
102 102 wlock.release()
103 103
104 104 def setcurrent(repo, mark):
105 105 '''Set the name of the bookmark that we are currently on
106 106
107 107 Set the name of the bookmark that we are on (hg update <bookmark>).
108 108 The name is recorded in .hg/bookmarks.current
109 109 '''
110 110 current = repo._bookmarkcurrent
111 111 if current == mark:
112 112 return
113 113
114 114 if mark not in repo._bookmarks:
115 115 mark = ''
116 116 if not valid(mark):
117 117 raise util.Abort(_("bookmark '%s' contains illegal "
118 118 "character" % mark))
119 119
120 120 wlock = repo.wlock()
121 121 try:
122 122 file = repo.opener('bookmarks.current', 'w', atomictemp=True)
123 123 file.write(encoding.fromlocal(mark))
124 124 file.close()
125 125 finally:
126 126 wlock.release()
127 127 repo._bookmarkcurrent = mark
128 128
129 129 def unsetcurrent(repo):
130 130 wlock = repo.wlock()
131 131 try:
132 util.unlink(repo.join('bookmarks.current'))
133 repo._bookmarkcurrent = None
134 except OSError, inst:
135 if inst.errno != errno.ENOENT:
136 raise
132 try:
133 util.unlink(repo.join('bookmarks.current'))
134 repo._bookmarkcurrent = None
135 except OSError, inst:
136 if inst.errno != errno.ENOENT:
137 raise
137 138 finally:
138 139 wlock.release()
139 140
140 141 def updatecurrentbookmark(repo, oldnode, curbranch):
141 142 try:
142 143 return update(repo, oldnode, repo.branchtags()[curbranch])
143 144 except KeyError:
144 145 if curbranch == "default": # no default branch!
145 146 return update(repo, oldnode, repo.lookup("tip"))
146 147 else:
147 148 raise util.Abort(_("branch %s not found") % curbranch)
148 149
149 150 def update(repo, parents, node):
150 151 marks = repo._bookmarks
151 152 update = False
152 153 mark = repo._bookmarkcurrent
153 154 if mark and marks[mark] in parents:
154 155 old = repo[marks[mark]]
155 156 new = repo[node]
156 157 if new in old.descendants():
157 158 marks[mark] = new.node()
158 159 update = True
159 160 if update:
160 161 repo._writebookmarks(marks)
161 162 return update
162 163
163 164 def listbookmarks(repo):
164 165 # We may try to list bookmarks on a repo type that does not
165 166 # support it (e.g., statichttprepository).
166 167 marks = getattr(repo, '_bookmarks', {})
167 168
168 169 d = {}
169 170 for k, v in marks.iteritems():
170 171 # don't expose local divergent bookmarks
171 172 if '@' not in k and not k.endswith('@'):
172 173 d[k] = hex(v)
173 174 return d
174 175
175 176 def pushbookmark(repo, key, old, new):
176 177 w = repo.wlock()
177 178 try:
178 179 marks = repo._bookmarks
179 180 if hex(marks.get(key, '')) != old:
180 181 return False
181 182 if new == '':
182 183 del marks[key]
183 184 else:
184 185 if new not in repo:
185 186 return False
186 187 marks[key] = repo[new].node()
187 188 write(repo)
188 189 return True
189 190 finally:
190 191 w.release()
191 192
192 193 def updatefromremote(ui, repo, remote, path):
193 194 ui.debug("checking for updated bookmarks\n")
194 195 rb = remote.listkeys('bookmarks')
195 196 changed = False
196 197 for k in rb.keys():
197 198 if k in repo._bookmarks:
198 199 nr, nl = rb[k], repo._bookmarks[k]
199 200 if nr in repo:
200 201 cr = repo[nr]
201 202 cl = repo[nl]
202 203 if cl.rev() >= cr.rev():
203 204 continue
204 205 if cr in cl.descendants():
205 206 repo._bookmarks[k] = cr.node()
206 207 changed = True
207 208 ui.status(_("updating bookmark %s\n") % k)
208 209 else:
209 210 # find a unique @ suffix
210 211 for x in range(1, 100):
211 212 n = '%s@%d' % (k, x)
212 213 if n not in repo._bookmarks:
213 214 break
214 215 # try to use an @pathalias suffix
215 216 # if an @pathalias already exists, we overwrite (update) it
216 217 for p, u in ui.configitems("paths"):
217 218 if path == u:
218 219 n = '%s@%s' % (k, p)
219 220
220 221 repo._bookmarks[n] = cr.node()
221 222 changed = True
222 223 ui.warn(_("divergent bookmark %s stored as %s\n") % (k, n))
223 224
224 225 if changed:
225 226 write(repo)
226 227
227 228 def diff(ui, repo, remote):
228 229 ui.status(_("searching for changed bookmarks\n"))
229 230
230 231 lmarks = repo.listkeys('bookmarks')
231 232 rmarks = remote.listkeys('bookmarks')
232 233
233 234 diff = sorted(set(rmarks) - set(lmarks))
234 235 for k in diff:
235 236 mark = ui.debugflag and rmarks[k] or rmarks[k][:12]
236 237 ui.write(" %-25s %s\n" % (k, mark))
237 238
238 239 if len(diff) <= 0:
239 240 ui.status(_("no changed bookmarks found\n"))
240 241 return 1
241 242 return 0
@@ -1,368 +1,369 b''
1 1 # bundlerepo.py - repository class for viewing uncompressed bundles
2 2 #
3 3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Repository class for viewing uncompressed bundles.
9 9
10 10 This provides a read-only repository interface to bundles as if they
11 11 were part of the actual repository.
12 12 """
13 13
14 14 from node import nullid
15 15 from i18n import _
16 16 import os, tempfile, shutil
17 17 import changegroup, util, mdiff, discovery, cmdutil
18 18 import localrepo, changelog, manifest, filelog, revlog, error
19 19
20 20 class bundlerevlog(revlog.revlog):
21 21 def __init__(self, opener, indexfile, bundle, linkmapper):
22 22 # How it works:
23 23 # to retrieve a revision, we need to know the offset of
24 24 # the revision in the bundle (an unbundle object).
25 25 #
26 26 # We store this offset in the index (start), to differentiate a
27 27 # rev in the bundle and from a rev in the revlog, we check
28 28 # len(index[r]). If the tuple is bigger than 7, it is a bundle
29 29 # (it is bigger since we store the node to which the delta is)
30 30 #
31 31 revlog.revlog.__init__(self, opener, indexfile)
32 32 self.bundle = bundle
33 33 self.basemap = {}
34 34 n = len(self)
35 35 chain = None
36 36 while True:
37 37 chunkdata = bundle.deltachunk(chain)
38 38 if not chunkdata:
39 39 break
40 40 node = chunkdata['node']
41 41 p1 = chunkdata['p1']
42 42 p2 = chunkdata['p2']
43 43 cs = chunkdata['cs']
44 44 deltabase = chunkdata['deltabase']
45 45 delta = chunkdata['delta']
46 46
47 47 size = len(delta)
48 48 start = bundle.tell() - size
49 49
50 50 link = linkmapper(cs)
51 51 if node in self.nodemap:
52 52 # this can happen if two branches make the same change
53 53 chain = node
54 54 continue
55 55
56 56 for p in (p1, p2):
57 57 if not p in self.nodemap:
58 58 raise error.LookupError(p, self.indexfile,
59 59 _("unknown parent"))
60 60 # start, size, full unc. size, base (unused), link, p1, p2, node
61 61 e = (revlog.offset_type(start, 0), size, -1, -1, link,
62 62 self.rev(p1), self.rev(p2), node)
63 63 self.basemap[n] = deltabase
64 64 self.index.insert(-1, e)
65 65 self.nodemap[node] = n
66 66 chain = node
67 67 n += 1
68 68
69 69 def inbundle(self, rev):
70 70 """is rev from the bundle"""
71 71 if rev < 0:
72 72 return False
73 73 return rev in self.basemap
74 74 def bundlebase(self, rev):
75 75 return self.basemap[rev]
76 76 def _chunk(self, rev):
77 77 # Warning: in case of bundle, the diff is against bundlebase,
78 78 # not against rev - 1
79 79 # XXX: could use some caching
80 80 if not self.inbundle(rev):
81 81 return revlog.revlog._chunk(self, rev)
82 82 self.bundle.seek(self.start(rev))
83 83 return self.bundle.read(self.length(rev))
84 84
85 85 def revdiff(self, rev1, rev2):
86 86 """return or calculate a delta between two revisions"""
87 87 if self.inbundle(rev1) and self.inbundle(rev2):
88 88 # hot path for bundle
89 89 revb = self.rev(self.bundlebase(rev2))
90 90 if revb == rev1:
91 91 return self._chunk(rev2)
92 92 elif not self.inbundle(rev1) and not self.inbundle(rev2):
93 93 return revlog.revlog.revdiff(self, rev1, rev2)
94 94
95 95 return mdiff.textdiff(self.revision(self.node(rev1)),
96 96 self.revision(self.node(rev2)))
97 97
98 98 def revision(self, node):
99 99 """return an uncompressed revision of a given"""
100 100 if node == nullid:
101 101 return ""
102 102
103 103 text = None
104 104 chain = []
105 105 iter_node = node
106 106 rev = self.rev(iter_node)
107 107 # reconstruct the revision if it is from a changegroup
108 108 while self.inbundle(rev):
109 109 if self._cache and self._cache[0] == iter_node:
110 110 text = self._cache[2]
111 111 break
112 112 chain.append(rev)
113 113 iter_node = self.bundlebase(rev)
114 114 rev = self.rev(iter_node)
115 115 if text is None:
116 116 text = revlog.revlog.revision(self, iter_node)
117 117
118 118 while chain:
119 119 delta = self._chunk(chain.pop())
120 120 text = mdiff.patches(text, [delta])
121 121
122 122 p1, p2 = self.parents(node)
123 123 if node != revlog.hash(text, p1, p2):
124 124 raise error.RevlogError(_("integrity check failed on %s:%d")
125 125 % (self.datafile, self.rev(node)))
126 126
127 127 self._cache = (node, self.rev(node), text)
128 128 return text
129 129
130 130 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
131 131 raise NotImplementedError
132 132 def addgroup(self, revs, linkmapper, transaction):
133 133 raise NotImplementedError
134 134 def strip(self, rev, minlink):
135 135 raise NotImplementedError
136 136 def checksize(self):
137 137 raise NotImplementedError
138 138
139 139 class bundlechangelog(bundlerevlog, changelog.changelog):
140 140 def __init__(self, opener, bundle):
141 141 changelog.changelog.__init__(self, opener)
142 142 linkmapper = lambda x: x
143 143 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
144 144 linkmapper)
145 145
146 146 class bundlemanifest(bundlerevlog, manifest.manifest):
147 147 def __init__(self, opener, bundle, linkmapper):
148 148 manifest.manifest.__init__(self, opener)
149 149 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
150 150 linkmapper)
151 151
152 152 class bundlefilelog(bundlerevlog, filelog.filelog):
153 153 def __init__(self, opener, path, bundle, linkmapper, repo):
154 154 filelog.filelog.__init__(self, opener, path)
155 155 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
156 156 linkmapper)
157 157 self._repo = repo
158 158
159 159 def _file(self, f):
160 160 self._repo.file(f)
161 161
162 162 class bundlerepository(localrepo.localrepository):
163 163 def __init__(self, ui, path, bundlename):
164 164 self._tempparent = None
165 165 try:
166 166 localrepo.localrepository.__init__(self, ui, path)
167 167 except error.RepoError:
168 168 self._tempparent = tempfile.mkdtemp()
169 169 localrepo.instance(ui, self._tempparent, 1)
170 170 localrepo.localrepository.__init__(self, ui, self._tempparent)
171 self.ui.setconfig('phases', 'publish', False)
171 172
172 173 if path:
173 174 self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename
174 175 else:
175 176 self._url = 'bundle:' + bundlename
176 177
177 178 self.tempfile = None
178 179 f = util.posixfile(bundlename, "rb")
179 180 self.bundle = changegroup.readbundle(f, bundlename)
180 181 if self.bundle.compressed():
181 182 fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-",
182 183 suffix=".hg10un", dir=self.path)
183 184 self.tempfile = temp
184 185 fptemp = os.fdopen(fdtemp, 'wb')
185 186
186 187 try:
187 188 fptemp.write("HG10UN")
188 189 while True:
189 190 chunk = self.bundle.read(2**18)
190 191 if not chunk:
191 192 break
192 193 fptemp.write(chunk)
193 194 finally:
194 195 fptemp.close()
195 196
196 197 f = util.posixfile(self.tempfile, "rb")
197 198 self.bundle = changegroup.readbundle(f, bundlename)
198 199
199 200 # dict with the mapping 'filename' -> position in the bundle
200 201 self.bundlefilespos = {}
201 202
202 203 @util.propertycache
203 204 def changelog(self):
204 205 # consume the header if it exists
205 206 self.bundle.changelogheader()
206 207 c = bundlechangelog(self.sopener, self.bundle)
207 208 self.manstart = self.bundle.tell()
208 209 return c
209 210
210 211 @util.propertycache
211 212 def manifest(self):
212 213 self.bundle.seek(self.manstart)
213 214 # consume the header if it exists
214 215 self.bundle.manifestheader()
215 216 m = bundlemanifest(self.sopener, self.bundle, self.changelog.rev)
216 217 self.filestart = self.bundle.tell()
217 218 return m
218 219
219 220 @util.propertycache
220 221 def manstart(self):
221 222 self.changelog
222 223 return self.manstart
223 224
224 225 @util.propertycache
225 226 def filestart(self):
226 227 self.manifest
227 228 return self.filestart
228 229
229 230 def url(self):
230 231 return self._url
231 232
232 233 def file(self, f):
233 234 if not self.bundlefilespos:
234 235 self.bundle.seek(self.filestart)
235 236 while True:
236 237 chunkdata = self.bundle.filelogheader()
237 238 if not chunkdata:
238 239 break
239 240 fname = chunkdata['filename']
240 241 self.bundlefilespos[fname] = self.bundle.tell()
241 242 while True:
242 243 c = self.bundle.deltachunk(None)
243 244 if not c:
244 245 break
245 246
246 247 if f[0] == '/':
247 248 f = f[1:]
248 249 if f in self.bundlefilespos:
249 250 self.bundle.seek(self.bundlefilespos[f])
250 251 return bundlefilelog(self.sopener, f, self.bundle,
251 252 self.changelog.rev, self)
252 253 else:
253 254 return filelog.filelog(self.sopener, f)
254 255
255 256 def close(self):
256 257 """Close assigned bundle file immediately."""
257 258 self.bundle.close()
258 259 if self.tempfile is not None:
259 260 os.unlink(self.tempfile)
260 261 if self._tempparent:
261 262 shutil.rmtree(self._tempparent, True)
262 263
263 264 def cancopy(self):
264 265 return False
265 266
266 267 def getcwd(self):
267 268 return os.getcwd() # always outside the repo
268 269
269 270 def _writebranchcache(self, branches, tip, tiprev):
270 271 # don't overwrite the disk cache with bundle-augmented data
271 272 pass
272 273
273 274 def instance(ui, path, create):
274 275 if create:
275 276 raise util.Abort(_('cannot create new bundle repository'))
276 277 parentpath = ui.config("bundle", "mainreporoot", "")
277 278 if not parentpath:
278 279 # try to find the correct path to the working directory repo
279 280 parentpath = cmdutil.findrepo(os.getcwd())
280 281 if parentpath is None:
281 282 parentpath = ''
282 283 if parentpath:
283 284 # Try to make the full path relative so we get a nice, short URL.
284 285 # In particular, we don't want temp dir names in test outputs.
285 286 cwd = os.getcwd()
286 287 if parentpath == cwd:
287 288 parentpath = ''
288 289 else:
289 290 cwd = os.path.join(cwd,'')
290 291 if parentpath.startswith(cwd):
291 292 parentpath = parentpath[len(cwd):]
292 293 u = util.url(path)
293 294 path = u.localpath()
294 295 if u.scheme == 'bundle':
295 296 s = path.split("+", 1)
296 297 if len(s) == 1:
297 298 repopath, bundlename = parentpath, s[0]
298 299 else:
299 300 repopath, bundlename = s
300 301 else:
301 302 repopath, bundlename = parentpath, path
302 303 return bundlerepository(ui, repopath, bundlename)
303 304
304 305 def getremotechanges(ui, repo, other, onlyheads=None, bundlename=None,
305 306 force=False):
306 307 '''obtains a bundle of changes incoming from other
307 308
308 309 "onlyheads" restricts the returned changes to those reachable from the
309 310 specified heads.
310 311 "bundlename", if given, stores the bundle to this file path permanently;
311 312 otherwise it's stored to a temp file and gets deleted again when you call
312 313 the returned "cleanupfn".
313 314 "force" indicates whether to proceed on unrelated repos.
314 315
315 316 Returns a tuple (local, csets, cleanupfn):
316 317
317 318 "local" is a local repo from which to obtain the actual incoming changesets; it
318 319 is a bundlerepo for the obtained bundle when the original "other" is remote.
319 320 "csets" lists the incoming changeset node ids.
320 321 "cleanupfn" must be called without arguments when you're done processing the
321 322 changes; it closes both the original "other" and the one returned here.
322 323 '''
323 324 tmp = discovery.findcommonincoming(repo, other, heads=onlyheads, force=force)
324 325 common, incoming, rheads = tmp
325 326 if not incoming:
326 327 try:
327 328 if bundlename:
328 329 os.unlink(bundlename)
329 330 except OSError:
330 331 pass
331 332 return other, [], other.close
332 333
333 334 bundle = None
334 335 bundlerepo = None
335 336 localrepo = other
336 337 if bundlename or not other.local():
337 338 # create a bundle (uncompressed if other repo is not local)
338 339
339 340 if other.capable('getbundle'):
340 341 cg = other.getbundle('incoming', common=common, heads=rheads)
341 342 elif onlyheads is None and not other.capable('changegroupsubset'):
342 343 # compat with older servers when pulling all remote heads
343 344 cg = other.changegroup(incoming, "incoming")
344 345 rheads = None
345 346 else:
346 347 cg = other.changegroupsubset(incoming, rheads, 'incoming')
347 348 bundletype = other.local() and "HG10BZ" or "HG10UN"
348 349 fname = bundle = changegroup.writebundle(cg, bundlename, bundletype)
349 350 # keep written bundle?
350 351 if bundlename:
351 352 bundle = None
352 353 if not other.local():
353 354 # use the created uncompressed bundlerepo
354 355 localrepo = bundlerepo = bundlerepository(ui, repo.root, fname)
355 356 # this repo contains local and other now, so filter out local again
356 357 common = repo.heads()
357 358
358 359 csets = localrepo.changelog.findmissing(common, rheads)
359 360
360 361 def cleanup():
361 362 if bundlerepo:
362 363 bundlerepo.close()
363 364 if bundle:
364 365 os.unlink(bundle)
365 366 other.close()
366 367
367 368 return (localrepo, csets, cleanup)
368 369
@@ -1,736 +1,748 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 import errno
8 8
9 9 from node import nullid
10 10 from i18n import _
11 11 import scmutil, util, ignore, osutil, parsers, encoding
12 12 import struct, os, stat, errno
13 13 import cStringIO
14 14
15 15 _format = ">cllll"
16 16 propertycache = util.propertycache
17 filecache = scmutil.filecache
18
19 class repocache(filecache):
20 """filecache for files in .hg/"""
21 def join(self, obj, fname):
22 return obj._opener.join(fname)
23
24 class rootcache(filecache):
25 """filecache for files in the repository root"""
26 def join(self, obj, fname):
27 return obj._join(fname)
17 28
18 29 def _finddirs(path):
19 30 pos = path.rfind('/')
20 31 while pos != -1:
21 32 yield path[:pos]
22 33 pos = path.rfind('/', 0, pos)
23 34
24 35 def _incdirs(dirs, path):
25 36 for base in _finddirs(path):
26 37 if base in dirs:
27 38 dirs[base] += 1
28 39 return
29 40 dirs[base] = 1
30 41
31 42 def _decdirs(dirs, path):
32 43 for base in _finddirs(path):
33 44 if dirs[base] > 1:
34 45 dirs[base] -= 1
35 46 return
36 47 del dirs[base]
37 48
38 49 class dirstate(object):
39 50
40 51 def __init__(self, opener, ui, root, validate):
41 52 '''Create a new dirstate object.
42 53
43 54 opener is an open()-like callable that can be used to open the
44 55 dirstate file; root is the root of the directory tracked by
45 56 the dirstate.
46 57 '''
47 58 self._opener = opener
48 59 self._validate = validate
49 60 self._root = root
50 61 self._rootdir = os.path.join(root, '')
51 62 self._dirty = False
52 63 self._dirtypl = False
53 64 self._lastnormaltime = 0
54 65 self._ui = ui
66 self._filecache = {}
55 67
56 68 @propertycache
57 69 def _map(self):
58 70 '''Return the dirstate contents as a map from filename to
59 71 (state, mode, size, time).'''
60 72 self._read()
61 73 return self._map
62 74
63 75 @propertycache
64 76 def _copymap(self):
65 77 self._read()
66 78 return self._copymap
67 79
68 80 @propertycache
69 81 def _normroot(self):
70 82 return util.normcase(self._root)
71 83
72 84 @propertycache
73 85 def _foldmap(self):
74 86 f = {}
75 87 for name in self._map:
76 88 f[util.normcase(name)] = name
77 89 f['.'] = '.' # prevents useless util.fspath() invocation
78 90 return f
79 91
80 @propertycache
92 @repocache('branch')
81 93 def _branch(self):
82 94 try:
83 95 return self._opener.read("branch").strip() or "default"
84 96 except IOError, inst:
85 97 if inst.errno != errno.ENOENT:
86 98 raise
87 99 return "default"
88 100
89 101 @propertycache
90 102 def _pl(self):
91 103 try:
92 104 fp = self._opener("dirstate")
93 105 st = fp.read(40)
94 106 fp.close()
95 107 l = len(st)
96 108 if l == 40:
97 109 return st[:20], st[20:40]
98 110 elif l > 0 and l < 40:
99 111 raise util.Abort(_('working directory state appears damaged!'))
100 112 except IOError, err:
101 113 if err.errno != errno.ENOENT:
102 114 raise
103 115 return [nullid, nullid]
104 116
105 117 @propertycache
106 118 def _dirs(self):
107 119 dirs = {}
108 120 for f, s in self._map.iteritems():
109 121 if s[0] != 'r':
110 122 _incdirs(dirs, f)
111 123 return dirs
112 124
113 125 def dirs(self):
114 126 return self._dirs
115 127
116 @propertycache
128 @rootcache('.hgignore')
117 129 def _ignore(self):
118 130 files = [self._join('.hgignore')]
119 131 for name, path in self._ui.configitems("ui"):
120 132 if name == 'ignore' or name.startswith('ignore.'):
121 133 files.append(util.expandpath(path))
122 134 return ignore.ignore(self._root, files, self._ui.warn)
123 135
124 136 @propertycache
125 137 def _slash(self):
126 138 return self._ui.configbool('ui', 'slash') and os.sep != '/'
127 139
128 140 @propertycache
129 141 def _checklink(self):
130 142 return util.checklink(self._root)
131 143
132 144 @propertycache
133 145 def _checkexec(self):
134 146 return util.checkexec(self._root)
135 147
136 148 @propertycache
137 149 def _checkcase(self):
138 150 return not util.checkcase(self._join('.hg'))
139 151
140 152 def _join(self, f):
141 153 # much faster than os.path.join()
142 154 # it's safe because f is always a relative path
143 155 return self._rootdir + f
144 156
145 157 def flagfunc(self, buildfallback):
146 158 if self._checklink and self._checkexec:
147 159 def f(x):
148 160 p = self._join(x)
149 161 if os.path.islink(p):
150 162 return 'l'
151 163 if util.isexec(p):
152 164 return 'x'
153 165 return ''
154 166 return f
155 167
156 168 fallback = buildfallback()
157 169 if self._checklink:
158 170 def f(x):
159 171 if os.path.islink(self._join(x)):
160 172 return 'l'
161 173 if 'x' in fallback(x):
162 174 return 'x'
163 175 return ''
164 176 return f
165 177 if self._checkexec:
166 178 def f(x):
167 179 if 'l' in fallback(x):
168 180 return 'l'
169 181 if util.isexec(self._join(x)):
170 182 return 'x'
171 183 return ''
172 184 return f
173 185 else:
174 186 return fallback
175 187
176 188 def getcwd(self):
177 189 cwd = os.getcwd()
178 190 if cwd == self._root:
179 191 return ''
180 192 # self._root ends with a path separator if self._root is '/' or 'C:\'
181 193 rootsep = self._root
182 194 if not util.endswithsep(rootsep):
183 195 rootsep += os.sep
184 196 if cwd.startswith(rootsep):
185 197 return cwd[len(rootsep):]
186 198 else:
187 199 # we're outside the repo. return an absolute path.
188 200 return cwd
189 201
190 202 def pathto(self, f, cwd=None):
191 203 if cwd is None:
192 204 cwd = self.getcwd()
193 205 path = util.pathto(self._root, cwd, f)
194 206 if self._slash:
195 207 return util.normpath(path)
196 208 return path
197 209
198 210 def __getitem__(self, key):
199 211 '''Return the current state of key (a filename) in the dirstate.
200 212
201 213 States are:
202 214 n normal
203 215 m needs merging
204 216 r marked for removal
205 217 a marked for addition
206 218 ? not tracked
207 219 '''
208 220 return self._map.get(key, ("?",))[0]
209 221
210 222 def __contains__(self, key):
211 223 return key in self._map
212 224
213 225 def __iter__(self):
214 226 for x in sorted(self._map):
215 227 yield x
216 228
217 229 def parents(self):
218 230 return [self._validate(p) for p in self._pl]
219 231
220 232 def p1(self):
221 233 return self._validate(self._pl[0])
222 234
223 235 def p2(self):
224 236 return self._validate(self._pl[1])
225 237
226 238 def branch(self):
227 239 return encoding.tolocal(self._branch)
228 240
229 241 def setparents(self, p1, p2=nullid):
230 242 self._dirty = self._dirtypl = True
231 243 self._pl = p1, p2
232 244
233 245 def setbranch(self, branch):
234 246 if branch in ['tip', '.', 'null']:
235 247 raise util.Abort(_('the name \'%s\' is reserved') % branch)
236 248 self._branch = encoding.fromlocal(branch)
237 249 self._opener.write("branch", self._branch + '\n')
238 250
239 251 def _read(self):
240 252 self._map = {}
241 253 self._copymap = {}
242 254 try:
243 255 st = self._opener.read("dirstate")
244 256 except IOError, err:
245 257 if err.errno != errno.ENOENT:
246 258 raise
247 259 return
248 260 if not st:
249 261 return
250 262
251 263 p = parsers.parse_dirstate(self._map, self._copymap, st)
252 264 if not self._dirtypl:
253 265 self._pl = p
254 266
255 267 def invalidate(self):
256 268 for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
257 269 "_ignore"):
258 270 if a in self.__dict__:
259 271 delattr(self, a)
260 272 self._lastnormaltime = 0
261 273 self._dirty = False
262 274
263 275 def copy(self, source, dest):
264 276 """Mark dest as a copy of source. Unmark dest if source is None."""
265 277 if source == dest:
266 278 return
267 279 self._dirty = True
268 280 if source is not None:
269 281 self._copymap[dest] = source
270 282 elif dest in self._copymap:
271 283 del self._copymap[dest]
272 284
273 285 def copied(self, file):
274 286 return self._copymap.get(file, None)
275 287
276 288 def copies(self):
277 289 return self._copymap
278 290
279 291 def _droppath(self, f):
280 292 if self[f] not in "?r" and "_dirs" in self.__dict__:
281 293 _decdirs(self._dirs, f)
282 294
283 295 def _addpath(self, f, check=False):
284 296 oldstate = self[f]
285 297 if check or oldstate == "r":
286 298 scmutil.checkfilename(f)
287 299 if f in self._dirs:
288 300 raise util.Abort(_('directory %r already in dirstate') % f)
289 301 # shadows
290 302 for d in _finddirs(f):
291 303 if d in self._dirs:
292 304 break
293 305 if d in self._map and self[d] != 'r':
294 306 raise util.Abort(
295 307 _('file %r in dirstate clashes with %r') % (d, f))
296 308 if oldstate in "?r" and "_dirs" in self.__dict__:
297 309 _incdirs(self._dirs, f)
298 310
299 311 def normal(self, f):
300 312 '''Mark a file normal and clean.'''
301 313 self._dirty = True
302 314 self._addpath(f)
303 315 s = os.lstat(self._join(f))
304 316 mtime = int(s.st_mtime)
305 317 self._map[f] = ('n', s.st_mode, s.st_size, mtime)
306 318 if f in self._copymap:
307 319 del self._copymap[f]
308 320 if mtime > self._lastnormaltime:
309 321 # Remember the most recent modification timeslot for status(),
310 322 # to make sure we won't miss future size-preserving file content
311 323 # modifications that happen within the same timeslot.
312 324 self._lastnormaltime = mtime
313 325
314 326 def normallookup(self, f):
315 327 '''Mark a file normal, but possibly dirty.'''
316 328 if self._pl[1] != nullid and f in self._map:
317 329 # if there is a merge going on and the file was either
318 330 # in state 'm' (-1) or coming from other parent (-2) before
319 331 # being removed, restore that state.
320 332 entry = self._map[f]
321 333 if entry[0] == 'r' and entry[2] in (-1, -2):
322 334 source = self._copymap.get(f)
323 335 if entry[2] == -1:
324 336 self.merge(f)
325 337 elif entry[2] == -2:
326 338 self.otherparent(f)
327 339 if source:
328 340 self.copy(source, f)
329 341 return
330 342 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
331 343 return
332 344 self._dirty = True
333 345 self._addpath(f)
334 346 self._map[f] = ('n', 0, -1, -1)
335 347 if f in self._copymap:
336 348 del self._copymap[f]
337 349
338 350 def otherparent(self, f):
339 351 '''Mark as coming from the other parent, always dirty.'''
340 352 if self._pl[1] == nullid:
341 353 raise util.Abort(_("setting %r to other parent "
342 354 "only allowed in merges") % f)
343 355 self._dirty = True
344 356 self._addpath(f)
345 357 self._map[f] = ('n', 0, -2, -1)
346 358 if f in self._copymap:
347 359 del self._copymap[f]
348 360
349 361 def add(self, f):
350 362 '''Mark a file added.'''
351 363 self._dirty = True
352 364 self._addpath(f, True)
353 365 self._map[f] = ('a', 0, -1, -1)
354 366 if f in self._copymap:
355 367 del self._copymap[f]
356 368
357 369 def remove(self, f):
358 370 '''Mark a file removed.'''
359 371 self._dirty = True
360 372 self._droppath(f)
361 373 size = 0
362 374 if self._pl[1] != nullid and f in self._map:
363 375 # backup the previous state
364 376 entry = self._map[f]
365 377 if entry[0] == 'm': # merge
366 378 size = -1
367 379 elif entry[0] == 'n' and entry[2] == -2: # other parent
368 380 size = -2
369 381 self._map[f] = ('r', 0, size, 0)
370 382 if size == 0 and f in self._copymap:
371 383 del self._copymap[f]
372 384
373 385 def merge(self, f):
374 386 '''Mark a file merged.'''
375 387 self._dirty = True
376 388 s = os.lstat(self._join(f))
377 389 self._addpath(f)
378 390 self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime))
379 391 if f in self._copymap:
380 392 del self._copymap[f]
381 393
382 394 def drop(self, f):
383 395 '''Drop a file from the dirstate'''
384 396 if f in self._map:
385 397 self._dirty = True
386 398 self._droppath(f)
387 399 del self._map[f]
388 400
389 401 def _normalize(self, path, isknown):
390 402 normed = util.normcase(path)
391 403 folded = self._foldmap.get(normed, None)
392 404 if folded is None:
393 405 if isknown or not os.path.lexists(os.path.join(self._root, path)):
394 406 folded = path
395 407 else:
396 408 folded = self._foldmap.setdefault(normed,
397 409 util.fspath(normed, self._normroot))
398 410 return folded
399 411
400 412 def normalize(self, path, isknown=False):
401 413 '''
402 414 normalize the case of a pathname when on a casefolding filesystem
403 415
404 416 isknown specifies whether the filename came from walking the
405 417 disk, to avoid extra filesystem access
406 418
407 419 The normalized case is determined based on the following precedence:
408 420
409 421 - version of name already stored in the dirstate
410 422 - version of name stored on disk
411 423 - version provided via command arguments
412 424 '''
413 425
414 426 if self._checkcase:
415 427 return self._normalize(path, isknown)
416 428 return path
417 429
418 430 def clear(self):
419 431 self._map = {}
420 432 if "_dirs" in self.__dict__:
421 433 delattr(self, "_dirs")
422 434 self._copymap = {}
423 435 self._pl = [nullid, nullid]
424 436 self._lastnormaltime = 0
425 437 self._dirty = True
426 438
427 439 def rebuild(self, parent, files):
428 440 self.clear()
429 441 for f in files:
430 442 if 'x' in files.flags(f):
431 443 self._map[f] = ('n', 0777, -1, 0)
432 444 else:
433 445 self._map[f] = ('n', 0666, -1, 0)
434 446 self._pl = (parent, nullid)
435 447 self._dirty = True
436 448
437 449 def write(self):
438 450 if not self._dirty:
439 451 return
440 452 st = self._opener("dirstate", "w", atomictemp=True)
441 453
442 454 # use the modification time of the newly created temporary file as the
443 455 # filesystem's notion of 'now'
444 456 now = int(util.fstat(st).st_mtime)
445 457
446 458 cs = cStringIO.StringIO()
447 459 copymap = self._copymap
448 460 pack = struct.pack
449 461 write = cs.write
450 462 write("".join(self._pl))
451 463 for f, e in self._map.iteritems():
452 464 if e[0] == 'n' and e[3] == now:
453 465 # The file was last modified "simultaneously" with the current
454 466 # write to dirstate (i.e. within the same second for file-
455 467 # systems with a granularity of 1 sec). This commonly happens
456 468 # for at least a couple of files on 'update'.
457 469 # The user could change the file without changing its size
458 470 # within the same second. Invalidate the file's stat data in
459 471 # dirstate, forcing future 'status' calls to compare the
460 472 # contents of the file. This prevents mistakenly treating such
461 473 # files as clean.
462 474 e = (e[0], 0, -1, -1) # mark entry as 'unset'
463 475 self._map[f] = e
464 476
465 477 if f in copymap:
466 478 f = "%s\0%s" % (f, copymap[f])
467 479 e = pack(_format, e[0], e[1], e[2], e[3], len(f))
468 480 write(e)
469 481 write(f)
470 482 st.write(cs.getvalue())
471 483 st.close()
472 484 self._lastnormaltime = 0
473 485 self._dirty = self._dirtypl = False
474 486
475 487 def _dirignore(self, f):
476 488 if f == '.':
477 489 return False
478 490 if self._ignore(f):
479 491 return True
480 492 for p in _finddirs(f):
481 493 if self._ignore(p):
482 494 return True
483 495 return False
484 496
485 497 def walk(self, match, subrepos, unknown, ignored):
486 498 '''
487 499 Walk recursively through the directory tree, finding all files
488 500 matched by match.
489 501
490 502 Return a dict mapping filename to stat-like object (either
491 503 mercurial.osutil.stat instance or return value of os.stat()).
492 504 '''
493 505
494 506 def fwarn(f, msg):
495 507 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
496 508 return False
497 509
498 510 def badtype(mode):
499 511 kind = _('unknown')
500 512 if stat.S_ISCHR(mode):
501 513 kind = _('character device')
502 514 elif stat.S_ISBLK(mode):
503 515 kind = _('block device')
504 516 elif stat.S_ISFIFO(mode):
505 517 kind = _('fifo')
506 518 elif stat.S_ISSOCK(mode):
507 519 kind = _('socket')
508 520 elif stat.S_ISDIR(mode):
509 521 kind = _('directory')
510 522 return _('unsupported file type (type is %s)') % kind
511 523
512 524 ignore = self._ignore
513 525 dirignore = self._dirignore
514 526 if ignored:
515 527 ignore = util.never
516 528 dirignore = util.never
517 529 elif not unknown:
518 530 # if unknown and ignored are False, skip step 2
519 531 ignore = util.always
520 532 dirignore = util.always
521 533
522 534 matchfn = match.matchfn
523 535 badfn = match.bad
524 536 dmap = self._map
525 537 normpath = util.normpath
526 538 listdir = osutil.listdir
527 539 lstat = os.lstat
528 540 getkind = stat.S_IFMT
529 541 dirkind = stat.S_IFDIR
530 542 regkind = stat.S_IFREG
531 543 lnkkind = stat.S_IFLNK
532 544 join = self._join
533 545 work = []
534 546 wadd = work.append
535 547
536 548 exact = skipstep3 = False
537 549 if matchfn == match.exact: # match.exact
538 550 exact = True
539 551 dirignore = util.always # skip step 2
540 552 elif match.files() and not match.anypats(): # match.match, no patterns
541 553 skipstep3 = True
542 554
543 555 if self._checkcase:
544 556 normalize = self._normalize
545 557 skipstep3 = False
546 558 else:
547 559 normalize = lambda x, y: x
548 560
549 561 files = sorted(match.files())
550 562 subrepos.sort()
551 563 i, j = 0, 0
552 564 while i < len(files) and j < len(subrepos):
553 565 subpath = subrepos[j] + "/"
554 566 if files[i] < subpath:
555 567 i += 1
556 568 continue
557 569 while i < len(files) and files[i].startswith(subpath):
558 570 del files[i]
559 571 j += 1
560 572
561 573 if not files or '.' in files:
562 574 files = ['']
563 575 results = dict.fromkeys(subrepos)
564 576 results['.hg'] = None
565 577
566 578 # step 1: find all explicit files
567 579 for ff in files:
568 580 nf = normalize(normpath(ff), False)
569 581 if nf in results:
570 582 continue
571 583
572 584 try:
573 585 st = lstat(join(nf))
574 586 kind = getkind(st.st_mode)
575 587 if kind == dirkind:
576 588 skipstep3 = False
577 589 if nf in dmap:
578 590 #file deleted on disk but still in dirstate
579 591 results[nf] = None
580 592 match.dir(nf)
581 593 if not dirignore(nf):
582 594 wadd(nf)
583 595 elif kind == regkind or kind == lnkkind:
584 596 results[nf] = st
585 597 else:
586 598 badfn(ff, badtype(kind))
587 599 if nf in dmap:
588 600 results[nf] = None
589 601 except OSError, inst:
590 602 if nf in dmap: # does it exactly match a file?
591 603 results[nf] = None
592 604 else: # does it match a directory?
593 605 prefix = nf + "/"
594 606 for fn in dmap:
595 607 if fn.startswith(prefix):
596 608 match.dir(nf)
597 609 skipstep3 = False
598 610 break
599 611 else:
600 612 badfn(ff, inst.strerror)
601 613
602 614 # step 2: visit subdirectories
603 615 while work:
604 616 nd = work.pop()
605 617 skip = None
606 618 if nd == '.':
607 619 nd = ''
608 620 else:
609 621 skip = '.hg'
610 622 try:
611 623 entries = listdir(join(nd), stat=True, skip=skip)
612 624 except OSError, inst:
613 625 if inst.errno == errno.EACCES:
614 626 fwarn(nd, inst.strerror)
615 627 continue
616 628 raise
617 629 for f, kind, st in entries:
618 630 nf = normalize(nd and (nd + "/" + f) or f, True)
619 631 if nf not in results:
620 632 if kind == dirkind:
621 633 if not ignore(nf):
622 634 match.dir(nf)
623 635 wadd(nf)
624 636 if nf in dmap and matchfn(nf):
625 637 results[nf] = None
626 638 elif kind == regkind or kind == lnkkind:
627 639 if nf in dmap:
628 640 if matchfn(nf):
629 641 results[nf] = st
630 642 elif matchfn(nf) and not ignore(nf):
631 643 results[nf] = st
632 644 elif nf in dmap and matchfn(nf):
633 645 results[nf] = None
634 646
635 647 # step 3: report unseen items in the dmap hash
636 648 if not skipstep3 and not exact:
637 649 visit = sorted([f for f in dmap if f not in results and matchfn(f)])
638 650 for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
639 651 if not st is None and not getkind(st.st_mode) in (regkind, lnkkind):
640 652 st = None
641 653 results[nf] = st
642 654 for s in subrepos:
643 655 del results[s]
644 656 del results['.hg']
645 657 return results
646 658
647 659 def status(self, match, subrepos, ignored, clean, unknown):
648 660 '''Determine the status of the working copy relative to the
649 661 dirstate and return a tuple of lists (unsure, modified, added,
650 662 removed, deleted, unknown, ignored, clean), where:
651 663
652 664 unsure:
653 665 files that might have been modified since the dirstate was
654 666 written, but need to be read to be sure (size is the same
655 667 but mtime differs)
656 668 modified:
657 669 files that have definitely been modified since the dirstate
658 670 was written (different size or mode)
659 671 added:
660 672 files that have been explicitly added with hg add
661 673 removed:
662 674 files that have been explicitly removed with hg remove
663 675 deleted:
664 676 files that have been deleted through other means ("missing")
665 677 unknown:
666 678 files not in the dirstate that are not ignored
667 679 ignored:
668 680 files not in the dirstate that are ignored
669 681 (by _dirignore())
670 682 clean:
671 683 files that have definitely not been modified since the
672 684 dirstate was written
673 685 '''
674 686 listignored, listclean, listunknown = ignored, clean, unknown
675 687 lookup, modified, added, unknown, ignored = [], [], [], [], []
676 688 removed, deleted, clean = [], [], []
677 689
678 690 dmap = self._map
679 691 ladd = lookup.append # aka "unsure"
680 692 madd = modified.append
681 693 aadd = added.append
682 694 uadd = unknown.append
683 695 iadd = ignored.append
684 696 radd = removed.append
685 697 dadd = deleted.append
686 698 cadd = clean.append
687 699
688 700 lnkkind = stat.S_IFLNK
689 701
690 702 for fn, st in self.walk(match, subrepos, listunknown,
691 703 listignored).iteritems():
692 704 if fn not in dmap:
693 705 if (listignored or match.exact(fn)) and self._dirignore(fn):
694 706 if listignored:
695 707 iadd(fn)
696 708 elif listunknown:
697 709 uadd(fn)
698 710 continue
699 711
700 712 state, mode, size, time = dmap[fn]
701 713
702 714 if not st and state in "nma":
703 715 dadd(fn)
704 716 elif state == 'n':
705 717 # The "mode & lnkkind != lnkkind or self._checklink"
706 718 # lines are an expansion of "islink => checklink"
707 719 # where islink means "is this a link?" and checklink
708 720 # means "can we check links?".
709 721 mtime = int(st.st_mtime)
710 722 if (size >= 0 and
711 723 (size != st.st_size
712 724 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
713 725 and (mode & lnkkind != lnkkind or self._checklink)
714 726 or size == -2 # other parent
715 727 or fn in self._copymap):
716 728 madd(fn)
717 729 elif (mtime != time
718 730 and (mode & lnkkind != lnkkind or self._checklink)):
719 731 ladd(fn)
720 732 elif mtime == self._lastnormaltime:
721 733 # fn may have been changed in the same timeslot without
722 734 # changing its size. This can happen if we quickly do
723 735 # multiple commits in a single transaction.
724 736 # Force lookup, so we don't miss such a racy file change.
725 737 ladd(fn)
726 738 elif listclean:
727 739 cadd(fn)
728 740 elif state == 'm':
729 741 madd(fn)
730 742 elif state == 'a':
731 743 aadd(fn)
732 744 elif state == 'r':
733 745 radd(fn)
734 746
735 747 return (lookup, modified, added, removed, deleted, unknown, ignored,
736 748 clean)
@@ -1,269 +1,271 b''
1 1 # filemerge.py - file-level merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import short
9 9 from i18n import _
10 10 import util, simplemerge, match, error
11 11 import os, tempfile, re, filecmp
12 12
13 13 def _toolstr(ui, tool, part, default=""):
14 14 return ui.config("merge-tools", tool + "." + part, default)
15 15
16 16 def _toolbool(ui, tool, part, default=False):
17 17 return ui.configbool("merge-tools", tool + "." + part, default)
18 18
19 19 def _toollist(ui, tool, part, default=[]):
20 20 return ui.configlist("merge-tools", tool + "." + part, default)
21 21
22 22 _internal = ['internal:' + s
23 23 for s in 'fail local other merge prompt dump'.split()]
24 24
25 25 def _findtool(ui, tool):
26 26 if tool in _internal:
27 27 return tool
28 28 for kn in ("regkey", "regkeyalt"):
29 29 k = _toolstr(ui, tool, kn)
30 30 if not k:
31 31 continue
32 32 p = util.lookupreg(k, _toolstr(ui, tool, "regname"))
33 33 if p:
34 34 p = util.findexe(p + _toolstr(ui, tool, "regappend"))
35 35 if p:
36 36 return p
37 37 exe = _toolstr(ui, tool, "executable", tool)
38 38 return util.findexe(util.expandpath(exe))
39 39
40 40 def _picktool(repo, ui, path, binary, symlink):
41 41 def check(tool, pat, symlink, binary):
42 42 tmsg = tool
43 43 if pat:
44 44 tmsg += " specified for " + pat
45 45 if not _findtool(ui, tool):
46 46 if pat: # explicitly requested tool deserves a warning
47 47 ui.warn(_("couldn't find merge tool %s\n") % tmsg)
48 48 else: # configured but non-existing tools are more silent
49 49 ui.note(_("couldn't find merge tool %s\n") % tmsg)
50 50 elif symlink and not _toolbool(ui, tool, "symlink"):
51 51 ui.warn(_("tool %s can't handle symlinks\n") % tmsg)
52 52 elif binary and not _toolbool(ui, tool, "binary"):
53 53 ui.warn(_("tool %s can't handle binary\n") % tmsg)
54 54 elif not util.gui() and _toolbool(ui, tool, "gui"):
55 55 ui.warn(_("tool %s requires a GUI\n") % tmsg)
56 56 else:
57 57 return True
58 58 return False
59 59
60 60 # forcemerge comes from command line arguments, highest priority
61 61 force = ui.config('ui', 'forcemerge')
62 62 if force:
63 63 toolpath = _findtool(ui, force)
64 64 if toolpath:
65 65 return (force, '"' + toolpath + '"')
66 66 else:
67 67 # mimic HGMERGE if given tool not found
68 68 return (force, force)
69 69
70 70 # HGMERGE takes next precedence
71 71 hgmerge = os.environ.get("HGMERGE")
72 72 if hgmerge:
73 73 return (hgmerge, hgmerge)
74 74
75 75 # then patterns
76 76 for pat, tool in ui.configitems("merge-patterns"):
77 77 mf = match.match(repo.root, '', [pat])
78 78 if mf(path) and check(tool, pat, symlink, False):
79 79 toolpath = _findtool(ui, tool)
80 80 return (tool, '"' + toolpath + '"')
81 81
82 82 # then merge tools
83 83 tools = {}
84 84 for k, v in ui.configitems("merge-tools"):
85 85 t = k.split('.')[0]
86 86 if t not in tools:
87 87 tools[t] = int(_toolstr(ui, t, "priority", "0"))
88 88 names = tools.keys()
89 89 tools = sorted([(-p, t) for t, p in tools.items()])
90 90 uimerge = ui.config("ui", "merge")
91 91 if uimerge:
92 92 if uimerge not in names:
93 93 return (uimerge, uimerge)
94 94 tools.insert(0, (None, uimerge)) # highest priority
95 95 tools.append((None, "hgmerge")) # the old default, if found
96 96 for p, t in tools:
97 97 if check(t, None, symlink, binary):
98 98 toolpath = _findtool(ui, t)
99 99 return (t, '"' + toolpath + '"')
100 100 # internal merge as last resort
101 101 return (not (symlink or binary) and "internal:merge" or None, None)
102 102
103 103 def _eoltype(data):
104 104 "Guess the EOL type of a file"
105 105 if '\0' in data: # binary
106 106 return None
107 107 if '\r\n' in data: # Windows
108 108 return '\r\n'
109 109 if '\r' in data: # Old Mac
110 110 return '\r'
111 111 if '\n' in data: # UNIX
112 112 return '\n'
113 113 return None # unknown
114 114
115 115 def _matcheol(file, origfile):
116 116 "Convert EOL markers in a file to match origfile"
117 117 tostyle = _eoltype(util.readfile(origfile))
118 118 if tostyle:
119 119 data = util.readfile(file)
120 120 style = _eoltype(data)
121 121 if style:
122 122 newdata = data.replace(style, tostyle)
123 123 if newdata != data:
124 124 util.writefile(file, newdata)
125 125
126 126 def filemerge(repo, mynode, orig, fcd, fco, fca):
127 127 """perform a 3-way merge in the working directory
128 128
129 129 mynode = parent node before merge
130 130 orig = original local filename before merge
131 131 fco = other file context
132 132 fca = ancestor file context
133 133 fcd = local file context for current/destination file
134 134 """
135 135
136 136 def temp(prefix, ctx):
137 137 pre = "%s~%s." % (os.path.basename(ctx.path()), prefix)
138 138 (fd, name) = tempfile.mkstemp(prefix=pre)
139 139 data = repo.wwritedata(ctx.path(), ctx.data())
140 140 f = os.fdopen(fd, "wb")
141 141 f.write(data)
142 142 f.close()
143 143 return name
144 144
145 145 if not fco.cmp(fcd): # files identical?
146 146 return None
147 147
148 148 ui = repo.ui
149 149 fd = fcd.path()
150 150 binary = fcd.isbinary() or fco.isbinary() or fca.isbinary()
151 151 symlink = 'l' in fcd.flags() + fco.flags()
152 152 tool, toolpath = _picktool(repo, ui, fd, binary, symlink)
153 153 ui.debug("picked tool '%s' for %s (binary %s symlink %s)\n" %
154 154 (tool, fd, binary, symlink))
155 155
156 156 if not tool or tool == 'internal:prompt':
157 157 tool = "internal:local"
158 158 if ui.promptchoice(_(" no tool found to merge %s\n"
159 159 "keep (l)ocal or take (o)ther?") % fd,
160 160 (_("&Local"), _("&Other")), 0):
161 161 tool = "internal:other"
162 162 if tool == "internal:local":
163 163 return 0
164 164 if tool == "internal:other":
165 165 repo.wwrite(fd, fco.data(), fco.flags())
166 166 return 0
167 167 if tool == "internal:fail":
168 168 return 1
169 169
170 170 # do the actual merge
171 171 a = repo.wjoin(fd)
172 172 b = temp("base", fca)
173 173 c = temp("other", fco)
174 174 out = ""
175 175 back = a + ".orig"
176 176 util.copyfile(a, back)
177 177
178 178 if orig != fco.path():
179 179 ui.status(_("merging %s and %s to %s\n") % (orig, fco.path(), fd))
180 180 else:
181 181 ui.status(_("merging %s\n") % fd)
182 182
183 183 ui.debug("my %s other %s ancestor %s\n" % (fcd, fco, fca))
184 184
185 185 # do we attempt to simplemerge first?
186 186 try:
187 187 premerge = _toolbool(ui, tool, "premerge", not (binary or symlink))
188 188 except error.ConfigError:
189 189 premerge = _toolstr(ui, tool, "premerge").lower()
190 190 valid = 'keep'.split()
191 191 if premerge not in valid:
192 192 _valid = ', '.join(["'" + v + "'" for v in valid])
193 193 raise error.ConfigError(_("%s.premerge not valid "
194 194 "('%s' is neither boolean nor %s)") %
195 195 (tool, premerge, _valid))
196 196
197 197 if premerge:
198 198 r = simplemerge.simplemerge(ui, a, b, c, quiet=True)
199 199 if not r:
200 200 ui.debug(" premerge successful\n")
201 201 os.unlink(back)
202 202 os.unlink(b)
203 203 os.unlink(c)
204 204 return 0
205 205 if premerge != 'keep':
206 206 util.copyfile(back, a) # restore from backup and try again
207 207
208 208 env = dict(HG_FILE=fd,
209 209 HG_MY_NODE=short(mynode),
210 210 HG_OTHER_NODE=str(fco.changectx()),
211 211 HG_BASE_NODE=str(fca.changectx()),
212 212 HG_MY_ISLINK='l' in fcd.flags(),
213 213 HG_OTHER_ISLINK='l' in fco.flags(),
214 214 HG_BASE_ISLINK='l' in fca.flags())
215 215
216 216 if tool == "internal:merge":
217 217 r = simplemerge.simplemerge(ui, a, b, c, label=['local', 'other'])
218 218 elif tool == 'internal:dump':
219 219 a = repo.wjoin(fd)
220 220 util.copyfile(a, a + ".local")
221 221 repo.wwrite(fd + ".other", fco.data(), fco.flags())
222 222 repo.wwrite(fd + ".base", fca.data(), fca.flags())
223 os.unlink(b)
224 os.unlink(c)
223 225 return 1 # unresolved
224 226 else:
225 227 args = _toolstr(ui, tool, "args", '$local $base $other')
226 228 if "$output" in args:
227 229 out, a = a, back # read input from backup, write to original
228 230 replace = dict(local=a, base=b, other=c, output=out)
229 231 args = util.interpolate(r'\$', replace, args,
230 232 lambda s: '"%s"' % util.localpath(s))
231 233 r = util.system(toolpath + ' ' + args, cwd=repo.root, environ=env,
232 234 out=ui.fout)
233 235
234 236 if not r and (_toolbool(ui, tool, "checkconflicts") or
235 237 'conflicts' in _toollist(ui, tool, "check")):
236 238 if re.search("^(<<<<<<< .*|=======|>>>>>>> .*)$", fcd.data(),
237 239 re.MULTILINE):
238 240 r = 1
239 241
240 242 checked = False
241 243 if 'prompt' in _toollist(ui, tool, "check"):
242 244 checked = True
243 245 if ui.promptchoice(_("was merge of '%s' successful (yn)?") % fd,
244 246 (_("&Yes"), _("&No")), 1):
245 247 r = 1
246 248
247 249 if not r and not checked and (_toolbool(ui, tool, "checkchanged") or
248 250 'changed' in _toollist(ui, tool, "check")):
249 251 if filecmp.cmp(repo.wjoin(fd), back):
250 252 if ui.promptchoice(_(" output file %s appears unchanged\n"
251 253 "was merge successful (yn)?") % fd,
252 254 (_("&Yes"), _("&No")), 1):
253 255 r = 1
254 256
255 257 if _toolbool(ui, tool, "fixeol"):
256 258 _matcheol(repo.wjoin(fd), back)
257 259
258 260 if r:
259 261 if tool == "internal:merge":
260 262 ui.warn(_("merging %s incomplete! "
261 263 "(edit conflicts, then use 'hg resolve --mark')\n") % fd)
262 264 else:
263 265 ui.warn(_("merging %s failed!\n") % fd)
264 266 else:
265 267 os.unlink(back)
266 268
267 269 os.unlink(b)
268 270 os.unlink(c)
269 271 return r
@@ -1,2316 +1,2324 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 class storecache(filecache):
23 """filecache for files in the store"""
24 def join(self, obj, fname):
25 return obj.sjoin(fname)
26
22 27 class localrepository(repo.repository):
23 28 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
24 29 'known', 'getbundle'))
25 30 supportedformats = set(('revlogv1', 'generaldelta'))
26 31 supported = supportedformats | set(('store', 'fncache', 'shared',
27 32 'dotencode'))
28 33
29 34 def __init__(self, baseui, path=None, create=False):
30 35 repo.repository.__init__(self)
31 36 self.root = os.path.realpath(util.expandpath(path))
32 37 self.path = os.path.join(self.root, ".hg")
33 38 self.origroot = path
34 39 self.auditor = scmutil.pathauditor(self.root, self._checknested)
35 40 self.opener = scmutil.opener(self.path)
36 41 self.wopener = scmutil.opener(self.root)
37 42 self.baseui = baseui
38 43 self.ui = baseui.copy()
39 44 self._dirtyphases = False
40 45 # A list of callback to shape the phase if no data were found.
41 46 # Callback are in the form: func(repo, roots) --> processed root.
42 47 # This list it to be filled by extension during repo setup
43 48 self._phasedefaults = []
44 49
45 50 try:
46 51 self.ui.readconfig(self.join("hgrc"), self.root)
47 52 extensions.loadall(self.ui)
48 53 except IOError:
49 54 pass
50 55
51 56 if not os.path.isdir(self.path):
52 57 if create:
53 58 if not os.path.exists(path):
54 59 util.makedirs(path)
55 60 util.makedir(self.path, notindexed=True)
56 61 requirements = ["revlogv1"]
57 62 if self.ui.configbool('format', 'usestore', True):
58 63 os.mkdir(os.path.join(self.path, "store"))
59 64 requirements.append("store")
60 65 if self.ui.configbool('format', 'usefncache', True):
61 66 requirements.append("fncache")
62 67 if self.ui.configbool('format', 'dotencode', True):
63 68 requirements.append('dotencode')
64 69 # create an invalid changelog
65 70 self.opener.append(
66 71 "00changelog.i",
67 72 '\0\0\0\2' # represents revlogv2
68 73 ' dummy changelog to prevent using the old repo layout'
69 74 )
70 75 if self.ui.configbool('format', 'generaldelta', False):
71 76 requirements.append("generaldelta")
72 77 requirements = set(requirements)
73 78 else:
74 79 raise error.RepoError(_("repository %s not found") % path)
75 80 elif create:
76 81 raise error.RepoError(_("repository %s already exists") % path)
77 82 else:
78 83 try:
79 84 requirements = scmutil.readrequires(self.opener, self.supported)
80 85 except IOError, inst:
81 86 if inst.errno != errno.ENOENT:
82 87 raise
83 88 requirements = set()
84 89
85 90 self.sharedpath = self.path
86 91 try:
87 92 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
88 93 if not os.path.exists(s):
89 94 raise error.RepoError(
90 95 _('.hg/sharedpath points to nonexistent directory %s') % s)
91 96 self.sharedpath = s
92 97 except IOError, inst:
93 98 if inst.errno != errno.ENOENT:
94 99 raise
95 100
96 101 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
97 102 self.spath = self.store.path
98 103 self.sopener = self.store.opener
99 104 self.sjoin = self.store.join
100 105 self.opener.createmode = self.store.createmode
101 106 self._applyrequirements(requirements)
102 107 if create:
103 108 self._writerequirements()
104 109
105 110
106 111 self._branchcache = None
107 112 self._branchcachetip = None
108 113 self.filterpats = {}
109 114 self._datafilters = {}
110 115 self._transref = self._lockref = self._wlockref = None
111 116
112 117 # A cache for various files under .hg/ that tracks file changes,
113 118 # (used by the filecache decorator)
114 119 #
115 120 # Maps a property name to its util.filecacheentry
116 121 self._filecache = {}
117 122
118 123 def _applyrequirements(self, requirements):
119 124 self.requirements = requirements
120 125 openerreqs = set(('revlogv1', 'generaldelta'))
121 126 self.sopener.options = dict((r, 1) for r in requirements
122 127 if r in openerreqs)
123 128
124 129 def _writerequirements(self):
125 130 reqfile = self.opener("requires", "w")
126 131 for r in self.requirements:
127 132 reqfile.write("%s\n" % r)
128 133 reqfile.close()
129 134
130 135 def _checknested(self, path):
131 136 """Determine if path is a legal nested repository."""
132 137 if not path.startswith(self.root):
133 138 return False
134 139 subpath = path[len(self.root) + 1:]
135 140 normsubpath = util.pconvert(subpath)
136 141
137 142 # XXX: Checking against the current working copy is wrong in
138 143 # the sense that it can reject things like
139 144 #
140 145 # $ hg cat -r 10 sub/x.txt
141 146 #
142 147 # if sub/ is no longer a subrepository in the working copy
143 148 # parent revision.
144 149 #
145 150 # However, it can of course also allow things that would have
146 151 # been rejected before, such as the above cat command if sub/
147 152 # is a subrepository now, but was a normal directory before.
148 153 # The old path auditor would have rejected by mistake since it
149 154 # panics when it sees sub/.hg/.
150 155 #
151 156 # All in all, checking against the working copy seems sensible
152 157 # since we want to prevent access to nested repositories on
153 158 # the filesystem *now*.
154 159 ctx = self[None]
155 160 parts = util.splitpath(subpath)
156 161 while parts:
157 162 prefix = '/'.join(parts)
158 163 if prefix in ctx.substate:
159 164 if prefix == normsubpath:
160 165 return True
161 166 else:
162 167 sub = ctx.sub(prefix)
163 168 return sub.checknested(subpath[len(prefix) + 1:])
164 169 else:
165 170 parts.pop()
166 171 return False
167 172
168 173 @filecache('bookmarks')
169 174 def _bookmarks(self):
170 175 return bookmarks.read(self)
171 176
172 177 @filecache('bookmarks.current')
173 178 def _bookmarkcurrent(self):
174 179 return bookmarks.readcurrent(self)
175 180
176 181 def _writebookmarks(self, marks):
177 182 bookmarks.write(self)
178 183
179 @filecache('phaseroots', True)
184 @storecache('phaseroots')
180 185 def _phaseroots(self):
181 186 self._dirtyphases = False
182 187 phaseroots = phases.readroots(self)
183 188 phases.filterunknown(self, phaseroots)
184 189 return phaseroots
185 190
186 191 @propertycache
187 192 def _phaserev(self):
188 193 cache = [phases.public] * len(self)
189 194 for phase in phases.trackedphases:
190 195 roots = map(self.changelog.rev, self._phaseroots[phase])
191 196 if roots:
192 197 for rev in roots:
193 198 cache[rev] = phase
194 199 for rev in self.changelog.descendants(*roots):
195 200 cache[rev] = phase
196 201 return cache
197 202
198 @filecache('00changelog.i', True)
203 @storecache('00changelog.i')
199 204 def changelog(self):
200 205 c = changelog.changelog(self.sopener)
201 206 if 'HG_PENDING' in os.environ:
202 207 p = os.environ['HG_PENDING']
203 208 if p.startswith(self.root):
204 209 c.readpending('00changelog.i.a')
205 210 return c
206 211
207 @filecache('00manifest.i', True)
212 @storecache('00manifest.i')
208 213 def manifest(self):
209 214 return manifest.manifest(self.sopener)
210 215
211 216 @filecache('dirstate')
212 217 def dirstate(self):
213 218 warned = [0]
214 219 def validate(node):
215 220 try:
216 221 self.changelog.rev(node)
217 222 return node
218 223 except error.LookupError:
219 224 if not warned[0]:
220 225 warned[0] = True
221 226 self.ui.warn(_("warning: ignoring unknown"
222 227 " working parent %s!\n") % short(node))
223 228 return nullid
224 229
225 230 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
226 231
227 232 def __getitem__(self, changeid):
228 233 if changeid is None:
229 234 return context.workingctx(self)
230 235 return context.changectx(self, changeid)
231 236
232 237 def __contains__(self, changeid):
233 238 try:
234 239 return bool(self.lookup(changeid))
235 240 except error.RepoLookupError:
236 241 return False
237 242
238 243 def __nonzero__(self):
239 244 return True
240 245
241 246 def __len__(self):
242 247 return len(self.changelog)
243 248
244 249 def __iter__(self):
245 250 for i in xrange(len(self)):
246 251 yield i
247 252
248 253 def revs(self, expr, *args):
249 254 '''Return a list of revisions matching the given revset'''
250 255 expr = revset.formatspec(expr, *args)
251 256 m = revset.match(None, expr)
252 257 return [r for r in m(self, range(len(self)))]
253 258
254 259 def set(self, expr, *args):
255 260 '''
256 261 Yield a context for each matching revision, after doing arg
257 262 replacement via revset.formatspec
258 263 '''
259 264 for r in self.revs(expr, *args):
260 265 yield self[r]
261 266
262 267 def url(self):
263 268 return 'file:' + self.root
264 269
265 270 def hook(self, name, throw=False, **args):
266 271 return hook.hook(self.ui, self, name, throw, **args)
267 272
268 273 tag_disallowed = ':\r\n'
269 274
270 275 def _tag(self, names, node, message, local, user, date, extra={}):
271 276 if isinstance(names, str):
272 277 allchars = names
273 278 names = (names,)
274 279 else:
275 280 allchars = ''.join(names)
276 281 for c in self.tag_disallowed:
277 282 if c in allchars:
278 283 raise util.Abort(_('%r cannot be used in a tag name') % c)
279 284
280 285 branches = self.branchmap()
281 286 for name in names:
282 287 self.hook('pretag', throw=True, node=hex(node), tag=name,
283 288 local=local)
284 289 if name in branches:
285 290 self.ui.warn(_("warning: tag %s conflicts with existing"
286 291 " branch name\n") % name)
287 292
288 293 def writetags(fp, names, munge, prevtags):
289 294 fp.seek(0, 2)
290 295 if prevtags and prevtags[-1] != '\n':
291 296 fp.write('\n')
292 297 for name in names:
293 298 m = munge and munge(name) or name
294 299 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
295 300 old = self.tags().get(name, nullid)
296 301 fp.write('%s %s\n' % (hex(old), m))
297 302 fp.write('%s %s\n' % (hex(node), m))
298 303 fp.close()
299 304
300 305 prevtags = ''
301 306 if local:
302 307 try:
303 308 fp = self.opener('localtags', 'r+')
304 309 except IOError:
305 310 fp = self.opener('localtags', 'a')
306 311 else:
307 312 prevtags = fp.read()
308 313
309 314 # local tags are stored in the current charset
310 315 writetags(fp, names, None, prevtags)
311 316 for name in names:
312 317 self.hook('tag', node=hex(node), tag=name, local=local)
313 318 return
314 319
315 320 try:
316 321 fp = self.wfile('.hgtags', 'rb+')
317 322 except IOError, e:
318 323 if e.errno != errno.ENOENT:
319 324 raise
320 325 fp = self.wfile('.hgtags', 'ab')
321 326 else:
322 327 prevtags = fp.read()
323 328
324 329 # committed tags are stored in UTF-8
325 330 writetags(fp, names, encoding.fromlocal, prevtags)
326 331
327 332 fp.close()
328 333
329 334 self.invalidatecaches()
330 335
331 336 if '.hgtags' not in self.dirstate:
332 337 self[None].add(['.hgtags'])
333 338
334 339 m = matchmod.exact(self.root, '', ['.hgtags'])
335 340 tagnode = self.commit(message, user, date, extra=extra, match=m)
336 341
337 342 for name in names:
338 343 self.hook('tag', node=hex(node), tag=name, local=local)
339 344
340 345 return tagnode
341 346
342 347 def tag(self, names, node, message, local, user, date):
343 348 '''tag a revision with one or more symbolic names.
344 349
345 350 names is a list of strings or, when adding a single tag, names may be a
346 351 string.
347 352
348 353 if local is True, the tags are stored in a per-repository file.
349 354 otherwise, they are stored in the .hgtags file, and a new
350 355 changeset is committed with the change.
351 356
352 357 keyword arguments:
353 358
354 359 local: whether to store tags in non-version-controlled file
355 360 (default False)
356 361
357 362 message: commit message to use if committing
358 363
359 364 user: name of user to use if committing
360 365
361 366 date: date tuple to use if committing'''
362 367
363 368 if not local:
364 369 for x in self.status()[:5]:
365 370 if '.hgtags' in x:
366 371 raise util.Abort(_('working copy of .hgtags is changed '
367 372 '(please commit .hgtags manually)'))
368 373
369 374 self.tags() # instantiate the cache
370 375 self._tag(names, node, message, local, user, date)
371 376
372 377 @propertycache
373 378 def _tagscache(self):
374 379 '''Returns a tagscache object that contains various tags related caches.'''
375 380
376 381 # This simplifies its cache management by having one decorated
377 382 # function (this one) and the rest simply fetch things from it.
378 383 class tagscache(object):
379 384 def __init__(self):
380 385 # These two define the set of tags for this repository. tags
381 386 # maps tag name to node; tagtypes maps tag name to 'global' or
382 387 # 'local'. (Global tags are defined by .hgtags across all
383 388 # heads, and local tags are defined in .hg/localtags.)
384 389 # They constitute the in-memory cache of tags.
385 390 self.tags = self.tagtypes = None
386 391
387 392 self.nodetagscache = self.tagslist = None
388 393
389 394 cache = tagscache()
390 395 cache.tags, cache.tagtypes = self._findtags()
391 396
392 397 return cache
393 398
394 399 def tags(self):
395 400 '''return a mapping of tag to node'''
396 401 return self._tagscache.tags
397 402
398 403 def _findtags(self):
399 404 '''Do the hard work of finding tags. Return a pair of dicts
400 405 (tags, tagtypes) where tags maps tag name to node, and tagtypes
401 406 maps tag name to a string like \'global\' or \'local\'.
402 407 Subclasses or extensions are free to add their own tags, but
403 408 should be aware that the returned dicts will be retained for the
404 409 duration of the localrepo object.'''
405 410
406 411 # XXX what tagtype should subclasses/extensions use? Currently
407 412 # mq and bookmarks add tags, but do not set the tagtype at all.
408 413 # Should each extension invent its own tag type? Should there
409 414 # be one tagtype for all such "virtual" tags? Or is the status
410 415 # quo fine?
411 416
412 417 alltags = {} # map tag name to (node, hist)
413 418 tagtypes = {}
414 419
415 420 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
416 421 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
417 422
418 423 # Build the return dicts. Have to re-encode tag names because
419 424 # the tags module always uses UTF-8 (in order not to lose info
420 425 # writing to the cache), but the rest of Mercurial wants them in
421 426 # local encoding.
422 427 tags = {}
423 428 for (name, (node, hist)) in alltags.iteritems():
424 429 if node != nullid:
425 430 try:
426 431 # ignore tags to unknown nodes
427 432 self.changelog.lookup(node)
428 433 tags[encoding.tolocal(name)] = node
429 434 except error.LookupError:
430 435 pass
431 436 tags['tip'] = self.changelog.tip()
432 437 tagtypes = dict([(encoding.tolocal(name), value)
433 438 for (name, value) in tagtypes.iteritems()])
434 439 return (tags, tagtypes)
435 440
436 441 def tagtype(self, tagname):
437 442 '''
438 443 return the type of the given tag. result can be:
439 444
440 445 'local' : a local tag
441 446 'global' : a global tag
442 447 None : tag does not exist
443 448 '''
444 449
445 450 return self._tagscache.tagtypes.get(tagname)
446 451
447 452 def tagslist(self):
448 453 '''return a list of tags ordered by revision'''
449 454 if not self._tagscache.tagslist:
450 455 l = []
451 456 for t, n in self.tags().iteritems():
452 457 r = self.changelog.rev(n)
453 458 l.append((r, t, n))
454 459 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
455 460
456 461 return self._tagscache.tagslist
457 462
458 463 def nodetags(self, node):
459 464 '''return the tags associated with a node'''
460 465 if not self._tagscache.nodetagscache:
461 466 nodetagscache = {}
462 467 for t, n in self.tags().iteritems():
463 468 nodetagscache.setdefault(n, []).append(t)
464 469 for tags in nodetagscache.itervalues():
465 470 tags.sort()
466 471 self._tagscache.nodetagscache = nodetagscache
467 472 return self._tagscache.nodetagscache.get(node, [])
468 473
469 474 def nodebookmarks(self, node):
470 475 marks = []
471 476 for bookmark, n in self._bookmarks.iteritems():
472 477 if n == node:
473 478 marks.append(bookmark)
474 479 return sorted(marks)
475 480
476 481 def _branchtags(self, partial, lrev):
477 482 # TODO: rename this function?
478 483 tiprev = len(self) - 1
479 484 if lrev != tiprev:
480 485 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
481 486 self._updatebranchcache(partial, ctxgen)
482 487 self._writebranchcache(partial, self.changelog.tip(), tiprev)
483 488
484 489 return partial
485 490
486 491 def updatebranchcache(self):
487 492 tip = self.changelog.tip()
488 493 if self._branchcache is not None and self._branchcachetip == tip:
489 494 return
490 495
491 496 oldtip = self._branchcachetip
492 497 self._branchcachetip = tip
493 498 if oldtip is None or oldtip not in self.changelog.nodemap:
494 499 partial, last, lrev = self._readbranchcache()
495 500 else:
496 501 lrev = self.changelog.rev(oldtip)
497 502 partial = self._branchcache
498 503
499 504 self._branchtags(partial, lrev)
500 505 # this private cache holds all heads (not just tips)
501 506 self._branchcache = partial
502 507
503 508 def branchmap(self):
504 509 '''returns a dictionary {branch: [branchheads]}'''
505 510 self.updatebranchcache()
506 511 return self._branchcache
507 512
508 513 def branchtags(self):
509 514 '''return a dict where branch names map to the tipmost head of
510 515 the branch, open heads come before closed'''
511 516 bt = {}
512 517 for bn, heads in self.branchmap().iteritems():
513 518 tip = heads[-1]
514 519 for h in reversed(heads):
515 520 if 'close' not in self.changelog.read(h)[5]:
516 521 tip = h
517 522 break
518 523 bt[bn] = tip
519 524 return bt
520 525
521 526 def _readbranchcache(self):
522 527 partial = {}
523 528 try:
524 529 f = self.opener("cache/branchheads")
525 530 lines = f.read().split('\n')
526 531 f.close()
527 532 except (IOError, OSError):
528 533 return {}, nullid, nullrev
529 534
530 535 try:
531 536 last, lrev = lines.pop(0).split(" ", 1)
532 537 last, lrev = bin(last), int(lrev)
533 538 if lrev >= len(self) or self[lrev].node() != last:
534 539 # invalidate the cache
535 540 raise ValueError('invalidating branch cache (tip differs)')
536 541 for l in lines:
537 542 if not l:
538 543 continue
539 544 node, label = l.split(" ", 1)
540 545 label = encoding.tolocal(label.strip())
541 546 partial.setdefault(label, []).append(bin(node))
542 547 except KeyboardInterrupt:
543 548 raise
544 549 except Exception, inst:
545 550 if self.ui.debugflag:
546 551 self.ui.warn(str(inst), '\n')
547 552 partial, last, lrev = {}, nullid, nullrev
548 553 return partial, last, lrev
549 554
550 555 def _writebranchcache(self, branches, tip, tiprev):
551 556 try:
552 557 f = self.opener("cache/branchheads", "w", atomictemp=True)
553 558 f.write("%s %s\n" % (hex(tip), tiprev))
554 559 for label, nodes in branches.iteritems():
555 560 for node in nodes:
556 561 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
557 562 f.close()
558 563 except (IOError, OSError):
559 564 pass
560 565
561 566 def _updatebranchcache(self, partial, ctxgen):
562 567 # collect new branch entries
563 568 newbranches = {}
564 569 for c in ctxgen:
565 570 newbranches.setdefault(c.branch(), []).append(c.node())
566 571 # if older branchheads are reachable from new ones, they aren't
567 572 # really branchheads. Note checking parents is insufficient:
568 573 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
569 574 for branch, newnodes in newbranches.iteritems():
570 575 bheads = partial.setdefault(branch, [])
571 576 bheads.extend(newnodes)
572 577 if len(bheads) <= 1:
573 578 continue
574 579 bheads = sorted(bheads, key=lambda x: self[x].rev())
575 580 # starting from tip means fewer passes over reachable
576 581 while newnodes:
577 582 latest = newnodes.pop()
578 583 if latest not in bheads:
579 584 continue
580 585 minbhrev = self[bheads[0]].node()
581 586 reachable = self.changelog.reachable(latest, minbhrev)
582 587 reachable.remove(latest)
583 588 if reachable:
584 589 bheads = [b for b in bheads if b not in reachable]
585 590 partial[branch] = bheads
586 591
587 592 def lookup(self, key):
588 593 if isinstance(key, int):
589 594 return self.changelog.node(key)
590 595 elif key == '.':
591 596 return self.dirstate.p1()
592 597 elif key == 'null':
593 598 return nullid
594 599 elif key == 'tip':
595 600 return self.changelog.tip()
596 601 n = self.changelog._match(key)
597 602 if n:
598 603 return n
599 604 if key in self._bookmarks:
600 605 return self._bookmarks[key]
601 606 if key in self.tags():
602 607 return self.tags()[key]
603 608 if key in self.branchtags():
604 609 return self.branchtags()[key]
605 610 n = self.changelog._partialmatch(key)
606 611 if n:
607 612 return n
608 613
609 614 # can't find key, check if it might have come from damaged dirstate
610 615 if key in self.dirstate.parents():
611 616 raise error.Abort(_("working directory has unknown parent '%s'!")
612 617 % short(key))
613 618 try:
614 619 if len(key) == 20:
615 620 key = hex(key)
616 621 except TypeError:
617 622 pass
618 623 raise error.RepoLookupError(_("unknown revision '%s'") % key)
619 624
620 625 def lookupbranch(self, key, remote=None):
621 626 repo = remote or self
622 627 if key in repo.branchmap():
623 628 return key
624 629
625 630 repo = (remote and remote.local()) and remote or self
626 631 return repo[key].branch()
627 632
628 633 def known(self, nodes):
629 634 nm = self.changelog.nodemap
630 635 result = []
631 636 for n in nodes:
632 637 r = nm.get(n)
633 638 resp = not (r is None or self._phaserev[r] >= phases.secret)
634 639 result.append(resp)
635 640 return result
636 641
637 642 def local(self):
638 643 return self
639 644
640 645 def join(self, f):
641 646 return os.path.join(self.path, f)
642 647
643 648 def wjoin(self, f):
644 649 return os.path.join(self.root, f)
645 650
646 651 def file(self, f):
647 652 if f[0] == '/':
648 653 f = f[1:]
649 654 return filelog.filelog(self.sopener, f)
650 655
651 656 def changectx(self, changeid):
652 657 return self[changeid]
653 658
654 659 def parents(self, changeid=None):
655 660 '''get list of changectxs for parents of changeid'''
656 661 return self[changeid].parents()
657 662
658 663 def filectx(self, path, changeid=None, fileid=None):
659 664 """changeid can be a changeset revision, node, or tag.
660 665 fileid can be a file revision or node."""
661 666 return context.filectx(self, path, changeid, fileid)
662 667
663 668 def getcwd(self):
664 669 return self.dirstate.getcwd()
665 670
666 671 def pathto(self, f, cwd=None):
667 672 return self.dirstate.pathto(f, cwd)
668 673
669 674 def wfile(self, f, mode='r'):
670 675 return self.wopener(f, mode)
671 676
672 677 def _link(self, f):
673 678 return os.path.islink(self.wjoin(f))
674 679
675 680 def _loadfilter(self, filter):
676 681 if filter not in self.filterpats:
677 682 l = []
678 683 for pat, cmd in self.ui.configitems(filter):
679 684 if cmd == '!':
680 685 continue
681 686 mf = matchmod.match(self.root, '', [pat])
682 687 fn = None
683 688 params = cmd
684 689 for name, filterfn in self._datafilters.iteritems():
685 690 if cmd.startswith(name):
686 691 fn = filterfn
687 692 params = cmd[len(name):].lstrip()
688 693 break
689 694 if not fn:
690 695 fn = lambda s, c, **kwargs: util.filter(s, c)
691 696 # Wrap old filters not supporting keyword arguments
692 697 if not inspect.getargspec(fn)[2]:
693 698 oldfn = fn
694 699 fn = lambda s, c, **kwargs: oldfn(s, c)
695 700 l.append((mf, fn, params))
696 701 self.filterpats[filter] = l
697 702 return self.filterpats[filter]
698 703
699 704 def _filter(self, filterpats, filename, data):
700 705 for mf, fn, cmd in filterpats:
701 706 if mf(filename):
702 707 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
703 708 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
704 709 break
705 710
706 711 return data
707 712
708 713 @propertycache
709 714 def _encodefilterpats(self):
710 715 return self._loadfilter('encode')
711 716
712 717 @propertycache
713 718 def _decodefilterpats(self):
714 719 return self._loadfilter('decode')
715 720
716 721 def adddatafilter(self, name, filter):
717 722 self._datafilters[name] = filter
718 723
719 724 def wread(self, filename):
720 725 if self._link(filename):
721 726 data = os.readlink(self.wjoin(filename))
722 727 else:
723 728 data = self.wopener.read(filename)
724 729 return self._filter(self._encodefilterpats, filename, data)
725 730
726 731 def wwrite(self, filename, data, flags):
727 732 data = self._filter(self._decodefilterpats, filename, data)
728 733 if 'l' in flags:
729 734 self.wopener.symlink(data, filename)
730 735 else:
731 736 self.wopener.write(filename, data)
732 737 if 'x' in flags:
733 738 util.setflags(self.wjoin(filename), False, True)
734 739
735 740 def wwritedata(self, filename, data):
736 741 return self._filter(self._decodefilterpats, filename, data)
737 742
738 743 def transaction(self, desc):
739 744 tr = self._transref and self._transref() or None
740 745 if tr and tr.running():
741 746 return tr.nest()
742 747
743 748 # abort here if the journal already exists
744 749 if os.path.exists(self.sjoin("journal")):
745 750 raise error.RepoError(
746 751 _("abandoned transaction found - run hg recover"))
747 752
748 753 journalfiles = self._writejournal(desc)
749 754 renames = [(x, undoname(x)) for x in journalfiles]
750 755
751 756 tr = transaction.transaction(self.ui.warn, self.sopener,
752 757 self.sjoin("journal"),
753 758 aftertrans(renames),
754 759 self.store.createmode)
755 760 self._transref = weakref.ref(tr)
756 761 return tr
757 762
758 763 def _writejournal(self, desc):
759 764 # save dirstate for rollback
760 765 try:
761 766 ds = self.opener.read("dirstate")
762 767 except IOError:
763 768 ds = ""
764 769 self.opener.write("journal.dirstate", ds)
765 770 self.opener.write("journal.branch",
766 771 encoding.fromlocal(self.dirstate.branch()))
767 772 self.opener.write("journal.desc",
768 773 "%d\n%s\n" % (len(self), desc))
769 774
770 775 bkname = self.join('bookmarks')
771 776 if os.path.exists(bkname):
772 777 util.copyfile(bkname, self.join('journal.bookmarks'))
773 778 else:
774 779 self.opener.write('journal.bookmarks', '')
775 780 phasesname = self.sjoin('phaseroots')
776 781 if os.path.exists(phasesname):
777 782 util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
778 783 else:
779 784 self.sopener.write('journal.phaseroots', '')
780 785
781 786 return (self.sjoin('journal'), self.join('journal.dirstate'),
782 787 self.join('journal.branch'), self.join('journal.desc'),
783 788 self.join('journal.bookmarks'),
784 789 self.sjoin('journal.phaseroots'))
785 790
786 791 def recover(self):
787 792 lock = self.lock()
788 793 try:
789 794 if os.path.exists(self.sjoin("journal")):
790 795 self.ui.status(_("rolling back interrupted transaction\n"))
791 796 transaction.rollback(self.sopener, self.sjoin("journal"),
792 797 self.ui.warn)
793 798 self.invalidate()
794 799 return True
795 800 else:
796 801 self.ui.warn(_("no interrupted transaction available\n"))
797 802 return False
798 803 finally:
799 804 lock.release()
800 805
801 806 def rollback(self, dryrun=False, force=False):
802 807 wlock = lock = None
803 808 try:
804 809 wlock = self.wlock()
805 810 lock = self.lock()
806 811 if os.path.exists(self.sjoin("undo")):
807 812 return self._rollback(dryrun, force)
808 813 else:
809 814 self.ui.warn(_("no rollback information available\n"))
810 815 return 1
811 816 finally:
812 817 release(lock, wlock)
813 818
814 819 def _rollback(self, dryrun, force):
815 820 ui = self.ui
816 821 try:
817 822 args = self.opener.read('undo.desc').splitlines()
818 823 (oldlen, desc, detail) = (int(args[0]), args[1], None)
819 824 if len(args) >= 3:
820 825 detail = args[2]
821 826 oldtip = oldlen - 1
822 827
823 828 if detail and ui.verbose:
824 829 msg = (_('repository tip rolled back to revision %s'
825 830 ' (undo %s: %s)\n')
826 831 % (oldtip, desc, detail))
827 832 else:
828 833 msg = (_('repository tip rolled back to revision %s'
829 834 ' (undo %s)\n')
830 835 % (oldtip, desc))
831 836 except IOError:
832 837 msg = _('rolling back unknown transaction\n')
833 838 desc = None
834 839
835 840 if not force and self['.'] != self['tip'] and desc == 'commit':
836 841 raise util.Abort(
837 842 _('rollback of last commit while not checked out '
838 843 'may lose data'), hint=_('use -f to force'))
839 844
840 845 ui.status(msg)
841 846 if dryrun:
842 847 return 0
843 848
844 849 parents = self.dirstate.parents()
845 850 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
846 851 if os.path.exists(self.join('undo.bookmarks')):
847 852 util.rename(self.join('undo.bookmarks'),
848 853 self.join('bookmarks'))
849 854 if os.path.exists(self.sjoin('undo.phaseroots')):
850 855 util.rename(self.sjoin('undo.phaseroots'),
851 856 self.sjoin('phaseroots'))
852 857 self.invalidate()
853 858
854 859 parentgone = (parents[0] not in self.changelog.nodemap or
855 860 parents[1] not in self.changelog.nodemap)
856 861 if parentgone:
857 862 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
858 863 try:
859 864 branch = self.opener.read('undo.branch')
860 865 self.dirstate.setbranch(branch)
861 866 except IOError:
862 867 ui.warn(_('named branch could not be reset: '
863 868 'current branch is still \'%s\'\n')
864 869 % self.dirstate.branch())
865 870
866 871 self.dirstate.invalidate()
867 872 parents = tuple([p.rev() for p in self.parents()])
868 873 if len(parents) > 1:
869 874 ui.status(_('working directory now based on '
870 875 'revisions %d and %d\n') % parents)
871 876 else:
872 877 ui.status(_('working directory now based on '
873 878 'revision %d\n') % parents)
874 879 self.destroyed()
875 880 return 0
876 881
877 882 def invalidatecaches(self):
878 883 def delcache(name):
879 884 try:
880 885 delattr(self, name)
881 886 except AttributeError:
882 887 pass
883 888
884 889 delcache('_tagscache')
885 890 delcache('_phaserev')
886 891
887 892 self._branchcache = None # in UTF-8
888 893 self._branchcachetip = None
889 894
890 895 def invalidatedirstate(self):
891 896 '''Invalidates the dirstate, causing the next call to dirstate
892 897 to check if it was modified since the last time it was read,
893 898 rereading it if it has.
894 899
895 900 This is different to dirstate.invalidate() that it doesn't always
896 901 rereads the dirstate. Use dirstate.invalidate() if you want to
897 902 explicitly read the dirstate again (i.e. restoring it to a previous
898 903 known good state).'''
899 try:
904 if 'dirstate' in self.__dict__:
905 for k in self.dirstate._filecache:
906 try:
907 delattr(self.dirstate, k)
908 except AttributeError:
909 pass
900 910 delattr(self, 'dirstate')
901 except AttributeError:
902 pass
903 911
904 912 def invalidate(self):
905 913 for k in self._filecache:
906 914 # dirstate is invalidated separately in invalidatedirstate()
907 915 if k == 'dirstate':
908 916 continue
909 917
910 918 try:
911 919 delattr(self, k)
912 920 except AttributeError:
913 921 pass
914 922 self.invalidatecaches()
915 923
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        # Acquire the lock file 'lockname'; 'releasefn' runs when the lock
        # is released and 'acquirefn' right after acquisition.  With
        # wait=False a held lock propagates error.LockHeld immediately;
        # otherwise we warn and retry with a timeout taken from the
        # ui.timeout config option.
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
930 938
931 939 def _afterlock(self, callback):
932 940 """add a callback to the current repository lock.
933 941
934 942 The callback will be executed on lock release."""
935 943 l = self._lockref and self._lockref()
936 944 if l:
937 945 l.postrelease.append(callback)
938 946
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        # Re-entrant: if a live lock already exists, bump its count.
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            # Flush pending store writes and dirty phase roots, then mark
            # our own filecache entries fresh so the writes we just made
            # do not trigger a reload (dirstate is wlock's business).
            self.store.write()
            if self._dirtyphases:
                phases.writeroots(self)
                self._dirtyphases = False
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        # only a weak reference is kept, so the lock dies with its holder
        self._lockref = weakref.ref(l)
        return l
962 970
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        # Re-entrant: if a live working-dir lock exists, bump its count.
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            # Persist the dirstate and mark its cache entry fresh so our
            # own write does not look like an external modification.
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        # only a weak reference is kept, so the lock dies with its holder
        self._wlockref = weakref.ref(l)
        return l
983 991
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx is the file context to commit; manifest1/manifest2 are the
        parents' manifests; linkrev is the changelog revision this filelog
        entry will link to; tr is the transaction; the file's name is
        appended to changelist if it changed.  Returns the resulting
        filelog node (or the unchanged first-parent node).
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        # fparent2o remembers the original second parent for the
        # flags-only-change check at the bottom
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1063 1071
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        # NOTE(review): 'extra' has a mutable default; it is only read
        # here (extra.get, passed on to workingctx) — confirm no caller
        # relies on mutating it.

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # record visited directories so explicit dir patterns can be
            # validated below, and abort on bad files early
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            # changes is (modified, added, removed, deleted, unknown,
            # ignored, clean) -- see status()
            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            if '.hgsub' in wctx:
                # only manage subrepos and .hgsubstate if .hgsub is present
                for p in wctx.parents():
                    removedsubs.update(s for s in p.substate if match(s))
                for s in wctx.substate:
                    removedsubs.discard(s)
                    if match(s) and wctx.sub(s).dirty():
                        subs.append(s)
                if (subs or removedsubs):
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    if '.hgsubstate' not in changes[0]:
                        changes[0].insert(0, '.hgsubstate')
                    if '.hgsubstate' in changes[2]:
                        changes[2].remove('.hgsubstate')
            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0],
                                     hint=_("use --subrepos for recursive commit"))

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            # nothing to commit: return None early (unless forced, closing
            # a branch, merging, or changing branch name)
            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                      "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                        subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                # tell the user where the edited message survived, then
                # re-raise whatever aborted the commit
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret
1206 1214
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        With error=True, an IOError while committing a file always
        aborts; otherwise an ENOENT is treated as a removal.  Returns
        the new changelog node.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        # a vanished file (ENOENT) is a removal unless the
                        # caller asked for strict error behavior
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                # no file changed: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            # let pretxncommit hooks see the pending changelog data
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit is proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retract boundary do not alter parent changeset.
                # if a parent have higher the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1287 1295
1288 1296 def destroyed(self):
1289 1297 '''Inform the repository that nodes have been destroyed.
1290 1298 Intended for use by strip and rollback, so there's a common
1291 1299 place for anything that has to be done after destroying history.'''
1292 1300 # XXX it might be nice if we could take the list of destroyed
1293 1301 # nodes, but I don't see an easy way for rollback() to do that
1294 1302
1295 1303 # Ensure the persistent tag cache is updated. Doing it now
1296 1304 # means that the tag cache only has to worry about destroyed
1297 1305 # heads immediately after a strip/rollback. That in turn
1298 1306 # guarantees that "cachetip == currenttip" (comparing both rev
1299 1307 # and node) always means no nodes have been added or destroyed.
1300 1308
1301 1309 # XXX this is suboptimal when qrefresh'ing: we strip the current
1302 1310 # head, refresh the tag cache, then immediately add a new head.
1303 1311 # But I think doing it this way is necessary for the "instant
1304 1312 # tag cache retrieval" case to work.
1305 1313 self.invalidatecaches()
1306 1314
1307 1315 # Discard all cache entries to force reloading everything.
1308 1316 self._filecache.clear()
1309 1317
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        # node=None resolves to the working directory context; the
        # context object does the actual traversal.
        return self[node].walk(match)
1317 1325
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted file lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        The ignored/clean/unknown lists are only populated when the
        corresponding flag is set.
        """

        def mfmatches(ctx):
            # manifest of ctx restricted to files accepted by 'match'
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            # 'cmp' holds files the dirstate could not decide about
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    # whatever is left in mf1 afterwards was removed
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
1466 1474
1467 1475 def heads(self, start=None):
1468 1476 heads = self.changelog.heads(start)
1469 1477 # sort the output in rev descending order
1470 1478 return sorted(heads, key=self.changelog.rev, reverse=True)
1471 1479
1472 1480 def branchheads(self, branch=None, start=None, closed=False):
1473 1481 '''return a (possibly filtered) list of heads for the given branch
1474 1482
1475 1483 Heads are returned in topological order, from newest to oldest.
1476 1484 If branch is None, use the dirstate branch.
1477 1485 If start is not None, return only heads reachable from start.
1478 1486 If closed is True, return heads that are marked as closed as well.
1479 1487 '''
1480 1488 if branch is None:
1481 1489 branch = self[None].branch()
1482 1490 branches = self.branchmap()
1483 1491 if branch not in branches:
1484 1492 return []
1485 1493 # the cache returns heads ordered lowest to highest
1486 1494 bheads = list(reversed(branches[branch]))
1487 1495 if start is not None:
1488 1496 # filter out the heads that cannot be reached from startrev
1489 1497 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1490 1498 bheads = [h for h in bheads if h in fbheads]
1491 1499 if not closed:
1492 1500 bheads = [h for h in bheads if
1493 1501 ('close' not in self.changelog.read(h)[5])]
1494 1502 return bheads
1495 1503
1496 1504 def branches(self, nodes):
1497 1505 if not nodes:
1498 1506 nodes = [self.changelog.tip()]
1499 1507 b = []
1500 1508 for n in nodes:
1501 1509 t = n
1502 1510 while True:
1503 1511 p = self.changelog.parents(n)
1504 1512 if p[1] != nullid or p[0] == nullid:
1505 1513 b.append((t, n, p[0], p[1]))
1506 1514 break
1507 1515 n = p[0]
1508 1516 return b
1509 1517
1510 1518 def between(self, pairs):
1511 1519 r = []
1512 1520
1513 1521 for top, bottom in pairs:
1514 1522 n, l, i = top, [], 0
1515 1523 f = 1
1516 1524
1517 1525 while n != bottom and n != nullid:
1518 1526 p = self.changelog.parents(n)[0]
1519 1527 if i == f:
1520 1528 l.append(n)
1521 1529 f = f * 2
1522 1530 n = p
1523 1531 i += 1
1524 1532
1525 1533 r.append(l)
1526 1534
1527 1535 return r
1528 1536
    def pull(self, remote, heads=None, force=False):
        """Pull changesets (limited to 'heads' if given) from 'remote'
        into this repository, then synchronize phase data.  Returns the
        result of addchangegroup() (0 when nothing was fetched)."""
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                # pick the best transfer method the remote supports
                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                           "other repository doesn't support "
                                           "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled every thing possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and unpublishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)
        finally:
            lock.release()

        return result
1589 1597
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        # Intentionally a no-op in core; this exists purely as an
        # extension hook point.
        pass
1596 1604
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)


                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # refuse to push to a remote that would grow new
                        # heads, unless newbranch/force allows it
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeed, synchronize target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All out push fails. synchronize all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changeset filtered out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))"
                    #              )
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                     outgoing.commonheads,
                                     outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote by public here.
                    # XXX Beware that revset break if droots is not strictly
                    # XXX root we may want to ensure it is but it is costly
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                            % newremotehead)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        # push any bookmarks whose remote value is an ancestor of ours
        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret
1747 1755
1748 1756 def changegroupinfo(self, nodes, source):
1749 1757 if self.ui.verbose or source == 'bundle':
1750 1758 self.ui.status(_("%d changesets found\n") % len(nodes))
1751 1759 if self.ui.debugflag:
1752 1760 self.ui.debug("list of changesets:\n")
1753 1761 for node in nodes:
1754 1762 self.ui.debug("%s\n" % hex(node))
1755 1763
    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            # no bases means "everything from the repository root"
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)
1776 1784
1777 1785 def getlocalbundle(self, source, outgoing):
1778 1786 """Like getbundle, but taking a discovery.outgoing as an argument.
1779 1787
1780 1788 This is only implemented for local repos and reuses potentially
1781 1789 precomputed sets in outgoing."""
1782 1790 if not outgoing.missing:
1783 1791 return None
1784 1792 return self._changegroupsubset(outgoing.common,
1785 1793 outgoing.missing,
1786 1794 outgoing.missingheads,
1787 1795 source)
1788 1796
1789 1797 def getbundle(self, source, heads=None, common=None):
1790 1798 """Like changegroupsubset, but returns the set difference between the
1791 1799 ancestors of heads and the ancestors common.
1792 1800
1793 1801 If heads is None, use the local heads. If common is None, use [nullid].
1794 1802
1795 1803 The nodes in common might not all be known locally due to the way the
1796 1804 current discovery protocol works.
1797 1805 """
1798 1806 cl = self.changelog
1799 1807 if common:
1800 1808 nm = cl.nodemap
1801 1809 common = [n for n in common if n in nm]
1802 1810 else:
1803 1811 common = [nullid]
1804 1812 if not heads:
1805 1813 heads = cl.heads()
1806 1814 return self.getlocalbundle(source,
1807 1815 discovery.outgoing(cl, common, heads))
1808 1816
1809 1817 def _changegroupsubset(self, commonrevs, csets, heads, source):
1810 1818
1811 1819 cl = self.changelog
1812 1820 mf = self.manifest
1813 1821 mfs = {} # needed manifests
1814 1822 fnodes = {} # needed file nodes
1815 1823 changedfiles = set()
1816 1824 fstate = ['', {}]
1817 1825 count = [0]
1818 1826
1819 1827 # can we go through the fast path ?
1820 1828 heads.sort()
1821 1829 if heads == sorted(self.heads()):
1822 1830 return self._changegroup(csets, source)
1823 1831
1824 1832 # slow path
1825 1833 self.hook('preoutgoing', throw=True, source=source)
1826 1834 self.changegroupinfo(csets, source)
1827 1835
1828 1836 # filter any nodes that claim to be part of the known set
1829 1837 def prune(revlog, missing):
1830 1838 return [n for n in missing
1831 1839 if revlog.linkrev(revlog.rev(n)) not in commonrevs]
1832 1840
1833 1841 def lookup(revlog, x):
1834 1842 if revlog == cl:
1835 1843 c = cl.read(x)
1836 1844 changedfiles.update(c[3])
1837 1845 mfs.setdefault(c[0], x)
1838 1846 count[0] += 1
1839 1847 self.ui.progress(_('bundling'), count[0],
1840 1848 unit=_('changesets'), total=len(csets))
1841 1849 return x
1842 1850 elif revlog == mf:
1843 1851 clnode = mfs[x]
1844 1852 mdata = mf.readfast(x)
1845 1853 for f in changedfiles:
1846 1854 if f in mdata:
1847 1855 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1848 1856 count[0] += 1
1849 1857 self.ui.progress(_('bundling'), count[0],
1850 1858 unit=_('manifests'), total=len(mfs))
1851 1859 return mfs[x]
1852 1860 else:
1853 1861 self.ui.progress(
1854 1862 _('bundling'), count[0], item=fstate[0],
1855 1863 unit=_('files'), total=len(changedfiles))
1856 1864 return fstate[1][x]
1857 1865
1858 1866 bundler = changegroup.bundle10(lookup)
1859 1867 reorder = self.ui.config('bundle', 'reorder', 'auto')
1860 1868 if reorder == 'auto':
1861 1869 reorder = None
1862 1870 else:
1863 1871 reorder = util.parsebool(reorder)
1864 1872
1865 1873 def gengroup():
1866 1874 # Create a changenode group generator that will call our functions
1867 1875 # back to lookup the owning changenode and collect information.
1868 1876 for chunk in cl.group(csets, bundler, reorder=reorder):
1869 1877 yield chunk
1870 1878 self.ui.progress(_('bundling'), None)
1871 1879
1872 1880 # Create a generator for the manifestnodes that calls our lookup
1873 1881 # and data collection functions back.
1874 1882 count[0] = 0
1875 1883 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1876 1884 yield chunk
1877 1885 self.ui.progress(_('bundling'), None)
1878 1886
1879 1887 mfs.clear()
1880 1888
1881 1889 # Go through all our files in order sorted by name.
1882 1890 count[0] = 0
1883 1891 for fname in sorted(changedfiles):
1884 1892 filerevlog = self.file(fname)
1885 1893 if not len(filerevlog):
1886 1894 raise util.Abort(_("empty or missing revlog for %s") % fname)
1887 1895 fstate[0] = fname
1888 1896 fstate[1] = fnodes.pop(fname, {})
1889 1897
1890 1898 nodelist = prune(filerevlog, fstate[1])
1891 1899 if nodelist:
1892 1900 count[0] += 1
1893 1901 yield bundler.fileheader(fname)
1894 1902 for chunk in filerevlog.group(nodelist, bundler, reorder):
1895 1903 yield chunk
1896 1904
1897 1905 # Signal that no more groups are left.
1898 1906 yield bundler.close()
1899 1907 self.ui.progress(_('bundling'), None)
1900 1908
1901 1909 if csets:
1902 1910 self.hook('outgoing', node=hex(csets[0]), source=source)
1903 1911
1904 1912 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1905 1913
1906 1914 def changegroup(self, basenodes, source):
1907 1915 # to avoid a race we use changegroupsubset() (issue1320)
1908 1916 return self.changegroupsubset(basenodes, self.heads(), source)
1909 1917
1910 1918 def _changegroup(self, nodes, source):
1911 1919 """Compute the changegroup of all nodes that we have that a recipient
1912 1920 doesn't. Return a chunkbuffer object whose read() method will return
1913 1921 successive changegroup chunks.
1914 1922
1915 1923 This is much easier than the previous function as we can assume that
1916 1924 the recipient has any changenode we aren't sending them.
1917 1925
1918 1926 nodes is the set of nodes to send"""
1919 1927
1920 1928 cl = self.changelog
1921 1929 mf = self.manifest
1922 1930 mfs = {}
1923 1931 changedfiles = set()
1924 1932 fstate = ['']
1925 1933 count = [0]
1926 1934
1927 1935 self.hook('preoutgoing', throw=True, source=source)
1928 1936 self.changegroupinfo(nodes, source)
1929 1937
1930 1938 revset = set([cl.rev(n) for n in nodes])
1931 1939
1932 1940 def gennodelst(log):
1933 1941 return [log.node(r) for r in log if log.linkrev(r) in revset]
1934 1942
1935 1943 def lookup(revlog, x):
1936 1944 if revlog == cl:
1937 1945 c = cl.read(x)
1938 1946 changedfiles.update(c[3])
1939 1947 mfs.setdefault(c[0], x)
1940 1948 count[0] += 1
1941 1949 self.ui.progress(_('bundling'), count[0],
1942 1950 unit=_('changesets'), total=len(nodes))
1943 1951 return x
1944 1952 elif revlog == mf:
1945 1953 count[0] += 1
1946 1954 self.ui.progress(_('bundling'), count[0],
1947 1955 unit=_('manifests'), total=len(mfs))
1948 1956 return cl.node(revlog.linkrev(revlog.rev(x)))
1949 1957 else:
1950 1958 self.ui.progress(
1951 1959 _('bundling'), count[0], item=fstate[0],
1952 1960 total=len(changedfiles), unit=_('files'))
1953 1961 return cl.node(revlog.linkrev(revlog.rev(x)))
1954 1962
1955 1963 bundler = changegroup.bundle10(lookup)
1956 1964 reorder = self.ui.config('bundle', 'reorder', 'auto')
1957 1965 if reorder == 'auto':
1958 1966 reorder = None
1959 1967 else:
1960 1968 reorder = util.parsebool(reorder)
1961 1969
1962 1970 def gengroup():
1963 1971 '''yield a sequence of changegroup chunks (strings)'''
1964 1972 # construct a list of all changed files
1965 1973
1966 1974 for chunk in cl.group(nodes, bundler, reorder=reorder):
1967 1975 yield chunk
1968 1976 self.ui.progress(_('bundling'), None)
1969 1977
1970 1978 count[0] = 0
1971 1979 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1972 1980 yield chunk
1973 1981 self.ui.progress(_('bundling'), None)
1974 1982
1975 1983 count[0] = 0
1976 1984 for fname in sorted(changedfiles):
1977 1985 filerevlog = self.file(fname)
1978 1986 if not len(filerevlog):
1979 1987 raise util.Abort(_("empty or missing revlog for %s") % fname)
1980 1988 fstate[0] = fname
1981 1989 nodelist = gennodelst(filerevlog)
1982 1990 if nodelist:
1983 1991 count[0] += 1
1984 1992 yield bundler.fileheader(fname)
1985 1993 for chunk in filerevlog.group(nodelist, bundler, reorder):
1986 1994 yield chunk
1987 1995 yield bundler.close()
1988 1996 self.ui.progress(_('bundling'), None)
1989 1997
1990 1998 if nodes:
1991 1999 self.hook('outgoing', node=hex(nodes[0]), source=source)
1992 2000
1993 2001 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1994 2002
1995 2003 def addchangegroup(self, source, srctype, url, emptyok=False):
1996 2004 """Add the changegroup returned by source.read() to this repo.
1997 2005 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1998 2006 the URL of the repo where this changegroup is coming from.
1999 2007
2000 2008 Return an integer summarizing the change to this repo:
2001 2009 - nothing changed or no source: 0
2002 2010 - more heads than before: 1+added heads (2..n)
2003 2011 - fewer heads than before: -1-removed heads (-2..-n)
2004 2012 - number of heads stays the same: 1
2005 2013 """
2006 2014 def csmap(x):
2007 2015 self.ui.debug("add changeset %s\n" % short(x))
2008 2016 return len(cl)
2009 2017
2010 2018 def revmap(x):
2011 2019 return cl.rev(x)
2012 2020
2013 2021 if not source:
2014 2022 return 0
2015 2023
2016 2024 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2017 2025
2018 2026 changesets = files = revisions = 0
2019 2027 efiles = set()
2020 2028
2021 2029 # write changelog data to temp files so concurrent readers will not see
2022 2030 # inconsistent view
2023 2031 cl = self.changelog
2024 2032 cl.delayupdate()
2025 2033 oldheads = cl.heads()
2026 2034
2027 2035 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2028 2036 try:
2029 2037 trp = weakref.proxy(tr)
2030 2038 # pull off the changeset group
2031 2039 self.ui.status(_("adding changesets\n"))
2032 2040 clstart = len(cl)
2033 2041 class prog(object):
2034 2042 step = _('changesets')
2035 2043 count = 1
2036 2044 ui = self.ui
2037 2045 total = None
2038 2046 def __call__(self):
2039 2047 self.ui.progress(self.step, self.count, unit=_('chunks'),
2040 2048 total=self.total)
2041 2049 self.count += 1
2042 2050 pr = prog()
2043 2051 source.callback = pr
2044 2052
2045 2053 source.changelogheader()
2046 2054 srccontent = cl.addgroup(source, csmap, trp)
2047 2055 if not (srccontent or emptyok):
2048 2056 raise util.Abort(_("received changelog group is empty"))
2049 2057 clend = len(cl)
2050 2058 changesets = clend - clstart
2051 2059 for c in xrange(clstart, clend):
2052 2060 efiles.update(self[c].files())
2053 2061 efiles = len(efiles)
2054 2062 self.ui.progress(_('changesets'), None)
2055 2063
2056 2064 # pull off the manifest group
2057 2065 self.ui.status(_("adding manifests\n"))
2058 2066 pr.step = _('manifests')
2059 2067 pr.count = 1
2060 2068 pr.total = changesets # manifests <= changesets
2061 2069 # no need to check for empty manifest group here:
2062 2070 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2063 2071 # no new manifest will be created and the manifest group will
2064 2072 # be empty during the pull
2065 2073 source.manifestheader()
2066 2074 self.manifest.addgroup(source, revmap, trp)
2067 2075 self.ui.progress(_('manifests'), None)
2068 2076
2069 2077 needfiles = {}
2070 2078 if self.ui.configbool('server', 'validate', default=False):
2071 2079 # validate incoming csets have their manifests
2072 2080 for cset in xrange(clstart, clend):
2073 2081 mfest = self.changelog.read(self.changelog.node(cset))[0]
2074 2082 mfest = self.manifest.readdelta(mfest)
2075 2083 # store file nodes we must see
2076 2084 for f, n in mfest.iteritems():
2077 2085 needfiles.setdefault(f, set()).add(n)
2078 2086
2079 2087 # process the files
2080 2088 self.ui.status(_("adding file changes\n"))
2081 2089 pr.step = _('files')
2082 2090 pr.count = 1
2083 2091 pr.total = efiles
2084 2092 source.callback = None
2085 2093
2086 2094 while True:
2087 2095 chunkdata = source.filelogheader()
2088 2096 if not chunkdata:
2089 2097 break
2090 2098 f = chunkdata["filename"]
2091 2099 self.ui.debug("adding %s revisions\n" % f)
2092 2100 pr()
2093 2101 fl = self.file(f)
2094 2102 o = len(fl)
2095 2103 if not fl.addgroup(source, revmap, trp):
2096 2104 raise util.Abort(_("received file revlog group is empty"))
2097 2105 revisions += len(fl) - o
2098 2106 files += 1
2099 2107 if f in needfiles:
2100 2108 needs = needfiles[f]
2101 2109 for new in xrange(o, len(fl)):
2102 2110 n = fl.node(new)
2103 2111 if n in needs:
2104 2112 needs.remove(n)
2105 2113 if not needs:
2106 2114 del needfiles[f]
2107 2115 self.ui.progress(_('files'), None)
2108 2116
2109 2117 for f, needs in needfiles.iteritems():
2110 2118 fl = self.file(f)
2111 2119 for n in needs:
2112 2120 try:
2113 2121 fl.rev(n)
2114 2122 except error.LookupError:
2115 2123 raise util.Abort(
2116 2124 _('missing file data for %s:%s - run hg verify') %
2117 2125 (f, hex(n)))
2118 2126
2119 2127 dh = 0
2120 2128 if oldheads:
2121 2129 heads = cl.heads()
2122 2130 dh = len(heads) - len(oldheads)
2123 2131 for h in heads:
2124 2132 if h not in oldheads and 'close' in self[h].extra():
2125 2133 dh -= 1
2126 2134 htext = ""
2127 2135 if dh:
2128 2136 htext = _(" (%+d heads)") % dh
2129 2137
2130 2138 self.ui.status(_("added %d changesets"
2131 2139 " with %d changes to %d files%s\n")
2132 2140 % (changesets, revisions, files, htext))
2133 2141
2134 2142 if changesets > 0:
2135 2143 p = lambda: cl.writepending() and self.root or ""
2136 2144 self.hook('pretxnchangegroup', throw=True,
2137 2145 node=hex(cl.node(clstart)), source=srctype,
2138 2146 url=url, pending=p)
2139 2147
2140 2148 added = [cl.node(r) for r in xrange(clstart, clend)]
2141 2149 publishing = self.ui.configbool('phases', 'publish', True)
2142 2150 if srctype == 'push':
2143 2151 # Old server can not push the boundary themself.
2144 2152 # New server won't push the boundary if changeset already
2145 2153 # existed locally as secrete
2146 2154 #
2147 2155 # We should not use added here but the list of all change in
2148 2156 # the bundle
2149 2157 if publishing:
2150 2158 phases.advanceboundary(self, phases.public, srccontent)
2151 2159 else:
2152 2160 phases.advanceboundary(self, phases.draft, srccontent)
2153 2161 phases.retractboundary(self, phases.draft, added)
2154 2162 elif srctype != 'strip':
2155 2163 # publishing only alter behavior during push
2156 2164 #
2157 2165 # strip should not touch boundary at all
2158 2166 phases.retractboundary(self, phases.draft, added)
2159 2167
2160 2168 # make changelog see real files again
2161 2169 cl.finalize(trp)
2162 2170
2163 2171 tr.close()
2164 2172
2165 2173 if changesets > 0:
2166 2174 def runhooks():
2167 2175 # forcefully update the on-disk branch cache
2168 2176 self.ui.debug("updating the branch cache\n")
2169 2177 self.updatebranchcache()
2170 2178 self.hook("changegroup", node=hex(cl.node(clstart)),
2171 2179 source=srctype, url=url)
2172 2180
2173 2181 for n in added:
2174 2182 self.hook("incoming", node=hex(n), source=srctype,
2175 2183 url=url)
2176 2184 self._afterlock(runhooks)
2177 2185
2178 2186 finally:
2179 2187 tr.release()
2180 2188 # never return 0 here:
2181 2189 if dh < 0:
2182 2190 return dh - 1
2183 2191 else:
2184 2192 return dh + 1
2185 2193
2186 2194 def stream_in(self, remote, requirements):
2187 2195 lock = self.lock()
2188 2196 try:
2189 2197 fp = remote.stream_out()
2190 2198 l = fp.readline()
2191 2199 try:
2192 2200 resp = int(l)
2193 2201 except ValueError:
2194 2202 raise error.ResponseError(
2195 2203 _('Unexpected response from remote server:'), l)
2196 2204 if resp == 1:
2197 2205 raise util.Abort(_('operation forbidden by server'))
2198 2206 elif resp == 2:
2199 2207 raise util.Abort(_('locking the remote repository failed'))
2200 2208 elif resp != 0:
2201 2209 raise util.Abort(_('the server sent an unknown error code'))
2202 2210 self.ui.status(_('streaming all changes\n'))
2203 2211 l = fp.readline()
2204 2212 try:
2205 2213 total_files, total_bytes = map(int, l.split(' ', 1))
2206 2214 except (ValueError, TypeError):
2207 2215 raise error.ResponseError(
2208 2216 _('Unexpected response from remote server:'), l)
2209 2217 self.ui.status(_('%d files to transfer, %s of data\n') %
2210 2218 (total_files, util.bytecount(total_bytes)))
2211 2219 start = time.time()
2212 2220 for i in xrange(total_files):
2213 2221 # XXX doesn't support '\n' or '\r' in filenames
2214 2222 l = fp.readline()
2215 2223 try:
2216 2224 name, size = l.split('\0', 1)
2217 2225 size = int(size)
2218 2226 except (ValueError, TypeError):
2219 2227 raise error.ResponseError(
2220 2228 _('Unexpected response from remote server:'), l)
2221 2229 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2222 2230 # for backwards compat, name was partially encoded
2223 2231 ofp = self.sopener(store.decodedir(name), 'w')
2224 2232 for chunk in util.filechunkiter(fp, limit=size):
2225 2233 ofp.write(chunk)
2226 2234 ofp.close()
2227 2235 elapsed = time.time() - start
2228 2236 if elapsed <= 0:
2229 2237 elapsed = 0.001
2230 2238 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2231 2239 (util.bytecount(total_bytes), elapsed,
2232 2240 util.bytecount(total_bytes / elapsed)))
2233 2241
2234 2242 # new requirements = old non-format requirements + new format-related
2235 2243 # requirements from the streamed-in repository
2236 2244 requirements.update(set(self.requirements) - self.supportedformats)
2237 2245 self._applyrequirements(requirements)
2238 2246 self._writerequirements()
2239 2247
2240 2248 self.invalidate()
2241 2249 return len(self.heads()) + 1
2242 2250 finally:
2243 2251 lock.release()
2244 2252
2245 2253 def clone(self, remote, heads=[], stream=False):
2246 2254 '''clone remote repository.
2247 2255
2248 2256 keyword arguments:
2249 2257 heads: list of revs to clone (forces use of pull)
2250 2258 stream: use streaming clone if possible'''
2251 2259
2252 2260 # now, all clients that can request uncompressed clones can
2253 2261 # read repo formats supported by all servers that can serve
2254 2262 # them.
2255 2263
2256 2264 # if revlog format changes, client will have to check version
2257 2265 # and format flags on "stream" capability, and use
2258 2266 # uncompressed only if compatible.
2259 2267
2260 2268 if stream and not heads:
2261 2269 # 'stream' means remote revlog format is revlogv1 only
2262 2270 if remote.capable('stream'):
2263 2271 return self.stream_in(remote, set(('revlogv1',)))
2264 2272 # otherwise, 'streamreqs' contains the remote revlog format
2265 2273 streamreqs = remote.capable('streamreqs')
2266 2274 if streamreqs:
2267 2275 streamreqs = set(streamreqs.split(','))
2268 2276 # if we support it, stream in and adjust our requirements
2269 2277 if not streamreqs - self.supportedformats:
2270 2278 return self.stream_in(remote, streamreqs)
2271 2279 return self.pull(remote, heads)
2272 2280
2273 2281 def pushkey(self, namespace, key, old, new):
2274 2282 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2275 2283 old=old, new=new)
2276 2284 ret = pushkey.push(self, namespace, key, old, new)
2277 2285 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2278 2286 ret=ret)
2279 2287 return ret
2280 2288
2281 2289 def listkeys(self, namespace):
2282 2290 self.hook('prelistkeys', throw=True, namespace=namespace)
2283 2291 values = pushkey.list(self, namespace)
2284 2292 self.hook('listkeys', namespace=namespace, values=values)
2285 2293 return values
2286 2294
2287 2295 def debugwireargs(self, one, two, three=None, four=None, five=None):
2288 2296 '''used to test argument passing over the wire'''
2289 2297 return "%s %s %s %s %s" % (one, two, three, four, five)
2290 2298
2291 2299 def savecommitmessage(self, text):
2292 2300 fp = self.opener('last-message.txt', 'wb')
2293 2301 try:
2294 2302 fp.write(text)
2295 2303 finally:
2296 2304 fp.close()
2297 2305 return self.pathto(fp.name[len(self.root)+1:])
2298 2306
2299 2307 # used to avoid circular references so destructors work
2300 2308 def aftertrans(files):
2301 2309 renamefiles = [tuple(t) for t in files]
2302 2310 def a():
2303 2311 for src, dest in renamefiles:
2304 2312 util.rename(src, dest)
2305 2313 return a
2306 2314
2307 2315 def undoname(fn):
2308 2316 base, name = os.path.split(fn)
2309 2317 assert name.startswith('journal')
2310 2318 return os.path.join(base, name.replace('journal', 'undo', 1))
2311 2319
2312 2320 def instance(ui, path, create):
2313 2321 return localrepository(ui, util.urllocalpath(path), create)
2314 2322
2315 2323 def islocal(path):
2316 2324 return True
@@ -1,837 +1,848 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 import util, error, osutil, revset, similar, encoding
10 10 import match as matchmod
11 11 import os, errno, re, stat, sys, glob
12 12
13 13 def nochangesfound(ui, secretlist=None):
14 14 '''report no changes for push/pull'''
15 15 if secretlist:
16 16 ui.status(_("no changes found (ignored %d secret changesets)\n")
17 17 % len(secretlist))
18 18 else:
19 19 ui.status(_("no changes found\n"))
20 20
21 21 def checkfilename(f):
22 22 '''Check that the filename f is an acceptable filename for a tracked file'''
23 23 if '\r' in f or '\n' in f:
24 24 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
25 25
26 26 def checkportable(ui, f):
27 27 '''Check if filename f is portable and warn or abort depending on config'''
28 28 checkfilename(f)
29 29 abort, warn = checkportabilityalert(ui)
30 30 if abort or warn:
31 31 msg = util.checkwinfilename(f)
32 32 if msg:
33 33 msg = "%s: %r" % (msg, f)
34 34 if abort:
35 35 raise util.Abort(msg)
36 36 ui.warn(_("warning: %s\n") % msg)
37 37
38 38 def checkportabilityalert(ui):
39 39 '''check if the user's config requests nothing, a warning, or abort for
40 40 non-portable filenames'''
41 41 val = ui.config('ui', 'portablefilenames', 'warn')
42 42 lval = val.lower()
43 43 bval = util.parsebool(val)
44 44 abort = os.name == 'nt' or lval == 'abort'
45 45 warn = bval or lval == 'warn'
46 46 if bval is None and not (warn or abort or lval == 'ignore'):
47 47 raise error.ConfigError(
48 48 _("ui.portablefilenames value is invalid ('%s')") % val)
49 49 return abort, warn
50 50
51 51 class casecollisionauditor(object):
52 52 def __init__(self, ui, abort, existingiter):
53 53 self._ui = ui
54 54 self._abort = abort
55 55 self._map = {}
56 56 for f in existingiter:
57 57 self._map[encoding.lower(f)] = f
58 58
59 59 def __call__(self, f):
60 60 fl = encoding.lower(f)
61 61 map = self._map
62 62 if fl in map and map[fl] != f:
63 63 msg = _('possible case-folding collision for %s') % f
64 64 if self._abort:
65 65 raise util.Abort(msg)
66 66 self._ui.warn(_("warning: %s\n") % msg)
67 67 map[fl] = f
68 68
69 69 class pathauditor(object):
70 70 '''ensure that a filesystem path contains no banned components.
71 71 the following properties of a path are checked:
72 72
73 73 - ends with a directory separator
74 74 - under top-level .hg
75 75 - starts at the root of a windows drive
76 76 - contains ".."
77 77 - traverses a symlink (e.g. a/symlink_here/b)
78 78 - inside a nested repository (a callback can be used to approve
79 79 some nested repositories, e.g., subrepositories)
80 80 '''
81 81
82 82 def __init__(self, root, callback=None):
83 83 self.audited = set()
84 84 self.auditeddir = set()
85 85 self.root = root
86 86 self.callback = callback
87 87 if os.path.lexists(root) and not util.checkcase(root):
88 88 self.normcase = util.normcase
89 89 else:
90 90 self.normcase = lambda x: x
91 91
92 92 def __call__(self, path):
93 93 '''Check the relative path.
94 94 path may contain a pattern (e.g. foodir/**.txt)'''
95 95
96 96 path = util.localpath(path)
97 97 normpath = self.normcase(path)
98 98 if normpath in self.audited:
99 99 return
100 100 # AIX ignores "/" at end of path, others raise EISDIR.
101 101 if util.endswithsep(path):
102 102 raise util.Abort(_("path ends in directory separator: %s") % path)
103 103 parts = util.splitpath(path)
104 104 if (os.path.splitdrive(path)[0]
105 105 or parts[0].lower() in ('.hg', '.hg.', '')
106 106 or os.pardir in parts):
107 107 raise util.Abort(_("path contains illegal component: %s") % path)
108 108 if '.hg' in path.lower():
109 109 lparts = [p.lower() for p in parts]
110 110 for p in '.hg', '.hg.':
111 111 if p in lparts[1:]:
112 112 pos = lparts.index(p)
113 113 base = os.path.join(*parts[:pos])
114 114 raise util.Abort(_("path '%s' is inside nested repo %r")
115 115 % (path, base))
116 116
117 117 normparts = util.splitpath(normpath)
118 118 assert len(parts) == len(normparts)
119 119
120 120 parts.pop()
121 121 normparts.pop()
122 122 prefixes = []
123 123 while parts:
124 124 prefix = os.sep.join(parts)
125 125 normprefix = os.sep.join(normparts)
126 126 if normprefix in self.auditeddir:
127 127 break
128 128 curpath = os.path.join(self.root, prefix)
129 129 try:
130 130 st = os.lstat(curpath)
131 131 except OSError, err:
132 132 # EINVAL can be raised as invalid path syntax under win32.
133 133 # They must be ignored for patterns can be checked too.
134 134 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
135 135 raise
136 136 else:
137 137 if stat.S_ISLNK(st.st_mode):
138 138 raise util.Abort(
139 139 _('path %r traverses symbolic link %r')
140 140 % (path, prefix))
141 141 elif (stat.S_ISDIR(st.st_mode) and
142 142 os.path.isdir(os.path.join(curpath, '.hg'))):
143 143 if not self.callback or not self.callback(curpath):
144 144 raise util.Abort(_("path '%s' is inside nested repo %r") %
145 145 (path, prefix))
146 146 prefixes.append(normprefix)
147 147 parts.pop()
148 148 normparts.pop()
149 149
150 150 self.audited.add(normpath)
151 151 # only add prefixes to the cache after checking everything: we don't
152 152 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
153 153 self.auditeddir.update(prefixes)
154 154
155 155 class abstractopener(object):
156 156 """Abstract base class; cannot be instantiated"""
157 157
158 158 def __init__(self, *args, **kwargs):
159 159 '''Prevent instantiation; don't call this from subclasses.'''
160 160 raise NotImplementedError('attempted instantiating ' + str(type(self)))
161 161
162 162 def read(self, path):
163 163 fp = self(path, 'rb')
164 164 try:
165 165 return fp.read()
166 166 finally:
167 167 fp.close()
168 168
169 169 def write(self, path, data):
170 170 fp = self(path, 'wb')
171 171 try:
172 172 return fp.write(data)
173 173 finally:
174 174 fp.close()
175 175
176 176 def append(self, path, data):
177 177 fp = self(path, 'ab')
178 178 try:
179 179 return fp.write(data)
180 180 finally:
181 181 fp.close()
182 182
183 183 class opener(abstractopener):
184 184 '''Open files relative to a base directory
185 185
186 186 This class is used to hide the details of COW semantics and
187 187 remote file access from higher level code.
188 188 '''
189 189 def __init__(self, base, audit=True):
190 190 self.base = base
191 191 self._audit = audit
192 192 if audit:
193 193 self.auditor = pathauditor(base)
194 194 else:
195 195 self.auditor = util.always
196 196 self.createmode = None
197 197 self._trustnlink = None
198 198
199 199 @util.propertycache
200 200 def _cansymlink(self):
201 201 return util.checklink(self.base)
202 202
203 203 def _fixfilemode(self, name):
204 204 if self.createmode is None:
205 205 return
206 206 os.chmod(name, self.createmode & 0666)
207 207
208 208 def __call__(self, path, mode="r", text=False, atomictemp=False):
209 209 if self._audit:
210 210 r = util.checkosfilename(path)
211 211 if r:
212 212 raise util.Abort("%s: %r" % (r, path))
213 213 self.auditor(path)
214 f = os.path.join(self.base, path)
214 f = self.join(path)
215 215
216 216 if not text and "b" not in mode:
217 217 mode += "b" # for that other OS
218 218
219 219 nlink = -1
220 220 dirname, basename = os.path.split(f)
221 221 # If basename is empty, then the path is malformed because it points
222 222 # to a directory. Let the posixfile() call below raise IOError.
223 223 if basename and mode not in ('r', 'rb'):
224 224 if atomictemp:
225 225 if not os.path.isdir(dirname):
226 226 util.makedirs(dirname, self.createmode)
227 227 return util.atomictempfile(f, mode, self.createmode)
228 228 try:
229 229 if 'w' in mode:
230 230 util.unlink(f)
231 231 nlink = 0
232 232 else:
233 233 # nlinks() may behave differently for files on Windows
234 234 # shares if the file is open.
235 235 fd = util.posixfile(f)
236 236 nlink = util.nlinks(f)
237 237 if nlink < 1:
238 238 nlink = 2 # force mktempcopy (issue1922)
239 239 fd.close()
240 240 except (OSError, IOError), e:
241 241 if e.errno != errno.ENOENT:
242 242 raise
243 243 nlink = 0
244 244 if not os.path.isdir(dirname):
245 245 util.makedirs(dirname, self.createmode)
246 246 if nlink > 0:
247 247 if self._trustnlink is None:
248 248 self._trustnlink = nlink > 1 or util.checknlink(f)
249 249 if nlink > 1 or not self._trustnlink:
250 250 util.rename(util.mktempcopy(f), f)
251 251 fp = util.posixfile(f, mode)
252 252 if nlink == 0:
253 253 self._fixfilemode(f)
254 254 return fp
255 255
256 256 def symlink(self, src, dst):
257 257 self.auditor(dst)
258 linkname = os.path.join(self.base, dst)
258 linkname = self.join(dst)
259 259 try:
260 260 os.unlink(linkname)
261 261 except OSError:
262 262 pass
263 263
264 264 dirname = os.path.dirname(linkname)
265 265 if not os.path.exists(dirname):
266 266 util.makedirs(dirname, self.createmode)
267 267
268 268 if self._cansymlink:
269 269 try:
270 270 os.symlink(src, linkname)
271 271 except OSError, err:
272 272 raise OSError(err.errno, _('could not symlink to %r: %s') %
273 273 (src, err.strerror), linkname)
274 274 else:
275 275 f = self(dst, "w")
276 276 f.write(src)
277 277 f.close()
278 278 self._fixfilemode(dst)
279 279
280 280 def audit(self, path):
281 281 self.auditor(path)
282 282
283 def join(self, path):
284 return os.path.join(self.base, path)
285
283 286 class filteropener(abstractopener):
284 287 '''Wrapper opener for filtering filenames with a function.'''
285 288
286 289 def __init__(self, opener, filter):
287 290 self._filter = filter
288 291 self._orig = opener
289 292
290 293 def __call__(self, path, *args, **kwargs):
291 294 return self._orig(self._filter(path), *args, **kwargs)
292 295
def canonpath(root, cwd, myname, auditor=None):
    '''return the canonical path of myname, given cwd and root'''
    # Make sure root ends with a separator so that "rootfoo" is not
    # mistaken for a path under "root" in the prefix test below.
    if util.endswithsep(root):
        rootsep = root
    else:
        rootsep = root + os.sep
    name = myname
    if not os.path.isabs(name):
        name = os.path.join(root, cwd, name)
    name = os.path.normpath(name)
    if auditor is None:
        auditor = pathauditor(root)
    if name != rootsep and name.startswith(rootsep):
        # name lies textually under root: strip the prefix, audit the
        # remainder and return it with forward slashes
        name = name[len(rootsep):]
        auditor(name)
        return util.pconvert(name)
    elif name == root:
        return ''
    else:
        # Determine whether `name' is in the hierarchy at or beneath `root',
        # by iterating name=dirname(name) until that causes no change (can't
        # check name == '/', because that doesn't work on windows). For each
        # `name', compare dev/inode numbers. If they match, the list `rel'
        # holds the reversed list of components making up the relative file
        # name we want.
        root_st = os.stat(root)
        rel = []
        while True:
            try:
                name_st = os.stat(name)
            except OSError:
                name_st = None
            if name_st and util.samestat(name_st, root_st):
                if not rel:
                    # name was actually the same as root (maybe a symlink)
                    return ''
                rel.reverse()
                name = os.path.join(*rel)
                auditor(name)
                return util.pconvert(name)
            dirname, basename = os.path.split(name)
            rel.append(basename)
            if dirname == name:
                break
            name = dirname

        raise util.Abort('%s not under root' % myname)
340 343
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, recursively.'''
    def errhandler(err):
        # only propagate walk errors for the root itself; errors deeper
        # in the tree are silently ignored
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # record dirname's stat in dirlst; returns False when an
            # equivalent directory (same dev/inode) was already seen,
            # which guards against symlink cycles
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # without samestat we cannot detect symlink cycles safely
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # walk the link target as its own tree, sharing
                        # seen_dirs so directories aren't revisited
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
387 390
def osrcpath():
    '''return the default OS-specific hgrc search path'''
    # system-wide entries come first, then per-user ones; normalize each
    entries = systemrcpath() + userrcpath()
    return [os.path.normpath(entry) for entry in entries]
394 397
# memoized result of rcpath(); None until first computed
_rcpath = None

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                if not p:
                    continue
                p = util.expandpath(p)
                if os.path.isdir(p):
                    # a directory entry contributes all its *.rc files
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = osrcpath()
    return _rcpath
420 423
if os.name != 'nt':

    def rcfiles(path):
        # hgrc in path itself plus every *.rc file under path/hgrc.d
        rcs = [os.path.join(path, 'hgrc')]
        rcdir = os.path.join(path, 'hgrc.d')
        try:
            rcs.extend([os.path.join(rcdir, f)
                        for f, kind in osutil.listdir(rcdir)
                        if f.endswith(".rc")])
        except OSError:
            pass
        return rcs

    def systemrcpath():
        # system-wide hgrc locations: relative to the executable, then /etc
        path = []
        # old mod_python does not set sys.argv
        if len(getattr(sys, 'argv', [])) > 0:
            p = os.path.dirname(os.path.dirname(sys.argv[0]))
            path.extend(rcfiles(os.path.join(p, 'etc/mercurial')))
        path.extend(rcfiles('/etc/mercurial'))
        return path

    def userrcpath():
        # per-user configuration lives in ~/.hgrc
        return [os.path.expanduser('~/.hgrc')]

else:

    _HKEY_LOCAL_MACHINE = 0x80000002L

    def systemrcpath():
        '''return default os-specific hgrc search path'''
        rcpath = []
        filename = util.executablepath()
        # Use mercurial.ini found in directory with hg.exe
        progrc = os.path.join(os.path.dirname(filename), 'mercurial.ini')
        if os.path.isfile(progrc):
            rcpath.append(progrc)
            return rcpath
        # Use hgrc.d found in directory with hg.exe
        progrcd = os.path.join(os.path.dirname(filename), 'hgrc.d')
        if os.path.isdir(progrcd):
            for f, kind in osutil.listdir(progrcd):
                if f.endswith('.rc'):
                    rcpath.append(os.path.join(progrcd, f))
            return rcpath
        # else look for a system rcpath in the registry
        value = util.lookupreg('SOFTWARE\\Mercurial', None,
                               _HKEY_LOCAL_MACHINE)
        if not isinstance(value, str) or not value:
            return rcpath
        value = util.localpath(value)
        for p in value.split(os.pathsep):
            if p.lower().endswith('mercurial.ini'):
                rcpath.append(p)
            elif os.path.isdir(p):
                for f, kind in osutil.listdir(p):
                    if f.endswith('.rc'):
                        rcpath.append(os.path.join(p, f))
        return rcpath

    def userrcpath():
        '''return os-specific hgrc search path to the user dir'''
        home = os.path.expanduser('~')
        path = [os.path.join(home, 'mercurial.ini'),
                os.path.join(home, '.hgrc')]
        userprofile = os.environ.get('USERPROFILE')
        if userprofile:
            path.append(os.path.join(userprofile, 'mercurial.ini'))
            path.append(os.path.join(userprofile, '.hgrc'))
        return path
491 494
def revsingle(repo, revspec, default='.'):
    '''return the change context for a lone revision spec, falling back
    to `default` when the spec is empty; abort on an empty result set'''
    if not revspec:
        return repo[default]

    revs = revrange(repo, [revspec])
    if not revs:
        raise util.Abort(_('empty revision set'))
    return repo[revs[-1]]
500 503
def revpair(repo, revs):
    '''map a list of revision specs to a (node1, node2) pair; the second
    element is None when only a single revision was specified'''
    if not revs:
        return repo.dirstate.p1(), None

    parsed = revrange(repo, revs)
    if not parsed:
        # specs resolved to nothing; behave as if none were given
        return repo.dirstate.p1(), None

    first = repo.lookup(parsed[0])
    if len(parsed) == 1:
        return first, None
    return first, repo.lookup(parsed[-1])
514 517
# separator used by old-style "start:end" revision ranges
_revrangesep = ':'

def revrange(repo, revs):
    """Yield revision as strings from a list of revision specifications."""

    def revfix(repo, val, defval):
        # map an empty spec to defval; otherwise resolve through the
        # changelog to a revision number
        if not val and val != 0 and defval is not None:
            return defval
        return repo.changelog.rev(repo.lookup(val))

    seen, l = set(), []
    for spec in revs:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        try:
            if isinstance(spec, int):
                seen.add(spec)
                l.append(spec)
                continue

            if _revrangesep in spec:
                start, end = spec.split(_revrangesep, 1)
                start = revfix(repo, start, 0)
                end = revfix(repo, end, len(repo) - 1)
                # ranges may run backwards; iterate accordingly
                step = start > end and -1 or 1
                for rev in xrange(start, end + step, step):
                    if rev in seen:
                        continue
                    seen.add(rev)
                    l.append(rev)
                continue
            elif spec and spec in repo: # single unquoted rev
                rev = revfix(repo, spec, None)
                if rev in seen:
                    continue
                seen.add(rev)
                l.append(rev)
                continue
        except error.RepoLookupError:
            pass

        # fall through to new-style queries if old-style fails
        m = revset.match(repo.ui, spec)
        for r in m(repo, range(len(repo))):
            if r not in seen:
                l.append(r)
        seen.update(l)

    return l
564 567
def expandpats(pats):
    '''expand glob characters in kind-less patterns, returning a new list;
    patterns with an explicit kind prefix pass through unchanged'''
    if not util.expandglobs:
        return list(pats)

    expanded = []
    for pat in pats:
        kind, name = matchmod._patsplit(pat, None)
        if kind is not None:
            expanded.append(pat)
            continue
        try:
            matches = glob.glob(name)
        except re.error:
            # a malformed pattern: keep it verbatim
            matches = [name]
        if matches:
            expanded.extend(matches)
        else:
            expanded.append(pat)
    return expanded
581 584
def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    '''build a matcher from patterns and include/exclude options,
    warning (rather than aborting) on bad files'''
    if pats == ("",):
        pats = []
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    matcher = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                        default)

    def badfn(f, msg):
        ctx._repo.ui.warn("%s: %s\n" % (matcher.rel(f), msg))

    matcher.bad = badfn
    return matcher
594 597
def matchall(repo):
    # matcher that accepts every file in the repository
    return matchmod.always(repo.root, repo.getcwd())

def matchfiles(repo, files):
    # matcher that accepts exactly the named files, nothing else
    return matchmod.exact(repo.root, repo.getcwd(), files)
600 603
def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
    '''schedule files matching pats for addition or removal, based on
    their presence in the working directory; when similarity > 0, also
    record removals-plus-adds as renames'''
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)
    # we'd use status here, except handling of symlinks and ignore is tricky
    added, unknown, deleted, removed = [], [], [], []
    audit_path = pathauditor(repo.root)
    m = match(repo[None], pats, opts)
    for abs in repo.walk(m):
        target = repo.wjoin(abs)
        good = True
        try:
            audit_path(abs)
        except (OSError, util.Abort):
            # paths failing the audit are treated as absent
            good = False
        rel = m.rel(abs)
        exact = m.exact(abs)
        if good and abs not in repo.dirstate:
            # present on disk but untracked: schedule for add
            unknown.append(abs)
            if repo.ui.verbose or not exact:
                repo.ui.status(_('adding %s\n') % ((pats and rel) or abs))
        elif repo.dirstate[abs] != 'r' and (not good or not os.path.lexists(target)
            or (os.path.isdir(target) and not os.path.islink(target))):
            # tracked but gone (or replaced by a real directory): removal
            deleted.append(abs)
            if repo.ui.verbose or not exact:
                repo.ui.status(_('removing %s\n') % ((pats and rel) or abs))
        # for finding renames
        elif repo.dirstate[abs] == 'r':
            removed.append(abs)
        elif repo.dirstate[abs] == 'a':
            added.append(abs)
    copies = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo,
                added + unknown, removed + deleted, similarity):
            if repo.ui.verbose or not m.exact(old) or not m.exact(new):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (m.rel(old), m.rel(new), score * 100))
            copies[new] = old

    if not dry_run:
        wctx = repo[None]
        wlock = repo.wlock()
        try:
            wctx.forget(deleted)
            wctx.add(unknown)
            for new, old in copies.iteritems():
                wctx.copy(old, new)
        finally:
            wlock.release()
653 656
def updatedir(ui, repo, patches, similarity=0):
    '''Update dirstate after patch application according to metadata'''
    if not patches:
        return []
    copies = []
    removes = set()
    cfiles = patches.keys()
    cwd = repo.getcwd()
    if cwd:
        # convert root-relative names to cwd-relative for addremove below
        cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()]
    for f in patches:
        gp = patches[f]
        if not gp:
            continue
        if gp.op == 'RENAME':
            copies.append((gp.oldpath, gp.path))
            removes.add(gp.oldpath)
        elif gp.op == 'COPY':
            copies.append((gp.oldpath, gp.path))
        elif gp.op == 'DELETE':
            removes.add(gp.path)

    wctx = repo[None]
    for src, dst in copies:
        dirstatecopy(ui, repo, wctx, src, dst, cwd=cwd)
    if (not similarity) and removes:
        # with similarity enabled, removals are left for addremove's
        # rename detection instead
        wctx.remove(sorted(removes), True)

    for f in patches:
        gp = patches[f]
        if gp and gp.mode:
            islink, isexec = gp.mode
            dst = repo.wjoin(gp.path)
            # patch won't create empty files
            if gp.op == 'ADD' and not os.path.lexists(dst):
                flags = (isexec and 'x' or '') + (islink and 'l' or '')
                repo.wwrite(gp.path, '', flags)
            util.setflags(dst, islink, isexec)
    addremove(repo, cfiles, similarity=similarity)
    files = patches.keys()
    files.extend([r for r in removes if r not in files])
    return sorted(files)
696 699
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # follow an existing copy record so chains collapse to the origin
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # source is only added, not committed: warn and just add dst
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
715 718
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    unknown = []
    for r in requirements:
        if r in supported:
            continue
        # a blank or non-alphanumeric-leading entry means the file is bad
        if not r or not r[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        unknown.append(r)
    unknown.sort()
    if unknown:
        raise error.RequirementError(_("unknown repository format: "
            "requires features '%s' (upgrade Mercurial)") % "', '".join(unknown))
    return requirements
731 734
class filecacheentry(object):
    '''Tracks stat information for a single file so that filecache can
    decide whether a cached object built from it is still valid.'''
    def __init__(self, path):
        self.path = path
        self.cachestat = filecacheentry.stat(self.path)

        if self.cachestat:
            self._cacheable = self.cachestat.cacheable()
        else:
            # None means we don't know yet
            self._cacheable = None

    def refresh(self):
        # re-stat the file, but only when stat-based caching can work
        if self.cacheable():
            self.cachestat = filecacheentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecacheentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        # returns None (implicitly) for a missing file; any other
        # OS error propagates to the caller
        try:
            return util.cachestat(path)
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
782 785
class filecache(object):
    '''A property like decorator that tracks a file under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates
    the object when needed, updating the new stat info in _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behaviour as
    propertycache).'''
    def __init__(self, path):
        self.path = path

    def join(self, obj, fname):
        """Used to compute the runtime path of the cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator protocol: remember the wrapped function and its name
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                # backing file changed on disk: rebuild the cached object
                entry.obj = self.func(obj)
        else:
            path = self.join(obj, self.path)

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(path)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name in obj._filecache:
            obj._filecache[self.name].obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError, self.name
@@ -1,1149 +1,1149 b''
1 1 # subrepo.py - sub-repository handling for Mercurial
2 2 #
3 3 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import errno, os, re, xml.dom.minidom, shutil, posixpath
9 9 import stat, subprocess, tarfile
10 10 from i18n import _
11 11 import config, scmutil, util, node, error, cmdutil, bookmarks
12 12 hg = None
13 13 propertycache = util.propertycache
14 14
# state tuple for a subrepo that does not exist
nullstate = ('', '', 'empty')

def state(ctx, ui):
    """return a state dict, mapping subrepo paths configured in .hgsub
    to tuple: (source from .hgsub, revision from .hgsubstate, kind
    (key in types dict))
    """
    p = config.config()
    def read(f, sections=None, remap=None):
        # parse a spec file out of the context; a missing .hgsub payload
        # is treated as removed, but an absent file entry aborts
        if f in ctx:
            try:
                data = ctx[f].data()
            except IOError, err:
                if err.errno != errno.ENOENT:
                    raise
                # handle missing subrepo spec files as removed
                ui.warn(_("warning: subrepo spec file %s not found\n") % f)
                return
            p.parse(f, data, sections, remap, read)
        else:
            raise util.Abort(_("subrepo spec file %s not found") % f)

    if '.hgsub' in ctx:
        read('.hgsub')

    for path, src in ui.configitems('subpaths'):
        p.set('subpaths', path, src, ui.configsource('subpaths', path))

    # map subrepo path -> revision recorded in .hgsubstate
    rev = {}
    if '.hgsubstate' in ctx:
        try:
            for l in ctx['.hgsubstate'].data().splitlines():
                revision, path = l.split(" ", 1)
                rev[path] = revision
        except IOError, err:
            if err.errno != errno.ENOENT:
                raise

    def remap(src):
        # apply each [subpaths] pattern to src, in configuration order
        for pattern, repl in p.items('subpaths'):
            # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
            # does a string decode.
            repl = repl.encode('string-escape')
            # However, we still want to allow back references to go
            # through unharmed, so we turn r'\\1' into r'\1'. Again,
            # extra escapes are needed because re.sub string decodes.
            repl = re.sub(r'\\\\([0-9]+)', r'\\\1', repl)
            try:
                src = re.sub(pattern, repl, src, 1)
            except re.error, e:
                raise util.Abort(_("bad subrepository pattern in %s: %s")
                                 % (p.source('subpaths', pattern), e))
        return src

    state = {}
    for path, src in p[''].items():
        kind = 'hg'
        if src.startswith('['):
            if ']' not in src:
                raise util.Abort(_('missing ] in subrepo source'))
            kind, src = src.split(']', 1)
            kind = kind[1:]
            src = src.lstrip() # strip any extra whitespace after ']'

        if not util.url(src).isabs():
            parent = _abssource(ctx._repo, abort=False)
            if parent:
                parent = util.url(parent)
                parent.path = posixpath.join(parent.path or '', src)
                parent.path = posixpath.normpath(parent.path)
                joined = str(parent)
                # Remap the full joined path and use it if it changes,
                # else remap the original source.
                remapped = remap(joined)
                if remapped == joined:
                    src = remap(src)
                else:
                    src = remapped

        src = remap(src)
        state[util.pconvert(path)] = (src.strip(), rev.get(path, ''), kind)

    return state
98 98
def writestate(repo, state):
    """rewrite .hgsubstate in (outer) repo with these subrepo states"""
    # one "<revision> <path>" line per subrepo, sorted by path
    content = ''
    for path in sorted(state):
        content += '%s %s\n' % (state[path][1], path)
    repo.wwrite('.hgsubstate', content, '')
103 103
def submerge(repo, wctx, mctx, actx, overwrite):
    """delegated from merge.applyupdates: merging of .hgsubstate file
    in working context, merging context and ancestor context"""
    if mctx == actx: # backwards?
        actx = wctx.p1()
    # substate maps: s1 = local/working, s2 = other/merging, sa = ancestor
    s1 = wctx.substate
    s2 = mctx.substate
    sa = actx.substate
    # sm accumulates the merged substate to be written back
    sm = {}

    repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))

    def debug(s, msg, r=""):
        if r:
            r = "%s:%s:%s" % r
        repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r))

    # first pass: everything present locally
    for s, l in s1.items():
        a = sa.get(s, nullstate)
        ld = l # local state with possible dirty flag for compares
        if wctx.sub(s).dirty():
            ld = (l[0], l[1] + "+")
        if wctx == actx: # overwrite
            a = ld

        if s in s2:
            r = s2[s]
            if ld == r or r == a: # no change or local is newer
                sm[s] = l
                continue
            elif ld == a: # other side changed
                debug(s, "other changed, get", r)
                wctx.sub(s).get(r, overwrite)
                sm[s] = r
            elif ld[0] != r[0]: # sources differ
                if repo.ui.promptchoice(
                    _(' subrepository sources for %s differ\n'
                      'use (l)ocal source (%s) or (r)emote source (%s)?')
                      % (s, l[0], r[0]),
                      (_('&Local'), _('&Remote')), 0):
                    debug(s, "prompt changed, get", r)
                    wctx.sub(s).get(r, overwrite)
                    sm[s] = r
            elif ld[1] == a[1]: # local side is unchanged
                debug(s, "other side changed, get", r)
                wctx.sub(s).get(r, overwrite)
                sm[s] = r
            else:
                debug(s, "both sides changed, merge with", r)
                wctx.sub(s).merge(r)
                sm[s] = l
        elif ld == a: # remote removed, local unchanged
            debug(s, "remote removed, remove")
            wctx.sub(s).remove()
        elif a == nullstate: # not present in remote or ancestor
            debug(s, "local added, keep")
            sm[s] = l
            continue
        else:
            if repo.ui.promptchoice(
                _(' local changed subrepository %s which remote removed\n'
                  'use (c)hanged version or (d)elete?') % s,
                (_('&Changed'), _('&Delete')), 0):
                debug(s, "prompt remove")
                wctx.sub(s).remove()

    # second pass: subrepos only present on the other side
    for s, r in sorted(s2.items()):
        if s in s1:
            continue
        elif s not in sa:
            debug(s, "remote added, get", r)
            mctx.sub(s).get(r)
            sm[s] = r
        elif r != sa[s]:
            if repo.ui.promptchoice(
                _(' remote changed subrepository %s which local removed\n'
                  'use (c)hanged version or (d)elete?') % s,
                (_('&Changed'), _('&Delete')), 0) == 0:
                debug(s, "prompt recreate", r)
                wctx.sub(s).get(r)
                sm[s] = r

    # record merged .hgsubstate
    writestate(repo, sm)
188 188
def _updateprompt(ui, sub, dirty, local, remote):
    '''ask the user to choose between local and remote subrepo sources;
    returns 0 for local, 1 for remote'''
    if dirty:
        template = _(' subrepository sources for %s differ\n'
                     'use (l)ocal source (%s) or (r)emote source (%s)?\n')
    else:
        template = _(' subrepository sources for %s differ (in checked out version)\n'
                     'use (l)ocal source (%s) or (r)emote source (%s)?\n')
    msg = template % (subrelpath(sub), local, remote)
    return ui.promptchoice(msg, (_('&Local'), _('&Remote')), 0)
199 199
def reporelpath(repo):
    """return path to this (sub)repo as seen from outermost repo"""
    # climb the _subparent chain up to the outermost repository
    outer = repo
    while util.safehasattr(outer, '_subparent'):
        outer = outer._subparent
    prefix = outer.root.rstrip(os.sep)
    return repo.root[len(prefix) + 1:]
207 207
def subrelpath(sub):
    """return path to this subrepo as seen from outermost repo"""
    # an explicitly recorded relative path wins
    if util.safehasattr(sub, '_relpath'):
        return sub._relpath
    if util.safehasattr(sub, '_repo'):
        return reporelpath(sub._repo)
    # fall back to the stored path when there is no backing repo object
    return sub._path
215 215
def _abssource(repo, push=False, abort=True):
    """return pull/push path of repo - either based on parent repo .hgsub info
    or on the top repo config. Abort or return None if no source found."""
    if util.safehasattr(repo, '_subparent'):
        source = util.url(repo._subsource)
        if source.isabs():
            return str(source)
        source.path = posixpath.normpath(source.path)
        # recursively resolve against the parent's own source
        parent = _abssource(repo._subparent, push, abort=False)
        if parent:
            parent = util.url(util.pconvert(parent))
            parent.path = posixpath.join(parent.path or '', source.path)
            parent.path = posixpath.normpath(parent.path)
            return str(parent)
    else: # recursion reached top repo
        if util.safehasattr(repo, '_subtoppath'):
            return repo._subtoppath
        if push and repo.ui.config('paths', 'default-push'):
            return repo.ui.config('paths', 'default-push')
        if repo.ui.config('paths', 'default'):
            return repo.ui.config('paths', 'default')
    if abort:
        raise util.Abort(_("default path for subrepository %s not found") %
                         reporelpath(repo))
240 240
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Build a subpath -> context mapping, preferring ctx1 for paths
    # present in both. The subpaths from ctx2 are important when the
    # .hgsub file has been modified (in ctx2) but not yet committed
    # (in ctx1).
    owners = {}
    for subpath in ctx2.substate:
        owners[subpath] = ctx2
    for subpath in ctx1.substate:
        owners[subpath] = ctx1
    for subpath in sorted(owners):
        yield subpath, owners[subpath].sub(subpath)
250 250
def subrepo(ctx, path):
    """return instance of the right subrepo class for subrepo in path"""
    # subrepo inherently violates our import layering rules
    # because it wants to make repo objects from deep inside the stack
    # so we manually delay the circular imports to not break
    # scripts that don't use our demand-loading
    global hg
    import hg as h
    hg = h

    # refuse audit-unsafe paths before instantiating anything
    scmutil.pathauditor(ctx._repo.root)(path)
    state = ctx.substate.get(path, nullstate)
    if state[2] not in types:
        raise util.Abort(_('unknown subrepo type %s') % state[2])
    return types[state[2]](ctx, path, state[:2])
266 266
267 267 # subrepo classes need to implement the following abstract class:
268 268
class abstractsubrepo(object):
    # Base interface that concrete subrepo implementations provide.

    def dirty(self, ignoreupdate=False):
        """returns true if the dirstate of the subrepo is dirty or does not
        match current stored state. If ignoreupdate is true, only check
        whether the subrepo has uncommitted changes in its dirstate.
        """
        raise NotImplementedError

    def checknested(self, path):
        """check if path is a subrepository within this repository"""
        return False

    def commit(self, text, user, date):
        """commit the current changes to the subrepo with the given
        log message. Use given user and date if possible. Return the
        new state of the subrepo.
        """
        raise NotImplementedError

    def remove(self):
        """remove the subrepo

        (should verify the dirstate is not dirty first)
        """
        raise NotImplementedError

    def get(self, state, overwrite=False):
        """run whatever commands are needed to put the subrepo into
        this state
        """
        raise NotImplementedError

    def merge(self, state):
        """merge currently-saved state with the new state."""
        raise NotImplementedError

    def push(self, opts):
        """perform whatever action is analogous to 'hg push'

        This may be a no-op on some systems.
        """
        raise NotImplementedError

    def add(self, ui, match, dryrun, listsubrepos, prefix, explicitonly):
        # add matching files; base implementation adds nothing
        return []

    def status(self, rev2, **opts):
        # seven empty lists, matching the 7-tuple shape hgsubrepo.status
        # returns from repo.status()
        return [], [], [], [], [], [], []

    def diff(self, diffopts, node2, match, prefix, **opts):
        # show a diff; base implementation shows nothing
        pass

    def outgoing(self, ui, dest, opts):
        # NOTE(review): 1 appears to mean "nothing found" — confirm
        # against command-level callers
        return 1

    def incoming(self, ui, source, opts):
        # NOTE(review): 1 appears to mean "nothing found" — confirm
        # against command-level callers
        return 1

    def files(self):
        """return filename iterator"""
        raise NotImplementedError

    def filedata(self, name):
        """return file data"""
        raise NotImplementedError

    def fileflags(self, name):
        """return file flags"""
        return ''

    def archive(self, ui, archiver, prefix):
        # generic archive implementation driven by files()/filedata(),
        # reporting progress per file
        files = self.files()
        total = len(files)
        relpath = subrelpath(self)
        ui.progress(_('archiving (%s)') % relpath, 0,
                    unit=_('files'), total=total)
        for i, name in enumerate(files):
            flags = self.fileflags(name)
            mode = 'x' in flags and 0755 or 0644
            symlink = 'l' in flags
            archiver.addfile(os.path.join(prefix, self._path, name),
                             mode, symlink, self.filedata(name))
            ui.progress(_('archiving (%s)') % relpath, i + 1,
                        unit=_('files'), total=total)
        ui.progress(_('archiving (%s)') % relpath, None)

    def walk(self, match):
        '''
        walk recursively through the directory tree, finding all files
        matched by the match function
        '''
        pass

    def forget(self, ui, match, prefix):
        # forget matching files; base implementation forgets nothing
        return []
365 365
366 366 class hgsubrepo(abstractsubrepo):
    def __init__(self, ctx, path, state):
        self._path = path
        self._state = state
        r = ctx._repo
        root = r.wjoin(path)
        create = False
        # materialize the subrepo on disk the first time it's needed
        if not os.path.exists(os.path.join(root, '.hg')):
            create = True
            util.makedirs(root)
        self._repo = hg.repository(r.ui, root, create=create)
        self._initrepo(r, state[0], create)
378 378
    def _initrepo(self, parentrepo, source, create):
        # link the subrepo back to its parent for source-path resolution
        self._repo._subparent = parentrepo
        self._repo._subsource = source

        if create:
            # seed the new subrepo's hgrc with default (push) paths
            fp = self._repo.opener("hgrc", "w", text=True)
            fp.write('[paths]\n')

            def addpathconfig(key, value):
                if value:
                    fp.write('%s = %s\n' % (key, value))
                    self._repo.ui.setconfig('paths', key, value)

            defpath = _abssource(self._repo, abort=False)
            defpushpath = _abssource(self._repo, True, abort=False)
            addpathconfig('default', defpath)
            if defpath != defpushpath:
                addpathconfig('default-push', defpushpath)
            fp.close()
398 398
    def add(self, ui, match, dryrun, listsubrepos, prefix, explicitonly):
        # delegate to cmdutil.add, extending prefix with our own location
        return cmdutil.add(ui, self._repo, match, dryrun, listsubrepos,
                           os.path.join(prefix, self._path), explicitonly)
402 402
    def status(self, rev2, **opts):
        # compare the recorded subrepo revision against rev2; on lookup
        # failure, warn and report an empty 7-tuple status
        try:
            rev1 = self._state[1]
            ctx1 = self._repo[rev1]
            ctx2 = self._repo[rev2]
            return self._repo.status(ctx1, ctx2, **opts)
        except error.RepoLookupError, inst:
            self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
                               % (inst, subrelpath(self)))
            return [], [], [], [], [], [], []
413 413
414 414 def diff(self, diffopts, node2, match, prefix, **opts):
415 415 try:
416 416 node1 = node.bin(self._state[1])
417 417 # We currently expect node2 to come from substate and be
418 418 # in hex format
419 419 if node2 is not None:
420 420 node2 = node.bin(node2)
421 421 cmdutil.diffordiffstat(self._repo.ui, self._repo, diffopts,
422 422 node1, node2, match,
423 423 prefix=os.path.join(prefix, self._path),
424 424 listsubrepos=True, **opts)
425 425 except error.RepoLookupError, inst:
426 426 self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
427 427 % (inst, subrelpath(self)))
428 428
429 429 def archive(self, ui, archiver, prefix):
430 430 self._get(self._state + ('hg',))
431 431 abstractsubrepo.archive(self, ui, archiver, prefix)
432 432
433 433 rev = self._state[1]
434 434 ctx = self._repo[rev]
435 435 for subpath in ctx.substate:
436 436 s = subrepo(ctx, subpath)
437 437 s.archive(ui, archiver, os.path.join(prefix, self._path))
438 438
439 439 def dirty(self, ignoreupdate=False):
440 440 r = self._state[1]
441 441 if r == '' and not ignoreupdate: # no state recorded
442 442 return True
443 443 w = self._repo[None]
444 444 if r != w.p1().hex() and not ignoreupdate:
445 445 # different version checked out
446 446 return True
447 447 return w.dirty() # working directory changed
448 448
449 449 def checknested(self, path):
450 450 return self._repo._checknested(self._repo.wjoin(path))
451 451
452 452 def commit(self, text, user, date):
453 453 # don't bother committing in the subrepo if it's only been
454 454 # updated
455 455 if not self.dirty(True):
456 456 return self._repo['.'].hex()
457 457 self._repo.ui.debug("committing subrepo %s\n" % subrelpath(self))
458 458 n = self._repo.commit(text, user, date)
459 459 if not n:
460 460 return self._repo['.'].hex() # different version checked out
461 461 return node.hex(n)
462 462
463 463 def remove(self):
464 464 # we can't fully delete the repository as it may contain
465 465 # local-only history
466 466 self._repo.ui.note(_('removing subrepo %s\n') % subrelpath(self))
467 467 hg.clean(self._repo, node.nullid, False)
468 468
469 469 def _get(self, state):
470 470 source, revision, kind = state
471 471 if revision not in self._repo:
472 472 self._repo._subsource = source
473 473 srcurl = _abssource(self._repo)
474 474 other = hg.peer(self._repo.ui, {}, srcurl)
475 475 if len(self._repo) == 0:
476 476 self._repo.ui.status(_('cloning subrepo %s from %s\n')
477 477 % (subrelpath(self), srcurl))
478 478 parentrepo = self._repo._subparent
479 479 shutil.rmtree(self._repo.path)
480 480 other, self._repo = hg.clone(self._repo._subparent.ui, {}, other,
481 481 self._repo.root, update=False)
482 482 self._initrepo(parentrepo, source, create=True)
483 483 else:
484 484 self._repo.ui.status(_('pulling subrepo %s from %s\n')
485 485 % (subrelpath(self), srcurl))
486 486 self._repo.pull(other)
487 487 bookmarks.updatefromremote(self._repo.ui, self._repo, other,
488 488 srcurl)
489 489
490 490 def get(self, state, overwrite=False):
491 491 self._get(state)
492 492 source, revision, kind = state
493 493 self._repo.ui.debug("getting subrepo %s\n" % self._path)
494 494 hg.clean(self._repo, revision, False)
495 495
496 496 def merge(self, state):
497 497 self._get(state)
498 498 cur = self._repo['.']
499 499 dst = self._repo[state[1]]
500 500 anc = dst.ancestor(cur)
501 501
502 502 def mergefunc():
503 if anc == cur:
503 if anc == cur and dst.branch() == cur.branch():
504 504 self._repo.ui.debug("updating subrepo %s\n" % subrelpath(self))
505 505 hg.update(self._repo, state[1])
506 506 elif anc == dst:
507 507 self._repo.ui.debug("skipping subrepo %s\n" % subrelpath(self))
508 508 else:
509 509 self._repo.ui.debug("merging subrepo %s\n" % subrelpath(self))
510 510 hg.merge(self._repo, state[1], remind=False)
511 511
512 512 wctx = self._repo[None]
513 513 if self.dirty():
514 514 if anc != dst:
515 515 if _updateprompt(self._repo.ui, self, wctx.dirty(), cur, dst):
516 516 mergefunc()
517 517 else:
518 518 mergefunc()
519 519 else:
520 520 mergefunc()
521 521
522 522 def push(self, opts):
523 523 force = opts.get('force')
524 524 newbranch = opts.get('new_branch')
525 525 ssh = opts.get('ssh')
526 526
527 527 # push subrepos depth-first for coherent ordering
528 528 c = self._repo['']
529 529 subs = c.substate # only repos that are committed
530 530 for s in sorted(subs):
531 531 if c.sub(s).push(opts) == 0:
532 532 return False
533 533
534 534 dsturl = _abssource(self._repo, True)
535 535 self._repo.ui.status(_('pushing subrepo %s to %s\n') %
536 536 (subrelpath(self), dsturl))
537 537 other = hg.peer(self._repo.ui, {'ssh': ssh}, dsturl)
538 538 return self._repo.push(other, force, newbranch=newbranch)
539 539
540 540 def outgoing(self, ui, dest, opts):
541 541 return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)
542 542
543 543 def incoming(self, ui, source, opts):
544 544 return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts)
545 545
546 546 def files(self):
547 547 rev = self._state[1]
548 548 ctx = self._repo[rev]
549 549 return ctx.manifest()
550 550
551 551 def filedata(self, name):
552 552 rev = self._state[1]
553 553 return self._repo[rev][name].data()
554 554
555 555 def fileflags(self, name):
556 556 rev = self._state[1]
557 557 ctx = self._repo[rev]
558 558 return ctx.flags(name)
559 559
560 560 def walk(self, match):
561 561 ctx = self._repo[None]
562 562 return ctx.walk(match)
563 563
564 564 def forget(self, ui, match, prefix):
565 565 return cmdutil.forget(ui, self._repo, match,
566 566 os.path.join(prefix, self._path), True)
567 567
568 568 class svnsubrepo(abstractsubrepo):
569 569 def __init__(self, ctx, path, state):
570 570 self._path = path
571 571 self._state = state
572 572 self._ctx = ctx
573 573 self._ui = ctx._repo.ui
574 574 self._exe = util.findexe('svn')
575 575 if not self._exe:
576 576 raise util.Abort(_("'svn' executable not found for subrepo '%s'")
577 577 % self._path)
578 578
579 579 def _svncommand(self, commands, filename='', failok=False):
580 580 cmd = [self._exe]
581 581 extrakw = {}
582 582 if not self._ui.interactive():
583 583 # Making stdin be a pipe should prevent svn from behaving
584 584 # interactively even if we can't pass --non-interactive.
585 585 extrakw['stdin'] = subprocess.PIPE
586 586 # Starting in svn 1.5 --non-interactive is a global flag
587 587 # instead of being per-command, but we need to support 1.4 so
588 588 # we have to be intelligent about what commands take
589 589 # --non-interactive.
590 590 if commands[0] in ('update', 'checkout', 'commit'):
591 591 cmd.append('--non-interactive')
592 592 cmd.extend(commands)
593 593 if filename is not None:
594 594 path = os.path.join(self._ctx._repo.origroot, self._path, filename)
595 595 cmd.append(path)
596 596 env = dict(os.environ)
597 597 # Avoid localized output, preserve current locale for everything else.
598 598 env['LC_MESSAGES'] = 'C'
599 599 p = subprocess.Popen(cmd, bufsize=-1, close_fds=util.closefds,
600 600 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
601 601 universal_newlines=True, env=env, **extrakw)
602 602 stdout, stderr = p.communicate()
603 603 stderr = stderr.strip()
604 604 if not failok:
605 605 if p.returncode:
606 606 raise util.Abort(stderr or 'exited with code %d' % p.returncode)
607 607 if stderr:
608 608 self._ui.warn(stderr + '\n')
609 609 return stdout, stderr
610 610
611 611 @propertycache
612 612 def _svnversion(self):
613 613 output, err = self._svncommand(['--version'], filename=None)
614 614 m = re.search(r'^svn,\s+version\s+(\d+)\.(\d+)', output)
615 615 if not m:
616 616 raise util.Abort(_('cannot retrieve svn tool version'))
617 617 return (int(m.group(1)), int(m.group(2)))
618 618
619 619 def _wcrevs(self):
620 620 # Get the working directory revision as well as the last
621 621 # commit revision so we can compare the subrepo state with
622 622 # both. We used to store the working directory one.
623 623 output, err = self._svncommand(['info', '--xml'])
624 624 doc = xml.dom.minidom.parseString(output)
625 625 entries = doc.getElementsByTagName('entry')
626 626 lastrev, rev = '0', '0'
627 627 if entries:
628 628 rev = str(entries[0].getAttribute('revision')) or '0'
629 629 commits = entries[0].getElementsByTagName('commit')
630 630 if commits:
631 631 lastrev = str(commits[0].getAttribute('revision')) or '0'
632 632 return (lastrev, rev)
633 633
634 634 def _wcrev(self):
635 635 return self._wcrevs()[0]
636 636
637 637 def _wcchanged(self):
638 638 """Return (changes, extchanges) where changes is True
639 639 if the working directory was changed, and extchanges is
640 640 True if any of these changes concern an external entry.
641 641 """
642 642 output, err = self._svncommand(['status', '--xml'])
643 643 externals, changes = [], []
644 644 doc = xml.dom.minidom.parseString(output)
645 645 for e in doc.getElementsByTagName('entry'):
646 646 s = e.getElementsByTagName('wc-status')
647 647 if not s:
648 648 continue
649 649 item = s[0].getAttribute('item')
650 650 props = s[0].getAttribute('props')
651 651 path = e.getAttribute('path')
652 652 if item == 'external':
653 653 externals.append(path)
654 654 if (item not in ('', 'normal', 'unversioned', 'external')
655 655 or props not in ('', 'none', 'normal')):
656 656 changes.append(path)
657 657 for path in changes:
658 658 for ext in externals:
659 659 if path == ext or path.startswith(ext + os.sep):
660 660 return True, True
661 661 return bool(changes), False
662 662
663 663 def dirty(self, ignoreupdate=False):
664 664 if not self._wcchanged()[0]:
665 665 if self._state[1] in self._wcrevs() or ignoreupdate:
666 666 return False
667 667 return True
668 668
669 669 def commit(self, text, user, date):
670 670 # user and date are out of our hands since svn is centralized
671 671 changed, extchanged = self._wcchanged()
672 672 if not changed:
673 673 return self._wcrev()
674 674 if extchanged:
675 675 # Do not try to commit externals
676 676 raise util.Abort(_('cannot commit svn externals'))
677 677 commitinfo, err = self._svncommand(['commit', '-m', text])
678 678 self._ui.status(commitinfo)
679 679 newrev = re.search('Committed revision ([0-9]+).', commitinfo)
680 680 if not newrev:
681 681 raise util.Abort(commitinfo.splitlines()[-1])
682 682 newrev = newrev.groups()[0]
683 683 self._ui.status(self._svncommand(['update', '-r', newrev])[0])
684 684 return newrev
685 685
686 686 def remove(self):
687 687 if self.dirty():
688 688 self._ui.warn(_('not removing repo %s because '
689 689 'it has changes.\n' % self._path))
690 690 return
691 691 self._ui.note(_('removing subrepo %s\n') % self._path)
692 692
693 693 def onerror(function, path, excinfo):
694 694 if function is not os.remove:
695 695 raise
696 696 # read-only files cannot be unlinked under Windows
697 697 s = os.stat(path)
698 698 if (s.st_mode & stat.S_IWRITE) != 0:
699 699 raise
700 700 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
701 701 os.remove(path)
702 702
703 703 path = self._ctx._repo.wjoin(self._path)
704 704 shutil.rmtree(path, onerror=onerror)
705 705 try:
706 706 os.removedirs(os.path.dirname(path))
707 707 except OSError:
708 708 pass
709 709
710 710 def get(self, state, overwrite=False):
711 711 if overwrite:
712 712 self._svncommand(['revert', '--recursive'])
713 713 args = ['checkout']
714 714 if self._svnversion >= (1, 5):
715 715 args.append('--force')
716 716 # The revision must be specified at the end of the URL to properly
717 717 # update to a directory which has since been deleted and recreated.
718 718 args.append('%s@%s' % (state[0], state[1]))
719 719 status, err = self._svncommand(args, failok=True)
720 720 if not re.search('Checked out revision [0-9]+.', status):
721 721 if ('is already a working copy for a different URL' in err
722 722 and (self._wcchanged() == (False, False))):
723 723 # obstructed but clean working copy, so just blow it away.
724 724 self.remove()
725 725 self.get(state, overwrite=False)
726 726 return
727 727 raise util.Abort((status or err).splitlines()[-1])
728 728 self._ui.status(status)
729 729
730 730 def merge(self, state):
731 731 old = self._state[1]
732 732 new = state[1]
733 733 if new != self._wcrev():
734 734 dirty = old == self._wcrev() or self._wcchanged()[0]
735 735 if _updateprompt(self._ui, self, dirty, self._wcrev(), new):
736 736 self.get(state, False)
737 737
738 738 def push(self, opts):
739 739 # push is a no-op for SVN
740 740 return True
741 741
742 742 def files(self):
743 743 output = self._svncommand(['list'])
744 744 # This works because svn forbids \n in filenames.
745 745 return output.splitlines()
746 746
747 747 def filedata(self, name):
748 748 return self._svncommand(['cat'], name)
749 749
750 750
751 751 class gitsubrepo(abstractsubrepo):
752 752 def __init__(self, ctx, path, state):
753 753 # TODO add git version check.
754 754 self._state = state
755 755 self._ctx = ctx
756 756 self._path = path
757 757 self._relpath = os.path.join(reporelpath(ctx._repo), path)
758 758 self._abspath = ctx._repo.wjoin(path)
759 759 self._subparent = ctx._repo
760 760 self._ui = ctx._repo.ui
761 761
762 762 def _gitcommand(self, commands, env=None, stream=False):
763 763 return self._gitdir(commands, env=env, stream=stream)[0]
764 764
765 765 def _gitdir(self, commands, env=None, stream=False):
766 766 return self._gitnodir(commands, env=env, stream=stream,
767 767 cwd=self._abspath)
768 768
769 769 def _gitnodir(self, commands, env=None, stream=False, cwd=None):
770 770 """Calls the git command
771 771
772 772 The methods tries to call the git command. versions previor to 1.6.0
773 773 are not supported and very probably fail.
774 774 """
775 775 self._ui.debug('%s: git %s\n' % (self._relpath, ' '.join(commands)))
776 776 # unless ui.quiet is set, print git's stderr,
777 777 # which is mostly progress and useful info
778 778 errpipe = None
779 779 if self._ui.quiet:
780 780 errpipe = open(os.devnull, 'w')
781 781 p = subprocess.Popen(['git'] + commands, bufsize=-1, cwd=cwd, env=env,
782 782 close_fds=util.closefds,
783 783 stdout=subprocess.PIPE, stderr=errpipe)
784 784 if stream:
785 785 return p.stdout, None
786 786
787 787 retdata = p.stdout.read().strip()
788 788 # wait for the child to exit to avoid race condition.
789 789 p.wait()
790 790
791 791 if p.returncode != 0 and p.returncode != 1:
792 792 # there are certain error codes that are ok
793 793 command = commands[0]
794 794 if command in ('cat-file', 'symbolic-ref'):
795 795 return retdata, p.returncode
796 796 # for all others, abort
797 797 raise util.Abort('git %s error %d in %s' %
798 798 (command, p.returncode, self._relpath))
799 799
800 800 return retdata, p.returncode
801 801
802 802 def _gitmissing(self):
803 803 return not os.path.exists(os.path.join(self._abspath, '.git'))
804 804
805 805 def _gitstate(self):
806 806 return self._gitcommand(['rev-parse', 'HEAD'])
807 807
808 808 def _gitcurrentbranch(self):
809 809 current, err = self._gitdir(['symbolic-ref', 'HEAD', '--quiet'])
810 810 if err:
811 811 current = None
812 812 return current
813 813
814 814 def _gitremote(self, remote):
815 815 out = self._gitcommand(['remote', 'show', '-n', remote])
816 816 line = out.split('\n')[1]
817 817 i = line.index('URL: ') + len('URL: ')
818 818 return line[i:]
819 819
820 820 def _githavelocally(self, revision):
821 821 out, code = self._gitdir(['cat-file', '-e', revision])
822 822 return code == 0
823 823
824 824 def _gitisancestor(self, r1, r2):
825 825 base = self._gitcommand(['merge-base', r1, r2])
826 826 return base == r1
827 827
828 828 def _gitisbare(self):
829 829 return self._gitcommand(['config', '--bool', 'core.bare']) == 'true'
830 830
831 831 def _gitupdatestat(self):
832 832 """This must be run before git diff-index.
833 833 diff-index only looks at changes to file stat;
834 834 this command looks at file contents and updates the stat."""
835 835 self._gitcommand(['update-index', '-q', '--refresh'])
836 836
837 837 def _gitbranchmap(self):
838 838 '''returns 2 things:
839 839 a map from git branch to revision
840 840 a map from revision to branches'''
841 841 branch2rev = {}
842 842 rev2branch = {}
843 843
844 844 out = self._gitcommand(['for-each-ref', '--format',
845 845 '%(objectname) %(refname)'])
846 846 for line in out.split('\n'):
847 847 revision, ref = line.split(' ')
848 848 if (not ref.startswith('refs/heads/') and
849 849 not ref.startswith('refs/remotes/')):
850 850 continue
851 851 if ref.startswith('refs/remotes/') and ref.endswith('/HEAD'):
852 852 continue # ignore remote/HEAD redirects
853 853 branch2rev[ref] = revision
854 854 rev2branch.setdefault(revision, []).append(ref)
855 855 return branch2rev, rev2branch
856 856
857 857 def _gittracking(self, branches):
858 858 'return map of remote branch to local tracking branch'
859 859 # assumes no more than one local tracking branch for each remote
860 860 tracking = {}
861 861 for b in branches:
862 862 if b.startswith('refs/remotes/'):
863 863 continue
864 864 bname = b.split('/', 2)[2]
865 865 remote = self._gitcommand(['config', 'branch.%s.remote' % bname])
866 866 if remote:
867 867 ref = self._gitcommand(['config', 'branch.%s.merge' % bname])
868 868 tracking['refs/remotes/%s/%s' %
869 869 (remote, ref.split('/', 2)[2])] = b
870 870 return tracking
871 871
872 872 def _abssource(self, source):
873 873 if '://' not in source:
874 874 # recognize the scp syntax as an absolute source
875 875 colon = source.find(':')
876 876 if colon != -1 and '/' not in source[:colon]:
877 877 return source
878 878 self._subsource = source
879 879 return _abssource(self)
880 880
881 881 def _fetch(self, source, revision):
882 882 if self._gitmissing():
883 883 source = self._abssource(source)
884 884 self._ui.status(_('cloning subrepo %s from %s\n') %
885 885 (self._relpath, source))
886 886 self._gitnodir(['clone', source, self._abspath])
887 887 if self._githavelocally(revision):
888 888 return
889 889 self._ui.status(_('pulling subrepo %s from %s\n') %
890 890 (self._relpath, self._gitremote('origin')))
891 891 # try only origin: the originally cloned repo
892 892 self._gitcommand(['fetch'])
893 893 if not self._githavelocally(revision):
894 894 raise util.Abort(_("revision %s does not exist in subrepo %s\n") %
895 895 (revision, self._relpath))
896 896
897 897 def dirty(self, ignoreupdate=False):
898 898 if self._gitmissing():
899 899 return self._state[1] != ''
900 900 if self._gitisbare():
901 901 return True
902 902 if not ignoreupdate and self._state[1] != self._gitstate():
903 903 # different version checked out
904 904 return True
905 905 # check for staged changes or modified files; ignore untracked files
906 906 self._gitupdatestat()
907 907 out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
908 908 return code == 1
909 909
910 910 def get(self, state, overwrite=False):
911 911 source, revision, kind = state
912 912 if not revision:
913 913 self.remove()
914 914 return
915 915 self._fetch(source, revision)
916 916 # if the repo was set to be bare, unbare it
917 917 if self._gitisbare():
918 918 self._gitcommand(['config', 'core.bare', 'false'])
919 919 if self._gitstate() == revision:
920 920 self._gitcommand(['reset', '--hard', 'HEAD'])
921 921 return
922 922 elif self._gitstate() == revision:
923 923 if overwrite:
924 924 # first reset the index to unmark new files for commit, because
925 925 # reset --hard will otherwise throw away files added for commit,
926 926 # not just unmark them.
927 927 self._gitcommand(['reset', 'HEAD'])
928 928 self._gitcommand(['reset', '--hard', 'HEAD'])
929 929 return
930 930 branch2rev, rev2branch = self._gitbranchmap()
931 931
932 932 def checkout(args):
933 933 cmd = ['checkout']
934 934 if overwrite:
935 935 # first reset the index to unmark new files for commit, because
936 936 # the -f option will otherwise throw away files added for
937 937 # commit, not just unmark them.
938 938 self._gitcommand(['reset', 'HEAD'])
939 939 cmd.append('-f')
940 940 self._gitcommand(cmd + args)
941 941
942 942 def rawcheckout():
943 943 # no branch to checkout, check it out with no branch
944 944 self._ui.warn(_('checking out detached HEAD in subrepo %s\n') %
945 945 self._relpath)
946 946 self._ui.warn(_('check out a git branch if you intend '
947 947 'to make changes\n'))
948 948 checkout(['-q', revision])
949 949
950 950 if revision not in rev2branch:
951 951 rawcheckout()
952 952 return
953 953 branches = rev2branch[revision]
954 954 firstlocalbranch = None
955 955 for b in branches:
956 956 if b == 'refs/heads/master':
957 957 # master trumps all other branches
958 958 checkout(['refs/heads/master'])
959 959 return
960 960 if not firstlocalbranch and not b.startswith('refs/remotes/'):
961 961 firstlocalbranch = b
962 962 if firstlocalbranch:
963 963 checkout([firstlocalbranch])
964 964 return
965 965
966 966 tracking = self._gittracking(branch2rev.keys())
967 967 # choose a remote branch already tracked if possible
968 968 remote = branches[0]
969 969 if remote not in tracking:
970 970 for b in branches:
971 971 if b in tracking:
972 972 remote = b
973 973 break
974 974
975 975 if remote not in tracking:
976 976 # create a new local tracking branch
977 977 local = remote.split('/', 2)[2]
978 978 checkout(['-b', local, remote])
979 979 elif self._gitisancestor(branch2rev[tracking[remote]], remote):
980 980 # When updating to a tracked remote branch,
981 981 # if the local tracking branch is downstream of it,
982 982 # a normal `git pull` would have performed a "fast-forward merge"
983 983 # which is equivalent to updating the local branch to the remote.
984 984 # Since we are only looking at branching at update, we need to
985 985 # detect this situation and perform this action lazily.
986 986 if tracking[remote] != self._gitcurrentbranch():
987 987 checkout([tracking[remote]])
988 988 self._gitcommand(['merge', '--ff', remote])
989 989 else:
990 990 # a real merge would be required, just checkout the revision
991 991 rawcheckout()
992 992
993 993 def commit(self, text, user, date):
994 994 if self._gitmissing():
995 995 raise util.Abort(_("subrepo %s is missing") % self._relpath)
996 996 cmd = ['commit', '-a', '-m', text]
997 997 env = os.environ.copy()
998 998 if user:
999 999 cmd += ['--author', user]
1000 1000 if date:
1001 1001 # git's date parser silently ignores when seconds < 1e9
1002 1002 # convert to ISO8601
1003 1003 env['GIT_AUTHOR_DATE'] = util.datestr(date,
1004 1004 '%Y-%m-%dT%H:%M:%S %1%2')
1005 1005 self._gitcommand(cmd, env=env)
1006 1006 # make sure commit works otherwise HEAD might not exist under certain
1007 1007 # circumstances
1008 1008 return self._gitstate()
1009 1009
1010 1010 def merge(self, state):
1011 1011 source, revision, kind = state
1012 1012 self._fetch(source, revision)
1013 1013 base = self._gitcommand(['merge-base', revision, self._state[1]])
1014 1014 self._gitupdatestat()
1015 1015 out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
1016 1016
1017 1017 def mergefunc():
1018 1018 if base == revision:
1019 1019 self.get(state) # fast forward merge
1020 1020 elif base != self._state[1]:
1021 1021 self._gitcommand(['merge', '--no-commit', revision])
1022 1022
1023 1023 if self.dirty():
1024 1024 if self._gitstate() != revision:
1025 1025 dirty = self._gitstate() == self._state[1] or code != 0
1026 1026 if _updateprompt(self._ui, self, dirty,
1027 1027 self._state[1][:7], revision[:7]):
1028 1028 mergefunc()
1029 1029 else:
1030 1030 mergefunc()
1031 1031
1032 1032 def push(self, opts):
1033 1033 force = opts.get('force')
1034 1034
1035 1035 if not self._state[1]:
1036 1036 return True
1037 1037 if self._gitmissing():
1038 1038 raise util.Abort(_("subrepo %s is missing") % self._relpath)
1039 1039 # if a branch in origin contains the revision, nothing to do
1040 1040 branch2rev, rev2branch = self._gitbranchmap()
1041 1041 if self._state[1] in rev2branch:
1042 1042 for b in rev2branch[self._state[1]]:
1043 1043 if b.startswith('refs/remotes/origin/'):
1044 1044 return True
1045 1045 for b, revision in branch2rev.iteritems():
1046 1046 if b.startswith('refs/remotes/origin/'):
1047 1047 if self._gitisancestor(self._state[1], revision):
1048 1048 return True
1049 1049 # otherwise, try to push the currently checked out branch
1050 1050 cmd = ['push']
1051 1051 if force:
1052 1052 cmd.append('--force')
1053 1053
1054 1054 current = self._gitcurrentbranch()
1055 1055 if current:
1056 1056 # determine if the current branch is even useful
1057 1057 if not self._gitisancestor(self._state[1], current):
1058 1058 self._ui.warn(_('unrelated git branch checked out '
1059 1059 'in subrepo %s\n') % self._relpath)
1060 1060 return False
1061 1061 self._ui.status(_('pushing branch %s of subrepo %s\n') %
1062 1062 (current.split('/', 2)[2], self._relpath))
1063 1063 self._gitcommand(cmd + ['origin', current])
1064 1064 return True
1065 1065 else:
1066 1066 self._ui.warn(_('no branch checked out in subrepo %s\n'
1067 1067 'cannot push revision %s') %
1068 1068 (self._relpath, self._state[1]))
1069 1069 return False
1070 1070
1071 1071 def remove(self):
1072 1072 if self._gitmissing():
1073 1073 return
1074 1074 if self.dirty():
1075 1075 self._ui.warn(_('not removing repo %s because '
1076 1076 'it has changes.\n') % self._relpath)
1077 1077 return
1078 1078 # we can't fully delete the repository as it may contain
1079 1079 # local-only history
1080 1080 self._ui.note(_('removing subrepo %s\n') % self._relpath)
1081 1081 self._gitcommand(['config', 'core.bare', 'true'])
1082 1082 for f in os.listdir(self._abspath):
1083 1083 if f == '.git':
1084 1084 continue
1085 1085 path = os.path.join(self._abspath, f)
1086 1086 if os.path.isdir(path) and not os.path.islink(path):
1087 1087 shutil.rmtree(path)
1088 1088 else:
1089 1089 os.remove(path)
1090 1090
1091 1091 def archive(self, ui, archiver, prefix):
1092 1092 source, revision = self._state
1093 1093 if not revision:
1094 1094 return
1095 1095 self._fetch(source, revision)
1096 1096
1097 1097 # Parse git's native archive command.
1098 1098 # This should be much faster than manually traversing the trees
1099 1099 # and objects with many subprocess calls.
1100 1100 tarstream = self._gitcommand(['archive', revision], stream=True)
1101 1101 tar = tarfile.open(fileobj=tarstream, mode='r|')
1102 1102 relpath = subrelpath(self)
1103 1103 ui.progress(_('archiving (%s)') % relpath, 0, unit=_('files'))
1104 1104 for i, info in enumerate(tar):
1105 1105 if info.isdir():
1106 1106 continue
1107 1107 if info.issym():
1108 1108 data = info.linkname
1109 1109 else:
1110 1110 data = tar.extractfile(info).read()
1111 1111 archiver.addfile(os.path.join(prefix, self._path, info.name),
1112 1112 info.mode, info.issym(), data)
1113 1113 ui.progress(_('archiving (%s)') % relpath, i + 1,
1114 1114 unit=_('files'))
1115 1115 ui.progress(_('archiving (%s)') % relpath, None)
1116 1116
1117 1117
1118 1118 def status(self, rev2, **opts):
1119 1119 rev1 = self._state[1]
1120 1120 if self._gitmissing() or not rev1:
1121 1121 # if the repo is missing, return no results
1122 1122 return [], [], [], [], [], [], []
1123 1123 modified, added, removed = [], [], []
1124 1124 self._gitupdatestat()
1125 1125 if rev2:
1126 1126 command = ['diff-tree', rev1, rev2]
1127 1127 else:
1128 1128 command = ['diff-index', rev1]
1129 1129 out = self._gitcommand(command)
1130 1130 for line in out.split('\n'):
1131 1131 tab = line.find('\t')
1132 1132 if tab == -1:
1133 1133 continue
1134 1134 status, f = line[tab - 1], line[tab + 1:]
1135 1135 if status == 'M':
1136 1136 modified.append(f)
1137 1137 elif status == 'A':
1138 1138 added.append(f)
1139 1139 elif status == 'D':
1140 1140 removed.append(f)
1141 1141
1142 1142 deleted = unknown = ignored = clean = []
1143 1143 return modified, added, removed, deleted, unknown, ignored, clean
1144 1144
1145 1145 types = {
1146 1146 'hg': hgsubrepo,
1147 1147 'svn': svnsubrepo,
1148 1148 'git': gitsubrepo,
1149 1149 }
@@ -1,489 +1,489 b''
1 1 #
2 2 # This is the mercurial setup script.
3 3 #
4 4 # 'python setup.py install', or
5 5 # 'python setup.py --help' for more options
6 6
7 7 import sys, platform
8 8 if getattr(sys, 'version_info', (0, 0, 0)) < (2, 4, 0, 'final'):
9 9 raise SystemExit("Mercurial requires Python 2.4 or later.")
10 10
11 11 if sys.version_info[0] >= 3:
12 12 def b(s):
13 13 '''A helper function to emulate 2.6+ bytes literals using string
14 14 literals.'''
15 15 return s.encode('latin1')
16 16 else:
17 17 def b(s):
18 18 '''A helper function to emulate 2.6+ bytes literals using string
19 19 literals.'''
20 20 return s
21 21
22 22 # Solaris Python packaging brain damage
23 23 try:
24 24 import hashlib
25 25 sha = hashlib.sha1()
26 26 except:
27 27 try:
28 28 import sha
29 29 except:
30 30 raise SystemExit(
31 31 "Couldn't import standard hashlib (incomplete Python install).")
32 32
33 33 try:
34 34 import zlib
35 35 except:
36 36 raise SystemExit(
37 37 "Couldn't import standard zlib (incomplete Python install).")
38 38
39 39 # The base IronPython distribution (as of 2.7.1) doesn't support bz2
40 40 isironpython = False
41 41 try:
42 42 isironpython = platform.python_implementation().lower().find("ironpython") != -1
43 43 except:
44 44 pass
45 45
46 46 if isironpython:
47 47 sys.stderr.write("warning: IronPython detected (no bz2 support)\n")
48 48 else:
49 49 try:
50 50 import bz2
51 51 except:
52 52 raise SystemExit(
53 53 "Couldn't import standard bz2 (incomplete Python install).")
54 54
55 55 import os, subprocess, time
56 56 import shutil
57 57 import tempfile
58 58 from distutils import log
59 59 from distutils.core import setup, Command, Extension
60 60 from distutils.dist import Distribution
61 61 from distutils.command.build import build
62 62 from distutils.command.build_ext import build_ext
63 63 from distutils.command.build_py import build_py
64 64 from distutils.command.install_scripts import install_scripts
65 65 from distutils.spawn import spawn, find_executable
66 66 from distutils.ccompiler import new_compiler
67 67 from distutils.errors import CCompilerError, DistutilsExecError
68 68 from distutils.sysconfig import get_python_inc
69 69 from distutils.version import StrictVersion
70 70
71 71 convert2to3 = '--c2to3' in sys.argv
72 72 if convert2to3:
73 73 try:
74 74 from distutils.command.build_py import build_py_2to3 as build_py
75 75 from lib2to3.refactor import get_fixers_from_package as getfixers
76 76 except ImportError:
77 77 if sys.version_info[0] < 3:
78 78 raise SystemExit("--c2to3 is only compatible with python3.")
79 79 raise
80 80 sys.path.append('contrib')
81 81 elif sys.version_info[0] >= 3:
82 82 raise SystemExit("setup.py with python3 needs --c2to3 (experimental)")
83 83
84 84 scripts = ['hg']
85 85 if os.name == 'nt':
86 86 scripts.append('contrib/win32/hg.bat')
87 87
88 88 # simplified version of distutils.ccompiler.CCompiler.has_function
89 89 # that actually removes its temporary files.
90 90 def hasfunction(cc, funcname):
91 91 tmpdir = tempfile.mkdtemp(prefix='hg-install-')
92 92 devnull = oldstderr = None
93 93 try:
94 94 try:
95 95 fname = os.path.join(tmpdir, 'funcname.c')
96 96 f = open(fname, 'w')
97 97 f.write('int main(void) {\n')
98 98 f.write(' %s();\n' % funcname)
99 99 f.write('}\n')
100 100 f.close()
101 101 # Redirect stderr to /dev/null to hide any error messages
102 102 # from the compiler.
103 103 # This will have to be changed if we ever have to check
104 104 # for a function on Windows.
105 105 devnull = open('/dev/null', 'w')
106 106 oldstderr = os.dup(sys.stderr.fileno())
107 107 os.dup2(devnull.fileno(), sys.stderr.fileno())
108 108 objects = cc.compile([fname], output_dir=tmpdir)
109 109 cc.link_executable(objects, os.path.join(tmpdir, "a.out"))
110 110 except:
111 111 return False
112 112 return True
113 113 finally:
114 114 if oldstderr is not None:
115 115 os.dup2(oldstderr, sys.stderr.fileno())
116 116 if devnull is not None:
117 117 devnull.close()
118 118 shutil.rmtree(tmpdir)
119 119
120 120 # py2exe needs to be installed to work
121 121 try:
122 122 import py2exe
123 123 py2exeloaded = True
124 124 # import py2exe's patched Distribution class
125 125 from distutils.core import Distribution
126 126 except ImportError:
127 127 py2exeloaded = False
128 128
129 129 def runcmd(cmd, env):
130 130 p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
131 131 stderr=subprocess.PIPE, env=env)
132 132 out, err = p.communicate()
133 133 return out, err
134 134
135 135 def runhg(cmd, env):
136 136 out, err = runcmd(cmd, env)
137 137 # If root is executing setup.py, but the repository is owned by
138 138 # another user (as in "sudo python setup.py install") we will get
139 139 # trust warnings since the .hg/hgrc file is untrusted. That is
140 140 # fine, we don't want to load it anyway. Python may warn about
141 141 # a missing __init__.py in mercurial/locale, we also ignore that.
142 142 err = [e for e in err.splitlines()
143 143 if not e.startswith(b('Not trusting file')) \
144 144 and not e.startswith(b('warning: Not importing'))]
145 145 if err:
146 146 return ''
147 147 return out
148 148
149 149 version = ''
150 150
151 151 # Execute hg out of this directory with a custom environment which
152 152 # includes the pure Python modules in mercurial/pure. We also take
153 153 # care to not use any hgrc files and do no localization.
154 154 pypath = ['mercurial', os.path.join('mercurial', 'pure')]
155 155 env = {'PYTHONPATH': os.pathsep.join(pypath),
156 156 'HGRCPATH': '',
157 157 'LANGUAGE': 'C'}
158 158 if 'LD_LIBRARY_PATH' in os.environ:
159 159 env['LD_LIBRARY_PATH'] = os.environ['LD_LIBRARY_PATH']
160 160 if 'SystemRoot' in os.environ:
161 161 # Copy SystemRoot into the custom environment for Python 2.6
162 162 # under Windows. Otherwise, the subprocess will fail with
163 163 # error 0xc0150004. See: http://bugs.python.org/issue3440
164 164 env['SystemRoot'] = os.environ['SystemRoot']
165 165
166 166 if os.path.isdir('.hg'):
167 167 cmd = [sys.executable, 'hg', 'id', '-i', '-t']
168 168 l = runhg(cmd, env).split()
169 169 while len(l) > 1 and l[-1][0].isalpha(): # remove non-numbered tags
170 170 l.pop()
171 171 if len(l) > 1: # tag found
172 172 version = l[-1]
173 173 if l[0].endswith('+'): # propagate the dirty status to the tag
174 174 version += '+'
175 175 elif len(l) == 1: # no tag found
176 176 cmd = [sys.executable, 'hg', 'parents', '--template',
177 177 '{latesttag}+{latesttagdistance}-']
178 178 version = runhg(cmd, env) + l[0]
179 179 if version.endswith('+'):
180 180 version += time.strftime('%Y%m%d')
181 181 elif os.path.exists('.hg_archival.txt'):
182 182 kw = dict([[t.strip() for t in l.split(':', 1)]
183 183 for l in open('.hg_archival.txt')])
184 184 if 'tag' in kw:
185 185 version = kw['tag']
186 186 elif 'latesttag' in kw:
187 187 version = '%(latesttag)s+%(latesttagdistance)s-%(node).12s' % kw
188 188 else:
189 189 version = kw.get('node', '')[:12]
190 190
191 191 if version:
192 192 f = open("mercurial/__version__.py", "w")
193 193 f.write('# this file is autogenerated by setup.py\n')
194 194 f.write('version = "%s"\n' % version)
195 195 f.close()
196 196
197 197
198 198 try:
199 199 from mercurial import __version__
200 200 version = __version__.version
201 201 except ImportError:
202 202 version = 'unknown'
203 203
204 204 class hgbuild(build):
205 205 # Insert hgbuildmo first so that files in mercurial/locale/ are found
206 206 # when build_py is run next.
207 207 sub_commands = [('build_mo', None),
208 208 # We also need build_ext before build_py. Otherwise, when 2to3 is called (in
209 209 # build_py), it will not find osutil & friends, thinking that those modules are
210 210 # global and, consequently, making a mess, now that all module imports are
211 211 # global.
212 212 ('build_ext', build.has_ext_modules),
213 213 ] + build.sub_commands
214 214
215 215 class hgbuildmo(build):
216 216
217 217 description = "build translations (.mo files)"
218 218
219 219 def run(self):
220 220 if not find_executable('msgfmt'):
221 221 self.warn("could not find msgfmt executable, no translations "
222 222 "will be built")
223 223 return
224 224
225 225 podir = 'i18n'
226 226 if not os.path.isdir(podir):
227 227 self.warn("could not find %s/ directory" % podir)
228 228 return
229 229
230 230 join = os.path.join
231 231 for po in os.listdir(podir):
232 232 if not po.endswith('.po'):
233 233 continue
234 234 pofile = join(podir, po)
235 235 modir = join('locale', po[:-3], 'LC_MESSAGES')
236 236 mofile = join(modir, 'hg.mo')
237 237 mobuildfile = join('mercurial', mofile)
238 238 cmd = ['msgfmt', '-v', '-o', mobuildfile, pofile]
239 239 if sys.platform != 'sunos5':
240 240 # msgfmt on Solaris does not know about -c
241 241 cmd.append('-c')
242 242 self.mkpath(join('mercurial', modir))
243 243 self.make_file([pofile], mobuildfile, spawn, (cmd,))
244 244
245 245
246 246 class hgdist(Distribution):
247 247 pure = 0
248 248
249 249 global_options = Distribution.global_options + \
250 250 [('pure', None, "use pure (slow) Python "
251 251 "code instead of C extensions"),
252 252 ('c2to3', None, "(experimental!) convert "
253 253 "code with 2to3"),
254 254 ]
255 255
256 256 def has_ext_modules(self):
257 257 # self.ext_modules is emptied in hgbuildpy.finalize_options which is
258 258 # too late for some cases
259 259 return not self.pure and Distribution.has_ext_modules(self)
260 260
261 261 class hgbuildext(build_ext):
262 262
263 263 def build_extension(self, ext):
264 264 try:
265 265 build_ext.build_extension(self, ext)
266 266 except CCompilerError:
267 267 if not getattr(ext, 'optional', False):
268 268 raise
269 269 log.warn("Failed to build optional extension '%s' (skipping)",
270 270 ext.name)
271 271
272 272 class hgbuildpy(build_py):
273 273 if convert2to3:
274 274 fixer_names = sorted(set(getfixers("lib2to3.fixes") +
275 275 getfixers("hgfixes")))
276 276
277 277 def finalize_options(self):
278 278 build_py.finalize_options(self)
279 279
280 280 if self.distribution.pure:
281 281 if self.py_modules is None:
282 282 self.py_modules = []
283 283 for ext in self.distribution.ext_modules:
284 284 if ext.name.startswith("mercurial."):
285 285 self.py_modules.append("mercurial.pure.%s" % ext.name[10:])
286 286 self.distribution.ext_modules = []
287 287 else:
288 288 if not os.path.exists(os.path.join(get_python_inc(), 'Python.h')):
289 289 raise SystemExit("Python headers are required to build Mercurial")
290 290
291 291 def find_modules(self):
292 292 modules = build_py.find_modules(self)
293 293 for module in modules:
294 294 if module[0] == "mercurial.pure":
295 295 if module[1] != "__init__":
296 296 yield ("mercurial", module[1], module[2])
297 297 else:
298 298 yield module
299 299
300 300 class buildhgextindex(Command):
301 301 description = 'generate prebuilt index of hgext (for frozen package)'
302 302 user_options = []
303 303 _indexfilename = 'hgext/__index__.py'
304 304
305 305 def initialize_options(self):
306 306 pass
307 307
308 308 def finalize_options(self):
309 309 pass
310 310
311 311 def run(self):
312 312 if os.path.exists(self._indexfilename):
313 313 os.unlink(self._indexfilename)
314 314
315 315 # here no extension enabled, disabled() lists up everything
316 316 code = ('import pprint; from mercurial import extensions; '
317 317 'pprint.pprint(extensions.disabled())')
318 318 out, err = runcmd([sys.executable, '-c', code], env)
319 319 if err:
320 320 raise DistutilsExecError(err)
321 321
322 322 f = open(self._indexfilename, 'w')
323 323 f.write('# this file is autogenerated by setup.py\n')
324 324 f.write('docs = ')
325 325 f.write(out)
326 326 f.close()
327 327
328 328 class hginstallscripts(install_scripts):
329 329 '''
330 330 This is a specialization of install_scripts that replaces the @LIBDIR@ with
331 331 the configured directory for modules. If possible, the path is made relative
332 332 to the directory for scripts.
333 333 '''
334 334
335 335 def initialize_options(self):
336 336 install_scripts.initialize_options(self)
337 337
338 338 self.install_lib = None
339 339
340 340 def finalize_options(self):
341 341 install_scripts.finalize_options(self)
342 342 self.set_undefined_options('install',
343 343 ('install_lib', 'install_lib'))
344 344
345 345 def run(self):
346 346 install_scripts.run(self)
347 347
348 348 if (os.path.splitdrive(self.install_dir)[0] !=
349 349 os.path.splitdrive(self.install_lib)[0]):
350 350 # can't make relative paths from one drive to another, so use an
351 351 # absolute path instead
352 352 libdir = self.install_lib
353 353 else:
354 354 common = os.path.commonprefix((self.install_dir, self.install_lib))
355 355 rest = self.install_dir[len(common):]
356 356 uplevel = len([n for n in os.path.split(rest) if n])
357 357
358 358 libdir = uplevel * ('..' + os.sep) + self.install_lib[len(common):]
359 359
360 360 for outfile in self.outfiles:
361 361 fp = open(outfile, 'rb')
362 362 data = fp.read()
363 363 fp.close()
364 364
365 365 # skip binary files
366 366 if b('\0') in data:
367 367 continue
368 368
369 369 data = data.replace('@LIBDIR@', libdir.encode('string_escape'))
370 370 fp = open(outfile, 'wb')
371 371 fp.write(data)
372 372 fp.close()
373 373
374 374 cmdclass = {'build': hgbuild,
375 375 'build_mo': hgbuildmo,
376 376 'build_ext': hgbuildext,
377 377 'build_py': hgbuildpy,
378 378 'build_hgextindex': buildhgextindex,
379 379 'install_scripts': hginstallscripts}
380 380
381 381 packages = ['mercurial', 'mercurial.hgweb',
382 382 'mercurial.httpclient', 'mercurial.httpclient.tests',
383 383 'hgext', 'hgext.convert', 'hgext.highlight', 'hgext.zeroconf',
384 384 'hgext.largefiles']
385 385
386 386 pymodules = []
387 387
388 388 extmodules = [
389 389 Extension('mercurial.base85', ['mercurial/base85.c']),
390 390 Extension('mercurial.bdiff', ['mercurial/bdiff.c']),
391 391 Extension('mercurial.diffhelpers', ['mercurial/diffhelpers.c']),
392 392 Extension('mercurial.mpatch', ['mercurial/mpatch.c']),
393 393 Extension('mercurial.parsers', ['mercurial/parsers.c']),
394 394 ]
395 395
396 396 osutil_ldflags = []
397 397
398 398 if sys.platform == 'darwin':
399 399 osutil_ldflags += ['-framework', 'ApplicationServices']
400 400
401 401 # disable osutil.c under windows + python 2.4 (issue1364)
402 402 if sys.platform == 'win32' and sys.version_info < (2, 5, 0, 'final'):
403 403 pymodules.append('mercurial.pure.osutil')
404 404 else:
405 405 extmodules.append(Extension('mercurial.osutil', ['mercurial/osutil.c'],
406 406 extra_link_args=osutil_ldflags))
407 407
408 408 if sys.platform.startswith('linux') and os.uname()[2] > '2.6':
409 409 # The inotify extension is only usable with Linux 2.6 kernels.
410 410 # You also need a reasonably recent C library.
411 411 # In any case, if it fails to build the error will be skipped ('optional').
412 412 cc = new_compiler()
413 413 if hasfunction(cc, 'inotify_add_watch'):
414 414 inotify = Extension('hgext.inotify.linux._inotify',
415 415 ['hgext/inotify/linux/_inotify.c'],
416 416 ['mercurial'])
417 417 inotify.optional = True
418 418 extmodules.append(inotify)
419 419 packages.extend(['hgext.inotify', 'hgext.inotify.linux'])
420 420
421 421 packagedata = {'mercurial': ['locale/*/LC_MESSAGES/hg.mo',
422 422 'help/*.txt']}
423 423
424 424 def ordinarypath(p):
425 425 return p and p[0] != '.' and p[-1] != '~'
426 426
427 427 for root in ('templates',):
428 428 for curdir, dirs, files in os.walk(os.path.join('mercurial', root)):
429 429 curdir = curdir.split(os.sep, 1)[1]
430 430 dirs[:] = filter(ordinarypath, dirs)
431 431 for f in filter(ordinarypath, files):
432 432 f = os.path.join(curdir, f)
433 433 packagedata['mercurial'].append(f)
434 434
435 435 datafiles = []
436 436 setupversion = version
437 437 extra = {}
438 438
439 439 if py2exeloaded:
440 440 extra['console'] = [
441 441 {'script':'hg',
442 442 'copyright':'Copyright (C) 2005-2010 Matt Mackall and others',
443 443 'product_version':version}]
444 444 # sub command of 'build' because 'py2exe' does not handle sub_commands
445 445 build.sub_commands.insert(0, ('build_hgextindex', None))
446 446
447 447 if os.name == 'nt':
448 448 # Windows binary file versions for exe/dll files must have the
449 449 # form W.X.Y.Z, where W,X,Y,Z are numbers in the range 0..65535
450 450 setupversion = version.split('+', 1)[0]
451 451
452 452 if sys.platform == 'darwin' and os.path.exists('/usr/bin/xcodebuild'):
453 453 # XCode 4.0 dropped support for ppc architecture, which is hardcoded in
454 454 # distutils.sysconfig
455 455 version = runcmd(['/usr/bin/xcodebuild', '-version'], {})[0].splitlines()
456 456 if version:
457 version = version.splitlines()[0]
457 version = version[0]
458 458 xcode4 = (version.startswith('Xcode') and
459 459 StrictVersion(version.split()[1]) >= StrictVersion('4.0'))
460 460 else:
461 461 # xcodebuild returns empty on OS X Lion with XCode 4.3 not
462 462 # installed, but instead with only command-line tools. Assume
463 463 # that only happens on >= Lion, thus no PPC support.
464 464 xcode4 = True
465 465
466 466 if xcode4:
467 467 os.environ['ARCHFLAGS'] = ''
468 468
469 469 setup(name='mercurial',
470 470 version=setupversion,
471 471 author='Matt Mackall',
472 472 author_email='mpm@selenic.com',
473 473 url='http://mercurial.selenic.com/',
474 474 description='Scalable distributed SCM',
475 475 license='GNU GPLv2+',
476 476 scripts=scripts,
477 477 packages=packages,
478 478 py_modules=pymodules,
479 479 ext_modules=extmodules,
480 480 data_files=datafiles,
481 481 package_data=packagedata,
482 482 cmdclass=cmdclass,
483 483 distclass=hgdist,
484 484 options=dict(py2exe=dict(packages=['hgext', 'email']),
485 485 bdist_mpkg=dict(zipdist=True,
486 486 license='COPYING',
487 487 readme='contrib/macosx/Readme.html',
488 488 welcome='contrib/macosx/Welcome.html')),
489 489 **extra)
@@ -1,234 +1,257 b''
1 1 import sys, os, struct, subprocess, cStringIO, re, shutil
2 2
3 3 def connect(path=None):
4 4 cmdline = ['hg', 'serve', '--cmdserver', 'pipe']
5 5 if path:
6 6 cmdline += ['-R', path]
7 7
8 8 server = subprocess.Popen(cmdline, stdin=subprocess.PIPE,
9 9 stdout=subprocess.PIPE)
10 10
11 11 return server
12 12
13 13 def writeblock(server, data):
14 14 server.stdin.write(struct.pack('>I', len(data)))
15 15 server.stdin.write(data)
16 16 server.stdin.flush()
17 17
18 18 def readchannel(server):
19 19 data = server.stdout.read(5)
20 20 if not data:
21 21 raise EOFError()
22 22 channel, length = struct.unpack('>cI', data)
23 23 if channel in 'IL':
24 24 return channel, length
25 25 else:
26 26 return channel, server.stdout.read(length)
27 27
28 28 def runcommand(server, args, output=sys.stdout, error=sys.stderr, input=None):
29 29 print ' runcommand', ' '.join(args)
30 30 sys.stdout.flush()
31 31 server.stdin.write('runcommand\n')
32 32 writeblock(server, '\0'.join(args))
33 33
34 34 if not input:
35 35 input = cStringIO.StringIO()
36 36
37 37 while True:
38 38 ch, data = readchannel(server)
39 39 if ch == 'o':
40 40 output.write(data)
41 41 output.flush()
42 42 elif ch == 'e':
43 43 error.write(data)
44 44 error.flush()
45 45 elif ch == 'I':
46 46 writeblock(server, input.read(data))
47 47 elif ch == 'L':
48 48 writeblock(server, input.readline(data))
49 49 elif ch == 'r':
50 50 return struct.unpack('>i', data)[0]
51 51 else:
52 52 print "unexpected channel %c: %r" % (ch, data)
53 53 if ch.isupper():
54 54 return
55 55
56 56 def check(func, repopath=None):
57 57 print
58 58 print 'testing %s:' % func.__name__
59 59 print
60 60 sys.stdout.flush()
61 61 server = connect(repopath)
62 62 try:
63 63 return func(server)
64 64 finally:
65 65 server.stdin.close()
66 66 server.wait()
67 67
68 68 def unknowncommand(server):
69 69 server.stdin.write('unknowncommand\n')
70 70
71 71 def hellomessage(server):
72 72 ch, data = readchannel(server)
73 73 # escaping python tests output not supported
74 74 print '%c, %r' % (ch, re.sub('encoding: [a-zA-Z0-9-]+', 'encoding: ***', data))
75 75
76 76 # run an arbitrary command to make sure the next thing the server sends
77 77 # isn't part of the hello message
78 78 runcommand(server, ['id'])
79 79
80 80 def checkruncommand(server):
81 81 # hello block
82 82 readchannel(server)
83 83
84 84 # no args
85 85 runcommand(server, [])
86 86
87 87 # global options
88 88 runcommand(server, ['id', '--quiet'])
89 89
90 90 # make sure global options don't stick through requests
91 91 runcommand(server, ['id'])
92 92
93 93 # --config
94 94 runcommand(server, ['id', '--config', 'ui.quiet=True'])
95 95
96 96 # make sure --config doesn't stick
97 97 runcommand(server, ['id'])
98 98
99 99 def inputeof(server):
100 100 readchannel(server)
101 101 server.stdin.write('runcommand\n')
102 102 # close stdin while server is waiting for input
103 103 server.stdin.close()
104 104
105 105 # server exits with 1 if the pipe closed while reading the command
106 106 print 'server exit code =', server.wait()
107 107
108 108 def serverinput(server):
109 109 readchannel(server)
110 110
111 111 patch = """
112 112 # HG changeset patch
113 113 # User test
114 114 # Date 0 0
115 115 # Node ID c103a3dec114d882c98382d684d8af798d09d857
116 116 # Parent 0000000000000000000000000000000000000000
117 117 1
118 118
119 119 diff -r 000000000000 -r c103a3dec114 a
120 120 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
121 121 +++ b/a Thu Jan 01 00:00:00 1970 +0000
122 122 @@ -0,0 +1,1 @@
123 123 +1
124 124 """
125 125
126 126 runcommand(server, ['import', '-'], input=cStringIO.StringIO(patch))
127 127 runcommand(server, ['log'])
128 128
129 129 def cwd(server):
130 130 """ check that --cwd doesn't persist between requests """
131 131 readchannel(server)
132 132 os.mkdir('foo')
133 133 f = open('foo/bar', 'wb')
134 134 f.write('a')
135 135 f.close()
136 136 runcommand(server, ['--cwd', 'foo', 'st', 'bar'])
137 137 runcommand(server, ['st', 'foo/bar'])
138 138 os.remove('foo/bar')
139 139
140 140 def localhgrc(server):
141 141 """ check that local configs for the cached repo aren't inherited when -R
142 142 is used """
143 143 readchannel(server)
144 144
145 145 # the cached repo local hgrc contains ui.foo=bar, so showconfig should show it
146 146 runcommand(server, ['showconfig'])
147 147
148 148 # but not for this repo
149 149 runcommand(server, ['init', 'foo'])
150 150 runcommand(server, ['-R', 'foo', 'showconfig', 'ui', 'defaults'])
151 151 shutil.rmtree('foo')
152 152
153 153 def hook(**args):
154 154 print 'hook talking'
155 155 print 'now try to read something: %r' % sys.stdin.read()
156 156
157 157 def hookoutput(server):
158 158 readchannel(server)
159 159 runcommand(server, ['--config',
160 160 'hooks.pre-identify=python:test-commandserver.hook', 'id'],
161 161 input=cStringIO.StringIO('some input'))
162 162
163 163 def outsidechanges(server):
164 164 readchannel(server)
165 165 f = open('a', 'ab')
166 166 f.write('a\n')
167 167 f.close()
168 168 runcommand(server, ['status'])
169 169 os.system('hg ci -Am2')
170 170 runcommand(server, ['tip'])
171 171 runcommand(server, ['status'])
172 172
173 173 def bookmarks(server):
174 174 readchannel(server)
175 175 runcommand(server, ['bookmarks'])
176 176
177 177 # changes .hg/bookmarks
178 178 os.system('hg bookmark -i bm1')
179 179 os.system('hg bookmark -i bm2')
180 180 runcommand(server, ['bookmarks'])
181 181
182 182 # changes .hg/bookmarks.current
183 183 os.system('hg upd bm1 -q')
184 184 runcommand(server, ['bookmarks'])
185 185
186 186 runcommand(server, ['bookmarks', 'bm3'])
187 187 f = open('a', 'ab')
188 188 f.write('a\n')
189 189 f.close()
190 190 runcommand(server, ['commit', '-Amm'])
191 191 runcommand(server, ['bookmarks'])
192 192
193 193 def tagscache(server):
194 194 readchannel(server)
195 195 runcommand(server, ['id', '-t', '-r', '0'])
196 196 os.system('hg tag -r 0 foo')
197 197 runcommand(server, ['id', '-t', '-r', '0'])
198 198
199 199 def setphase(server):
200 200 readchannel(server)
201 201 runcommand(server, ['phase', '-r', '.'])
202 202 os.system('hg phase -r . -p')
203 203 runcommand(server, ['phase', '-r', '.'])
204 204
205 205 def rollback(server):
206 206 readchannel(server)
207 207 runcommand(server, ['phase', '-r', '.', '-p'])
208 208 f = open('a', 'ab')
209 209 f.write('a\n')
210 210 f.close()
211 211 runcommand(server, ['commit', '-Am.'])
212 212 runcommand(server, ['rollback'])
213 213 runcommand(server, ['phase', '-r', '.'])
214 214
215 def branch(server):
216 readchannel(server)
217 runcommand(server, ['branch'])
218 os.system('hg branch foo')
219 runcommand(server, ['branch'])
220 os.system('hg branch default')
221
222 def hgignore(server):
223 readchannel(server)
224 f = open('.hgignore', 'ab')
225 f.write('')
226 f.close()
227 runcommand(server, ['commit', '-Am.'])
228 f = open('ignored-file', 'ab')
229 f.write('')
230 f.close()
231 f = open('.hgignore', 'ab')
232 f.write('ignored-file')
233 f.close()
234 runcommand(server, ['status', '-i', '-u'])
235
215 236 if __name__ == '__main__':
216 237 os.system('hg init')
217 238
218 239 check(hellomessage)
219 240 check(unknowncommand)
220 241 check(checkruncommand)
221 242 check(inputeof)
222 243 check(serverinput)
223 244 check(cwd)
224 245
225 246 hgrc = open('.hg/hgrc', 'a')
226 247 hgrc.write('[ui]\nfoo=bar\n')
227 248 hgrc.close()
228 249 check(localhgrc)
229 250 check(hookoutput)
230 251 check(outsidechanges)
231 252 check(bookmarks)
232 253 check(tagscache)
233 254 check(setphase)
234 255 check(rollback)
256 check(branch)
257 check(hgignore)
@@ -1,147 +1,165 b''
1 1
2 2 testing hellomessage:
3 3
4 4 o, 'capabilities: getencoding runcommand\nencoding: ***'
5 5 runcommand id
6 6 000000000000 tip
7 7
8 8 testing unknowncommand:
9 9
10 10 abort: unknown command unknowncommand
11 11
12 12 testing checkruncommand:
13 13
14 14 runcommand
15 15 Mercurial Distributed SCM
16 16
17 17 basic commands:
18 18
19 19 add add the specified files on the next commit
20 20 annotate show changeset information by line for each file
21 21 clone make a copy of an existing repository
22 22 commit commit the specified files or all outstanding changes
23 23 diff diff repository (or selected files)
24 24 export dump the header and diffs for one or more changesets
25 25 forget forget the specified files on the next commit
26 26 init create a new repository in the given directory
27 27 log show revision history of entire repository or files
28 28 merge merge working directory with another revision
29 29 phase set or show the current phase name
30 30 pull pull changes from the specified source
31 31 push push changes to the specified destination
32 32 remove remove the specified files on the next commit
33 33 serve start stand-alone webserver
34 34 status show changed files in the working directory
35 35 summary summarize working directory state
36 36 update update working directory (or switch revisions)
37 37
38 38 use "hg help" for the full list of commands or "hg -v" for details
39 39 runcommand id --quiet
40 40 000000000000
41 41 runcommand id
42 42 000000000000 tip
43 43 runcommand id --config ui.quiet=True
44 44 000000000000
45 45 runcommand id
46 46 000000000000 tip
47 47
48 48 testing inputeof:
49 49
50 50 server exit code = 1
51 51
52 52 testing serverinput:
53 53
54 54 runcommand import -
55 55 applying patch from stdin
56 56 runcommand log
57 57 changeset: 0:eff892de26ec
58 58 tag: tip
59 59 user: test
60 60 date: Thu Jan 01 00:00:00 1970 +0000
61 61 summary: 1
62 62
63 63
64 64 testing cwd:
65 65
66 66 runcommand --cwd foo st bar
67 67 ? bar
68 68 runcommand st foo/bar
69 69 ? foo/bar
70 70
71 71 testing localhgrc:
72 72
73 73 runcommand showconfig
74 74 bundle.mainreporoot=$TESTTMP
75 75 defaults.backout=-d "0 0"
76 76 defaults.commit=-d "0 0"
77 77 defaults.tag=-d "0 0"
78 78 ui.slash=True
79 79 ui.foo=bar
80 80 runcommand init foo
81 81 runcommand -R foo showconfig ui defaults
82 82 defaults.backout=-d "0 0"
83 83 defaults.commit=-d "0 0"
84 84 defaults.tag=-d "0 0"
85 85 ui.slash=True
86 86
87 87 testing hookoutput:
88 88
89 89 runcommand --config hooks.pre-identify=python:test-commandserver.hook id
90 90 hook talking
91 91 now try to read something: 'some input'
92 92 eff892de26ec tip
93 93
94 94 testing outsidechanges:
95 95
96 96 runcommand status
97 97 M a
98 98 runcommand tip
99 99 changeset: 1:d3a0a68be6de
100 100 tag: tip
101 101 user: test
102 102 date: Thu Jan 01 00:00:00 1970 +0000
103 103 summary: 2
104 104
105 105 runcommand status
106 106
107 107 testing bookmarks:
108 108
109 109 runcommand bookmarks
110 110 no bookmarks set
111 111 runcommand bookmarks
112 112 bm1 1:d3a0a68be6de
113 113 bm2 1:d3a0a68be6de
114 114 runcommand bookmarks
115 115 * bm1 1:d3a0a68be6de
116 116 bm2 1:d3a0a68be6de
117 117 runcommand bookmarks bm3
118 118 runcommand commit -Amm
119 119 runcommand bookmarks
120 120 bm1 1:d3a0a68be6de
121 121 bm2 1:d3a0a68be6de
122 122 * bm3 2:aef17e88f5f0
123 123
124 124 testing tagscache:
125 125
126 126 runcommand id -t -r 0
127 127
128 128 runcommand id -t -r 0
129 129 foo
130 130
131 131 testing setphase:
132 132
133 133 runcommand phase -r .
134 134 3: draft
135 135 runcommand phase -r .
136 136 3: public
137 137
138 138 testing rollback:
139 139
140 140 runcommand phase -r . -p
141 141 no phases changed
142 142 runcommand commit -Am.
143 143 runcommand rollback
144 144 repository tip rolled back to revision 3 (undo commit)
145 145 working directory now based on revision 3
146 146 runcommand phase -r .
147 147 3: public
148
149 testing branch:
150
151 runcommand branch
152 default
153 marked working directory as branch foo
154 (branches are permanent and global, did you want a bookmark?)
155 runcommand branch
156 foo
157 marked working directory as branch default
158 (branches are permanent and global, did you want a bookmark?)
159
160 testing hgignore:
161
162 runcommand commit -Am.
163 adding .hgignore
164 runcommand status -i -u
165 I ignored-file
@@ -1,1023 +1,1055 b''
1 1 $ cat >> $HGRCPATH <<EOF
2 2 > [extensions]
3 3 > graphlog=
4 4 > EOF
5 5 $ hgph() { hg log -G --template "{rev} {phase} {desc} - {node|short}\n" $*; }
6 6
7 7 $ mkcommit() {
8 8 > echo "$1" > "$1"
9 9 > hg add "$1"
10 10 > message="$1"
11 11 > shift
12 12 > hg ci -m "$message" $*
13 13 > }
14 14
15 15 $ hg init alpha
16 16 $ cd alpha
17 17 $ mkcommit a-A
18 18 $ mkcommit a-B
19 19 $ mkcommit a-C
20 20 $ mkcommit a-D
21 21 $ hgph
22 22 @ 3 draft a-D - b555f63b6063
23 23 |
24 24 o 2 draft a-C - 54acac6f23ab
25 25 |
26 26 o 1 draft a-B - 548a3d25dbf0
27 27 |
28 28 o 0 draft a-A - 054250a37db4
29 29
30 30
31 31 $ hg init ../beta
32 32 $ hg push -r 1 ../beta
33 33 pushing to ../beta
34 34 searching for changes
35 35 adding changesets
36 36 adding manifests
37 37 adding file changes
38 38 added 2 changesets with 2 changes to 2 files
39 39 $ hgph
40 40 @ 3 draft a-D - b555f63b6063
41 41 |
42 42 o 2 draft a-C - 54acac6f23ab
43 43 |
44 44 o 1 public a-B - 548a3d25dbf0
45 45 |
46 46 o 0 public a-A - 054250a37db4
47 47
48 48
49 49 $ cd ../beta
50 50 $ hgph
51 51 o 1 public a-B - 548a3d25dbf0
52 52 |
53 53 o 0 public a-A - 054250a37db4
54 54
55 55 $ hg up -q
56 56 $ mkcommit b-A
57 57 $ hgph
58 58 @ 2 draft b-A - f54f1bb90ff3
59 59 |
60 60 o 1 public a-B - 548a3d25dbf0
61 61 |
62 62 o 0 public a-A - 054250a37db4
63 63
64 64 $ hg pull ../alpha
65 65 pulling from ../alpha
66 66 searching for changes
67 67 adding changesets
68 68 adding manifests
69 69 adding file changes
70 70 added 2 changesets with 2 changes to 2 files (+1 heads)
71 71 (run 'hg heads' to see heads, 'hg merge' to merge)
72 72 $ hgph
73 73 o 4 public a-D - b555f63b6063
74 74 |
75 75 o 3 public a-C - 54acac6f23ab
76 76 |
77 77 | @ 2 draft b-A - f54f1bb90ff3
78 78 |/
79 79 o 1 public a-B - 548a3d25dbf0
80 80 |
81 81 o 0 public a-A - 054250a37db4
82 82
83 83
84 84 pull did not updated ../alpha state.
85 85 push from alpha to beta should update phase even if nothing is transfered
86 86
87 87 $ cd ../alpha
88 88 $ hgph # not updated by remote pull
89 89 @ 3 draft a-D - b555f63b6063
90 90 |
91 91 o 2 draft a-C - 54acac6f23ab
92 92 |
93 93 o 1 public a-B - 548a3d25dbf0
94 94 |
95 95 o 0 public a-A - 054250a37db4
96 96
97 97 $ hg push ../beta
98 98 pushing to ../beta
99 99 searching for changes
100 100 no changes found
101 101 [1]
102 102 $ hgph
103 103 @ 3 public a-D - b555f63b6063
104 104 |
105 105 o 2 public a-C - 54acac6f23ab
106 106 |
107 107 o 1 public a-B - 548a3d25dbf0
108 108 |
109 109 o 0 public a-A - 054250a37db4
110 110
111 111
112 112 update must update phase of common changeset too
113 113
114 114 $ hg pull ../beta # getting b-A
115 115 pulling from ../beta
116 116 searching for changes
117 117 adding changesets
118 118 adding manifests
119 119 adding file changes
120 120 added 1 changesets with 1 changes to 1 files (+1 heads)
121 121 (run 'hg heads' to see heads, 'hg merge' to merge)
122 122
123 123 $ cd ../beta
124 124 $ hgph # not updated by remote pull
125 125 o 4 public a-D - b555f63b6063
126 126 |
127 127 o 3 public a-C - 54acac6f23ab
128 128 |
129 129 | @ 2 draft b-A - f54f1bb90ff3
130 130 |/
131 131 o 1 public a-B - 548a3d25dbf0
132 132 |
133 133 o 0 public a-A - 054250a37db4
134 134
135 135 $ hg pull ../alpha
136 136 pulling from ../alpha
137 137 searching for changes
138 138 no changes found
139 139 $ hgph
140 140 o 4 public a-D - b555f63b6063
141 141 |
142 142 o 3 public a-C - 54acac6f23ab
143 143 |
144 144 | @ 2 public b-A - f54f1bb90ff3
145 145 |/
146 146 o 1 public a-B - 548a3d25dbf0
147 147 |
148 148 o 0 public a-A - 054250a37db4
149 149
150 150
151 151 Publish configuration option
152 152 ----------------------------
153 153
154 154 Pull
155 155 ````
156 156
157 157 changegroup are added without phase movement
158 158
159 159 $ hg bundle -a ../base.bundle
160 160 5 changesets found
161 161 $ cd ..
162 162 $ hg init mu
163 163 $ cd mu
164 164 $ cat > .hg/hgrc << EOF
165 165 > [phases]
166 166 > publish=0
167 167 > EOF
168 168 $ hg unbundle ../base.bundle
169 169 adding changesets
170 170 adding manifests
171 171 adding file changes
172 172 added 5 changesets with 5 changes to 5 files (+1 heads)
173 173 (run 'hg heads' to see heads, 'hg merge' to merge)
174 174 $ hgph
175 175 o 4 draft a-D - b555f63b6063
176 176 |
177 177 o 3 draft a-C - 54acac6f23ab
178 178 |
179 179 | o 2 draft b-A - f54f1bb90ff3
180 180 |/
181 181 o 1 draft a-B - 548a3d25dbf0
182 182 |
183 183 o 0 draft a-A - 054250a37db4
184 184
185 185 $ cd ..
186 186
187 187 Pulling from publish=False to publish=False does not move boundary.
188 188
189 189 $ hg init nu
190 190 $ cd nu
191 191 $ cat > .hg/hgrc << EOF
192 192 > [phases]
193 193 > publish=0
194 194 > EOF
195 195 $ hg pull ../mu -r 54acac6f23ab
196 196 pulling from ../mu
197 197 adding changesets
198 198 adding manifests
199 199 adding file changes
200 200 added 3 changesets with 3 changes to 3 files
201 201 (run 'hg update' to get a working copy)
202 202 $ hgph
203 203 o 2 draft a-C - 54acac6f23ab
204 204 |
205 205 o 1 draft a-B - 548a3d25dbf0
206 206 |
207 207 o 0 draft a-A - 054250a37db4
208 208
209 209
210 210 Even for common
211 211
212 212 $ hg pull ../mu -r f54f1bb90ff3
213 213 pulling from ../mu
214 214 searching for changes
215 215 adding changesets
216 216 adding manifests
217 217 adding file changes
218 218 added 1 changesets with 1 changes to 1 files (+1 heads)
219 219 (run 'hg heads' to see heads, 'hg merge' to merge)
220 220 $ hgph
221 221 o 3 draft b-A - f54f1bb90ff3
222 222 |
223 223 | o 2 draft a-C - 54acac6f23ab
224 224 |/
225 225 o 1 draft a-B - 548a3d25dbf0
226 226 |
227 227 o 0 draft a-A - 054250a37db4
228 228
229 229
230 230
231 231 Pulling from Publish=True to Publish=False move boundary in common set.
232 232 we are in nu
233 233
234 234 $ hg pull ../alpha -r b555f63b6063
235 235 pulling from ../alpha
236 236 searching for changes
237 237 adding changesets
238 238 adding manifests
239 239 adding file changes
240 240 added 1 changesets with 1 changes to 1 files
241 241 (run 'hg update' to get a working copy)
242 242 $ hgph # f54f1bb90ff3 stay draft, not ancestor of -r
243 243 o 4 public a-D - b555f63b6063
244 244 |
245 245 | o 3 draft b-A - f54f1bb90ff3
246 246 | |
247 247 o | 2 public a-C - 54acac6f23ab
248 248 |/
249 249 o 1 public a-B - 548a3d25dbf0
250 250 |
251 251 o 0 public a-A - 054250a37db4
252 252
253 253
254 254 pulling from Publish=False to publish=False with some public
255 255
256 256 $ hg up -q f54f1bb90ff3
257 257 $ mkcommit n-A
258 258 $ mkcommit n-B
259 259 $ hgph
260 260 @ 6 draft n-B - 145e75495359
261 261 |
262 262 o 5 draft n-A - d6bcb4f74035
263 263 |
264 264 | o 4 public a-D - b555f63b6063
265 265 | |
266 266 o | 3 draft b-A - f54f1bb90ff3
267 267 | |
268 268 | o 2 public a-C - 54acac6f23ab
269 269 |/
270 270 o 1 public a-B - 548a3d25dbf0
271 271 |
272 272 o 0 public a-A - 054250a37db4
273 273
274 274 $ cd ../mu
275 275 $ hg pull ../nu
276 276 pulling from ../nu
277 277 searching for changes
278 278 adding changesets
279 279 adding manifests
280 280 adding file changes
281 281 added 2 changesets with 2 changes to 2 files
282 282 (run 'hg update' to get a working copy)
283 283 $ hgph
284 284 o 6 draft n-B - 145e75495359
285 285 |
286 286 o 5 draft n-A - d6bcb4f74035
287 287 |
288 288 | o 4 public a-D - b555f63b6063
289 289 | |
290 290 | o 3 public a-C - 54acac6f23ab
291 291 | |
292 292 o | 2 draft b-A - f54f1bb90ff3
293 293 |/
294 294 o 1 public a-B - 548a3d25dbf0
295 295 |
296 296 o 0 public a-A - 054250a37db4
297 297
298 298 $ cd ..
299 299
300 300 pulling into publish=True
301 301
302 302 $ cd alpha
303 303 $ hgph
304 304 o 4 public b-A - f54f1bb90ff3
305 305 |
306 306 | @ 3 public a-D - b555f63b6063
307 307 | |
308 308 | o 2 public a-C - 54acac6f23ab
309 309 |/
310 310 o 1 public a-B - 548a3d25dbf0
311 311 |
312 312 o 0 public a-A - 054250a37db4
313 313
314 314 $ hg pull ../mu
315 315 pulling from ../mu
316 316 searching for changes
317 317 adding changesets
318 318 adding manifests
319 319 adding file changes
320 320 added 2 changesets with 2 changes to 2 files
321 321 (run 'hg update' to get a working copy)
322 322 $ hgph
323 323 o 6 draft n-B - 145e75495359
324 324 |
325 325 o 5 draft n-A - d6bcb4f74035
326 326 |
327 327 o 4 public b-A - f54f1bb90ff3
328 328 |
329 329 | @ 3 public a-D - b555f63b6063
330 330 | |
331 331 | o 2 public a-C - 54acac6f23ab
332 332 |/
333 333 o 1 public a-B - 548a3d25dbf0
334 334 |
335 335 o 0 public a-A - 054250a37db4
336 336
337 337 $ cd ..
338 338
339 339 pulling back into original repo
340 340
341 341 $ cd nu
342 342 $ hg pull ../alpha
343 343 pulling from ../alpha
344 344 searching for changes
345 345 no changes found
346 346 $ hgph
347 347 @ 6 public n-B - 145e75495359
348 348 |
349 349 o 5 public n-A - d6bcb4f74035
350 350 |
351 351 | o 4 public a-D - b555f63b6063
352 352 | |
353 353 o | 3 public b-A - f54f1bb90ff3
354 354 | |
355 355 | o 2 public a-C - 54acac6f23ab
356 356 |/
357 357 o 1 public a-B - 548a3d25dbf0
358 358 |
359 359 o 0 public a-A - 054250a37db4
360 360
361 361
362 362 Push
363 363 ````
364 364
365 365 (inserted)
366 366
367 367 Test that phase are pushed even when they are nothing to pus
368 368 (this might be tested later bu are very convenient to not alter too much test)
369 369
370 370 Push back to alpha
371 371
372 372 $ hg push ../alpha # from nu
373 373 pushing to ../alpha
374 374 searching for changes
375 375 no changes found
376 376 [1]
377 377 $ cd ..
378 378 $ cd alpha
379 379 $ hgph
380 380 o 6 public n-B - 145e75495359
381 381 |
382 382 o 5 public n-A - d6bcb4f74035
383 383 |
384 384 o 4 public b-A - f54f1bb90ff3
385 385 |
386 386 | @ 3 public a-D - b555f63b6063
387 387 | |
388 388 | o 2 public a-C - 54acac6f23ab
389 389 |/
390 390 o 1 public a-B - 548a3d25dbf0
391 391 |
392 392 o 0 public a-A - 054250a37db4
393 393
394 394
395 395 (end insertion)
396 396
397 397
398 398 initial setup
399 399
400 400 $ hg glog # of alpha
401 401 o changeset: 6:145e75495359
402 402 | tag: tip
403 403 | user: test
404 404 | date: Thu Jan 01 00:00:00 1970 +0000
405 405 | summary: n-B
406 406 |
407 407 o changeset: 5:d6bcb4f74035
408 408 | user: test
409 409 | date: Thu Jan 01 00:00:00 1970 +0000
410 410 | summary: n-A
411 411 |
412 412 o changeset: 4:f54f1bb90ff3
413 413 | parent: 1:548a3d25dbf0
414 414 | user: test
415 415 | date: Thu Jan 01 00:00:00 1970 +0000
416 416 | summary: b-A
417 417 |
418 418 | @ changeset: 3:b555f63b6063
419 419 | | user: test
420 420 | | date: Thu Jan 01 00:00:00 1970 +0000
421 421 | | summary: a-D
422 422 | |
423 423 | o changeset: 2:54acac6f23ab
424 424 |/ user: test
425 425 | date: Thu Jan 01 00:00:00 1970 +0000
426 426 | summary: a-C
427 427 |
428 428 o changeset: 1:548a3d25dbf0
429 429 | user: test
430 430 | date: Thu Jan 01 00:00:00 1970 +0000
431 431 | summary: a-B
432 432 |
433 433 o changeset: 0:054250a37db4
434 434 user: test
435 435 date: Thu Jan 01 00:00:00 1970 +0000
436 436 summary: a-A
437 437
438 438 $ mkcommit a-E
439 439 $ mkcommit a-F
440 440 $ mkcommit a-G
441 441 $ hg up d6bcb4f74035 -q
442 442 $ mkcommit a-H
443 443 created new head
444 444 $ hgph
445 445 @ 10 draft a-H - 967b449fbc94
446 446 |
447 447 | o 9 draft a-G - 3e27b6f1eee1
448 448 | |
449 449 | o 8 draft a-F - b740e3e5c05d
450 450 | |
451 451 | o 7 draft a-E - e9f537e46dea
452 452 | |
453 453 +---o 6 public n-B - 145e75495359
454 454 | |
455 455 o | 5 public n-A - d6bcb4f74035
456 456 | |
457 457 o | 4 public b-A - f54f1bb90ff3
458 458 | |
459 459 | o 3 public a-D - b555f63b6063
460 460 | |
461 461 | o 2 public a-C - 54acac6f23ab
462 462 |/
463 463 o 1 public a-B - 548a3d25dbf0
464 464 |
465 465 o 0 public a-A - 054250a37db4
466 466
467 467
468 Pulling from bundle does not alter phases of changeset not present in the bundle
469
470 $ hg bundle --base 1 -r 6 -r 3 ../partial-bundle.hg
471 5 changesets found
472 $ hg pull ../partial-bundle.hg
473 pulling from ../partial-bundle.hg
474 searching for changes
475 no changes found
476 $ hgph
477 @ 10 draft a-H - 967b449fbc94
478 |
479 | o 9 draft a-G - 3e27b6f1eee1
480 | |
481 | o 8 draft a-F - b740e3e5c05d
482 | |
483 | o 7 draft a-E - e9f537e46dea
484 | |
485 +---o 6 public n-B - 145e75495359
486 | |
487 o | 5 public n-A - d6bcb4f74035
488 | |
489 o | 4 public b-A - f54f1bb90ff3
490 | |
491 | o 3 public a-D - b555f63b6063
492 | |
493 | o 2 public a-C - 54acac6f23ab
494 |/
495 o 1 public a-B - 548a3d25dbf0
496 |
497 o 0 public a-A - 054250a37db4
498
499
468 500 Pushing to Publish=False (unknown changeset)
469 501
470 502 $ hg push ../mu -r b740e3e5c05d # a-F
471 503 pushing to ../mu
472 504 searching for changes
473 505 adding changesets
474 506 adding manifests
475 507 adding file changes
476 508 added 2 changesets with 2 changes to 2 files
477 509 $ hgph
478 510 @ 10 draft a-H - 967b449fbc94
479 511 |
480 512 | o 9 draft a-G - 3e27b6f1eee1
481 513 | |
482 514 | o 8 draft a-F - b740e3e5c05d
483 515 | |
484 516 | o 7 draft a-E - e9f537e46dea
485 517 | |
486 518 +---o 6 public n-B - 145e75495359
487 519 | |
488 520 o | 5 public n-A - d6bcb4f74035
489 521 | |
490 522 o | 4 public b-A - f54f1bb90ff3
491 523 | |
492 524 | o 3 public a-D - b555f63b6063
493 525 | |
494 526 | o 2 public a-C - 54acac6f23ab
495 527 |/
496 528 o 1 public a-B - 548a3d25dbf0
497 529 |
498 530 o 0 public a-A - 054250a37db4
499 531
500 532
501 533 $ cd ../mu
502 534 $ hgph # again f54f1bb90ff3, d6bcb4f74035 and 145e75495359 stay draft,
503 535 > # not ancestor of -r
504 536 o 8 draft a-F - b740e3e5c05d
505 537 |
506 538 o 7 draft a-E - e9f537e46dea
507 539 |
508 540 | o 6 draft n-B - 145e75495359
509 541 | |
510 542 | o 5 draft n-A - d6bcb4f74035
511 543 | |
512 544 o | 4 public a-D - b555f63b6063
513 545 | |
514 546 o | 3 public a-C - 54acac6f23ab
515 547 | |
516 548 | o 2 draft b-A - f54f1bb90ff3
517 549 |/
518 550 o 1 public a-B - 548a3d25dbf0
519 551 |
520 552 o 0 public a-A - 054250a37db4
521 553
522 554
523 555 Pushing to Publish=True (unknown changeset)
524 556
525 557 $ hg push ../beta -r b740e3e5c05d
526 558 pushing to ../beta
527 559 searching for changes
528 560 adding changesets
529 561 adding manifests
530 562 adding file changes
531 563 added 2 changesets with 2 changes to 2 files
532 564 $ hgph # again f54f1bb90ff3, d6bcb4f74035 and 145e75495359 stay draft,
533 565 > # not ancestor of -r
534 566 o 8 public a-F - b740e3e5c05d
535 567 |
536 568 o 7 public a-E - e9f537e46dea
537 569 |
538 570 | o 6 draft n-B - 145e75495359
539 571 | |
540 572 | o 5 draft n-A - d6bcb4f74035
541 573 | |
542 574 o | 4 public a-D - b555f63b6063
543 575 | |
544 576 o | 3 public a-C - 54acac6f23ab
545 577 | |
546 578 | o 2 draft b-A - f54f1bb90ff3
547 579 |/
548 580 o 1 public a-B - 548a3d25dbf0
549 581 |
550 582 o 0 public a-A - 054250a37db4
551 583
552 584
553 585 Pushing to Publish=True (common changeset)
554 586
555 587 $ cd ../beta
556 588 $ hg push ../alpha
557 589 pushing to ../alpha
558 590 searching for changes
559 591 no changes found
560 592 [1]
561 593 $ hgph
562 594 o 6 public a-F - b740e3e5c05d
563 595 |
564 596 o 5 public a-E - e9f537e46dea
565 597 |
566 598 o 4 public a-D - b555f63b6063
567 599 |
568 600 o 3 public a-C - 54acac6f23ab
569 601 |
570 602 | @ 2 public b-A - f54f1bb90ff3
571 603 |/
572 604 o 1 public a-B - 548a3d25dbf0
573 605 |
574 606 o 0 public a-A - 054250a37db4
575 607
576 608 $ cd ../alpha
577 609 $ hgph
578 610 @ 10 draft a-H - 967b449fbc94
579 611 |
580 612 | o 9 draft a-G - 3e27b6f1eee1
581 613 | |
582 614 | o 8 public a-F - b740e3e5c05d
583 615 | |
584 616 | o 7 public a-E - e9f537e46dea
585 617 | |
586 618 +---o 6 public n-B - 145e75495359
587 619 | |
588 620 o | 5 public n-A - d6bcb4f74035
589 621 | |
590 622 o | 4 public b-A - f54f1bb90ff3
591 623 | |
592 624 | o 3 public a-D - b555f63b6063
593 625 | |
594 626 | o 2 public a-C - 54acac6f23ab
595 627 |/
596 628 o 1 public a-B - 548a3d25dbf0
597 629 |
598 630 o 0 public a-A - 054250a37db4
599 631
600 632
601 633 Pushing to Publish=False (common changeset that change phase + unknown one)
602 634
603 635 $ hg push ../mu -r 967b449fbc94 -f
604 636 pushing to ../mu
605 637 searching for changes
606 638 adding changesets
607 639 adding manifests
608 640 adding file changes
609 641 added 1 changesets with 1 changes to 1 files (+1 heads)
610 642 $ hgph
611 643 @ 10 draft a-H - 967b449fbc94
612 644 |
613 645 | o 9 draft a-G - 3e27b6f1eee1
614 646 | |
615 647 | o 8 public a-F - b740e3e5c05d
616 648 | |
617 649 | o 7 public a-E - e9f537e46dea
618 650 | |
619 651 +---o 6 public n-B - 145e75495359
620 652 | |
621 653 o | 5 public n-A - d6bcb4f74035
622 654 | |
623 655 o | 4 public b-A - f54f1bb90ff3
624 656 | |
625 657 | o 3 public a-D - b555f63b6063
626 658 | |
627 659 | o 2 public a-C - 54acac6f23ab
628 660 |/
629 661 o 1 public a-B - 548a3d25dbf0
630 662 |
631 663 o 0 public a-A - 054250a37db4
632 664
633 665 $ cd ../mu
634 666 $ hgph # d6bcb4f74035 should have changed phase
635 667 > # 145e75495359 is still draft. not ancestor of -r
636 668 o 9 draft a-H - 967b449fbc94
637 669 |
638 670 | o 8 public a-F - b740e3e5c05d
639 671 | |
640 672 | o 7 public a-E - e9f537e46dea
641 673 | |
642 674 +---o 6 draft n-B - 145e75495359
643 675 | |
644 676 o | 5 public n-A - d6bcb4f74035
645 677 | |
646 678 | o 4 public a-D - b555f63b6063
647 679 | |
648 680 | o 3 public a-C - 54acac6f23ab
649 681 | |
650 682 o | 2 public b-A - f54f1bb90ff3
651 683 |/
652 684 o 1 public a-B - 548a3d25dbf0
653 685 |
654 686 o 0 public a-A - 054250a37db4
655 687
656 688
657 689
658 690 Pushing to Publish=True (common changeset from publish=False)
659 691
660 692 (in mu)
661 693 $ hg push ../alpha
662 694 pushing to ../alpha
663 695 searching for changes
664 696 no changes found
665 697 [1]
666 698 $ hgph
667 699 o 9 public a-H - 967b449fbc94
668 700 |
669 701 | o 8 public a-F - b740e3e5c05d
670 702 | |
671 703 | o 7 public a-E - e9f537e46dea
672 704 | |
673 705 +---o 6 public n-B - 145e75495359
674 706 | |
675 707 o | 5 public n-A - d6bcb4f74035
676 708 | |
677 709 | o 4 public a-D - b555f63b6063
678 710 | |
679 711 | o 3 public a-C - 54acac6f23ab
680 712 | |
681 713 o | 2 public b-A - f54f1bb90ff3
682 714 |/
683 715 o 1 public a-B - 548a3d25dbf0
684 716 |
685 717 o 0 public a-A - 054250a37db4
686 718
687 719 $ hgph -R ../alpha # a-H should have been synced to 0
688 720 @ 10 public a-H - 967b449fbc94
689 721 |
690 722 | o 9 draft a-G - 3e27b6f1eee1
691 723 | |
692 724 | o 8 public a-F - b740e3e5c05d
693 725 | |
694 726 | o 7 public a-E - e9f537e46dea
695 727 | |
696 728 +---o 6 public n-B - 145e75495359
697 729 | |
698 730 o | 5 public n-A - d6bcb4f74035
699 731 | |
700 732 o | 4 public b-A - f54f1bb90ff3
701 733 | |
702 734 | o 3 public a-D - b555f63b6063
703 735 | |
704 736 | o 2 public a-C - 54acac6f23ab
705 737 |/
706 738 o 1 public a-B - 548a3d25dbf0
707 739 |
708 740 o 0 public a-A - 054250a37db4
709 741
710 742
711 743
712 744 Discovery locally secret changeset on a remote repository:
713 745
714 746 - should make it non-secret
715 747
716 748 $ cd ../alpha
717 749 $ mkcommit A-secret --config phases.new-commit=2
718 750 $ hgph
719 751 @ 11 secret A-secret - 435b5d83910c
720 752 |
721 753 o 10 public a-H - 967b449fbc94
722 754 |
723 755 | o 9 draft a-G - 3e27b6f1eee1
724 756 | |
725 757 | o 8 public a-F - b740e3e5c05d
726 758 | |
727 759 | o 7 public a-E - e9f537e46dea
728 760 | |
729 761 +---o 6 public n-B - 145e75495359
730 762 | |
731 763 o | 5 public n-A - d6bcb4f74035
732 764 | |
733 765 o | 4 public b-A - f54f1bb90ff3
734 766 | |
735 767 | o 3 public a-D - b555f63b6063
736 768 | |
737 769 | o 2 public a-C - 54acac6f23ab
738 770 |/
739 771 o 1 public a-B - 548a3d25dbf0
740 772 |
741 773 o 0 public a-A - 054250a37db4
742 774
743 775 $ hg bundle --base 'parents(.)' -r . ../secret-bundle.hg
744 776 1 changesets found
745 777 $ hg -R ../mu unbundle ../secret-bundle.hg
746 778 adding changesets
747 779 adding manifests
748 780 adding file changes
749 781 added 1 changesets with 1 changes to 1 files
750 782 (run 'hg update' to get a working copy)
751 783 $ hgph -R ../mu
752 784 o 10 draft A-secret - 435b5d83910c
753 785 |
754 786 o 9 public a-H - 967b449fbc94
755 787 |
756 788 | o 8 public a-F - b740e3e5c05d
757 789 | |
758 790 | o 7 public a-E - e9f537e46dea
759 791 | |
760 792 +---o 6 public n-B - 145e75495359
761 793 | |
762 794 o | 5 public n-A - d6bcb4f74035
763 795 | |
764 796 | o 4 public a-D - b555f63b6063
765 797 | |
766 798 | o 3 public a-C - 54acac6f23ab
767 799 | |
768 800 o | 2 public b-A - f54f1bb90ff3
769 801 |/
770 802 o 1 public a-B - 548a3d25dbf0
771 803 |
772 804 o 0 public a-A - 054250a37db4
773 805
774 806 $ hg pull ../mu
775 807 pulling from ../mu
776 808 searching for changes
777 809 no changes found
778 810 $ hgph
779 811 @ 11 draft A-secret - 435b5d83910c
780 812 |
781 813 o 10 public a-H - 967b449fbc94
782 814 |
783 815 | o 9 draft a-G - 3e27b6f1eee1
784 816 | |
785 817 | o 8 public a-F - b740e3e5c05d
786 818 | |
787 819 | o 7 public a-E - e9f537e46dea
788 820 | |
789 821 +---o 6 public n-B - 145e75495359
790 822 | |
791 823 o | 5 public n-A - d6bcb4f74035
792 824 | |
793 825 o | 4 public b-A - f54f1bb90ff3
794 826 | |
795 827 | o 3 public a-D - b555f63b6063
796 828 | |
797 829 | o 2 public a-C - 54acac6f23ab
798 830 |/
799 831 o 1 public a-B - 548a3d25dbf0
800 832 |
801 833 o 0 public a-A - 054250a37db4
802 834
803 835
804 836 pushing a locally public and draft changesets remotly secret should make them appear on the remote side
805 837
806 838 $ hg -R ../mu phase --secret --force 967b449fbc94
807 839 $ hg push -r 435b5d83910c ../mu
808 840 pushing to ../mu
809 841 searching for changes
810 842 adding changesets
811 843 adding manifests
812 844 adding file changes
813 845 added 0 changesets with 0 changes to 2 files
814 846 $ hgph -R ../mu
815 847 o 10 draft A-secret - 435b5d83910c
816 848 |
817 849 o 9 public a-H - 967b449fbc94
818 850 |
819 851 | o 8 public a-F - b740e3e5c05d
820 852 | |
821 853 | o 7 public a-E - e9f537e46dea
822 854 | |
823 855 +---o 6 public n-B - 145e75495359
824 856 | |
825 857 o | 5 public n-A - d6bcb4f74035
826 858 | |
827 859 | o 4 public a-D - b555f63b6063
828 860 | |
829 861 | o 3 public a-C - 54acac6f23ab
830 862 | |
831 863 o | 2 public b-A - f54f1bb90ff3
832 864 |/
833 865 o 1 public a-B - 548a3d25dbf0
834 866 |
835 867 o 0 public a-A - 054250a37db4
836 868
837 869
838 870 pull new changeset with common draft locally
839 871
840 872 $ hg up -q 967b449fbc94 # create a new root for draft
841 873 $ mkcommit 'alpha-more'
842 874 created new head
843 875 $ hg push -fr . ../mu
844 876 pushing to ../mu
845 877 searching for changes
846 878 adding changesets
847 879 adding manifests
848 880 adding file changes
849 881 added 1 changesets with 1 changes to 1 files (+1 heads)
850 882 $ cd ../mu
851 883 $ hg phase --secret --force 1c5cfd894796
852 884 $ hg up -q 435b5d83910c
853 885 $ mkcommit 'mu-more'
854 886 $ cd ../alpha
855 887 $ hg pull ../mu
856 888 pulling from ../mu
857 889 searching for changes
858 890 adding changesets
859 891 adding manifests
860 892 adding file changes
861 893 added 1 changesets with 1 changes to 1 files
862 894 (run 'hg update' to get a working copy)
863 895 $ hgph
864 896 o 13 draft mu-more - 5237fb433fc8
865 897 |
866 898 | @ 12 draft alpha-more - 1c5cfd894796
867 899 | |
868 900 o | 11 draft A-secret - 435b5d83910c
869 901 |/
870 902 o 10 public a-H - 967b449fbc94
871 903 |
872 904 | o 9 draft a-G - 3e27b6f1eee1
873 905 | |
874 906 | o 8 public a-F - b740e3e5c05d
875 907 | |
876 908 | o 7 public a-E - e9f537e46dea
877 909 | |
878 910 +---o 6 public n-B - 145e75495359
879 911 | |
880 912 o | 5 public n-A - d6bcb4f74035
881 913 | |
882 914 o | 4 public b-A - f54f1bb90ff3
883 915 | |
884 916 | o 3 public a-D - b555f63b6063
885 917 | |
886 918 | o 2 public a-C - 54acac6f23ab
887 919 |/
888 920 o 1 public a-B - 548a3d25dbf0
889 921 |
890 922 o 0 public a-A - 054250a37db4
891 923
892 924
893 925 Test that test are properly ignored on remote event when existing locally
894 926
895 927 $ cd ..
896 928 $ hg clone -qU -r b555f63b6063 -r f54f1bb90ff3 beta gamma
897 929
898 930 # pathological case are
899 931 #
900 932 # * secret remotely
901 933 # * known locally
902 934 # * repo have uncommon changeset
903 935
904 936 $ hg -R beta phase --secret --force f54f1bb90ff3
905 937 $ hg -R gamma phase --draft --force f54f1bb90ff3
906 938
907 939 $ cd gamma
908 940 $ hg pull ../beta
909 941 pulling from ../beta
910 942 searching for changes
911 943 adding changesets
912 944 adding manifests
913 945 adding file changes
914 946 added 2 changesets with 2 changes to 2 files
915 947 (run 'hg update' to get a working copy)
916 948 $ hg phase f54f1bb90ff3
917 949 2: draft
918 950
919 951 same over the wire
920 952
921 953 $ cd ../beta
922 954 $ hg serve -p $HGPORT -d --pid-file=../beta.pid -E ../beta-error.log
923 955 $ cat ../beta.pid >> $DAEMON_PIDS
924 956 $ cd ../gamma
925 957
926 958 $ hg pull http://localhost:$HGPORT/
927 959 pulling from http://localhost:$HGPORT/
928 960 searching for changes
929 961 no changes found
930 962 $ hg phase f54f1bb90ff3
931 963 2: draft
932 964
933 965 check that secret local on both side are not synced to public
934 966
935 967 $ hg push -r b555f63b6063 http://localhost:$HGPORT/
936 968 pushing to http://localhost:$HGPORT/
937 969 searching for changes
938 970 no changes found
939 971 [1]
940 972 $ hg phase f54f1bb90ff3
941 973 2: draft
942 974
943 975 put the changeset in the draft state again
944 976 (first test after this one expect to be able to copy)
945 977
946 978 $ cd ..
947 979
948 980
949 981 Test Clone behavior
950 982
951 983 A. Clone without secret changeset
952 984
953 985 1. cloning non-publishing repository
954 986 (Phase should be preserved)
955 987
956 988 # make sure there is no secret so we can use a copy clone
957 989
958 990 $ hg -R mu phase --draft 'secret()'
959 991
960 992 $ hg clone -U mu Tau
961 993 $ hgph -R Tau
962 994 o 12 draft mu-more - 5237fb433fc8
963 995 |
964 996 | o 11 draft alpha-more - 1c5cfd894796
965 997 | |
966 998 o | 10 draft A-secret - 435b5d83910c
967 999 |/
968 1000 o 9 public a-H - 967b449fbc94
969 1001 |
970 1002 | o 8 public a-F - b740e3e5c05d
971 1003 | |
972 1004 | o 7 public a-E - e9f537e46dea
973 1005 | |
974 1006 +---o 6 public n-B - 145e75495359
975 1007 | |
976 1008 o | 5 public n-A - d6bcb4f74035
977 1009 | |
978 1010 | o 4 public a-D - b555f63b6063
979 1011 | |
980 1012 | o 3 public a-C - 54acac6f23ab
981 1013 | |
982 1014 o | 2 public b-A - f54f1bb90ff3
983 1015 |/
984 1016 o 1 public a-B - 548a3d25dbf0
985 1017 |
986 1018 o 0 public a-A - 054250a37db4
987 1019
988 1020
989 1021 2. cloning publishing repository
990 1022
991 1023 (everything should be public)
992 1024
993 1025 $ hg clone -U alpha Upsilon
994 1026 $ hgph -R Upsilon
995 1027 o 13 public mu-more - 5237fb433fc8
996 1028 |
997 1029 | o 12 public alpha-more - 1c5cfd894796
998 1030 | |
999 1031 o | 11 public A-secret - 435b5d83910c
1000 1032 |/
1001 1033 o 10 public a-H - 967b449fbc94
1002 1034 |
1003 1035 | o 9 public a-G - 3e27b6f1eee1
1004 1036 | |
1005 1037 | o 8 public a-F - b740e3e5c05d
1006 1038 | |
1007 1039 | o 7 public a-E - e9f537e46dea
1008 1040 | |
1009 1041 +---o 6 public n-B - 145e75495359
1010 1042 | |
1011 1043 o | 5 public n-A - d6bcb4f74035
1012 1044 | |
1013 1045 o | 4 public b-A - f54f1bb90ff3
1014 1046 | |
1015 1047 | o 3 public a-D - b555f63b6063
1016 1048 | |
1017 1049 | o 2 public a-C - 54acac6f23ab
1018 1050 |/
1019 1051 o 1 public a-B - 548a3d25dbf0
1020 1052 |
1021 1053 o 0 public a-A - 054250a37db4
1022 1054
1023 1055
General Comments 0
You need to be logged in to leave comments. Login now