##// END OF EJS Templates
merge with stable
Thomas Arendsen Hein -
r16208:85db9917 merge default
parent child Browse files
Show More
@@ -1,51 +1,52 b''
1 1 35fb62a3a673d5322f6274a44ba6456e5e4b3b37 0 iD8DBQBEYmO2ywK+sNU5EO8RAnaYAKCO7x15xUn5mnhqWNXqk/ehlhRt2QCfRDfY0LrUq2q4oK/KypuJYPHgq1A=
2 2 2be3001847cb18a23c403439d9e7d0ace30804e9 0 iD8DBQBExUbjywK+sNU5EO8RAhzxAKCtyHAQUzcTSZTqlfJ0by6vhREwWQCghaQFHfkfN0l9/40EowNhuMOKnJk=
3 3 36a957364b1b89c150f2d0e60a99befe0ee08bd3 0 iD8DBQBFfL2QywK+sNU5EO8RAjYFAKCoGlaWRTeMsjdmxAjUYx6diZxOBwCfY6IpBYsKvPTwB3oktnPt5Rmrlys=
4 4 27230c29bfec36d5540fbe1c976810aefecfd1d2 0 iD8DBQBFheweywK+sNU5EO8RAt7VAKCrqJQWT2/uo2RWf0ZI4bLp6v82jACgjrMdsaTbxRsypcmEsdPhlG6/8F4=
5 5 fb4b6d5fe100b0886f8bc3d6731ec0e5ed5c4694 0 iD8DBQBGgHicywK+sNU5EO8RAgNxAJ0VG8ixAaeudx4sZbhngI1syu49HQCeNUJQfWBgA8bkJ2pvsFpNxwYaX3I=
6 6 23889160905a1b09fffe1c07378e9fc1827606eb 0 iD8DBQBHGTzoywK+sNU5EO8RAr/UAJ0Y8s4jQtzgS+G9vM8z6CWBThZ8fwCcCT5XDj2XwxKkz/0s6UELwjsO3LU=
7 7 bae2e9c838e90a393bae3973a7850280413e091a 0 iD8DBQBH6DO5ywK+sNU5EO8RAsfrAJ0e4r9c9GF/MJsM7Xjd3NesLRC3+ACffj6+6HXdZf8cswAoFPO+DY00oD0=
8 8 d5cbbe2c49cee22a9fbeb9ea41daa0ac4e26b846 0 iD8DBQBINdwsywK+sNU5EO8RAjIUAKCPmlFJSpsPAAUKF+iNHAwVnwmzeQCdEXrL27CWclXuUKdbQC8De7LICtE=
9 9 d2375bbee6d47e62ba8e415c86e83a465dc4dce9 0 iD8DBQBIo1wpywK+sNU5EO8RAmRNAJ94x3OFt6blbqu/yBoypm/AJ44fuACfUaldXcV5z9tht97hSp22DVTEPGc=
10 10 2a67430f92f15ea5159c26b09ec4839a0c549a26 0 iEYEABECAAYFAkk1hykACgkQywK+sNU5EO85QACeNJNUanjc2tl4wUoPHNuv+lSj0ZMAoIm93wSTc/feyYnO2YCaQ1iyd9Nu
11 11 3773e510d433969e277b1863c317b674cbee2065 0 iEYEABECAAYFAklNbbAACgkQywK+sNU5EO8o+gCfeb2/lfIJZMvyDA1m+G1CsBAxfFsAoIa6iAMG8SBY7hW1Q85Yf/LXEvaE
12 12 11a4eb81fb4f4742451591489e2797dc47903277 0 iEYEABECAAYFAklcAnsACgkQywK+sNU5EO+uXwCbBVHNNsLy1g7BlAyQJwadYVyHOXoAoKvtAVO71+bv7EbVoukwTzT+P4Sx
13 13 11efa41037e280d08cfb07c09ad485df30fb0ea8 0 iEYEABECAAYFAkmvJRQACgkQywK+sNU5EO9XZwCeLMgDgPSMWMm6vgjL4lDs2pEc5+0AnRxfiFbpbBfuEFTqKz9nbzeyoBlx
14 14 02981000012e3adf40c4849bd7b3d5618f9ce82d 0 iEYEABECAAYFAknEH3wACgkQywK+sNU5EO+uXwCeI+LbLMmhjU1lKSfU3UWJHjjUC7oAoIZLvYDGOL/tNZFUuatc3RnZ2eje
15 15 196d40e7c885fa6e95f89134809b3ec7bdbca34b 0 iEYEABECAAYFAkpL2X4ACgkQywK+sNU5EO9FOwCfXJycjyKJXsvQqKkHrglwOQhEKS4An36GfKzptfN8b1qNc3+ya/5c2WOM
16 16 3ef6c14a1e8e83a31226f5881b7fe6095bbfa6f6 0 iEYEABECAAYFAkpopLIACgkQywK+sNU5EO8QSgCfZ0ztsd071rOa2lhmp9Fyue/WoI0AoLTei80/xrhRlB8L/rZEf2KBl8dA
17 17 31ec469f9b556f11819937cf68ee53f2be927ebf 0 iEYEABECAAYFAksBuxAACgkQywK+sNU5EO+mBwCfagB+A0txzWZ6dRpug3LEoK7Z1QsAoKpbk8vsLjv6/oRDicSk/qBu33+m
18 18 439d7ea6fe3aa4ab9ec274a68846779153789de9 0 iEYEABECAAYFAksVw0kACgkQywK+sNU5EO/oZwCfdfBEkgp38xq6wN2F4nj+SzofrJIAnjmxt04vaJSeOOeHylHvk6lzuQsw
19 19 296a0b14a68621f6990c54fdba0083f6f20935bf 0 iEYEABECAAYFAks+jCoACgkQywK+sNU5EO9J8wCeMUGF9E/gS2UBsqIz56WS4HMPRPUAoI5J95mwEIK8Clrl7qFRidNI6APq
20 20 4aa619c4c2c09907034d9824ebb1dd0e878206eb 0 iEYEABECAAYFAktm9IsACgkQywK+sNU5EO9XGgCgk4HclRQhexEtooPE5GcUCdB6M8EAn2ptOhMVbIoO+JncA+tNACPFXh0O
21 21 ff2704a8ded37fbebd8b6eb5ec733731d725da8a 0 iEYEABECAAYFAkuRoSQACgkQywK+sNU5EO//3QCeJDc5r2uFyFCtAlpSA27DEE5rrxAAn2FSwTy9fhrB3QAdDQlwkEZcQzDh
22 22 2b01dab594167bc0dd33331dbaa6dca3dca1b3aa 0 iEYEABECAAYFAku1IwIACgkQywK+sNU5EO9MjgCdHLVwkTZlNHxhcznZKBL1rjN+J7cAoLLWi9LTL6f/TgBaPSKOy1ublbaW
23 23 39f725929f0c48c5fb3b90c071fc3066012456ca 0 iEYEABECAAYFAkvclvsACgkQywK+sNU5EO9FSwCeL9i5x8ALW/LE5+lCX6MFEAe4MhwAn1ev5o6SX6GrNdDfKweiemfO2VBk
24 24 fdcf80f26604f233dc4d8f0a5ef9d7470e317e8a 0 iEYEABECAAYFAkvsKTkACgkQywK+sNU5EO9qEACgiSiRGvTG2vXGJ65tUSOIYihTuFAAnRzRIqEVSw8M8/RGeUXRps0IzaCO
25 25 24fe2629c6fd0c74c90bd066e77387c2b02e8437 0 iEYEABECAAYFAkwFLRsACgkQywK+sNU5EO+pJACgp13tPI+pbwKZV+LeMjcQ4H6tCZYAoJebzhd6a8yYx6qiwpJxA9BXZNXy
26 26 f786fc4b8764cd2a5526d259cf2f94d8a66924d9 0 iEYEABECAAYFAkwsyxcACgkQywK+sNU5EO+crACfUpNAF57PmClkSri9nJcBjb2goN4AniPCNaKvnki7TnUsi1u2oxltpKKL
27 27 bf1774d95bde614af3956d92b20e2a0c68c5fec7 0 iEYEABECAAYFAkxVwccACgkQywK+sNU5EO+oFQCeJzwZ+we1fIIyBGCddHceOUAN++cAnjvT6A8ZWW0zV21NXIFF1qQmjxJd
28 28 c00f03a4982e467fb6b6bd45908767db6df4771d 0 iEYEABECAAYFAkxXDqsACgkQywK+sNU5EO/GJACfT9Rz4hZOxPQEs91JwtmfjevO84gAmwSmtfo5mmWSm8gtTUebCcdTv0Kf
29 29 ff5cec76b1c5b6be9c3bb923aae8c3c6d079d6b9 0 iD8DBQBMdo+qywK+sNU5EO8RAqQpAJ975BL2CCAiWMz9SXthNQ9xG181IwCgp4O+KViHPkufZVFn2aTKMNvcr1A=
30 30 93d8bff78c96fe7e33237b257558ee97290048a4 0 iD8DBQBMpfvdywK+sNU5EO8RAsxVAJ0UaL1XB51C76JUBhafc9GBefuMxwCdEWkTOzwvE0SarJBe9i008jhbqW4=
31 31 333421b9e0f96c7bc788e5667c146a58a9440a55 0 iD8DBQBMz0HOywK+sNU5EO8RAlsEAJ0USh6yOG7OrWkADGunVt9QimBQnwCbBqeMnKgSbwEw8jZwE3Iz1mdrYlo=
32 32 4438875ec01bd0fc32be92b0872eb6daeed4d44f 0 iD8DBQBM4WYUywK+sNU5EO8RAhCVAJ0dJswachwFAHALmk1x0RJehxzqPQCbBNskP9n/X689jB+btNTZTyKU/fw=
33 33 6aff4f144ad356311318b0011df0bb21f2c97429 0 iD8DBQBM9uxXywK+sNU5EO8RAv+4AKCDj4qKP16GdPaq1tP6BUwpM/M1OACfRyzLPp/qiiN8xJTWoWYSe/XjJug=
34 34 e3bf16703e2601de99e563cdb3a5d50b64e6d320 0 iD8DBQBNH8WqywK+sNU5EO8RAiQTAJ9sBO+TeiGro4si77VVaQaA6jcRUgCfSA28dBbjj0oFoQwvPoZjANiZBH8=
35 35 a6c855c32ea081da3c3b8ff628f1847ff271482f 0 iD8DBQBNSJJ+ywK+sNU5EO8RAoJaAKCweDEF70fu+r1Zn7pYDXdlk5RuSgCeO9gK/eit8Lin/1n3pO7aYguFLok=
36 36 2b2155623ee2559caf288fd333f30475966c4525 0 iD8DBQBNSJeBywK+sNU5EO8RAm1KAJ4hW9Cm9nHaaGJguchBaPLlAr+O3wCgqgmMok8bdAS06N6PL60PSTM//Gg=
37 37 2616325766e3504c8ae7c84bd15ee610901fe91d 0 iD8DBQBNbWy9ywK+sNU5EO8RAlWCAJ4mW8HbzjJj9GpK98muX7k+7EvEHwCfaTLbC/DH3QEsZBhEP+M8tzL6RU4=
38 38 aa1f3be38ab127280761889d2dca906ca465b5f4 0 iD8DBQBNeQq7ywK+sNU5EO8RAlEOAJ4tlEDdetE9lKfjGgjbkcR8PrC3egCfXCfF3qNVvU/2YYjpgvRwevjvDy0=
39 39 b032bec2c0a651ca0ddecb65714bfe6770f67d70 0 iD8DBQBNlg5kywK+sNU5EO8RAnGEAJ9gmEx6MfaR4XcG2m/93vwtfyzs3gCgltzx8/YdHPwqDwRX/WbpYgi33is=
40 40 3cb1e95676ad089596bd81d0937cad37d6e3b7fb 0 iD8DBQBNvTy4ywK+sNU5EO8RAmp8AJ9QnxK4jTJ7G722MyeBxf0UXEdGwACgtlM7BKtNQfbEH/fOW5y+45W88VI=
41 41 733af5d9f6b22387913e1d11350fb8cb7c1487dd 0 iD8DBQBN5q/8ywK+sNU5EO8RArRGAKCNGT94GKIYtSuwZ57z1sQbcw6uLACfffpbMV4NAPMl8womAwg+7ZPKnIU=
42 42 de9eb6b1da4fc522b1cab16d86ca166204c24f25 0 iD8DBQBODhfhywK+sNU5EO8RAr2+AJ4ugbAj8ae8/K0bYZzx3sascIAg1QCeK3b+zbbVVqd3b7CDpwFnaX8kTd4=
43 43 4a43e23b8c55b4566b8200bf69fe2158485a2634 0 iD8DBQBONzIMywK+sNU5EO8RAj5SAJ0aPS3+JHnyI6bHB2Fl0LImbDmagwCdGbDLp1S7TFobxXudOH49bX45Iik=
44 44 d629f1e89021103f1753addcef6b310e4435b184 0 iD8DBQBOWAsBywK+sNU5EO8RAht4AJwJl9oNFopuGkj5m8aKuf7bqPkoAQCeNrEm7UhFsZKYT5iUOjnMV7s2LaM=
45 45 351a9292e430e35766c552066ed3e87c557b803b 0 iD8DBQBOh3zUywK+sNU5EO8RApFMAKCD3Y/u3avDFndznwqfG5UeTHMlvACfUivPIVQZyDZnhZMq0UhC6zhCEQg=
46 46 384082750f2c51dc917d85a7145748330fa6ef4d 0 iD8DBQBOmd+OywK+sNU5EO8RAgDgAJ9V/X+G7VLwhTpHrZNiOHabzSyzYQCdE2kKfIevJUYB9QLAWCWP6DPwrwI=
47 47 41453d55b481ddfcc1dacb445179649e24ca861d 0 iD8DBQBOsFhpywK+sNU5EO8RAqM6AKCyfxUae3/zLuiLdQz+JR78690eMACfQ6JTBQib4AbE+rUDdkeFYg9K/+4=
48 48 195dbd1cef0c2f9f8bcf4ea303238105f716bda3 0 iD8DBQBO1/fWywK+sNU5EO8RAmoPAKCR5lpv1D6JLURHD8KVLSV4GRVEBgCgnd0Sy78ligNfqAMafmACRDvj7vo=
49 49 6344043924497cd06d781d9014c66802285072e4 0 iD8DBQBPALgmywK+sNU5EO8RAlfhAJ9nYOdWnhfVDHYtDTJAyJtXBAQS9wCgnefoSQt7QABkbGxM+Q85UYEBuD0=
50 50 db33555eafeaf9df1e18950e29439eaa706d399b 0 iD8DBQBPGdzxywK+sNU5EO8RAppkAJ9jOXhUVE/97CPgiMA0pMGiIYnesQCfengAszcBiSiKGugiI8Okc9ghU+Y=
51 51 2aa5b51f310fb3befd26bed99c02267f5c12c734 0 iD8DBQBPKZ9bywK+sNU5EO8RAt1TAJ45r1eJ0YqSkInzrrayg4TVCh0SnQCgm0GA/Ua74jnnDwVQ60lAwROuz1Q=
52 53e2cd303ecf8ca7c7eeebd785c34e5ed6b0f4a4 0 iD8DBQBPT/fvywK+sNU5EO8RAnfYAKCn7d0vwqIb100YfWm1F7nFD5B+FACeM02YHpQLSNsztrBCObtqcnfod7Q=
@@ -1,63 +1,64 b''
1 1 d40cc5aacc31ed673d9b5b24f98bee78c283062c 0.4f
2 2 1c590d34bf61e2ea12c71738e5a746cd74586157 0.4e
3 3 7eca4cfa8aad5fce9a04f7d8acadcd0452e2f34e 0.4d
4 4 b4d0c3786ad3e47beacf8412157326a32b6d25a4 0.4c
5 5 f40273b0ad7b3a6d3012fd37736d0611f41ecf54 0.5
6 6 0a28dfe59f8fab54a5118c5be4f40da34a53cdb7 0.5b
7 7 12e0fdbc57a0be78f0e817fd1d170a3615cd35da 0.6
8 8 4ccf3de52989b14c3d84e1097f59e39a992e00bd 0.6b
9 9 eac9c8efcd9bd8244e72fb6821f769f450457a32 0.6c
10 10 979c049974485125e1f9357f6bbe9c1b548a64c3 0.7
11 11 3a56574f329a368d645853e0f9e09472aee62349 0.8
12 12 6a03cff2b0f5d30281e6addefe96b993582f2eac 0.8.1
13 13 35fb62a3a673d5322f6274a44ba6456e5e4b3b37 0.9
14 14 2be3001847cb18a23c403439d9e7d0ace30804e9 0.9.1
15 15 36a957364b1b89c150f2d0e60a99befe0ee08bd3 0.9.2
16 16 27230c29bfec36d5540fbe1c976810aefecfd1d2 0.9.3
17 17 fb4b6d5fe100b0886f8bc3d6731ec0e5ed5c4694 0.9.4
18 18 23889160905a1b09fffe1c07378e9fc1827606eb 0.9.5
19 19 bae2e9c838e90a393bae3973a7850280413e091a 1.0
20 20 d5cbbe2c49cee22a9fbeb9ea41daa0ac4e26b846 1.0.1
21 21 d2375bbee6d47e62ba8e415c86e83a465dc4dce9 1.0.2
22 22 2a67430f92f15ea5159c26b09ec4839a0c549a26 1.1
23 23 3773e510d433969e277b1863c317b674cbee2065 1.1.1
24 24 11a4eb81fb4f4742451591489e2797dc47903277 1.1.2
25 25 11efa41037e280d08cfb07c09ad485df30fb0ea8 1.2
26 26 02981000012e3adf40c4849bd7b3d5618f9ce82d 1.2.1
27 27 196d40e7c885fa6e95f89134809b3ec7bdbca34b 1.3
28 28 3ef6c14a1e8e83a31226f5881b7fe6095bbfa6f6 1.3.1
29 29 31ec469f9b556f11819937cf68ee53f2be927ebf 1.4
30 30 439d7ea6fe3aa4ab9ec274a68846779153789de9 1.4.1
31 31 296a0b14a68621f6990c54fdba0083f6f20935bf 1.4.2
32 32 4aa619c4c2c09907034d9824ebb1dd0e878206eb 1.4.3
33 33 ff2704a8ded37fbebd8b6eb5ec733731d725da8a 1.5
34 34 2b01dab594167bc0dd33331dbaa6dca3dca1b3aa 1.5.1
35 35 39f725929f0c48c5fb3b90c071fc3066012456ca 1.5.2
36 36 fdcf80f26604f233dc4d8f0a5ef9d7470e317e8a 1.5.3
37 37 24fe2629c6fd0c74c90bd066e77387c2b02e8437 1.5.4
38 38 f786fc4b8764cd2a5526d259cf2f94d8a66924d9 1.6
39 39 bf1774d95bde614af3956d92b20e2a0c68c5fec7 1.6.1
40 40 c00f03a4982e467fb6b6bd45908767db6df4771d 1.6.2
41 41 ff5cec76b1c5b6be9c3bb923aae8c3c6d079d6b9 1.6.3
42 42 93d8bff78c96fe7e33237b257558ee97290048a4 1.6.4
43 43 333421b9e0f96c7bc788e5667c146a58a9440a55 1.7
44 44 4438875ec01bd0fc32be92b0872eb6daeed4d44f 1.7.1
45 45 6aff4f144ad356311318b0011df0bb21f2c97429 1.7.2
46 46 e3bf16703e2601de99e563cdb3a5d50b64e6d320 1.7.3
47 47 a6c855c32ea081da3c3b8ff628f1847ff271482f 1.7.4
48 48 2b2155623ee2559caf288fd333f30475966c4525 1.7.5
49 49 2616325766e3504c8ae7c84bd15ee610901fe91d 1.8
50 50 aa1f3be38ab127280761889d2dca906ca465b5f4 1.8.1
51 51 b032bec2c0a651ca0ddecb65714bfe6770f67d70 1.8.2
52 52 3cb1e95676ad089596bd81d0937cad37d6e3b7fb 1.8.3
53 53 733af5d9f6b22387913e1d11350fb8cb7c1487dd 1.8.4
54 54 de9eb6b1da4fc522b1cab16d86ca166204c24f25 1.9
55 55 4a43e23b8c55b4566b8200bf69fe2158485a2634 1.9.1
56 56 d629f1e89021103f1753addcef6b310e4435b184 1.9.2
57 57 351a9292e430e35766c552066ed3e87c557b803b 1.9.3
58 58 384082750f2c51dc917d85a7145748330fa6ef4d 2.0-rc
59 59 41453d55b481ddfcc1dacb445179649e24ca861d 2.0
60 60 195dbd1cef0c2f9f8bcf4ea303238105f716bda3 2.0.1
61 61 6344043924497cd06d781d9014c66802285072e4 2.0.2
62 62 db33555eafeaf9df1e18950e29439eaa706d399b 2.1-rc
63 63 2aa5b51f310fb3befd26bed99c02267f5c12c734 2.1
64 53e2cd303ecf8ca7c7eeebd785c34e5ed6b0f4a4 2.1.1
@@ -1,736 +1,748 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 import errno
8 8
9 9 from node import nullid
10 10 from i18n import _
11 11 import scmutil, util, ignore, osutil, parsers, encoding
12 12 import struct, os, stat, errno
13 13 import cStringIO
14 14
15 15 _format = ">cllll"
16 16 propertycache = util.propertycache
17 filecache = scmutil.filecache
18
19 class repocache(filecache):
20 """filecache for files in .hg/"""
21 def join(self, obj, fname):
22 return obj._opener.join(fname)
23
24 class rootcache(filecache):
25 """filecache for files in the repository root"""
26 def join(self, obj, fname):
27 return obj._join(fname)
17 28
18 29 def _finddirs(path):
19 30 pos = path.rfind('/')
20 31 while pos != -1:
21 32 yield path[:pos]
22 33 pos = path.rfind('/', 0, pos)
23 34
24 35 def _incdirs(dirs, path):
25 36 for base in _finddirs(path):
26 37 if base in dirs:
27 38 dirs[base] += 1
28 39 return
29 40 dirs[base] = 1
30 41
31 42 def _decdirs(dirs, path):
32 43 for base in _finddirs(path):
33 44 if dirs[base] > 1:
34 45 dirs[base] -= 1
35 46 return
36 47 del dirs[base]
37 48
38 49 class dirstate(object):
39 50
40 51 def __init__(self, opener, ui, root, validate):
41 52 '''Create a new dirstate object.
42 53
43 54 opener is an open()-like callable that can be used to open the
44 55 dirstate file; root is the root of the directory tracked by
45 56 the dirstate.
46 57 '''
47 58 self._opener = opener
48 59 self._validate = validate
49 60 self._root = root
50 61 self._rootdir = os.path.join(root, '')
51 62 self._dirty = False
52 63 self._dirtypl = False
53 64 self._lastnormaltime = 0
54 65 self._ui = ui
66 self._filecache = {}
55 67
56 68 @propertycache
57 69 def _map(self):
58 70 '''Return the dirstate contents as a map from filename to
59 71 (state, mode, size, time).'''
60 72 self._read()
61 73 return self._map
62 74
63 75 @propertycache
64 76 def _copymap(self):
65 77 self._read()
66 78 return self._copymap
67 79
68 80 @propertycache
69 81 def _normroot(self):
70 82 return util.normcase(self._root)
71 83
72 84 @propertycache
73 85 def _foldmap(self):
74 86 f = {}
75 87 for name in self._map:
76 88 f[util.normcase(name)] = name
77 89 f['.'] = '.' # prevents useless util.fspath() invocation
78 90 return f
79 91
80 @propertycache
92 @repocache('branch')
81 93 def _branch(self):
82 94 try:
83 95 return self._opener.read("branch").strip() or "default"
84 96 except IOError, inst:
85 97 if inst.errno != errno.ENOENT:
86 98 raise
87 99 return "default"
88 100
89 101 @propertycache
90 102 def _pl(self):
91 103 try:
92 104 fp = self._opener("dirstate")
93 105 st = fp.read(40)
94 106 fp.close()
95 107 l = len(st)
96 108 if l == 40:
97 109 return st[:20], st[20:40]
98 110 elif l > 0 and l < 40:
99 111 raise util.Abort(_('working directory state appears damaged!'))
100 112 except IOError, err:
101 113 if err.errno != errno.ENOENT:
102 114 raise
103 115 return [nullid, nullid]
104 116
105 117 @propertycache
106 118 def _dirs(self):
107 119 dirs = {}
108 120 for f, s in self._map.iteritems():
109 121 if s[0] != 'r':
110 122 _incdirs(dirs, f)
111 123 return dirs
112 124
113 125 def dirs(self):
114 126 return self._dirs
115 127
116 @propertycache
128 @rootcache('.hgignore')
117 129 def _ignore(self):
118 130 files = [self._join('.hgignore')]
119 131 for name, path in self._ui.configitems("ui"):
120 132 if name == 'ignore' or name.startswith('ignore.'):
121 133 files.append(util.expandpath(path))
122 134 return ignore.ignore(self._root, files, self._ui.warn)
123 135
124 136 @propertycache
125 137 def _slash(self):
126 138 return self._ui.configbool('ui', 'slash') and os.sep != '/'
127 139
128 140 @propertycache
129 141 def _checklink(self):
130 142 return util.checklink(self._root)
131 143
132 144 @propertycache
133 145 def _checkexec(self):
134 146 return util.checkexec(self._root)
135 147
136 148 @propertycache
137 149 def _checkcase(self):
138 150 return not util.checkcase(self._join('.hg'))
139 151
140 152 def _join(self, f):
141 153 # much faster than os.path.join()
142 154 # it's safe because f is always a relative path
143 155 return self._rootdir + f
144 156
145 157 def flagfunc(self, buildfallback):
146 158 if self._checklink and self._checkexec:
147 159 def f(x):
148 160 p = self._join(x)
149 161 if os.path.islink(p):
150 162 return 'l'
151 163 if util.isexec(p):
152 164 return 'x'
153 165 return ''
154 166 return f
155 167
156 168 fallback = buildfallback()
157 169 if self._checklink:
158 170 def f(x):
159 171 if os.path.islink(self._join(x)):
160 172 return 'l'
161 173 if 'x' in fallback(x):
162 174 return 'x'
163 175 return ''
164 176 return f
165 177 if self._checkexec:
166 178 def f(x):
167 179 if 'l' in fallback(x):
168 180 return 'l'
169 181 if util.isexec(self._join(x)):
170 182 return 'x'
171 183 return ''
172 184 return f
173 185 else:
174 186 return fallback
175 187
176 188 def getcwd(self):
177 189 cwd = os.getcwd()
178 190 if cwd == self._root:
179 191 return ''
180 192 # self._root ends with a path separator if self._root is '/' or 'C:\'
181 193 rootsep = self._root
182 194 if not util.endswithsep(rootsep):
183 195 rootsep += os.sep
184 196 if cwd.startswith(rootsep):
185 197 return cwd[len(rootsep):]
186 198 else:
187 199 # we're outside the repo. return an absolute path.
188 200 return cwd
189 201
190 202 def pathto(self, f, cwd=None):
191 203 if cwd is None:
192 204 cwd = self.getcwd()
193 205 path = util.pathto(self._root, cwd, f)
194 206 if self._slash:
195 207 return util.normpath(path)
196 208 return path
197 209
198 210 def __getitem__(self, key):
199 211 '''Return the current state of key (a filename) in the dirstate.
200 212
201 213 States are:
202 214 n normal
203 215 m needs merging
204 216 r marked for removal
205 217 a marked for addition
206 218 ? not tracked
207 219 '''
208 220 return self._map.get(key, ("?",))[0]
209 221
210 222 def __contains__(self, key):
211 223 return key in self._map
212 224
213 225 def __iter__(self):
214 226 for x in sorted(self._map):
215 227 yield x
216 228
217 229 def parents(self):
218 230 return [self._validate(p) for p in self._pl]
219 231
220 232 def p1(self):
221 233 return self._validate(self._pl[0])
222 234
223 235 def p2(self):
224 236 return self._validate(self._pl[1])
225 237
226 238 def branch(self):
227 239 return encoding.tolocal(self._branch)
228 240
229 241 def setparents(self, p1, p2=nullid):
230 242 self._dirty = self._dirtypl = True
231 243 self._pl = p1, p2
232 244
233 245 def setbranch(self, branch):
234 246 if branch in ['tip', '.', 'null']:
235 247 raise util.Abort(_('the name \'%s\' is reserved') % branch)
236 248 self._branch = encoding.fromlocal(branch)
237 249 self._opener.write("branch", self._branch + '\n')
238 250
239 251 def _read(self):
240 252 self._map = {}
241 253 self._copymap = {}
242 254 try:
243 255 st = self._opener.read("dirstate")
244 256 except IOError, err:
245 257 if err.errno != errno.ENOENT:
246 258 raise
247 259 return
248 260 if not st:
249 261 return
250 262
251 263 p = parsers.parse_dirstate(self._map, self._copymap, st)
252 264 if not self._dirtypl:
253 265 self._pl = p
254 266
255 267 def invalidate(self):
256 268 for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
257 269 "_ignore"):
258 270 if a in self.__dict__:
259 271 delattr(self, a)
260 272 self._lastnormaltime = 0
261 273 self._dirty = False
262 274
263 275 def copy(self, source, dest):
264 276 """Mark dest as a copy of source. Unmark dest if source is None."""
265 277 if source == dest:
266 278 return
267 279 self._dirty = True
268 280 if source is not None:
269 281 self._copymap[dest] = source
270 282 elif dest in self._copymap:
271 283 del self._copymap[dest]
272 284
273 285 def copied(self, file):
274 286 return self._copymap.get(file, None)
275 287
276 288 def copies(self):
277 289 return self._copymap
278 290
279 291 def _droppath(self, f):
280 292 if self[f] not in "?r" and "_dirs" in self.__dict__:
281 293 _decdirs(self._dirs, f)
282 294
283 295 def _addpath(self, f, check=False):
284 296 oldstate = self[f]
285 297 if check or oldstate == "r":
286 298 scmutil.checkfilename(f)
287 299 if f in self._dirs:
288 300 raise util.Abort(_('directory %r already in dirstate') % f)
289 301 # shadows
290 302 for d in _finddirs(f):
291 303 if d in self._dirs:
292 304 break
293 305 if d in self._map and self[d] != 'r':
294 306 raise util.Abort(
295 307 _('file %r in dirstate clashes with %r') % (d, f))
296 308 if oldstate in "?r" and "_dirs" in self.__dict__:
297 309 _incdirs(self._dirs, f)
298 310
299 311 def normal(self, f):
300 312 '''Mark a file normal and clean.'''
301 313 self._dirty = True
302 314 self._addpath(f)
303 315 s = os.lstat(self._join(f))
304 316 mtime = int(s.st_mtime)
305 317 self._map[f] = ('n', s.st_mode, s.st_size, mtime)
306 318 if f in self._copymap:
307 319 del self._copymap[f]
308 320 if mtime > self._lastnormaltime:
309 321 # Remember the most recent modification timeslot for status(),
310 322 # to make sure we won't miss future size-preserving file content
311 323 # modifications that happen within the same timeslot.
312 324 self._lastnormaltime = mtime
313 325
314 326 def normallookup(self, f):
315 327 '''Mark a file normal, but possibly dirty.'''
316 328 if self._pl[1] != nullid and f in self._map:
317 329 # if there is a merge going on and the file was either
318 330 # in state 'm' (-1) or coming from other parent (-2) before
319 331 # being removed, restore that state.
320 332 entry = self._map[f]
321 333 if entry[0] == 'r' and entry[2] in (-1, -2):
322 334 source = self._copymap.get(f)
323 335 if entry[2] == -1:
324 336 self.merge(f)
325 337 elif entry[2] == -2:
326 338 self.otherparent(f)
327 339 if source:
328 340 self.copy(source, f)
329 341 return
330 342 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
331 343 return
332 344 self._dirty = True
333 345 self._addpath(f)
334 346 self._map[f] = ('n', 0, -1, -1)
335 347 if f in self._copymap:
336 348 del self._copymap[f]
337 349
338 350 def otherparent(self, f):
339 351 '''Mark as coming from the other parent, always dirty.'''
340 352 if self._pl[1] == nullid:
341 353 raise util.Abort(_("setting %r to other parent "
342 354 "only allowed in merges") % f)
343 355 self._dirty = True
344 356 self._addpath(f)
345 357 self._map[f] = ('n', 0, -2, -1)
346 358 if f in self._copymap:
347 359 del self._copymap[f]
348 360
349 361 def add(self, f):
350 362 '''Mark a file added.'''
351 363 self._dirty = True
352 364 self._addpath(f, True)
353 365 self._map[f] = ('a', 0, -1, -1)
354 366 if f in self._copymap:
355 367 del self._copymap[f]
356 368
357 369 def remove(self, f):
358 370 '''Mark a file removed.'''
359 371 self._dirty = True
360 372 self._droppath(f)
361 373 size = 0
362 374 if self._pl[1] != nullid and f in self._map:
363 375 # backup the previous state
364 376 entry = self._map[f]
365 377 if entry[0] == 'm': # merge
366 378 size = -1
367 379 elif entry[0] == 'n' and entry[2] == -2: # other parent
368 380 size = -2
369 381 self._map[f] = ('r', 0, size, 0)
370 382 if size == 0 and f in self._copymap:
371 383 del self._copymap[f]
372 384
373 385 def merge(self, f):
374 386 '''Mark a file merged.'''
375 387 self._dirty = True
376 388 s = os.lstat(self._join(f))
377 389 self._addpath(f)
378 390 self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime))
379 391 if f in self._copymap:
380 392 del self._copymap[f]
381 393
382 394 def drop(self, f):
383 395 '''Drop a file from the dirstate'''
384 396 if f in self._map:
385 397 self._dirty = True
386 398 self._droppath(f)
387 399 del self._map[f]
388 400
389 401 def _normalize(self, path, isknown):
390 402 normed = util.normcase(path)
391 403 folded = self._foldmap.get(normed, None)
392 404 if folded is None:
393 405 if isknown or not os.path.lexists(os.path.join(self._root, path)):
394 406 folded = path
395 407 else:
396 408 folded = self._foldmap.setdefault(normed,
397 409 util.fspath(normed, self._normroot))
398 410 return folded
399 411
400 412 def normalize(self, path, isknown=False):
401 413 '''
402 414 normalize the case of a pathname when on a casefolding filesystem
403 415
404 416 isknown specifies whether the filename came from walking the
405 417 disk, to avoid extra filesystem access
406 418
407 419 The normalized case is determined based on the following precedence:
408 420
409 421 - version of name already stored in the dirstate
410 422 - version of name stored on disk
411 423 - version provided via command arguments
412 424 '''
413 425
414 426 if self._checkcase:
415 427 return self._normalize(path, isknown)
416 428 return path
417 429
418 430 def clear(self):
419 431 self._map = {}
420 432 if "_dirs" in self.__dict__:
421 433 delattr(self, "_dirs")
422 434 self._copymap = {}
423 435 self._pl = [nullid, nullid]
424 436 self._lastnormaltime = 0
425 437 self._dirty = True
426 438
427 439 def rebuild(self, parent, files):
428 440 self.clear()
429 441 for f in files:
430 442 if 'x' in files.flags(f):
431 443 self._map[f] = ('n', 0777, -1, 0)
432 444 else:
433 445 self._map[f] = ('n', 0666, -1, 0)
434 446 self._pl = (parent, nullid)
435 447 self._dirty = True
436 448
437 449 def write(self):
438 450 if not self._dirty:
439 451 return
440 452 st = self._opener("dirstate", "w", atomictemp=True)
441 453
442 454 # use the modification time of the newly created temporary file as the
443 455 # filesystem's notion of 'now'
444 456 now = int(util.fstat(st).st_mtime)
445 457
446 458 cs = cStringIO.StringIO()
447 459 copymap = self._copymap
448 460 pack = struct.pack
449 461 write = cs.write
450 462 write("".join(self._pl))
451 463 for f, e in self._map.iteritems():
452 464 if e[0] == 'n' and e[3] == now:
453 465 # The file was last modified "simultaneously" with the current
454 466 # write to dirstate (i.e. within the same second for file-
455 467 # systems with a granularity of 1 sec). This commonly happens
456 468 # for at least a couple of files on 'update'.
457 469 # The user could change the file without changing its size
458 470 # within the same second. Invalidate the file's stat data in
459 471 # dirstate, forcing future 'status' calls to compare the
460 472 # contents of the file. This prevents mistakenly treating such
461 473 # files as clean.
462 474 e = (e[0], 0, -1, -1) # mark entry as 'unset'
463 475 self._map[f] = e
464 476
465 477 if f in copymap:
466 478 f = "%s\0%s" % (f, copymap[f])
467 479 e = pack(_format, e[0], e[1], e[2], e[3], len(f))
468 480 write(e)
469 481 write(f)
470 482 st.write(cs.getvalue())
471 483 st.close()
472 484 self._lastnormaltime = 0
473 485 self._dirty = self._dirtypl = False
474 486
475 487 def _dirignore(self, f):
476 488 if f == '.':
477 489 return False
478 490 if self._ignore(f):
479 491 return True
480 492 for p in _finddirs(f):
481 493 if self._ignore(p):
482 494 return True
483 495 return False
484 496
485 497 def walk(self, match, subrepos, unknown, ignored):
486 498 '''
487 499 Walk recursively through the directory tree, finding all files
488 500 matched by match.
489 501
490 502 Return a dict mapping filename to stat-like object (either
491 503 mercurial.osutil.stat instance or return value of os.stat()).
492 504 '''
493 505
494 506 def fwarn(f, msg):
495 507 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
496 508 return False
497 509
498 510 def badtype(mode):
499 511 kind = _('unknown')
500 512 if stat.S_ISCHR(mode):
501 513 kind = _('character device')
502 514 elif stat.S_ISBLK(mode):
503 515 kind = _('block device')
504 516 elif stat.S_ISFIFO(mode):
505 517 kind = _('fifo')
506 518 elif stat.S_ISSOCK(mode):
507 519 kind = _('socket')
508 520 elif stat.S_ISDIR(mode):
509 521 kind = _('directory')
510 522 return _('unsupported file type (type is %s)') % kind
511 523
512 524 ignore = self._ignore
513 525 dirignore = self._dirignore
514 526 if ignored:
515 527 ignore = util.never
516 528 dirignore = util.never
517 529 elif not unknown:
518 530 # if unknown and ignored are False, skip step 2
519 531 ignore = util.always
520 532 dirignore = util.always
521 533
522 534 matchfn = match.matchfn
523 535 badfn = match.bad
524 536 dmap = self._map
525 537 normpath = util.normpath
526 538 listdir = osutil.listdir
527 539 lstat = os.lstat
528 540 getkind = stat.S_IFMT
529 541 dirkind = stat.S_IFDIR
530 542 regkind = stat.S_IFREG
531 543 lnkkind = stat.S_IFLNK
532 544 join = self._join
533 545 work = []
534 546 wadd = work.append
535 547
536 548 exact = skipstep3 = False
537 549 if matchfn == match.exact: # match.exact
538 550 exact = True
539 551 dirignore = util.always # skip step 2
540 552 elif match.files() and not match.anypats(): # match.match, no patterns
541 553 skipstep3 = True
542 554
543 555 if self._checkcase:
544 556 normalize = self._normalize
545 557 skipstep3 = False
546 558 else:
547 559 normalize = lambda x, y: x
548 560
549 561 files = sorted(match.files())
550 562 subrepos.sort()
551 563 i, j = 0, 0
552 564 while i < len(files) and j < len(subrepos):
553 565 subpath = subrepos[j] + "/"
554 566 if files[i] < subpath:
555 567 i += 1
556 568 continue
557 569 while i < len(files) and files[i].startswith(subpath):
558 570 del files[i]
559 571 j += 1
560 572
561 573 if not files or '.' in files:
562 574 files = ['']
563 575 results = dict.fromkeys(subrepos)
564 576 results['.hg'] = None
565 577
566 578 # step 1: find all explicit files
567 579 for ff in files:
568 580 nf = normalize(normpath(ff), False)
569 581 if nf in results:
570 582 continue
571 583
572 584 try:
573 585 st = lstat(join(nf))
574 586 kind = getkind(st.st_mode)
575 587 if kind == dirkind:
576 588 skipstep3 = False
577 589 if nf in dmap:
578 590 #file deleted on disk but still in dirstate
579 591 results[nf] = None
580 592 match.dir(nf)
581 593 if not dirignore(nf):
582 594 wadd(nf)
583 595 elif kind == regkind or kind == lnkkind:
584 596 results[nf] = st
585 597 else:
586 598 badfn(ff, badtype(kind))
587 599 if nf in dmap:
588 600 results[nf] = None
589 601 except OSError, inst:
590 602 if nf in dmap: # does it exactly match a file?
591 603 results[nf] = None
592 604 else: # does it match a directory?
593 605 prefix = nf + "/"
594 606 for fn in dmap:
595 607 if fn.startswith(prefix):
596 608 match.dir(nf)
597 609 skipstep3 = False
598 610 break
599 611 else:
600 612 badfn(ff, inst.strerror)
601 613
602 614 # step 2: visit subdirectories
603 615 while work:
604 616 nd = work.pop()
605 617 skip = None
606 618 if nd == '.':
607 619 nd = ''
608 620 else:
609 621 skip = '.hg'
610 622 try:
611 623 entries = listdir(join(nd), stat=True, skip=skip)
612 624 except OSError, inst:
613 625 if inst.errno == errno.EACCES:
614 626 fwarn(nd, inst.strerror)
615 627 continue
616 628 raise
617 629 for f, kind, st in entries:
618 630 nf = normalize(nd and (nd + "/" + f) or f, True)
619 631 if nf not in results:
620 632 if kind == dirkind:
621 633 if not ignore(nf):
622 634 match.dir(nf)
623 635 wadd(nf)
624 636 if nf in dmap and matchfn(nf):
625 637 results[nf] = None
626 638 elif kind == regkind or kind == lnkkind:
627 639 if nf in dmap:
628 640 if matchfn(nf):
629 641 results[nf] = st
630 642 elif matchfn(nf) and not ignore(nf):
631 643 results[nf] = st
632 644 elif nf in dmap and matchfn(nf):
633 645 results[nf] = None
634 646
635 647 # step 3: report unseen items in the dmap hash
636 648 if not skipstep3 and not exact:
637 649 visit = sorted([f for f in dmap if f not in results and matchfn(f)])
638 650 for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
639 651 if not st is None and not getkind(st.st_mode) in (regkind, lnkkind):
640 652 st = None
641 653 results[nf] = st
642 654 for s in subrepos:
643 655 del results[s]
644 656 del results['.hg']
645 657 return results
646 658
647 659 def status(self, match, subrepos, ignored, clean, unknown):
648 660 '''Determine the status of the working copy relative to the
649 661 dirstate and return a tuple of lists (unsure, modified, added,
650 662 removed, deleted, unknown, ignored, clean), where:
651 663
652 664 unsure:
653 665 files that might have been modified since the dirstate was
654 666 written, but need to be read to be sure (size is the same
655 667 but mtime differs)
656 668 modified:
657 669 files that have definitely been modified since the dirstate
658 670 was written (different size or mode)
659 671 added:
660 672 files that have been explicitly added with hg add
661 673 removed:
662 674 files that have been explicitly removed with hg remove
663 675 deleted:
664 676 files that have been deleted through other means ("missing")
665 677 unknown:
666 678 files not in the dirstate that are not ignored
667 679 ignored:
668 680 files not in the dirstate that are ignored
669 681 (by _dirignore())
670 682 clean:
671 683 files that have definitely not been modified since the
672 684 dirstate was written
673 685 '''
674 686 listignored, listclean, listunknown = ignored, clean, unknown
675 687 lookup, modified, added, unknown, ignored = [], [], [], [], []
676 688 removed, deleted, clean = [], [], []
677 689
678 690 dmap = self._map
679 691 ladd = lookup.append # aka "unsure"
680 692 madd = modified.append
681 693 aadd = added.append
682 694 uadd = unknown.append
683 695 iadd = ignored.append
684 696 radd = removed.append
685 697 dadd = deleted.append
686 698 cadd = clean.append
687 699
688 700 lnkkind = stat.S_IFLNK
689 701
690 702 for fn, st in self.walk(match, subrepos, listunknown,
691 703 listignored).iteritems():
692 704 if fn not in dmap:
693 705 if (listignored or match.exact(fn)) and self._dirignore(fn):
694 706 if listignored:
695 707 iadd(fn)
696 708 elif listunknown:
697 709 uadd(fn)
698 710 continue
699 711
700 712 state, mode, size, time = dmap[fn]
701 713
702 714 if not st and state in "nma":
703 715 dadd(fn)
704 716 elif state == 'n':
705 717 # The "mode & lnkkind != lnkkind or self._checklink"
706 718 # lines are an expansion of "islink => checklink"
707 719 # where islink means "is this a link?" and checklink
708 720 # means "can we check links?".
709 721 mtime = int(st.st_mtime)
710 722 if (size >= 0 and
711 723 (size != st.st_size
712 724 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
713 725 and (mode & lnkkind != lnkkind or self._checklink)
714 726 or size == -2 # other parent
715 727 or fn in self._copymap):
716 728 madd(fn)
717 729 elif (mtime != time
718 730 and (mode & lnkkind != lnkkind or self._checklink)):
719 731 ladd(fn)
720 732 elif mtime == self._lastnormaltime:
721 733 # fn may have been changed in the same timeslot without
722 734 # changing its size. This can happen if we quickly do
723 735 # multiple commits in a single transaction.
724 736 # Force lookup, so we don't miss such a racy file change.
725 737 ladd(fn)
726 738 elif listclean:
727 739 cadd(fn)
728 740 elif state == 'm':
729 741 madd(fn)
730 742 elif state == 'a':
731 743 aadd(fn)
732 744 elif state == 'r':
733 745 radd(fn)
734 746
735 747 return (lookup, modified, added, removed, deleted, unknown, ignored,
736 748 clean)
@@ -1,2336 +1,2344 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 class storecache(filecache):
23 """filecache for files in the store"""
24 def join(self, obj, fname):
25 return obj.sjoin(fname)
26
22 27 class localrepository(repo.repository):
23 28 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
24 29 'known', 'getbundle'))
25 30 supportedformats = set(('revlogv1', 'generaldelta'))
26 31 supported = supportedformats | set(('store', 'fncache', 'shared',
27 32 'dotencode'))
28 33
29 34 def __init__(self, baseui, path=None, create=False):
30 35 repo.repository.__init__(self)
31 36 self.root = os.path.realpath(util.expandpath(path))
32 37 self.path = os.path.join(self.root, ".hg")
33 38 self.origroot = path
34 39 self.auditor = scmutil.pathauditor(self.root, self._checknested)
35 40 self.opener = scmutil.opener(self.path)
36 41 self.wopener = scmutil.opener(self.root)
37 42 self.baseui = baseui
38 43 self.ui = baseui.copy()
39 44 self._dirtyphases = False
40 45 # A list of callback to shape the phase if no data were found.
41 46 # Callback are in the form: func(repo, roots) --> processed root.
42 47 # This list it to be filled by extension during repo setup
43 48 self._phasedefaults = []
44 49
45 50 try:
46 51 self.ui.readconfig(self.join("hgrc"), self.root)
47 52 extensions.loadall(self.ui)
48 53 except IOError:
49 54 pass
50 55
51 56 if not os.path.isdir(self.path):
52 57 if create:
53 58 if not os.path.exists(path):
54 59 util.makedirs(path)
55 60 util.makedir(self.path, notindexed=True)
56 61 requirements = ["revlogv1"]
57 62 if self.ui.configbool('format', 'usestore', True):
58 63 os.mkdir(os.path.join(self.path, "store"))
59 64 requirements.append("store")
60 65 if self.ui.configbool('format', 'usefncache', True):
61 66 requirements.append("fncache")
62 67 if self.ui.configbool('format', 'dotencode', True):
63 68 requirements.append('dotencode')
64 69 # create an invalid changelog
65 70 self.opener.append(
66 71 "00changelog.i",
67 72 '\0\0\0\2' # represents revlogv2
68 73 ' dummy changelog to prevent using the old repo layout'
69 74 )
70 75 if self.ui.configbool('format', 'generaldelta', False):
71 76 requirements.append("generaldelta")
72 77 requirements = set(requirements)
73 78 else:
74 79 raise error.RepoError(_("repository %s not found") % path)
75 80 elif create:
76 81 raise error.RepoError(_("repository %s already exists") % path)
77 82 else:
78 83 try:
79 84 requirements = scmutil.readrequires(self.opener, self.supported)
80 85 except IOError, inst:
81 86 if inst.errno != errno.ENOENT:
82 87 raise
83 88 requirements = set()
84 89
85 90 self.sharedpath = self.path
86 91 try:
87 92 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
88 93 if not os.path.exists(s):
89 94 raise error.RepoError(
90 95 _('.hg/sharedpath points to nonexistent directory %s') % s)
91 96 self.sharedpath = s
92 97 except IOError, inst:
93 98 if inst.errno != errno.ENOENT:
94 99 raise
95 100
96 101 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
97 102 self.spath = self.store.path
98 103 self.sopener = self.store.opener
99 104 self.sjoin = self.store.join
100 105 self.opener.createmode = self.store.createmode
101 106 self._applyrequirements(requirements)
102 107 if create:
103 108 self._writerequirements()
104 109
105 110
106 111 self._branchcache = None
107 112 self._branchcachetip = None
108 113 self.filterpats = {}
109 114 self._datafilters = {}
110 115 self._transref = self._lockref = self._wlockref = None
111 116
112 117 # A cache for various files under .hg/ that tracks file changes,
113 118 # (used by the filecache decorator)
114 119 #
115 120 # Maps a property name to its util.filecacheentry
116 121 self._filecache = {}
117 122
118 123 def _applyrequirements(self, requirements):
119 124 self.requirements = requirements
120 125 openerreqs = set(('revlogv1', 'generaldelta'))
121 126 self.sopener.options = dict((r, 1) for r in requirements
122 127 if r in openerreqs)
123 128
124 129 def _writerequirements(self):
125 130 reqfile = self.opener("requires", "w")
126 131 for r in self.requirements:
127 132 reqfile.write("%s\n" % r)
128 133 reqfile.close()
129 134
130 135 def _checknested(self, path):
131 136 """Determine if path is a legal nested repository."""
132 137 if not path.startswith(self.root):
133 138 return False
134 139 subpath = path[len(self.root) + 1:]
135 140 normsubpath = util.pconvert(subpath)
136 141
137 142 # XXX: Checking against the current working copy is wrong in
138 143 # the sense that it can reject things like
139 144 #
140 145 # $ hg cat -r 10 sub/x.txt
141 146 #
142 147 # if sub/ is no longer a subrepository in the working copy
143 148 # parent revision.
144 149 #
145 150 # However, it can of course also allow things that would have
146 151 # been rejected before, such as the above cat command if sub/
147 152 # is a subrepository now, but was a normal directory before.
148 153 # The old path auditor would have rejected by mistake since it
149 154 # panics when it sees sub/.hg/.
150 155 #
151 156 # All in all, checking against the working copy seems sensible
152 157 # since we want to prevent access to nested repositories on
153 158 # the filesystem *now*.
154 159 ctx = self[None]
155 160 parts = util.splitpath(subpath)
156 161 while parts:
157 162 prefix = '/'.join(parts)
158 163 if prefix in ctx.substate:
159 164 if prefix == normsubpath:
160 165 return True
161 166 else:
162 167 sub = ctx.sub(prefix)
163 168 return sub.checknested(subpath[len(prefix) + 1:])
164 169 else:
165 170 parts.pop()
166 171 return False
167 172
168 173 @filecache('bookmarks')
169 174 def _bookmarks(self):
170 175 return bookmarks.read(self)
171 176
172 177 @filecache('bookmarks.current')
173 178 def _bookmarkcurrent(self):
174 179 return bookmarks.readcurrent(self)
175 180
176 181 def _writebookmarks(self, marks):
177 182 bookmarks.write(self)
178 183
179 @filecache('phaseroots', True)
184 @storecache('phaseroots')
180 185 def _phaseroots(self):
181 186 self._dirtyphases = False
182 187 phaseroots = phases.readroots(self)
183 188 phases.filterunknown(self, phaseroots)
184 189 return phaseroots
185 190
186 191 @propertycache
187 192 def _phaserev(self):
188 193 cache = [phases.public] * len(self)
189 194 for phase in phases.trackedphases:
190 195 roots = map(self.changelog.rev, self._phaseroots[phase])
191 196 if roots:
192 197 for rev in roots:
193 198 cache[rev] = phase
194 199 for rev in self.changelog.descendants(*roots):
195 200 cache[rev] = phase
196 201 return cache
197 202
198 @filecache('00changelog.i', True)
203 @storecache('00changelog.i')
199 204 def changelog(self):
200 205 c = changelog.changelog(self.sopener)
201 206 if 'HG_PENDING' in os.environ:
202 207 p = os.environ['HG_PENDING']
203 208 if p.startswith(self.root):
204 209 c.readpending('00changelog.i.a')
205 210 return c
206 211
207 @filecache('00manifest.i', True)
212 @storecache('00manifest.i')
208 213 def manifest(self):
209 214 return manifest.manifest(self.sopener)
210 215
211 216 @filecache('dirstate')
212 217 def dirstate(self):
213 218 warned = [0]
214 219 def validate(node):
215 220 try:
216 221 self.changelog.rev(node)
217 222 return node
218 223 except error.LookupError:
219 224 if not warned[0]:
220 225 warned[0] = True
221 226 self.ui.warn(_("warning: ignoring unknown"
222 227 " working parent %s!\n") % short(node))
223 228 return nullid
224 229
225 230 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
226 231
227 232 def __getitem__(self, changeid):
228 233 if changeid is None:
229 234 return context.workingctx(self)
230 235 return context.changectx(self, changeid)
231 236
232 237 def __contains__(self, changeid):
233 238 try:
234 239 return bool(self.lookup(changeid))
235 240 except error.RepoLookupError:
236 241 return False
237 242
238 243 def __nonzero__(self):
239 244 return True
240 245
241 246 def __len__(self):
242 247 return len(self.changelog)
243 248
244 249 def __iter__(self):
245 250 for i in xrange(len(self)):
246 251 yield i
247 252
248 253 def revs(self, expr, *args):
249 254 '''Return a list of revisions matching the given revset'''
250 255 expr = revset.formatspec(expr, *args)
251 256 m = revset.match(None, expr)
252 257 return [r for r in m(self, range(len(self)))]
253 258
254 259 def set(self, expr, *args):
255 260 '''
256 261 Yield a context for each matching revision, after doing arg
257 262 replacement via revset.formatspec
258 263 '''
259 264 for r in self.revs(expr, *args):
260 265 yield self[r]
261 266
262 267 def url(self):
263 268 return 'file:' + self.root
264 269
265 270 def hook(self, name, throw=False, **args):
266 271 return hook.hook(self.ui, self, name, throw, **args)
267 272
268 273 tag_disallowed = ':\r\n'
269 274
270 275 def _tag(self, names, node, message, local, user, date, extra={}):
271 276 if isinstance(names, str):
272 277 allchars = names
273 278 names = (names,)
274 279 else:
275 280 allchars = ''.join(names)
276 281 for c in self.tag_disallowed:
277 282 if c in allchars:
278 283 raise util.Abort(_('%r cannot be used in a tag name') % c)
279 284
280 285 branches = self.branchmap()
281 286 for name in names:
282 287 self.hook('pretag', throw=True, node=hex(node), tag=name,
283 288 local=local)
284 289 if name in branches:
285 290 self.ui.warn(_("warning: tag %s conflicts with existing"
286 291 " branch name\n") % name)
287 292
288 293 def writetags(fp, names, munge, prevtags):
289 294 fp.seek(0, 2)
290 295 if prevtags and prevtags[-1] != '\n':
291 296 fp.write('\n')
292 297 for name in names:
293 298 m = munge and munge(name) or name
294 299 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
295 300 old = self.tags().get(name, nullid)
296 301 fp.write('%s %s\n' % (hex(old), m))
297 302 fp.write('%s %s\n' % (hex(node), m))
298 303 fp.close()
299 304
300 305 prevtags = ''
301 306 if local:
302 307 try:
303 308 fp = self.opener('localtags', 'r+')
304 309 except IOError:
305 310 fp = self.opener('localtags', 'a')
306 311 else:
307 312 prevtags = fp.read()
308 313
309 314 # local tags are stored in the current charset
310 315 writetags(fp, names, None, prevtags)
311 316 for name in names:
312 317 self.hook('tag', node=hex(node), tag=name, local=local)
313 318 return
314 319
315 320 try:
316 321 fp = self.wfile('.hgtags', 'rb+')
317 322 except IOError, e:
318 323 if e.errno != errno.ENOENT:
319 324 raise
320 325 fp = self.wfile('.hgtags', 'ab')
321 326 else:
322 327 prevtags = fp.read()
323 328
324 329 # committed tags are stored in UTF-8
325 330 writetags(fp, names, encoding.fromlocal, prevtags)
326 331
327 332 fp.close()
328 333
329 334 self.invalidatecaches()
330 335
331 336 if '.hgtags' not in self.dirstate:
332 337 self[None].add(['.hgtags'])
333 338
334 339 m = matchmod.exact(self.root, '', ['.hgtags'])
335 340 tagnode = self.commit(message, user, date, extra=extra, match=m)
336 341
337 342 for name in names:
338 343 self.hook('tag', node=hex(node), tag=name, local=local)
339 344
340 345 return tagnode
341 346
342 347 def tag(self, names, node, message, local, user, date):
343 348 '''tag a revision with one or more symbolic names.
344 349
345 350 names is a list of strings or, when adding a single tag, names may be a
346 351 string.
347 352
348 353 if local is True, the tags are stored in a per-repository file.
349 354 otherwise, they are stored in the .hgtags file, and a new
350 355 changeset is committed with the change.
351 356
352 357 keyword arguments:
353 358
354 359 local: whether to store tags in non-version-controlled file
355 360 (default False)
356 361
357 362 message: commit message to use if committing
358 363
359 364 user: name of user to use if committing
360 365
361 366 date: date tuple to use if committing'''
362 367
363 368 if not local:
364 369 for x in self.status()[:5]:
365 370 if '.hgtags' in x:
366 371 raise util.Abort(_('working copy of .hgtags is changed '
367 372 '(please commit .hgtags manually)'))
368 373
369 374 self.tags() # instantiate the cache
370 375 self._tag(names, node, message, local, user, date)
371 376
372 377 @propertycache
373 378 def _tagscache(self):
374 379 '''Returns a tagscache object that contains various tags related caches.'''
375 380
376 381 # This simplifies its cache management by having one decorated
377 382 # function (this one) and the rest simply fetch things from it.
378 383 class tagscache(object):
379 384 def __init__(self):
380 385 # These two define the set of tags for this repository. tags
381 386 # maps tag name to node; tagtypes maps tag name to 'global' or
382 387 # 'local'. (Global tags are defined by .hgtags across all
383 388 # heads, and local tags are defined in .hg/localtags.)
384 389 # They constitute the in-memory cache of tags.
385 390 self.tags = self.tagtypes = None
386 391
387 392 self.nodetagscache = self.tagslist = None
388 393
389 394 cache = tagscache()
390 395 cache.tags, cache.tagtypes = self._findtags()
391 396
392 397 return cache
393 398
394 399 def tags(self):
395 400 '''return a mapping of tag to node'''
396 401 return self._tagscache.tags
397 402
398 403 def _findtags(self):
399 404 '''Do the hard work of finding tags. Return a pair of dicts
400 405 (tags, tagtypes) where tags maps tag name to node, and tagtypes
401 406 maps tag name to a string like \'global\' or \'local\'.
402 407 Subclasses or extensions are free to add their own tags, but
403 408 should be aware that the returned dicts will be retained for the
404 409 duration of the localrepo object.'''
405 410
406 411 # XXX what tagtype should subclasses/extensions use? Currently
407 412 # mq and bookmarks add tags, but do not set the tagtype at all.
408 413 # Should each extension invent its own tag type? Should there
409 414 # be one tagtype for all such "virtual" tags? Or is the status
410 415 # quo fine?
411 416
412 417 alltags = {} # map tag name to (node, hist)
413 418 tagtypes = {}
414 419
415 420 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
416 421 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
417 422
418 423 # Build the return dicts. Have to re-encode tag names because
419 424 # the tags module always uses UTF-8 (in order not to lose info
420 425 # writing to the cache), but the rest of Mercurial wants them in
421 426 # local encoding.
422 427 tags = {}
423 428 for (name, (node, hist)) in alltags.iteritems():
424 429 if node != nullid:
425 430 try:
426 431 # ignore tags to unknown nodes
427 432 self.changelog.lookup(node)
428 433 tags[encoding.tolocal(name)] = node
429 434 except error.LookupError:
430 435 pass
431 436 tags['tip'] = self.changelog.tip()
432 437 tagtypes = dict([(encoding.tolocal(name), value)
433 438 for (name, value) in tagtypes.iteritems()])
434 439 return (tags, tagtypes)
435 440
436 441 def tagtype(self, tagname):
437 442 '''
438 443 return the type of the given tag. result can be:
439 444
440 445 'local' : a local tag
441 446 'global' : a global tag
442 447 None : tag does not exist
443 448 '''
444 449
445 450 return self._tagscache.tagtypes.get(tagname)
446 451
447 452 def tagslist(self):
448 453 '''return a list of tags ordered by revision'''
449 454 if not self._tagscache.tagslist:
450 455 l = []
451 456 for t, n in self.tags().iteritems():
452 457 r = self.changelog.rev(n)
453 458 l.append((r, t, n))
454 459 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
455 460
456 461 return self._tagscache.tagslist
457 462
458 463 def nodetags(self, node):
459 464 '''return the tags associated with a node'''
460 465 if not self._tagscache.nodetagscache:
461 466 nodetagscache = {}
462 467 for t, n in self.tags().iteritems():
463 468 nodetagscache.setdefault(n, []).append(t)
464 469 for tags in nodetagscache.itervalues():
465 470 tags.sort()
466 471 self._tagscache.nodetagscache = nodetagscache
467 472 return self._tagscache.nodetagscache.get(node, [])
468 473
469 474 def nodebookmarks(self, node):
470 475 marks = []
471 476 for bookmark, n in self._bookmarks.iteritems():
472 477 if n == node:
473 478 marks.append(bookmark)
474 479 return sorted(marks)
475 480
476 481 def _branchtags(self, partial, lrev):
477 482 # TODO: rename this function?
478 483 tiprev = len(self) - 1
479 484 if lrev != tiprev:
480 485 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
481 486 self._updatebranchcache(partial, ctxgen)
482 487 self._writebranchcache(partial, self.changelog.tip(), tiprev)
483 488
484 489 return partial
485 490
486 491 def updatebranchcache(self):
487 492 tip = self.changelog.tip()
488 493 if self._branchcache is not None and self._branchcachetip == tip:
489 494 return
490 495
491 496 oldtip = self._branchcachetip
492 497 self._branchcachetip = tip
493 498 if oldtip is None or oldtip not in self.changelog.nodemap:
494 499 partial, last, lrev = self._readbranchcache()
495 500 else:
496 501 lrev = self.changelog.rev(oldtip)
497 502 partial = self._branchcache
498 503
499 504 self._branchtags(partial, lrev)
500 505 # this private cache holds all heads (not just tips)
501 506 self._branchcache = partial
502 507
503 508 def branchmap(self):
504 509 '''returns a dictionary {branch: [branchheads]}'''
505 510 self.updatebranchcache()
506 511 return self._branchcache
507 512
508 513 def branchtags(self):
509 514 '''return a dict where branch names map to the tipmost head of
510 515 the branch, open heads come before closed'''
511 516 bt = {}
512 517 for bn, heads in self.branchmap().iteritems():
513 518 tip = heads[-1]
514 519 for h in reversed(heads):
515 520 if 'close' not in self.changelog.read(h)[5]:
516 521 tip = h
517 522 break
518 523 bt[bn] = tip
519 524 return bt
520 525
521 526 def _readbranchcache(self):
522 527 partial = {}
523 528 try:
524 529 f = self.opener("cache/branchheads")
525 530 lines = f.read().split('\n')
526 531 f.close()
527 532 except (IOError, OSError):
528 533 return {}, nullid, nullrev
529 534
530 535 try:
531 536 last, lrev = lines.pop(0).split(" ", 1)
532 537 last, lrev = bin(last), int(lrev)
533 538 if lrev >= len(self) or self[lrev].node() != last:
534 539 # invalidate the cache
535 540 raise ValueError('invalidating branch cache (tip differs)')
536 541 for l in lines:
537 542 if not l:
538 543 continue
539 544 node, label = l.split(" ", 1)
540 545 label = encoding.tolocal(label.strip())
541 546 partial.setdefault(label, []).append(bin(node))
542 547 except KeyboardInterrupt:
543 548 raise
544 549 except Exception, inst:
545 550 if self.ui.debugflag:
546 551 self.ui.warn(str(inst), '\n')
547 552 partial, last, lrev = {}, nullid, nullrev
548 553 return partial, last, lrev
549 554
550 555 def _writebranchcache(self, branches, tip, tiprev):
551 556 try:
552 557 f = self.opener("cache/branchheads", "w", atomictemp=True)
553 558 f.write("%s %s\n" % (hex(tip), tiprev))
554 559 for label, nodes in branches.iteritems():
555 560 for node in nodes:
556 561 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
557 562 f.close()
558 563 except (IOError, OSError):
559 564 pass
560 565
561 566 def _updatebranchcache(self, partial, ctxgen):
562 567 # collect new branch entries
563 568 newbranches = {}
564 569 for c in ctxgen:
565 570 newbranches.setdefault(c.branch(), []).append(c.node())
566 571 # if older branchheads are reachable from new ones, they aren't
567 572 # really branchheads. Note checking parents is insufficient:
568 573 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
569 574 for branch, newnodes in newbranches.iteritems():
570 575 bheads = partial.setdefault(branch, [])
571 576 bheads.extend(newnodes)
572 577 if len(bheads) <= 1:
573 578 continue
574 579 bheads = sorted(bheads, key=lambda x: self[x].rev())
575 580 # starting from tip means fewer passes over reachable
576 581 while newnodes:
577 582 latest = newnodes.pop()
578 583 if latest not in bheads:
579 584 continue
580 585 minbhrev = self[bheads[0]].node()
581 586 reachable = self.changelog.reachable(latest, minbhrev)
582 587 reachable.remove(latest)
583 588 if reachable:
584 589 bheads = [b for b in bheads if b not in reachable]
585 590 partial[branch] = bheads
586 591
587 592 def lookup(self, key):
588 593 if isinstance(key, int):
589 594 return self.changelog.node(key)
590 595 elif key == '.':
591 596 return self.dirstate.p1()
592 597 elif key == 'null':
593 598 return nullid
594 599 elif key == 'tip':
595 600 return self.changelog.tip()
596 601 n = self.changelog._match(key)
597 602 if n:
598 603 return n
599 604 if key in self._bookmarks:
600 605 return self._bookmarks[key]
601 606 if key in self.tags():
602 607 return self.tags()[key]
603 608 if key in self.branchtags():
604 609 return self.branchtags()[key]
605 610 n = self.changelog._partialmatch(key)
606 611 if n:
607 612 return n
608 613
609 614 # can't find key, check if it might have come from damaged dirstate
610 615 if key in self.dirstate.parents():
611 616 raise error.Abort(_("working directory has unknown parent '%s'!")
612 617 % short(key))
613 618 try:
614 619 if len(key) == 20:
615 620 key = hex(key)
616 621 except TypeError:
617 622 pass
618 623 raise error.RepoLookupError(_("unknown revision '%s'") % key)
619 624
620 625 def lookupbranch(self, key, remote=None):
621 626 repo = remote or self
622 627 if key in repo.branchmap():
623 628 return key
624 629
625 630 repo = (remote and remote.local()) and remote or self
626 631 return repo[key].branch()
627 632
628 633 def known(self, nodes):
629 634 nm = self.changelog.nodemap
630 635 result = []
631 636 for n in nodes:
632 637 r = nm.get(n)
633 638 resp = not (r is None or self._phaserev[r] >= phases.secret)
634 639 result.append(resp)
635 640 return result
636 641
637 642 def local(self):
638 643 return self
639 644
640 645 def join(self, f):
641 646 return os.path.join(self.path, f)
642 647
643 648 def wjoin(self, f):
644 649 return os.path.join(self.root, f)
645 650
646 651 def file(self, f):
647 652 if f[0] == '/':
648 653 f = f[1:]
649 654 return filelog.filelog(self.sopener, f)
650 655
651 656 def changectx(self, changeid):
652 657 return self[changeid]
653 658
654 659 def parents(self, changeid=None):
655 660 '''get list of changectxs for parents of changeid'''
656 661 return self[changeid].parents()
657 662
658 663 def filectx(self, path, changeid=None, fileid=None):
659 664 """changeid can be a changeset revision, node, or tag.
660 665 fileid can be a file revision or node."""
661 666 return context.filectx(self, path, changeid, fileid)
662 667
663 668 def getcwd(self):
664 669 return self.dirstate.getcwd()
665 670
666 671 def pathto(self, f, cwd=None):
667 672 return self.dirstate.pathto(f, cwd)
668 673
669 674 def wfile(self, f, mode='r'):
670 675 return self.wopener(f, mode)
671 676
672 677 def _link(self, f):
673 678 return os.path.islink(self.wjoin(f))
674 679
675 680 def _loadfilter(self, filter):
676 681 if filter not in self.filterpats:
677 682 l = []
678 683 for pat, cmd in self.ui.configitems(filter):
679 684 if cmd == '!':
680 685 continue
681 686 mf = matchmod.match(self.root, '', [pat])
682 687 fn = None
683 688 params = cmd
684 689 for name, filterfn in self._datafilters.iteritems():
685 690 if cmd.startswith(name):
686 691 fn = filterfn
687 692 params = cmd[len(name):].lstrip()
688 693 break
689 694 if not fn:
690 695 fn = lambda s, c, **kwargs: util.filter(s, c)
691 696 # Wrap old filters not supporting keyword arguments
692 697 if not inspect.getargspec(fn)[2]:
693 698 oldfn = fn
694 699 fn = lambda s, c, **kwargs: oldfn(s, c)
695 700 l.append((mf, fn, params))
696 701 self.filterpats[filter] = l
697 702 return self.filterpats[filter]
698 703
699 704 def _filter(self, filterpats, filename, data):
700 705 for mf, fn, cmd in filterpats:
701 706 if mf(filename):
702 707 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
703 708 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
704 709 break
705 710
706 711 return data
707 712
708 713 @propertycache
709 714 def _encodefilterpats(self):
710 715 return self._loadfilter('encode')
711 716
712 717 @propertycache
713 718 def _decodefilterpats(self):
714 719 return self._loadfilter('decode')
715 720
716 721 def adddatafilter(self, name, filter):
717 722 self._datafilters[name] = filter
718 723
719 724 def wread(self, filename):
720 725 if self._link(filename):
721 726 data = os.readlink(self.wjoin(filename))
722 727 else:
723 728 data = self.wopener.read(filename)
724 729 return self._filter(self._encodefilterpats, filename, data)
725 730
726 731 def wwrite(self, filename, data, flags):
727 732 data = self._filter(self._decodefilterpats, filename, data)
728 733 if 'l' in flags:
729 734 self.wopener.symlink(data, filename)
730 735 else:
731 736 self.wopener.write(filename, data)
732 737 if 'x' in flags:
733 738 util.setflags(self.wjoin(filename), False, True)
734 739
735 740 def wwritedata(self, filename, data):
736 741 return self._filter(self._decodefilterpats, filename, data)
737 742
738 743 def transaction(self, desc):
739 744 tr = self._transref and self._transref() or None
740 745 if tr and tr.running():
741 746 return tr.nest()
742 747
743 748 # abort here if the journal already exists
744 749 if os.path.exists(self.sjoin("journal")):
745 750 raise error.RepoError(
746 751 _("abandoned transaction found - run hg recover"))
747 752
748 753 journalfiles = self._writejournal(desc)
749 754 renames = [(x, undoname(x)) for x in journalfiles]
750 755
751 756 tr = transaction.transaction(self.ui.warn, self.sopener,
752 757 self.sjoin("journal"),
753 758 aftertrans(renames),
754 759 self.store.createmode)
755 760 self._transref = weakref.ref(tr)
756 761 return tr
757 762
758 763 def _writejournal(self, desc):
759 764 # save dirstate for rollback
760 765 try:
761 766 ds = self.opener.read("dirstate")
762 767 except IOError:
763 768 ds = ""
764 769 self.opener.write("journal.dirstate", ds)
765 770 self.opener.write("journal.branch",
766 771 encoding.fromlocal(self.dirstate.branch()))
767 772 self.opener.write("journal.desc",
768 773 "%d\n%s\n" % (len(self), desc))
769 774
770 775 bkname = self.join('bookmarks')
771 776 if os.path.exists(bkname):
772 777 util.copyfile(bkname, self.join('journal.bookmarks'))
773 778 else:
774 779 self.opener.write('journal.bookmarks', '')
775 780 phasesname = self.sjoin('phaseroots')
776 781 if os.path.exists(phasesname):
777 782 util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
778 783 else:
779 784 self.sopener.write('journal.phaseroots', '')
780 785
781 786 return (self.sjoin('journal'), self.join('journal.dirstate'),
782 787 self.join('journal.branch'), self.join('journal.desc'),
783 788 self.join('journal.bookmarks'),
784 789 self.sjoin('journal.phaseroots'))
785 790
786 791 def recover(self):
787 792 lock = self.lock()
788 793 try:
789 794 if os.path.exists(self.sjoin("journal")):
790 795 self.ui.status(_("rolling back interrupted transaction\n"))
791 796 transaction.rollback(self.sopener, self.sjoin("journal"),
792 797 self.ui.warn)
793 798 self.invalidate()
794 799 return True
795 800 else:
796 801 self.ui.warn(_("no interrupted transaction available\n"))
797 802 return False
798 803 finally:
799 804 lock.release()
800 805
801 806 def rollback(self, dryrun=False, force=False):
802 807 wlock = lock = None
803 808 try:
804 809 wlock = self.wlock()
805 810 lock = self.lock()
806 811 if os.path.exists(self.sjoin("undo")):
807 812 return self._rollback(dryrun, force)
808 813 else:
809 814 self.ui.warn(_("no rollback information available\n"))
810 815 return 1
811 816 finally:
812 817 release(lock, wlock)
813 818
814 819 def _rollback(self, dryrun, force):
815 820 ui = self.ui
816 821 try:
817 822 args = self.opener.read('undo.desc').splitlines()
818 823 (oldlen, desc, detail) = (int(args[0]), args[1], None)
819 824 if len(args) >= 3:
820 825 detail = args[2]
821 826 oldtip = oldlen - 1
822 827
823 828 if detail and ui.verbose:
824 829 msg = (_('repository tip rolled back to revision %s'
825 830 ' (undo %s: %s)\n')
826 831 % (oldtip, desc, detail))
827 832 else:
828 833 msg = (_('repository tip rolled back to revision %s'
829 834 ' (undo %s)\n')
830 835 % (oldtip, desc))
831 836 except IOError:
832 837 msg = _('rolling back unknown transaction\n')
833 838 desc = None
834 839
835 840 if not force and self['.'] != self['tip'] and desc == 'commit':
836 841 raise util.Abort(
837 842 _('rollback of last commit while not checked out '
838 843 'may lose data'), hint=_('use -f to force'))
839 844
840 845 ui.status(msg)
841 846 if dryrun:
842 847 return 0
843 848
844 849 parents = self.dirstate.parents()
845 850 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
846 851 if os.path.exists(self.join('undo.bookmarks')):
847 852 util.rename(self.join('undo.bookmarks'),
848 853 self.join('bookmarks'))
849 854 if os.path.exists(self.sjoin('undo.phaseroots')):
850 855 util.rename(self.sjoin('undo.phaseroots'),
851 856 self.sjoin('phaseroots'))
852 857 self.invalidate()
853 858
854 859 parentgone = (parents[0] not in self.changelog.nodemap or
855 860 parents[1] not in self.changelog.nodemap)
856 861 if parentgone:
857 862 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
858 863 try:
859 864 branch = self.opener.read('undo.branch')
860 865 self.dirstate.setbranch(branch)
861 866 except IOError:
862 867 ui.warn(_('named branch could not be reset: '
863 868 'current branch is still \'%s\'\n')
864 869 % self.dirstate.branch())
865 870
866 871 self.dirstate.invalidate()
867 872 parents = tuple([p.rev() for p in self.parents()])
868 873 if len(parents) > 1:
869 874 ui.status(_('working directory now based on '
870 875 'revisions %d and %d\n') % parents)
871 876 else:
872 877 ui.status(_('working directory now based on '
873 878 'revision %d\n') % parents)
874 879 self.destroyed()
875 880 return 0
876 881
877 882 def invalidatecaches(self):
878 883 def delcache(name):
879 884 try:
880 885 delattr(self, name)
881 886 except AttributeError:
882 887 pass
883 888
884 889 delcache('_tagscache')
885 890 delcache('_phaserev')
886 891
887 892 self._branchcache = None # in UTF-8
888 893 self._branchcachetip = None
889 894
890 895 def invalidatedirstate(self):
891 896 '''Invalidates the dirstate, causing the next call to dirstate
892 897 to check if it was modified since the last time it was read,
893 898 rereading it if it has.
894 899
895 900 This is different to dirstate.invalidate() that it doesn't always
896 901 rereads the dirstate. Use dirstate.invalidate() if you want to
897 902 explicitly read the dirstate again (i.e. restoring it to a previous
898 903 known good state).'''
899 try:
904 if 'dirstate' in self.__dict__:
905 for k in self.dirstate._filecache:
906 try:
907 delattr(self.dirstate, k)
908 except AttributeError:
909 pass
900 910 delattr(self, 'dirstate')
901 except AttributeError:
902 pass
903 911
904 912 def invalidate(self):
905 913 for k in self._filecache:
906 914 # dirstate is invalidated separately in invalidatedirstate()
907 915 if k == 'dirstate':
908 916 continue
909 917
910 918 try:
911 919 delattr(self, k)
912 920 except AttributeError:
913 921 pass
914 922 self.invalidatecaches()
915 923
916 924 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
917 925 try:
918 926 l = lock.lock(lockname, 0, releasefn, desc=desc)
919 927 except error.LockHeld, inst:
920 928 if not wait:
921 929 raise
922 930 self.ui.warn(_("waiting for lock on %s held by %r\n") %
923 931 (desc, inst.locker))
924 932 # default to 600 seconds timeout
925 933 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
926 934 releasefn, desc=desc)
927 935 if acquirefn:
928 936 acquirefn()
929 937 return l
930 938
931 939 def _afterlock(self, callback):
932 940 """add a callback to the current repository lock.
933 941
934 942 The callback will be executed on lock release."""
935 943 l = self._lockref and self._lockref()
936 944 if l:
937 945 l.postrelease.append(callback)
938 946
939 947 def lock(self, wait=True):
940 948 '''Lock the repository store (.hg/store) and return a weak reference
941 949 to the lock. Use this before modifying the store (e.g. committing or
942 950 stripping). If you are opening a transaction, get a lock as well.)'''
943 951 l = self._lockref and self._lockref()
944 952 if l is not None and l.held:
945 953 l.lock()
946 954 return l
947 955
948 956 def unlock():
949 957 self.store.write()
950 958 if self._dirtyphases:
951 959 phases.writeroots(self)
952 960 self._dirtyphases = False
953 961 for k, ce in self._filecache.items():
954 962 if k == 'dirstate':
955 963 continue
956 964 ce.refresh()
957 965
958 966 l = self._lock(self.sjoin("lock"), wait, unlock,
959 967 self.invalidate, _('repository %s') % self.origroot)
960 968 self._lockref = weakref.ref(l)
961 969 return l
962 970
963 971 def wlock(self, wait=True):
964 972 '''Lock the non-store parts of the repository (everything under
965 973 .hg except .hg/store) and return a weak reference to the lock.
966 974 Use this before modifying files in .hg.'''
967 975 l = self._wlockref and self._wlockref()
968 976 if l is not None and l.held:
969 977 l.lock()
970 978 return l
971 979
972 980 def unlock():
973 981 self.dirstate.write()
974 982 ce = self._filecache.get('dirstate')
975 983 if ce:
976 984 ce.refresh()
977 985
978 986 l = self._lock(self.join("wlock"), wait, unlock,
979 987 self.invalidatedirstate, _('working directory of %s') %
980 988 self.origroot)
981 989 self._wlockref = weakref.ref(l)
982 990 return l
983 991
984 992 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
985 993 """
986 994 commit an individual file as part of a larger transaction
987 995 """
988 996
989 997 fname = fctx.path()
990 998 text = fctx.data()
991 999 flog = self.file(fname)
992 1000 fparent1 = manifest1.get(fname, nullid)
993 1001 fparent2 = fparent2o = manifest2.get(fname, nullid)
994 1002
995 1003 meta = {}
996 1004 copy = fctx.renamed()
997 1005 if copy and copy[0] != fname:
998 1006 # Mark the new revision of this file as a copy of another
999 1007 # file. This copy data will effectively act as a parent
1000 1008 # of this new revision. If this is a merge, the first
1001 1009 # parent will be the nullid (meaning "look up the copy data")
1002 1010 # and the second one will be the other parent. For example:
1003 1011 #
1004 1012 # 0 --- 1 --- 3 rev1 changes file foo
1005 1013 # \ / rev2 renames foo to bar and changes it
1006 1014 # \- 2 -/ rev3 should have bar with all changes and
1007 1015 # should record that bar descends from
1008 1016 # bar in rev2 and foo in rev1
1009 1017 #
1010 1018 # this allows this merge to succeed:
1011 1019 #
1012 1020 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1013 1021 # \ / merging rev3 and rev4 should use bar@rev2
1014 1022 # \- 2 --- 4 as the merge base
1015 1023 #
1016 1024
1017 1025 cfname = copy[0]
1018 1026 crev = manifest1.get(cfname)
1019 1027 newfparent = fparent2
1020 1028
1021 1029 if manifest2: # branch merge
1022 1030 if fparent2 == nullid or crev is None: # copied on remote side
1023 1031 if cfname in manifest2:
1024 1032 crev = manifest2[cfname]
1025 1033 newfparent = fparent1
1026 1034
1027 1035 # find source in nearest ancestor if we've lost track
1028 1036 if not crev:
1029 1037 self.ui.debug(" %s: searching for copy revision for %s\n" %
1030 1038 (fname, cfname))
1031 1039 for ancestor in self[None].ancestors():
1032 1040 if cfname in ancestor:
1033 1041 crev = ancestor[cfname].filenode()
1034 1042 break
1035 1043
1036 1044 if crev:
1037 1045 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1038 1046 meta["copy"] = cfname
1039 1047 meta["copyrev"] = hex(crev)
1040 1048 fparent1, fparent2 = nullid, newfparent
1041 1049 else:
1042 1050 self.ui.warn(_("warning: can't find ancestor for '%s' "
1043 1051 "copied from '%s'!\n") % (fname, cfname))
1044 1052
1045 1053 elif fparent2 != nullid:
1046 1054 # is one parent an ancestor of the other?
1047 1055 fparentancestor = flog.ancestor(fparent1, fparent2)
1048 1056 if fparentancestor == fparent1:
1049 1057 fparent1, fparent2 = fparent2, nullid
1050 1058 elif fparentancestor == fparent2:
1051 1059 fparent2 = nullid
1052 1060
1053 1061 # is the file changed?
1054 1062 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1055 1063 changelist.append(fname)
1056 1064 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1057 1065
1058 1066 # are just the flags changed during merge?
1059 1067 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1060 1068 changelist.append(fname)
1061 1069
1062 1070 return fparent1
1063 1071
1064 1072 def commit(self, text="", user=None, date=None, match=None, force=False,
1065 1073 editor=False, extra={}):
1066 1074 """Add a new revision to current repository.
1067 1075
1068 1076 Revision information is gathered from the working directory,
1069 1077 match can be used to filter the committed files. If editor is
1070 1078 supplied, it is called to get a commit message.
1071 1079 """
1072 1080
1073 1081 def fail(f, msg):
1074 1082 raise util.Abort('%s: %s' % (f, msg))
1075 1083
1076 1084 if not match:
1077 1085 match = matchmod.always(self.root, '')
1078 1086
1079 1087 if not force:
1080 1088 vdirs = []
1081 1089 match.dir = vdirs.append
1082 1090 match.bad = fail
1083 1091
1084 1092 wlock = self.wlock()
1085 1093 try:
1086 1094 wctx = self[None]
1087 1095 merge = len(wctx.parents()) > 1
1088 1096
1089 1097 if (not force and merge and match and
1090 1098 (match.files() or match.anypats())):
1091 1099 raise util.Abort(_('cannot partially commit a merge '
1092 1100 '(do not specify files or patterns)'))
1093 1101
1094 1102 changes = self.status(match=match, clean=force)
1095 1103 if force:
1096 1104 changes[0].extend(changes[6]) # mq may commit unchanged files
1097 1105
1098 1106 # check subrepos
1099 1107 subs = []
1100 1108 commitsubs = set()
1101 1109 newstate = wctx.substate.copy()
1102 1110 # only manage subrepos and .hgsubstate if .hgsub is present
1103 1111 if '.hgsub' in wctx:
1104 1112 # we'll decide whether to track this ourselves, thanks
1105 1113 if '.hgsubstate' in changes[0]:
1106 1114 changes[0].remove('.hgsubstate')
1107 1115 if '.hgsubstate' in changes[2]:
1108 1116 changes[2].remove('.hgsubstate')
1109 1117
1110 1118 # compare current state to last committed state
1111 1119 # build new substate based on last committed state
1112 1120 oldstate = wctx.p1().substate
1113 1121 for s in sorted(newstate.keys()):
1114 1122 if not match(s):
1115 1123 # ignore working copy, use old state if present
1116 1124 if s in oldstate:
1117 1125 newstate[s] = oldstate[s]
1118 1126 continue
1119 1127 if not force:
1120 1128 raise util.Abort(
1121 1129 _("commit with new subrepo %s excluded") % s)
1122 1130 if wctx.sub(s).dirty(True):
1123 1131 if not self.ui.configbool('ui', 'commitsubrepos'):
1124 1132 raise util.Abort(
1125 1133 _("uncommitted changes in subrepo %s") % s,
1126 1134 hint=_("use --subrepos for recursive commit"))
1127 1135 subs.append(s)
1128 1136 commitsubs.add(s)
1129 1137 else:
1130 1138 bs = wctx.sub(s).basestate()
1131 1139 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1132 1140 if oldstate.get(s, (None, None, None))[1] != bs:
1133 1141 subs.append(s)
1134 1142
1135 1143 # check for removed subrepos
1136 1144 for p in wctx.parents():
1137 1145 r = [s for s in p.substate if s not in newstate]
1138 1146 subs += [s for s in r if match(s)]
1139 1147 if subs:
1140 1148 if (not match('.hgsub') and
1141 1149 '.hgsub' in (wctx.modified() + wctx.added())):
1142 1150 raise util.Abort(
1143 1151 _("can't commit subrepos without .hgsub"))
1144 1152 changes[0].insert(0, '.hgsubstate')
1145 1153
1146 1154 elif '.hgsub' in changes[2]:
1147 1155 # clean up .hgsubstate when .hgsub is removed
1148 1156 if ('.hgsubstate' in wctx and
1149 1157 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1150 1158 changes[2].insert(0, '.hgsubstate')
1151 1159
1152 1160 # make sure all explicit patterns are matched
1153 1161 if not force and match.files():
1154 1162 matched = set(changes[0] + changes[1] + changes[2])
1155 1163
1156 1164 for f in match.files():
1157 1165 if f == '.' or f in matched or f in wctx.substate:
1158 1166 continue
1159 1167 if f in changes[3]: # missing
1160 1168 fail(f, _('file not found!'))
1161 1169 if f in vdirs: # visited directory
1162 1170 d = f + '/'
1163 1171 for mf in matched:
1164 1172 if mf.startswith(d):
1165 1173 break
1166 1174 else:
1167 1175 fail(f, _("no match under directory!"))
1168 1176 elif f not in self.dirstate:
1169 1177 fail(f, _("file not tracked!"))
1170 1178
1171 1179 if (not force and not extra.get("close") and not merge
1172 1180 and not (changes[0] or changes[1] or changes[2])
1173 1181 and wctx.branch() == wctx.p1().branch()):
1174 1182 return None
1175 1183
1176 1184 ms = mergemod.mergestate(self)
1177 1185 for f in changes[0]:
1178 1186 if f in ms and ms[f] == 'u':
1179 1187 raise util.Abort(_("unresolved merge conflicts "
1180 1188 "(see hg help resolve)"))
1181 1189
1182 1190 cctx = context.workingctx(self, text, user, date, extra, changes)
1183 1191 if editor:
1184 1192 cctx._text = editor(self, cctx, subs)
1185 1193 edited = (text != cctx._text)
1186 1194
1187 1195 # commit subs and write new state
1188 1196 if subs:
1189 1197 for s in sorted(commitsubs):
1190 1198 sub = wctx.sub(s)
1191 1199 self.ui.status(_('committing subrepository %s\n') %
1192 1200 subrepo.subrelpath(sub))
1193 1201 sr = sub.commit(cctx._text, user, date)
1194 1202 newstate[s] = (newstate[s][0], sr)
1195 1203 subrepo.writestate(self, newstate)
1196 1204
1197 1205 # Save commit message in case this transaction gets rolled back
1198 1206 # (e.g. by a pretxncommit hook). Leave the content alone on
1199 1207 # the assumption that the user will use the same editor again.
1200 1208 msgfn = self.savecommitmessage(cctx._text)
1201 1209
1202 1210 p1, p2 = self.dirstate.parents()
1203 1211 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1204 1212 try:
1205 1213 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1206 1214 ret = self.commitctx(cctx, True)
1207 1215 except:
1208 1216 if edited:
1209 1217 self.ui.write(
1210 1218 _('note: commit message saved in %s\n') % msgfn)
1211 1219 raise
1212 1220
1213 1221 # update bookmarks, dirstate and mergestate
1214 1222 bookmarks.update(self, p1, ret)
1215 1223 for f in changes[0] + changes[1]:
1216 1224 self.dirstate.normal(f)
1217 1225 for f in changes[2]:
1218 1226 self.dirstate.drop(f)
1219 1227 self.dirstate.setparents(ret)
1220 1228 ms.reset()
1221 1229 finally:
1222 1230 wlock.release()
1223 1231
1224 1232 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1225 1233 return ret
1226 1234
1227 1235 def commitctx(self, ctx, error=False):
1228 1236 """Add a new revision to current repository.
1229 1237 Revision information is passed via the context argument.
1230 1238 """
1231 1239
1232 1240 tr = lock = None
1233 1241 removed = list(ctx.removed())
1234 1242 p1, p2 = ctx.p1(), ctx.p2()
1235 1243 user = ctx.user()
1236 1244
1237 1245 lock = self.lock()
1238 1246 try:
1239 1247 tr = self.transaction("commit")
1240 1248 trp = weakref.proxy(tr)
1241 1249
1242 1250 if ctx.files():
1243 1251 m1 = p1.manifest().copy()
1244 1252 m2 = p2.manifest()
1245 1253
1246 1254 # check in files
1247 1255 new = {}
1248 1256 changed = []
1249 1257 linkrev = len(self)
1250 1258 for f in sorted(ctx.modified() + ctx.added()):
1251 1259 self.ui.note(f + "\n")
1252 1260 try:
1253 1261 fctx = ctx[f]
1254 1262 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1255 1263 changed)
1256 1264 m1.set(f, fctx.flags())
1257 1265 except OSError, inst:
1258 1266 self.ui.warn(_("trouble committing %s!\n") % f)
1259 1267 raise
1260 1268 except IOError, inst:
1261 1269 errcode = getattr(inst, 'errno', errno.ENOENT)
1262 1270 if error or errcode and errcode != errno.ENOENT:
1263 1271 self.ui.warn(_("trouble committing %s!\n") % f)
1264 1272 raise
1265 1273 else:
1266 1274 removed.append(f)
1267 1275
1268 1276 # update manifest
1269 1277 m1.update(new)
1270 1278 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1271 1279 drop = [f for f in removed if f in m1]
1272 1280 for f in drop:
1273 1281 del m1[f]
1274 1282 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1275 1283 p2.manifestnode(), (new, drop))
1276 1284 files = changed + removed
1277 1285 else:
1278 1286 mn = p1.manifestnode()
1279 1287 files = []
1280 1288
1281 1289 # update changelog
1282 1290 self.changelog.delayupdate()
1283 1291 n = self.changelog.add(mn, files, ctx.description(),
1284 1292 trp, p1.node(), p2.node(),
1285 1293 user, ctx.date(), ctx.extra().copy())
1286 1294 p = lambda: self.changelog.writepending() and self.root or ""
1287 1295 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1288 1296 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1289 1297 parent2=xp2, pending=p)
1290 1298 self.changelog.finalize(trp)
1291 1299 # set the new commit is proper phase
1292 1300 targetphase = phases.newcommitphase(self.ui)
1293 1301 if targetphase:
1294 1302 # retract boundary do not alter parent changeset.
1295 1303 # if a parent have higher the resulting phase will
1296 1304 # be compliant anyway
1297 1305 #
1298 1306 # if minimal phase was 0 we don't need to retract anything
1299 1307 phases.retractboundary(self, targetphase, [n])
1300 1308 tr.close()
1301 1309 self.updatebranchcache()
1302 1310 return n
1303 1311 finally:
1304 1312 if tr:
1305 1313 tr.release()
1306 1314 lock.release()
1307 1315
1308 1316 def destroyed(self):
1309 1317 '''Inform the repository that nodes have been destroyed.
1310 1318 Intended for use by strip and rollback, so there's a common
1311 1319 place for anything that has to be done after destroying history.'''
1312 1320 # XXX it might be nice if we could take the list of destroyed
1313 1321 # nodes, but I don't see an easy way for rollback() to do that
1314 1322
1315 1323 # Ensure the persistent tag cache is updated. Doing it now
1316 1324 # means that the tag cache only has to worry about destroyed
1317 1325 # heads immediately after a strip/rollback. That in turn
1318 1326 # guarantees that "cachetip == currenttip" (comparing both rev
1319 1327 # and node) always means no nodes have been added or destroyed.
1320 1328
1321 1329 # XXX this is suboptimal when qrefresh'ing: we strip the current
1322 1330 # head, refresh the tag cache, then immediately add a new head.
1323 1331 # But I think doing it this way is necessary for the "instant
1324 1332 # tag cache retrieval" case to work.
1325 1333 self.invalidatecaches()
1326 1334
1327 1335 # Discard all cache entries to force reloading everything.
1328 1336 self._filecache.clear()
1329 1337
1330 1338 def walk(self, match, node=None):
1331 1339 '''
1332 1340 walk recursively through the directory tree or a given
1333 1341 changeset, finding all files matched by the match
1334 1342 function
1335 1343 '''
1336 1344 return self[node].walk(match)
1337 1345
1338 1346 def status(self, node1='.', node2=None, match=None,
1339 1347 ignored=False, clean=False, unknown=False,
1340 1348 listsubrepos=False):
1341 1349 """return status of files between two nodes or node and working directory
1342 1350
1343 1351 If node1 is None, use the first dirstate parent instead.
1344 1352 If node2 is None, compare node1 with working directory.
1345 1353 """
1346 1354
1347 1355 def mfmatches(ctx):
1348 1356 mf = ctx.manifest().copy()
1349 1357 for fn in mf.keys():
1350 1358 if not match(fn):
1351 1359 del mf[fn]
1352 1360 return mf
1353 1361
1354 1362 if isinstance(node1, context.changectx):
1355 1363 ctx1 = node1
1356 1364 else:
1357 1365 ctx1 = self[node1]
1358 1366 if isinstance(node2, context.changectx):
1359 1367 ctx2 = node2
1360 1368 else:
1361 1369 ctx2 = self[node2]
1362 1370
1363 1371 working = ctx2.rev() is None
1364 1372 parentworking = working and ctx1 == self['.']
1365 1373 match = match or matchmod.always(self.root, self.getcwd())
1366 1374 listignored, listclean, listunknown = ignored, clean, unknown
1367 1375
1368 1376 # load earliest manifest first for caching reasons
1369 1377 if not working and ctx2.rev() < ctx1.rev():
1370 1378 ctx2.manifest()
1371 1379
1372 1380 if not parentworking:
1373 1381 def bad(f, msg):
1374 1382 # 'f' may be a directory pattern from 'match.files()',
1375 1383 # so 'f not in ctx1' is not enough
1376 1384 if f not in ctx1 and f not in ctx1.dirs():
1377 1385 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1378 1386 match.bad = bad
1379 1387
1380 1388 if working: # we need to scan the working dir
1381 1389 subrepos = []
1382 1390 if '.hgsub' in self.dirstate:
1383 1391 subrepos = ctx2.substate.keys()
1384 1392 s = self.dirstate.status(match, subrepos, listignored,
1385 1393 listclean, listunknown)
1386 1394 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1387 1395
1388 1396 # check for any possibly clean files
1389 1397 if parentworking and cmp:
1390 1398 fixup = []
1391 1399 # do a full compare of any files that might have changed
1392 1400 for f in sorted(cmp):
1393 1401 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1394 1402 or ctx1[f].cmp(ctx2[f])):
1395 1403 modified.append(f)
1396 1404 else:
1397 1405 fixup.append(f)
1398 1406
1399 1407 # update dirstate for files that are actually clean
1400 1408 if fixup:
1401 1409 if listclean:
1402 1410 clean += fixup
1403 1411
1404 1412 try:
1405 1413 # updating the dirstate is optional
1406 1414 # so we don't wait on the lock
1407 1415 wlock = self.wlock(False)
1408 1416 try:
1409 1417 for f in fixup:
1410 1418 self.dirstate.normal(f)
1411 1419 finally:
1412 1420 wlock.release()
1413 1421 except error.LockError:
1414 1422 pass
1415 1423
1416 1424 if not parentworking:
1417 1425 mf1 = mfmatches(ctx1)
1418 1426 if working:
1419 1427 # we are comparing working dir against non-parent
1420 1428 # generate a pseudo-manifest for the working dir
1421 1429 mf2 = mfmatches(self['.'])
1422 1430 for f in cmp + modified + added:
1423 1431 mf2[f] = None
1424 1432 mf2.set(f, ctx2.flags(f))
1425 1433 for f in removed:
1426 1434 if f in mf2:
1427 1435 del mf2[f]
1428 1436 else:
1429 1437 # we are comparing two revisions
1430 1438 deleted, unknown, ignored = [], [], []
1431 1439 mf2 = mfmatches(ctx2)
1432 1440
1433 1441 modified, added, clean = [], [], []
1434 1442 for fn in mf2:
1435 1443 if fn in mf1:
1436 1444 if (fn not in deleted and
1437 1445 (mf1.flags(fn) != mf2.flags(fn) or
1438 1446 (mf1[fn] != mf2[fn] and
1439 1447 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1440 1448 modified.append(fn)
1441 1449 elif listclean:
1442 1450 clean.append(fn)
1443 1451 del mf1[fn]
1444 1452 elif fn not in deleted:
1445 1453 added.append(fn)
1446 1454 removed = mf1.keys()
1447 1455
1448 1456 if working and modified and not self.dirstate._checklink:
1449 1457 # Symlink placeholders may get non-symlink-like contents
1450 1458 # via user error or dereferencing by NFS or Samba servers,
1451 1459 # so we filter out any placeholders that don't look like a
1452 1460 # symlink
1453 1461 sane = []
1454 1462 for f in modified:
1455 1463 if ctx2.flags(f) == 'l':
1456 1464 d = ctx2[f].data()
1457 1465 if len(d) >= 1024 or '\n' in d or util.binary(d):
1458 1466 self.ui.debug('ignoring suspect symlink placeholder'
1459 1467 ' "%s"\n' % f)
1460 1468 continue
1461 1469 sane.append(f)
1462 1470 modified = sane
1463 1471
1464 1472 r = modified, added, removed, deleted, unknown, ignored, clean
1465 1473
1466 1474 if listsubrepos:
1467 1475 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1468 1476 if working:
1469 1477 rev2 = None
1470 1478 else:
1471 1479 rev2 = ctx2.substate[subpath][1]
1472 1480 try:
1473 1481 submatch = matchmod.narrowmatcher(subpath, match)
1474 1482 s = sub.status(rev2, match=submatch, ignored=listignored,
1475 1483 clean=listclean, unknown=listunknown,
1476 1484 listsubrepos=True)
1477 1485 for rfiles, sfiles in zip(r, s):
1478 1486 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1479 1487 except error.LookupError:
1480 1488 self.ui.status(_("skipping missing subrepository: %s\n")
1481 1489 % subpath)
1482 1490
1483 1491 for l in r:
1484 1492 l.sort()
1485 1493 return r
1486 1494
1487 1495 def heads(self, start=None):
1488 1496 heads = self.changelog.heads(start)
1489 1497 # sort the output in rev descending order
1490 1498 return sorted(heads, key=self.changelog.rev, reverse=True)
1491 1499
1492 1500 def branchheads(self, branch=None, start=None, closed=False):
1493 1501 '''return a (possibly filtered) list of heads for the given branch
1494 1502
1495 1503 Heads are returned in topological order, from newest to oldest.
1496 1504 If branch is None, use the dirstate branch.
1497 1505 If start is not None, return only heads reachable from start.
1498 1506 If closed is True, return heads that are marked as closed as well.
1499 1507 '''
1500 1508 if branch is None:
1501 1509 branch = self[None].branch()
1502 1510 branches = self.branchmap()
1503 1511 if branch not in branches:
1504 1512 return []
1505 1513 # the cache returns heads ordered lowest to highest
1506 1514 bheads = list(reversed(branches[branch]))
1507 1515 if start is not None:
1508 1516 # filter out the heads that cannot be reached from startrev
1509 1517 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1510 1518 bheads = [h for h in bheads if h in fbheads]
1511 1519 if not closed:
1512 1520 bheads = [h for h in bheads if
1513 1521 ('close' not in self.changelog.read(h)[5])]
1514 1522 return bheads
1515 1523
1516 1524 def branches(self, nodes):
1517 1525 if not nodes:
1518 1526 nodes = [self.changelog.tip()]
1519 1527 b = []
1520 1528 for n in nodes:
1521 1529 t = n
1522 1530 while True:
1523 1531 p = self.changelog.parents(n)
1524 1532 if p[1] != nullid or p[0] == nullid:
1525 1533 b.append((t, n, p[0], p[1]))
1526 1534 break
1527 1535 n = p[0]
1528 1536 return b
1529 1537
1530 1538 def between(self, pairs):
1531 1539 r = []
1532 1540
1533 1541 for top, bottom in pairs:
1534 1542 n, l, i = top, [], 0
1535 1543 f = 1
1536 1544
1537 1545 while n != bottom and n != nullid:
1538 1546 p = self.changelog.parents(n)[0]
1539 1547 if i == f:
1540 1548 l.append(n)
1541 1549 f = f * 2
1542 1550 n = p
1543 1551 i += 1
1544 1552
1545 1553 r.append(l)
1546 1554
1547 1555 return r
1548 1556
1549 1557 def pull(self, remote, heads=None, force=False):
1550 1558 lock = self.lock()
1551 1559 try:
1552 1560 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1553 1561 force=force)
1554 1562 common, fetch, rheads = tmp
1555 1563 if not fetch:
1556 1564 self.ui.status(_("no changes found\n"))
1557 1565 added = []
1558 1566 result = 0
1559 1567 else:
1560 1568 if heads is None and list(common) == [nullid]:
1561 1569 self.ui.status(_("requesting all changes\n"))
1562 1570 elif heads is None and remote.capable('changegroupsubset'):
1563 1571 # issue1320, avoid a race if remote changed after discovery
1564 1572 heads = rheads
1565 1573
1566 1574 if remote.capable('getbundle'):
1567 1575 cg = remote.getbundle('pull', common=common,
1568 1576 heads=heads or rheads)
1569 1577 elif heads is None:
1570 1578 cg = remote.changegroup(fetch, 'pull')
1571 1579 elif not remote.capable('changegroupsubset'):
1572 1580 raise util.Abort(_("partial pull cannot be done because "
1573 1581 "other repository doesn't support "
1574 1582 "changegroupsubset."))
1575 1583 else:
1576 1584 cg = remote.changegroupsubset(fetch, heads, 'pull')
1577 1585 clstart = len(self.changelog)
1578 1586 result = self.addchangegroup(cg, 'pull', remote.url())
1579 1587 clend = len(self.changelog)
1580 1588 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1581 1589
1582 1590 # compute target subset
1583 1591 if heads is None:
1584 1592 # We pulled every thing possible
1585 1593 # sync on everything common
1586 1594 subset = common + added
1587 1595 else:
1588 1596 # We pulled a specific subset
1589 1597 # sync on this subset
1590 1598 subset = heads
1591 1599
1592 1600 # Get remote phases data from remote
1593 1601 remotephases = remote.listkeys('phases')
1594 1602 publishing = bool(remotephases.get('publishing', False))
1595 1603 if remotephases and not publishing:
1596 1604 # remote is new and unpublishing
1597 1605 pheads, _dr = phases.analyzeremotephases(self, subset,
1598 1606 remotephases)
1599 1607 phases.advanceboundary(self, phases.public, pheads)
1600 1608 phases.advanceboundary(self, phases.draft, subset)
1601 1609 else:
1602 1610 # Remote is old or publishing all common changesets
1603 1611 # should be seen as public
1604 1612 phases.advanceboundary(self, phases.public, subset)
1605 1613 finally:
1606 1614 lock.release()
1607 1615
1608 1616 return result
1609 1617
1610 1618 def checkpush(self, force, revs):
1611 1619 """Extensions can override this function if additional checks have
1612 1620 to be performed before pushing, or call it if they override push
1613 1621 command.
1614 1622 """
1615 1623 pass
1616 1624
1617 1625 def push(self, remote, force=False, revs=None, newbranch=False):
1618 1626 '''Push outgoing changesets (limited by revs) from the current
1619 1627 repository to remote. Return an integer:
1620 1628 - None means nothing to push
1621 1629 - 0 means HTTP error
1622 1630 - 1 means we pushed and remote head count is unchanged *or*
1623 1631 we have outgoing changesets but refused to push
1624 1632 - other values as described by addchangegroup()
1625 1633 '''
1626 1634 # there are two ways to push to remote repo:
1627 1635 #
1628 1636 # addchangegroup assumes local user can lock remote
1629 1637 # repo (local filesystem, old ssh servers).
1630 1638 #
1631 1639 # unbundle assumes local user cannot lock remote repo (new ssh
1632 1640 # servers, http servers).
1633 1641
1634 1642 # get local lock as we might write phase data
1635 1643 locallock = self.lock()
1636 1644 try:
1637 1645 self.checkpush(force, revs)
1638 1646 lock = None
1639 1647 unbundle = remote.capable('unbundle')
1640 1648 if not unbundle:
1641 1649 lock = remote.lock()
1642 1650 try:
1643 1651 # discovery
1644 1652 fci = discovery.findcommonincoming
1645 1653 commoninc = fci(self, remote, force=force)
1646 1654 common, inc, remoteheads = commoninc
1647 1655 fco = discovery.findcommonoutgoing
1648 1656 outgoing = fco(self, remote, onlyheads=revs,
1649 1657 commoninc=commoninc, force=force)
1650 1658
1651 1659
1652 1660 if not outgoing.missing:
1653 1661 # nothing to push
1654 1662 scmutil.nochangesfound(self.ui, outgoing.excluded)
1655 1663 ret = None
1656 1664 else:
1657 1665 # something to push
1658 1666 if not force:
1659 1667 discovery.checkheads(self, remote, outgoing,
1660 1668 remoteheads, newbranch,
1661 1669 bool(inc))
1662 1670
1663 1671 # create a changegroup from local
1664 1672 if revs is None and not outgoing.excluded:
1665 1673 # push everything,
1666 1674 # use the fast path, no race possible on push
1667 1675 cg = self._changegroup(outgoing.missing, 'push')
1668 1676 else:
1669 1677 cg = self.getlocalbundle('push', outgoing)
1670 1678
1671 1679 # apply changegroup to remote
1672 1680 if unbundle:
1673 1681 # local repo finds heads on server, finds out what
1674 1682 # revs it must push. once revs transferred, if server
1675 1683 # finds it has different heads (someone else won
1676 1684 # commit/push race), server aborts.
1677 1685 if force:
1678 1686 remoteheads = ['force']
1679 1687 # ssh: return remote's addchangegroup()
1680 1688 # http: return remote's addchangegroup() or 0 for error
1681 1689 ret = remote.unbundle(cg, remoteheads, 'push')
1682 1690 else:
1683 1691 # we return an integer indicating remote head count change
1684 1692 ret = remote.addchangegroup(cg, 'push', self.url())
1685 1693
1686 1694 if ret:
1687 1695 # push succeed, synchonize target of the push
1688 1696 cheads = outgoing.missingheads
1689 1697 elif revs is None:
1690 1698 # All out push fails. synchronize all common
1691 1699 cheads = outgoing.commonheads
1692 1700 else:
1693 1701 # I want cheads = heads(::missingheads and ::commonheads)
1694 1702 # (missingheads is revs with secret changeset filtered out)
1695 1703 #
1696 1704 # This can be expressed as:
1697 1705 # cheads = ( (missingheads and ::commonheads)
1698 1706 # + (commonheads and ::missingheads))"
1699 1707 # )
1700 1708 #
1701 1709 # while trying to push we already computed the following:
1702 1710 # common = (::commonheads)
1703 1711 # missing = ((commonheads::missingheads) - commonheads)
1704 1712 #
1705 1713 # We can pick:
1706 1714 # * missingheads part of comon (::commonheads)
1707 1715 common = set(outgoing.common)
1708 1716 cheads = [node for node in revs if node in common]
1709 1717 # and
1710 1718 # * commonheads parents on missing
1711 1719 revset = self.set('%ln and parents(roots(%ln))',
1712 1720 outgoing.commonheads,
1713 1721 outgoing.missing)
1714 1722 cheads.extend(c.node() for c in revset)
1715 1723 # even when we don't push, exchanging phase data is useful
1716 1724 remotephases = remote.listkeys('phases')
1717 1725 if not remotephases: # old server or public only repo
1718 1726 phases.advanceboundary(self, phases.public, cheads)
1719 1727 # don't push any phase data as there is nothing to push
1720 1728 else:
1721 1729 ana = phases.analyzeremotephases(self, cheads, remotephases)
1722 1730 pheads, droots = ana
1723 1731 ### Apply remote phase on local
1724 1732 if remotephases.get('publishing', False):
1725 1733 phases.advanceboundary(self, phases.public, cheads)
1726 1734 else: # publish = False
1727 1735 phases.advanceboundary(self, phases.public, pheads)
1728 1736 phases.advanceboundary(self, phases.draft, cheads)
1729 1737 ### Apply local phase on remote
1730 1738
1731 1739 # Get the list of all revs draft on remote by public here.
1732 1740 # XXX Beware that revset break if droots is not strictly
1733 1741 # XXX root we may want to ensure it is but it is costly
1734 1742 outdated = self.set('heads((%ln::%ln) and public())',
1735 1743 droots, cheads)
1736 1744 for newremotehead in outdated:
1737 1745 r = remote.pushkey('phases',
1738 1746 newremotehead.hex(),
1739 1747 str(phases.draft),
1740 1748 str(phases.public))
1741 1749 if not r:
1742 1750 self.ui.warn(_('updating %s to public failed!\n')
1743 1751 % newremotehead)
1744 1752 finally:
1745 1753 if lock is not None:
1746 1754 lock.release()
1747 1755 finally:
1748 1756 locallock.release()
1749 1757
1750 1758 self.ui.debug("checking for updated bookmarks\n")
1751 1759 rb = remote.listkeys('bookmarks')
1752 1760 for k in rb.keys():
1753 1761 if k in self._bookmarks:
1754 1762 nr, nl = rb[k], hex(self._bookmarks[k])
1755 1763 if nr in self:
1756 1764 cr = self[nr]
1757 1765 cl = self[nl]
1758 1766 if cl in cr.descendants():
1759 1767 r = remote.pushkey('bookmarks', k, nr, nl)
1760 1768 if r:
1761 1769 self.ui.status(_("updating bookmark %s\n") % k)
1762 1770 else:
1763 1771 self.ui.warn(_('updating bookmark %s'
1764 1772 ' failed!\n') % k)
1765 1773
1766 1774 return ret
1767 1775
1768 1776 def changegroupinfo(self, nodes, source):
1769 1777 if self.ui.verbose or source == 'bundle':
1770 1778 self.ui.status(_("%d changesets found\n") % len(nodes))
1771 1779 if self.ui.debugflag:
1772 1780 self.ui.debug("list of changesets:\n")
1773 1781 for node in nodes:
1774 1782 self.ui.debug("%s\n" % hex(node))
1775 1783
1776 1784 def changegroupsubset(self, bases, heads, source):
1777 1785 """Compute a changegroup consisting of all the nodes that are
1778 1786 descendants of any of the bases and ancestors of any of the heads.
1779 1787 Return a chunkbuffer object whose read() method will return
1780 1788 successive changegroup chunks.
1781 1789
1782 1790 It is fairly complex as determining which filenodes and which
1783 1791 manifest nodes need to be included for the changeset to be complete
1784 1792 is non-trivial.
1785 1793
1786 1794 Another wrinkle is doing the reverse, figuring out which changeset in
1787 1795 the changegroup a particular filenode or manifestnode belongs to.
1788 1796 """
1789 1797 cl = self.changelog
1790 1798 if not bases:
1791 1799 bases = [nullid]
1792 1800 csets, bases, heads = cl.nodesbetween(bases, heads)
1793 1801 # We assume that all ancestors of bases are known
1794 1802 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1795 1803 return self._changegroupsubset(common, csets, heads, source)
1796 1804
1797 1805 def getlocalbundle(self, source, outgoing):
1798 1806 """Like getbundle, but taking a discovery.outgoing as an argument.
1799 1807
1800 1808 This is only implemented for local repos and reuses potentially
1801 1809 precomputed sets in outgoing."""
1802 1810 if not outgoing.missing:
1803 1811 return None
1804 1812 return self._changegroupsubset(outgoing.common,
1805 1813 outgoing.missing,
1806 1814 outgoing.missingheads,
1807 1815 source)
1808 1816
1809 1817 def getbundle(self, source, heads=None, common=None):
1810 1818 """Like changegroupsubset, but returns the set difference between the
1811 1819 ancestors of heads and the ancestors common.
1812 1820
1813 1821 If heads is None, use the local heads. If common is None, use [nullid].
1814 1822
1815 1823 The nodes in common might not all be known locally due to the way the
1816 1824 current discovery protocol works.
1817 1825 """
1818 1826 cl = self.changelog
1819 1827 if common:
1820 1828 nm = cl.nodemap
1821 1829 common = [n for n in common if n in nm]
1822 1830 else:
1823 1831 common = [nullid]
1824 1832 if not heads:
1825 1833 heads = cl.heads()
1826 1834 return self.getlocalbundle(source,
1827 1835 discovery.outgoing(cl, common, heads))
1828 1836
    def _changegroupsubset(self, commonrevs, csets, heads, source):
        """Build a bundle10 changegroup for csets, skipping anything whose
        linkrev is in commonrevs.

        commonrevs: set of changelog revs assumed known to the receiver
        csets: changeset nodes to send
        heads: requested heads (a list; sorted in place below)
        source: operation name, passed to hooks and progress output
        """

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        # mutable cells shared with the lookup() closure below:
        # fstate = [current filename, {filenode: owning changeset node}]
        fstate = ['', {}]
        count = [0]  # progress counter, reset per phase

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            return [n for n in missing
                    if revlog.linkrev(revlog.rev(n)) not in commonrevs]

        def lookup(revlog, x):
            # Callback used by the bundler: map node x of the given revlog to
            # the changeset node that introduced it.  Walking the changelog
            # first also collects the manifests (mfs) and file nodes (fnodes)
            # the later phases need.
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(csets))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                # filelog: fstate was set by gengroup() before this call
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

            if csets:
                self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1925 1933
1926 1934 def changegroup(self, basenodes, source):
1927 1935 # to avoid a race we use changegroupsubset() (issue1320)
1928 1936 return self.changegroupsubset(basenodes, self.heads(), source)
1929 1937
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}  # manifest node -> changeset node that introduced it
        changedfiles = set()
        # mutable cells shared with the lookup() closure below
        fstate = ['']  # [current filename], for progress display only
        count = [0]    # progress counter, reset per phase

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        # changelog revs of everything we are sending
        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            # nodes of `log` whose linkrev points at an outgoing changeset
            return [log.node(r) for r in log if log.linkrev(r) in revset]

        def lookup(revlog, x):
            # Callback used by the bundler: map node x of the given revlog to
            # the changeset node that introduced it, collecting manifests and
            # changed files while walking the changelog.
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(nodes))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2014 2022
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # linkrev callback for changelog chunks: next rev to be added
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            # linkrev callback for manifest/filelog chunks
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                # simple stateful progress callback handed to the unbundler
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            # srccontent: every changeset node carried by the bundle (used
            # for phase handling below)
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            # efiles becomes the expected number of distinct changed files
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    # tick off the filenodes the validation pass demanded
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            # anything left in needfiles was promised by a manifest but
            # never delivered -> corrupt/incomplete bundle
            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            # dh = head-count delta, ignoring newly-closed branch heads
            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                             % (changesets, revisions, files, htext))

            if changesets > 0:
                # let pretxnchangegroup hooks see the pending changelog
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old server can not push the boundary themself.
                # New server won't push the boundary if changeset already
                # existed locally as secrete
                #
                # We should not use added here but the list of all change in
                # the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alter behavior during push
                #
                # strip should not touch boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                # hooks run once the repo lock is released
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
2205 2213
    def stream_in(self, remote, requirements):
        """Clone by copying raw store files streamed from remote.

        Protocol (line-oriented header, then raw file data):
        - first line: integer status (0 ok, 1 forbidden, 2 remote lock failed)
        - second line: "<total_files> <total_bytes>"
        - per file: "<name>\\0<size>" followed by exactly size bytes

        requirements is the set of format requirements implied by the
        stream; it is merged with our non-format requirements and written
        out.  Returns len(heads) + 1, mirroring pull()'s convention.
        """
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                # avoid division by zero in the rate computation below
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            # drop all cached state; the store changed underneath us
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
2264 2272
2265 2273 def clone(self, remote, heads=[], stream=False):
2266 2274 '''clone remote repository.
2267 2275
2268 2276 keyword arguments:
2269 2277 heads: list of revs to clone (forces use of pull)
2270 2278 stream: use streaming clone if possible'''
2271 2279
2272 2280 # now, all clients that can request uncompressed clones can
2273 2281 # read repo formats supported by all servers that can serve
2274 2282 # them.
2275 2283
2276 2284 # if revlog format changes, client will have to check version
2277 2285 # and format flags on "stream" capability, and use
2278 2286 # uncompressed only if compatible.
2279 2287
2280 2288 if stream and not heads:
2281 2289 # 'stream' means remote revlog format is revlogv1 only
2282 2290 if remote.capable('stream'):
2283 2291 return self.stream_in(remote, set(('revlogv1',)))
2284 2292 # otherwise, 'streamreqs' contains the remote revlog format
2285 2293 streamreqs = remote.capable('streamreqs')
2286 2294 if streamreqs:
2287 2295 streamreqs = set(streamreqs.split(','))
2288 2296 # if we support it, stream in and adjust our requirements
2289 2297 if not streamreqs - self.supportedformats:
2290 2298 return self.stream_in(remote, streamreqs)
2291 2299 return self.pull(remote, heads)
2292 2300
2293 2301 def pushkey(self, namespace, key, old, new):
2294 2302 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2295 2303 old=old, new=new)
2296 2304 ret = pushkey.push(self, namespace, key, old, new)
2297 2305 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2298 2306 ret=ret)
2299 2307 return ret
2300 2308
2301 2309 def listkeys(self, namespace):
2302 2310 self.hook('prelistkeys', throw=True, namespace=namespace)
2303 2311 values = pushkey.list(self, namespace)
2304 2312 self.hook('listkeys', namespace=namespace, values=values)
2305 2313 return values
2306 2314
2307 2315 def debugwireargs(self, one, two, three=None, four=None, five=None):
2308 2316 '''used to test argument passing over the wire'''
2309 2317 return "%s %s %s %s %s" % (one, two, three, four, five)
2310 2318
2311 2319 def savecommitmessage(self, text):
2312 2320 fp = self.opener('last-message.txt', 'wb')
2313 2321 try:
2314 2322 fp.write(text)
2315 2323 finally:
2316 2324 fp.close()
2317 2325 return self.pathto(fp.name[len(self.root)+1:])
2318 2326
def aftertrans(files):
    """Return a callback that renames every (src, dest) pair in files.

    Defined at module level, rather than as a closure over repo state, to
    avoid circular references so destructors work."""
    pending = [tuple(t) for t in files]
    def renameall():
        for src, dest in pending:
            util.rename(src, dest)
    return renameall
2326 2334
def undoname(fn):
    """Map a journal file path to the corresponding undo file path."""
    directory, basename = os.path.split(fn)
    assert basename.startswith('journal')
    # rewrite only the leading 'journal' prefix of the basename
    undobase = basename.replace('journal', 'undo', 1)
    return os.path.join(directory, undobase)
2331 2339
def instance(ui, path, create):
    # module-level entry point: open (or create, when create is true) a
    # local repository at the given path/url
    return localrepository(ui, util.urllocalpath(path), create)
2334 2342
def islocal(path):
    # repositories handled by this module are always local
    return True
@@ -1,848 +1,859 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 import util, error, osutil, revset, similar, encoding
10 10 import match as matchmod
11 11 import os, errno, re, stat, sys, glob
12 12
def nochangesfound(ui, secretlist=None):
    '''report no changes for push/pull'''
    if not secretlist:
        ui.status(_("no changes found\n"))
    else:
        # mention how many changesets were skipped for being secret
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
20 20
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    for banned in ('\r', '\n'):
        if banned in f:
            raise util.Abort(
                _("'\\n' and '\\r' disallowed in filenames: %r") % f)
25 25
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %r" % (msg, f)
    if abort:
        raise util.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
37 37
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    value = ui.config('ui', 'portablefilenames', 'warn')
    lowered = value.lower()
    asbool = util.parsebool(value)
    # always abort on Windows itself, regardless of configuration
    abort = lowered == 'abort' or os.name == 'nt'
    warn = asbool or lowered == 'warn'
    # anything that is neither a boolean nor one of the keywords is invalid
    if asbool is None and not (warn or abort or lowered == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % value)
    return abort, warn
50 50
class casecollisionauditor(object):
    """Detect filenames that collide when case-folded.

    Seeded with existing tracked filenames; each call records one more
    filename and warns (or aborts, when abort=True) if its case-folded
    form matches a different, already-seen name.
    """
    def __init__(self, ui, abort, existingiter):
        self._ui = ui
        self._abort = abort  # True: raise util.Abort; False: warn only
        # case-folded name -> name as first seen
        self._map = {}
        for f in existingiter:
            self._map[encoding.lower(f)] = f

    def __call__(self, f):
        """Record f, flagging a possible case-folding collision."""
        fl = encoding.lower(f)
        # fix: the local alias used to be named 'map', shadowing the builtin
        seen = self._map
        if fl in seen and seen[fl] != f:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise util.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        seen[fl] = f
68 68
class pathauditor(object):
    '''ensure that a filesystem path contains no banned components.
    the following properties of a path are checked:

    - ends with a directory separator
    - under top-level .hg
    - starts at the root of a windows drive
    - contains ".."
    - traverses a symlink (e.g. a/symlink_here/b)
    - inside a nested repository (a callback can be used to approve
      some nested repositories, e.g., subrepositories)
    '''

    def __init__(self, root, callback=None):
        # caches of already-validated paths / parent directories
        self.audited = set()
        self.auditeddir = set()
        self.root = root
        self.callback = callback
        # on case-insensitive filesystems, cache paths case-folded so that
        # 'Foo' and 'foo' hit the same cache entry
        if os.path.lexists(root) and not util.checkcase(root):
            self.normcase = util.normcase
        else:
            self.normcase = lambda x: x

    def __call__(self, path):
        '''Check the relative path.
        path may contain a pattern (e.g. foodir/**.txt)'''

        path = util.localpath(path)
        normpath = self.normcase(path)
        if normpath in self.audited:
            return
        # AIX ignores "/" at end of path, others raise EISDIR.
        if util.endswithsep(path):
            raise util.Abort(_("path ends in directory separator: %s") % path)
        parts = util.splitpath(path)
        if (os.path.splitdrive(path)[0]
            or parts[0].lower() in ('.hg', '.hg.', '')
            or os.pardir in parts):
            raise util.Abort(_("path contains illegal component: %s") % path)
        if '.hg' in path.lower():
            # reject any path containing a '.hg'/'.hg.' component past the
            # first one (first component was already handled above)
            lparts = [p.lower() for p in parts]
            for p in '.hg', '.hg.':
                if p in lparts[1:]:
                    pos = lparts.index(p)
                    base = os.path.join(*parts[:pos])
                    raise util.Abort(_("path '%s' is inside nested repo %r")
                                     % (path, base))

        normparts = util.splitpath(normpath)
        assert len(parts) == len(normparts)

        # walk the parent directories from deepest to shallowest, checking
        # each for symlinks and nested repositories
        parts.pop()
        normparts.pop()
        prefixes = []
        while parts:
            prefix = os.sep.join(parts)
            normprefix = os.sep.join(normparts)
            if normprefix in self.auditeddir:
                break
            curpath = os.path.join(self.root, prefix)
            try:
                st = os.lstat(curpath)
            except OSError, err:
                # EINVAL can be raised as invalid path syntax under win32.
                # They must be ignored for patterns can be checked too.
                if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
                    raise
            else:
                if stat.S_ISLNK(st.st_mode):
                    raise util.Abort(
                        _('path %r traverses symbolic link %r')
                        % (path, prefix))
                elif (stat.S_ISDIR(st.st_mode) and
                      os.path.isdir(os.path.join(curpath, '.hg'))):
                    # the callback may approve e.g. subrepositories
                    if not self.callback or not self.callback(curpath):
                        raise util.Abort(_("path '%s' is inside nested repo %r") %
                                         (path, prefix))
            prefixes.append(normprefix)
            parts.pop()
            normparts.pop()

        self.audited.add(normpath)
        # only add prefixes to the cache after checking everything: we don't
        # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
        self.auditeddir.update(prefixes)
154 154
class abstractopener(object):
    """Abstract base class; cannot be instantiated"""

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def _writedata(self, path, mode, data):
        # shared body for write() and append(); always closes the handle
        fh = self(path, mode)
        try:
            return fh.write(data)
        finally:
            fh.close()

    def read(self, path):
        """Return the entire binary content of path."""
        fh = self(path, 'rb')
        try:
            return fh.read()
        finally:
            fh.close()

    def write(self, path, data):
        """Truncate path and write data to it."""
        return self._writedata(path, 'wb', data)

    def append(self, path, data):
        """Append data to the end of path."""
        return self._writedata(path, 'ab', data)
182 182
class opener(abstractopener):
    '''Open files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True):
        # base: directory all paths are resolved against
        self.base = base
        self._audit = audit
        if audit:
            self.auditor = pathauditor(base)
        else:
            self.auditor = util.always
        self.createmode = None
        # lazily determined: whether st_nlink can be trusted on this fs
        self._trustnlink = None

    @util.propertycache
    def _cansymlink(self):
        # whether the base directory's filesystem supports symlinks
        return util.checklink(self.base)

    def _fixfilemode(self, name):
        # apply the configured creation mode to a newly created file
        if self.createmode is None:
            return
        os.chmod(name, self.createmode & 0666)

    def __call__(self, path, mode="r", text=False, atomictemp=False):
        """Open path (relative to base) and return a file object.

        For write modes, hardlinked files are copied first so the other
        link is not modified through us (copy-on-write semantics)."""
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise util.Abort("%s: %r" % (r, path))
            self.auditor(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        nlink = -1
        dirname, basename = os.path.split(f)
        # If basename is empty, then the path is malformed because it points
        # to a directory. Let the posixfile() call below raise IOError.
        if basename and mode not in ('r', 'rb'):
            if atomictemp:
                if not os.path.isdir(dirname):
                    util.makedirs(dirname, self.createmode)
                return util.atomictempfile(f, mode, self.createmode)
            try:
                if 'w' in mode:
                    util.unlink(f)
                    nlink = 0
                else:
                    # nlinks() may behave differently for files on Windows
                    # shares if the file is open.
                    fd = util.posixfile(f)
                    nlink = util.nlinks(f)
                    if nlink < 1:
                        nlink = 2 # force mktempcopy (issue1922)
                    fd.close()
            except (OSError, IOError), e:
                if e.errno != errno.ENOENT:
                    raise
                nlink = 0
                if not os.path.isdir(dirname):
                    util.makedirs(dirname, self.createmode)
            if nlink > 0:
                if self._trustnlink is None:
                    self._trustnlink = nlink > 1 or util.checknlink(f)
                if nlink > 1 or not self._trustnlink:
                    # break the hardlink by replacing f with a copy
                    util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        """Create a symlink at dst pointing to src; where symlinks are not
        supported, write src as the file's content instead."""
        self.auditor(dst)
        linkname = self.join(dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        dirname = os.path.dirname(linkname)
        if not os.path.exists(dirname):
            util.makedirs(dirname, self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError, err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            f = self(dst, "w")
            f.write(src)
            f.close()
            self._fixfilemode(dst)

    def audit(self, path):
        # run the path auditor explicitly (without opening the file)
        self.auditor(path)

    def join(self, path):
        # resolve path against the opener's base directory
        return os.path.join(self.base, path)
285
class filteropener(abstractopener):
    '''Wrapper opener that passes every filename through a filter
    function before delegating to the wrapped opener.'''

    def __init__(self, opener, filter):
        # keep the delegate opener and the name-rewriting function
        self._orig = opener
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        filtered = self._filter(path)
        return self._orig(filtered, *args, **kwargs)
292 295
def canonpath(root, cwd, myname, auditor=None):
    '''return the canonical path of myname, given cwd and root'''
    if util.endswithsep(root):
        rootsep = root
    else:
        rootsep = root + os.sep
    name = myname
    if not os.path.isabs(name):
        # resolve relative names against root/cwd
        name = os.path.join(root, cwd, name)
    name = os.path.normpath(name)
    if auditor is None:
        auditor = pathauditor(root)
    if name != rootsep and name.startswith(rootsep):
        # fast path: name lies textually under root
        name = name[len(rootsep):]
        auditor(name)
        return util.pconvert(name)
    elif name == root:
        return ''
    else:
        # Determine whether `name' is in the hierarchy at or beneath `root',
        # by iterating name=dirname(name) until that causes no change (can't
        # check name == '/', because that doesn't work on windows). For each
        # `name', compare dev/inode numbers. If they match, the list `rel'
        # holds the reversed list of components making up the relative file
        # name we want.
        root_st = os.stat(root)
        rel = []
        while True:
            try:
                name_st = os.stat(name)
            except OSError:
                name_st = None
            if name_st and util.samestat(name_st, root_st):
                if not rel:
                    # name was actually the same as root (maybe a symlink)
                    return ''
                rel.reverse()
                name = os.path.join(*rel)
                auditor(name)
                return util.pconvert(name)
            dirname, basename = os.path.split(name)
            rel.append(basename)
            if dirname == name:
                # reached the filesystem root without matching root's inode
                break
            name = dirname

    raise util.Abort('%s not under root' % myname)
340 343
341 344 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
342 345 '''yield every hg repository under path, recursively.'''
343 346 def errhandler(err):
344 347 if err.filename == path:
345 348 raise err
346 349 samestat = getattr(os.path, 'samestat', None)
347 350 if followsym and samestat is not None:
348 351 def adddir(dirlst, dirname):
349 352 match = False
350 353 dirstat = os.stat(dirname)
351 354 for lstdirstat in dirlst:
352 355 if samestat(dirstat, lstdirstat):
353 356 match = True
354 357 break
355 358 if not match:
356 359 dirlst.append(dirstat)
357 360 return not match
358 361 else:
359 362 followsym = False
360 363
361 364 if (seen_dirs is None) and followsym:
362 365 seen_dirs = []
363 366 adddir(seen_dirs, path)
364 367 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
365 368 dirs.sort()
366 369 if '.hg' in dirs:
367 370 yield root # found a repository
368 371 qroot = os.path.join(root, '.hg', 'patches')
369 372 if os.path.isdir(os.path.join(qroot, '.hg')):
370 373 yield qroot # we have a patch queue repo here
371 374 if recurse:
372 375 # avoid recursing inside the .hg directory
373 376 dirs.remove('.hg')
374 377 else:
375 378 dirs[:] = [] # don't descend further
376 379 elif followsym:
377 380 newdirs = []
378 381 for d in dirs:
379 382 fname = os.path.join(root, d)
380 383 if adddir(seen_dirs, fname):
381 384 if os.path.islink(fname):
382 385 for hgname in walkrepos(fname, True, seen_dirs):
383 386 yield hgname
384 387 else:
385 388 newdirs.append(d)
386 389 dirs[:] = newdirs
387 390
388 391 def osrcpath():
389 392 '''return default os-specific hgrc search path'''
390 393 path = systemrcpath()
391 394 path.extend(userrcpath())
392 395 path = [os.path.normpath(f) for f in path]
393 396 return path
394 397
395 398 _rcpath = None
396 399
397 400 def rcpath():
398 401 '''return hgrc search path. if env var HGRCPATH is set, use it.
399 402 for each item in path, if directory, use files ending in .rc,
400 403 else use item.
401 404 make HGRCPATH empty to only look in .hg/hgrc of current repo.
402 405 if no HGRCPATH, use default os-specific path.'''
403 406 global _rcpath
404 407 if _rcpath is None:
405 408 if 'HGRCPATH' in os.environ:
406 409 _rcpath = []
407 410 for p in os.environ['HGRCPATH'].split(os.pathsep):
408 411 if not p:
409 412 continue
410 413 p = util.expandpath(p)
411 414 if os.path.isdir(p):
412 415 for f, kind in osutil.listdir(p):
413 416 if f.endswith('.rc'):
414 417 _rcpath.append(os.path.join(p, f))
415 418 else:
416 419 _rcpath.append(p)
417 420 else:
418 421 _rcpath = osrcpath()
419 422 return _rcpath
420 423
421 424 if os.name != 'nt':
422 425
423 426 def rcfiles(path):
424 427 rcs = [os.path.join(path, 'hgrc')]
425 428 rcdir = os.path.join(path, 'hgrc.d')
426 429 try:
427 430 rcs.extend([os.path.join(rcdir, f)
428 431 for f, kind in osutil.listdir(rcdir)
429 432 if f.endswith(".rc")])
430 433 except OSError:
431 434 pass
432 435 return rcs
433 436
434 437 def systemrcpath():
435 438 path = []
436 439 # old mod_python does not set sys.argv
437 440 if len(getattr(sys, 'argv', [])) > 0:
438 441 p = os.path.dirname(os.path.dirname(sys.argv[0]))
439 442 path.extend(rcfiles(os.path.join(p, 'etc/mercurial')))
440 443 path.extend(rcfiles('/etc/mercurial'))
441 444 return path
442 445
443 446 def userrcpath():
444 447 return [os.path.expanduser('~/.hgrc')]
445 448
446 449 else:
447 450
448 451 _HKEY_LOCAL_MACHINE = 0x80000002L
449 452
450 453 def systemrcpath():
451 454 '''return default os-specific hgrc search path'''
452 455 rcpath = []
453 456 filename = util.executablepath()
454 457 # Use mercurial.ini found in directory with hg.exe
455 458 progrc = os.path.join(os.path.dirname(filename), 'mercurial.ini')
456 459 if os.path.isfile(progrc):
457 460 rcpath.append(progrc)
458 461 return rcpath
459 462 # Use hgrc.d found in directory with hg.exe
460 463 progrcd = os.path.join(os.path.dirname(filename), 'hgrc.d')
461 464 if os.path.isdir(progrcd):
462 465 for f, kind in osutil.listdir(progrcd):
463 466 if f.endswith('.rc'):
464 467 rcpath.append(os.path.join(progrcd, f))
465 468 return rcpath
466 469 # else look for a system rcpath in the registry
467 470 value = util.lookupreg('SOFTWARE\\Mercurial', None,
468 471 _HKEY_LOCAL_MACHINE)
469 472 if not isinstance(value, str) or not value:
470 473 return rcpath
471 474 value = util.localpath(value)
472 475 for p in value.split(os.pathsep):
473 476 if p.lower().endswith('mercurial.ini'):
474 477 rcpath.append(p)
475 478 elif os.path.isdir(p):
476 479 for f, kind in osutil.listdir(p):
477 480 if f.endswith('.rc'):
478 481 rcpath.append(os.path.join(p, f))
479 482 return rcpath
480 483
481 484 def userrcpath():
482 485 '''return os-specific hgrc search path to the user dir'''
483 486 home = os.path.expanduser('~')
484 487 path = [os.path.join(home, 'mercurial.ini'),
485 488 os.path.join(home, '.hgrc')]
486 489 userprofile = os.environ.get('USERPROFILE')
487 490 if userprofile:
488 491 path.append(os.path.join(userprofile, 'mercurial.ini'))
489 492 path.append(os.path.join(userprofile, '.hgrc'))
490 493 return path
491 494
492 495 def revsingle(repo, revspec, default='.'):
493 496 if not revspec:
494 497 return repo[default]
495 498
496 499 l = revrange(repo, [revspec])
497 500 if len(l) < 1:
498 501 raise util.Abort(_('empty revision set'))
499 502 return repo[l[-1]]
500 503
501 504 def revpair(repo, revs):
502 505 if not revs:
503 506 return repo.dirstate.p1(), None
504 507
505 508 l = revrange(repo, revs)
506 509
507 510 if len(l) == 0:
508 511 return repo.dirstate.p1(), None
509 512
510 513 if len(l) == 1:
511 514 return repo.lookup(l[0]), None
512 515
513 516 return repo.lookup(l[0]), repo.lookup(l[-1])
514 517
515 518 _revrangesep = ':'
516 519
517 520 def revrange(repo, revs):
518 521 """Yield revision as strings from a list of revision specifications."""
519 522
520 523 def revfix(repo, val, defval):
521 524 if not val and val != 0 and defval is not None:
522 525 return defval
523 526 return repo.changelog.rev(repo.lookup(val))
524 527
525 528 seen, l = set(), []
526 529 for spec in revs:
527 530 # attempt to parse old-style ranges first to deal with
528 531 # things like old-tag which contain query metacharacters
529 532 try:
530 533 if isinstance(spec, int):
531 534 seen.add(spec)
532 535 l.append(spec)
533 536 continue
534 537
535 538 if _revrangesep in spec:
536 539 start, end = spec.split(_revrangesep, 1)
537 540 start = revfix(repo, start, 0)
538 541 end = revfix(repo, end, len(repo) - 1)
539 542 step = start > end and -1 or 1
540 543 for rev in xrange(start, end + step, step):
541 544 if rev in seen:
542 545 continue
543 546 seen.add(rev)
544 547 l.append(rev)
545 548 continue
546 549 elif spec and spec in repo: # single unquoted rev
547 550 rev = revfix(repo, spec, None)
548 551 if rev in seen:
549 552 continue
550 553 seen.add(rev)
551 554 l.append(rev)
552 555 continue
553 556 except error.RepoLookupError:
554 557 pass
555 558
556 559 # fall through to new-style queries if old-style fails
557 560 m = revset.match(repo.ui, spec)
558 561 for r in m(repo, range(len(repo))):
559 562 if r not in seen:
560 563 l.append(r)
561 564 seen.update(l)
562 565
563 566 return l
564 567
565 568 def expandpats(pats):
566 569 if not util.expandglobs:
567 570 return list(pats)
568 571 ret = []
569 572 for p in pats:
570 573 kind, name = matchmod._patsplit(p, None)
571 574 if kind is None:
572 575 try:
573 576 globbed = glob.glob(name)
574 577 except re.error:
575 578 globbed = [name]
576 579 if globbed:
577 580 ret.extend(globbed)
578 581 continue
579 582 ret.append(p)
580 583 return ret
581 584
582 585 def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
583 586 if pats == ("",):
584 587 pats = []
585 588 if not globbed and default == 'relpath':
586 589 pats = expandpats(pats or [])
587 590
588 591 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
589 592 default)
590 593 def badfn(f, msg):
591 594 ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
592 595 m.bad = badfn
593 596 return m, pats
594 597
595 598 def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
596 599 return matchandpats(ctx, pats, opts, globbed, default)[0]
597 600
598 601 def matchall(repo):
599 602 return matchmod.always(repo.root, repo.getcwd())
600 603
601 604 def matchfiles(repo, files):
602 605 return matchmod.exact(repo.root, repo.getcwd(), files)
603 606
604 607 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
605 608 if dry_run is None:
606 609 dry_run = opts.get('dry_run')
607 610 if similarity is None:
608 611 similarity = float(opts.get('similarity') or 0)
609 612 # we'd use status here, except handling of symlinks and ignore is tricky
610 613 added, unknown, deleted, removed = [], [], [], []
611 614 audit_path = pathauditor(repo.root)
612 615 m = match(repo[None], pats, opts)
613 616 rejected = []
614 617 m.bad = lambda x, y: rejected.append(x)
615 618
616 619 for abs in repo.walk(m):
617 620 target = repo.wjoin(abs)
618 621 good = True
619 622 try:
620 623 audit_path(abs)
621 624 except (OSError, util.Abort):
622 625 good = False
623 626 rel = m.rel(abs)
624 627 exact = m.exact(abs)
625 628 if good and abs not in repo.dirstate:
626 629 unknown.append(abs)
627 630 if repo.ui.verbose or not exact:
628 631 repo.ui.status(_('adding %s\n') % ((pats and rel) or abs))
629 632 elif repo.dirstate[abs] != 'r' and (not good or not os.path.lexists(target)
630 633 or (os.path.isdir(target) and not os.path.islink(target))):
631 634 deleted.append(abs)
632 635 if repo.ui.verbose or not exact:
633 636 repo.ui.status(_('removing %s\n') % ((pats and rel) or abs))
634 637 # for finding renames
635 638 elif repo.dirstate[abs] == 'r':
636 639 removed.append(abs)
637 640 elif repo.dirstate[abs] == 'a':
638 641 added.append(abs)
639 642 copies = {}
640 643 if similarity > 0:
641 644 for old, new, score in similar.findrenames(repo,
642 645 added + unknown, removed + deleted, similarity):
643 646 if repo.ui.verbose or not m.exact(old) or not m.exact(new):
644 647 repo.ui.status(_('recording removal of %s as rename to %s '
645 648 '(%d%% similar)\n') %
646 649 (m.rel(old), m.rel(new), score * 100))
647 650 copies[new] = old
648 651
649 652 if not dry_run:
650 653 wctx = repo[None]
651 654 wlock = repo.wlock()
652 655 try:
653 656 wctx.forget(deleted)
654 657 wctx.add(unknown)
655 658 for new, old in copies.iteritems():
656 659 wctx.copy(old, new)
657 660 finally:
658 661 wlock.release()
659 662
660 663 for f in rejected:
661 664 if f in m.files():
662 665 return 1
663 666 return 0
664 667
665 668 def updatedir(ui, repo, patches, similarity=0):
666 669 '''Update dirstate after patch application according to metadata'''
667 670 if not patches:
668 671 return []
669 672 copies = []
670 673 removes = set()
671 674 cfiles = patches.keys()
672 675 cwd = repo.getcwd()
673 676 if cwd:
674 677 cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()]
675 678 for f in patches:
676 679 gp = patches[f]
677 680 if not gp:
678 681 continue
679 682 if gp.op == 'RENAME':
680 683 copies.append((gp.oldpath, gp.path))
681 684 removes.add(gp.oldpath)
682 685 elif gp.op == 'COPY':
683 686 copies.append((gp.oldpath, gp.path))
684 687 elif gp.op == 'DELETE':
685 688 removes.add(gp.path)
686 689
687 690 wctx = repo[None]
688 691 for src, dst in copies:
689 692 dirstatecopy(ui, repo, wctx, src, dst, cwd=cwd)
690 693 if (not similarity) and removes:
691 694 wctx.remove(sorted(removes), True)
692 695
693 696 for f in patches:
694 697 gp = patches[f]
695 698 if gp and gp.mode:
696 699 islink, isexec = gp.mode
697 700 dst = repo.wjoin(gp.path)
698 701 # patch won't create empty files
699 702 if gp.op == 'ADD' and not os.path.lexists(dst):
700 703 flags = (isexec and 'x' or '') + (islink and 'l' or '')
701 704 repo.wwrite(gp.path, '', flags)
702 705 util.setflags(dst, islink, isexec)
703 706 addremove(repo, cfiles, similarity=similarity)
704 707 files = patches.keys()
705 708 files.extend([r for r in removes if r not in files])
706 709 return sorted(files)
707 710
708 711 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
709 712 """Update the dirstate to reflect the intent of copying src to dst. For
710 713 different reasons it might not end with dst being marked as copied from src.
711 714 """
712 715 origsrc = repo.dirstate.copied(src) or src
713 716 if dst == origsrc: # copying back a copy?
714 717 if repo.dirstate[dst] not in 'mn' and not dryrun:
715 718 repo.dirstate.normallookup(dst)
716 719 else:
717 720 if repo.dirstate[origsrc] == 'a' and origsrc == src:
718 721 if not ui.quiet:
719 722 ui.warn(_("%s has not been committed yet, so no copy "
720 723 "data will be stored for %s.\n")
721 724 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
722 725 if repo.dirstate[dst] in '?r' and not dryrun:
723 726 wctx.add([dst])
724 727 elif not dryrun:
725 728 wctx.copy(origsrc, dst)
726 729
727 730 def readrequires(opener, supported):
728 731 '''Reads and parses .hg/requires and checks if all entries found
729 732 are in the list of supported features.'''
730 733 requirements = set(opener.read("requires").splitlines())
731 734 missings = []
732 735 for r in requirements:
733 736 if r not in supported:
734 737 if not r or not r[0].isalnum():
735 738 raise error.RequirementError(_(".hg/requires file is corrupt"))
736 739 missings.append(r)
737 740 missings.sort()
738 741 if missings:
739 742 raise error.RequirementError(_("unknown repository format: "
740 743 "requires features '%s' (upgrade Mercurial)") % "', '".join(missings))
741 744 return requirements
742 745
743 746 class filecacheentry(object):
744 747 def __init__(self, path):
745 748 self.path = path
746 749 self.cachestat = filecacheentry.stat(self.path)
747 750
748 751 if self.cachestat:
749 752 self._cacheable = self.cachestat.cacheable()
750 753 else:
751 754 # None means we don't know yet
752 755 self._cacheable = None
753 756
754 757 def refresh(self):
755 758 if self.cacheable():
756 759 self.cachestat = filecacheentry.stat(self.path)
757 760
758 761 def cacheable(self):
759 762 if self._cacheable is not None:
760 763 return self._cacheable
761 764
762 765 # we don't know yet, assume it is for now
763 766 return True
764 767
765 768 def changed(self):
766 769 # no point in going further if we can't cache it
767 770 if not self.cacheable():
768 771 return True
769 772
770 773 newstat = filecacheentry.stat(self.path)
771 774
772 775 # we may not know if it's cacheable yet, check again now
773 776 if newstat and self._cacheable is None:
774 777 self._cacheable = newstat.cacheable()
775 778
776 779 # check again
777 780 if not self._cacheable:
778 781 return True
779 782
780 783 if self.cachestat != newstat:
781 784 self.cachestat = newstat
782 785 return True
783 786 else:
784 787 return False
785 788
786 789 @staticmethod
787 790 def stat(path):
788 791 try:
789 792 return util.cachestat(path)
790 793 except OSError, e:
791 794 if e.errno != errno.ENOENT:
792 795 raise
793 796
794 797 class filecache(object):
795 798 '''A property like decorator that tracks a file under .hg/ for updates.
796 799
797 800 Records stat info when called in _filecache.
798 801
799 802 On subsequent calls, compares old stat info with new info, and recreates
800 803 the object when needed, updating the new stat info in _filecache.
801 804
802 805 Mercurial either atomic renames or appends for files under .hg,
803 806 so to ensure the cache is reliable we need the filesystem to be able
804 807 to tell us if a file has been replaced. If it can't, we fallback to
805 808 recreating the object on every call (essentially the same behaviour as
806 809 propertycache).'''
807 def __init__(self, path, instore=False):
810 def __init__(self, path):
808 811 self.path = path
809 self.instore = instore
812
813 def join(self, obj, fname):
814 """Used to compute the runtime path of the cached file.
815
816 Users should subclass filecache and provide their own version of this
817 function to call the appropriate join function on 'obj' (an instance
818 of the class that its member function was decorated).
819 """
820 return obj.join(fname)
810 821
811 822 def __call__(self, func):
812 823 self.func = func
813 824 self.name = func.__name__
814 825 return self
815 826
816 827 def __get__(self, obj, type=None):
817 828 # do we need to check if the file changed?
818 829 if self.name in obj.__dict__:
819 830 return obj.__dict__[self.name]
820 831
821 832 entry = obj._filecache.get(self.name)
822 833
823 834 if entry:
824 835 if entry.changed():
825 836 entry.obj = self.func(obj)
826 837 else:
827 path = self.instore and obj.sjoin(self.path) or obj.join(self.path)
838 path = self.join(obj, self.path)
828 839
829 840 # We stat -before- creating the object so our cache doesn't lie if
830 841 # a writer modified between the time we read and stat
831 842 entry = filecacheentry(path)
832 843 entry.obj = self.func(obj)
833 844
834 845 obj._filecache[self.name] = entry
835 846
836 847 obj.__dict__[self.name] = entry.obj
837 848 return entry.obj
838 849
839 850 def __set__(self, obj, value):
840 851 if self.name in obj._filecache:
841 852 obj._filecache[self.name].obj = value # update cached copy
842 853 obj.__dict__[self.name] = value # update copy returned by obj.x
843 854
844 855 def __delete__(self, obj):
845 856 try:
846 857 del obj.__dict__[self.name]
847 858 except KeyError:
848 859 raise AttributeError, self.name
@@ -1,234 +1,257 b''
1 1 import sys, os, struct, subprocess, cStringIO, re, shutil
2 2
3 3 def connect(path=None):
4 4 cmdline = ['hg', 'serve', '--cmdserver', 'pipe']
5 5 if path:
6 6 cmdline += ['-R', path]
7 7
8 8 server = subprocess.Popen(cmdline, stdin=subprocess.PIPE,
9 9 stdout=subprocess.PIPE)
10 10
11 11 return server
12 12
13 13 def writeblock(server, data):
14 14 server.stdin.write(struct.pack('>I', len(data)))
15 15 server.stdin.write(data)
16 16 server.stdin.flush()
17 17
18 18 def readchannel(server):
19 19 data = server.stdout.read(5)
20 20 if not data:
21 21 raise EOFError()
22 22 channel, length = struct.unpack('>cI', data)
23 23 if channel in 'IL':
24 24 return channel, length
25 25 else:
26 26 return channel, server.stdout.read(length)
27 27
28 28 def runcommand(server, args, output=sys.stdout, error=sys.stderr, input=None):
29 29 print ' runcommand', ' '.join(args)
30 30 sys.stdout.flush()
31 31 server.stdin.write('runcommand\n')
32 32 writeblock(server, '\0'.join(args))
33 33
34 34 if not input:
35 35 input = cStringIO.StringIO()
36 36
37 37 while True:
38 38 ch, data = readchannel(server)
39 39 if ch == 'o':
40 40 output.write(data)
41 41 output.flush()
42 42 elif ch == 'e':
43 43 error.write(data)
44 44 error.flush()
45 45 elif ch == 'I':
46 46 writeblock(server, input.read(data))
47 47 elif ch == 'L':
48 48 writeblock(server, input.readline(data))
49 49 elif ch == 'r':
50 50 return struct.unpack('>i', data)[0]
51 51 else:
52 52 print "unexpected channel %c: %r" % (ch, data)
53 53 if ch.isupper():
54 54 return
55 55
56 56 def check(func, repopath=None):
57 57 print
58 58 print 'testing %s:' % func.__name__
59 59 print
60 60 sys.stdout.flush()
61 61 server = connect(repopath)
62 62 try:
63 63 return func(server)
64 64 finally:
65 65 server.stdin.close()
66 66 server.wait()
67 67
68 68 def unknowncommand(server):
69 69 server.stdin.write('unknowncommand\n')
70 70
71 71 def hellomessage(server):
72 72 ch, data = readchannel(server)
73 73 # escaping python tests output not supported
74 74 print '%c, %r' % (ch, re.sub('encoding: [a-zA-Z0-9-]+', 'encoding: ***', data))
75 75
76 76 # run an arbitrary command to make sure the next thing the server sends
77 77 # isn't part of the hello message
78 78 runcommand(server, ['id'])
79 79
80 80 def checkruncommand(server):
81 81 # hello block
82 82 readchannel(server)
83 83
84 84 # no args
85 85 runcommand(server, [])
86 86
87 87 # global options
88 88 runcommand(server, ['id', '--quiet'])
89 89
90 90 # make sure global options don't stick through requests
91 91 runcommand(server, ['id'])
92 92
93 93 # --config
94 94 runcommand(server, ['id', '--config', 'ui.quiet=True'])
95 95
96 96 # make sure --config doesn't stick
97 97 runcommand(server, ['id'])
98 98
99 99 def inputeof(server):
100 100 readchannel(server)
101 101 server.stdin.write('runcommand\n')
102 102 # close stdin while server is waiting for input
103 103 server.stdin.close()
104 104
105 105 # server exits with 1 if the pipe closed while reading the command
106 106 print 'server exit code =', server.wait()
107 107
108 108 def serverinput(server):
109 109 readchannel(server)
110 110
111 111 patch = """
112 112 # HG changeset patch
113 113 # User test
114 114 # Date 0 0
115 115 # Node ID c103a3dec114d882c98382d684d8af798d09d857
116 116 # Parent 0000000000000000000000000000000000000000
117 117 1
118 118
119 119 diff -r 000000000000 -r c103a3dec114 a
120 120 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
121 121 +++ b/a Thu Jan 01 00:00:00 1970 +0000
122 122 @@ -0,0 +1,1 @@
123 123 +1
124 124 """
125 125
126 126 runcommand(server, ['import', '-'], input=cStringIO.StringIO(patch))
127 127 runcommand(server, ['log'])
128 128
129 129 def cwd(server):
130 130 """ check that --cwd doesn't persist between requests """
131 131 readchannel(server)
132 132 os.mkdir('foo')
133 133 f = open('foo/bar', 'wb')
134 134 f.write('a')
135 135 f.close()
136 136 runcommand(server, ['--cwd', 'foo', 'st', 'bar'])
137 137 runcommand(server, ['st', 'foo/bar'])
138 138 os.remove('foo/bar')
139 139
140 140 def localhgrc(server):
141 141 """ check that local configs for the cached repo aren't inherited when -R
142 142 is used """
143 143 readchannel(server)
144 144
145 145 # the cached repo local hgrc contains ui.foo=bar, so showconfig should show it
146 146 runcommand(server, ['showconfig'])
147 147
148 148 # but not for this repo
149 149 runcommand(server, ['init', 'foo'])
150 150 runcommand(server, ['-R', 'foo', 'showconfig', 'ui', 'defaults'])
151 151 shutil.rmtree('foo')
152 152
153 153 def hook(**args):
154 154 print 'hook talking'
155 155 print 'now try to read something: %r' % sys.stdin.read()
156 156
157 157 def hookoutput(server):
158 158 readchannel(server)
159 159 runcommand(server, ['--config',
160 160 'hooks.pre-identify=python:test-commandserver.hook', 'id'],
161 161 input=cStringIO.StringIO('some input'))
162 162
163 163 def outsidechanges(server):
164 164 readchannel(server)
165 165 f = open('a', 'ab')
166 166 f.write('a\n')
167 167 f.close()
168 168 runcommand(server, ['status'])
169 169 os.system('hg ci -Am2')
170 170 runcommand(server, ['tip'])
171 171 runcommand(server, ['status'])
172 172
173 173 def bookmarks(server):
174 174 readchannel(server)
175 175 runcommand(server, ['bookmarks'])
176 176
177 177 # changes .hg/bookmarks
178 178 os.system('hg bookmark -i bm1')
179 179 os.system('hg bookmark -i bm2')
180 180 runcommand(server, ['bookmarks'])
181 181
182 182 # changes .hg/bookmarks.current
183 183 os.system('hg upd bm1 -q')
184 184 runcommand(server, ['bookmarks'])
185 185
186 186 runcommand(server, ['bookmarks', 'bm3'])
187 187 f = open('a', 'ab')
188 188 f.write('a\n')
189 189 f.close()
190 190 runcommand(server, ['commit', '-Amm'])
191 191 runcommand(server, ['bookmarks'])
192 192
193 193 def tagscache(server):
194 194 readchannel(server)
195 195 runcommand(server, ['id', '-t', '-r', '0'])
196 196 os.system('hg tag -r 0 foo')
197 197 runcommand(server, ['id', '-t', '-r', '0'])
198 198
199 199 def setphase(server):
200 200 readchannel(server)
201 201 runcommand(server, ['phase', '-r', '.'])
202 202 os.system('hg phase -r . -p')
203 203 runcommand(server, ['phase', '-r', '.'])
204 204
205 205 def rollback(server):
206 206 readchannel(server)
207 207 runcommand(server, ['phase', '-r', '.', '-p'])
208 208 f = open('a', 'ab')
209 209 f.write('a\n')
210 210 f.close()
211 211 runcommand(server, ['commit', '-Am.'])
212 212 runcommand(server, ['rollback'])
213 213 runcommand(server, ['phase', '-r', '.'])
214 214
215 def branch(server):
216 readchannel(server)
217 runcommand(server, ['branch'])
218 os.system('hg branch foo')
219 runcommand(server, ['branch'])
220 os.system('hg branch default')
221
222 def hgignore(server):
223 readchannel(server)
224 f = open('.hgignore', 'ab')
225 f.write('')
226 f.close()
227 runcommand(server, ['commit', '-Am.'])
228 f = open('ignored-file', 'ab')
229 f.write('')
230 f.close()
231 f = open('.hgignore', 'ab')
232 f.write('ignored-file')
233 f.close()
234 runcommand(server, ['status', '-i', '-u'])
235
215 236 if __name__ == '__main__':
216 237 os.system('hg init')
217 238
218 239 check(hellomessage)
219 240 check(unknowncommand)
220 241 check(checkruncommand)
221 242 check(inputeof)
222 243 check(serverinput)
223 244 check(cwd)
224 245
225 246 hgrc = open('.hg/hgrc', 'a')
226 247 hgrc.write('[ui]\nfoo=bar\n')
227 248 hgrc.close()
228 249 check(localhgrc)
229 250 check(hookoutput)
230 251 check(outsidechanges)
231 252 check(bookmarks)
232 253 check(tagscache)
233 254 check(setphase)
234 255 check(rollback)
256 check(branch)
257 check(hgignore)
@@ -1,147 +1,165 b''
1 1
2 2 testing hellomessage:
3 3
4 4 o, 'capabilities: getencoding runcommand\nencoding: ***'
5 5 runcommand id
6 6 000000000000 tip
7 7
8 8 testing unknowncommand:
9 9
10 10 abort: unknown command unknowncommand
11 11
12 12 testing checkruncommand:
13 13
14 14 runcommand
15 15 Mercurial Distributed SCM
16 16
17 17 basic commands:
18 18
19 19 add add the specified files on the next commit
20 20 annotate show changeset information by line for each file
21 21 clone make a copy of an existing repository
22 22 commit commit the specified files or all outstanding changes
23 23 diff diff repository (or selected files)
24 24 export dump the header and diffs for one or more changesets
25 25 forget forget the specified files on the next commit
26 26 init create a new repository in the given directory
27 27 log show revision history of entire repository or files
28 28 merge merge working directory with another revision
29 29 phase set or show the current phase name
30 30 pull pull changes from the specified source
31 31 push push changes to the specified destination
32 32 remove remove the specified files on the next commit
33 33 serve start stand-alone webserver
34 34 status show changed files in the working directory
35 35 summary summarize working directory state
36 36 update update working directory (or switch revisions)
37 37
38 38 use "hg help" for the full list of commands or "hg -v" for details
39 39 runcommand id --quiet
40 40 000000000000
41 41 runcommand id
42 42 000000000000 tip
43 43 runcommand id --config ui.quiet=True
44 44 000000000000
45 45 runcommand id
46 46 000000000000 tip
47 47
48 48 testing inputeof:
49 49
50 50 server exit code = 1
51 51
52 52 testing serverinput:
53 53
54 54 runcommand import -
55 55 applying patch from stdin
56 56 runcommand log
57 57 changeset: 0:eff892de26ec
58 58 tag: tip
59 59 user: test
60 60 date: Thu Jan 01 00:00:00 1970 +0000
61 61 summary: 1
62 62
63 63
64 64 testing cwd:
65 65
66 66 runcommand --cwd foo st bar
67 67 ? bar
68 68 runcommand st foo/bar
69 69 ? foo/bar
70 70
71 71 testing localhgrc:
72 72
73 73 runcommand showconfig
74 74 bundle.mainreporoot=$TESTTMP
75 75 defaults.backout=-d "0 0"
76 76 defaults.commit=-d "0 0"
77 77 defaults.tag=-d "0 0"
78 78 ui.slash=True
79 79 ui.foo=bar
80 80 runcommand init foo
81 81 runcommand -R foo showconfig ui defaults
82 82 defaults.backout=-d "0 0"
83 83 defaults.commit=-d "0 0"
84 84 defaults.tag=-d "0 0"
85 85 ui.slash=True
86 86
87 87 testing hookoutput:
88 88
89 89 runcommand --config hooks.pre-identify=python:test-commandserver.hook id
90 90 hook talking
91 91 now try to read something: 'some input'
92 92 eff892de26ec tip
93 93
94 94 testing outsidechanges:
95 95
96 96 runcommand status
97 97 M a
98 98 runcommand tip
99 99 changeset: 1:d3a0a68be6de
100 100 tag: tip
101 101 user: test
102 102 date: Thu Jan 01 00:00:00 1970 +0000
103 103 summary: 2
104 104
105 105 runcommand status
106 106
107 107 testing bookmarks:
108 108
109 109 runcommand bookmarks
110 110 no bookmarks set
111 111 runcommand bookmarks
112 112 bm1 1:d3a0a68be6de
113 113 bm2 1:d3a0a68be6de
114 114 runcommand bookmarks
115 115 * bm1 1:d3a0a68be6de
116 116 bm2 1:d3a0a68be6de
117 117 runcommand bookmarks bm3
118 118 runcommand commit -Amm
119 119 runcommand bookmarks
120 120 bm1 1:d3a0a68be6de
121 121 bm2 1:d3a0a68be6de
122 122 * bm3 2:aef17e88f5f0
123 123
124 124 testing tagscache:
125 125
126 126 runcommand id -t -r 0
127 127
128 128 runcommand id -t -r 0
129 129 foo
130 130
131 131 testing setphase:
132 132
133 133 runcommand phase -r .
134 134 3: draft
135 135 runcommand phase -r .
136 136 3: public
137 137
138 138 testing rollback:
139 139
140 140 runcommand phase -r . -p
141 141 no phases changed
142 142 runcommand commit -Am.
143 143 runcommand rollback
144 144 repository tip rolled back to revision 3 (undo commit)
145 145 working directory now based on revision 3
146 146 runcommand phase -r .
147 147 3: public
148
149 testing branch:
150
151 runcommand branch
152 default
153 marked working directory as branch foo
154 (branches are permanent and global, did you want a bookmark?)
155 runcommand branch
156 foo
157 marked working directory as branch default
158 (branches are permanent and global, did you want a bookmark?)
159
160 testing hgignore:
161
162 runcommand commit -Am.
163 adding .hgignore
164 runcommand status -i -u
165 I ignored-file
General Comments 0
You need to be logged in to leave comments. Login now