strip: invalidate all caches after stripping (fixes issue1951)...
Benoit Boissinot
r10547:bae9bb09 stable
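The substantive change is in the localrepo.py hunk at the end of this page: the cache-clearing half of localrepository.invalidate is split out into a new invalidatecaches() method, presumably so the strip code (not shown in the visible hunks) can drop the derived tag and branch caches without also discarding the changelog and manifest objects it still needs. A condensed view of the new shape, extracted from that hunk (surrounding methods elided):

class localrepository(repo.repository):
    def invalidatecaches(self):
        # derived, lazily rebuilt caches: drop them all
        self._tags = None
        self._tagtypes = None
        self.nodetagscache = None
        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidate(self):
        # additionally discard the changelog/manifest propertycache
        # entries, then clear the derived caches
        for a in "changelog manifest".split():
            if a in self.__dict__:
                delattr(self, a)
        self.invalidatecaches()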
@@ -0,0 +1,104 b''
#!/bin/sh

createrepo() {
    rm -rf repo
    hg init repo
    cd repo

    echo "a" > a
    hg commit -d '0 0' -A -m 'A'

    hg branch branch1
    hg commit -d '1 0' -m 'Branch1'

    echo "b" > b
    hg commit -A -d '2 0' -m 'B'

    hg up 0
    hg branch branch2
    hg commit -d '3 0' -m 'Branch2'

    echo "c" > C
    hg commit -A -d '4 0' -m 'C'

    hg up 2
    hg branch -f branch2
    echo "d" > d
    hg commit -A -d '5 0' -m 'D'

    echo "e" > e
    hg commit -A -d '6 0' -m 'E'

    hg update default

    hg branch branch3
    hg commit -d '7 0' -m 'Branch3'

    echo "f" > f
    hg commit -A -d '8 0' -m 'F'
}

echo
createrepo > /dev/null 2>&1
hg --config extensions.hgext.graphlog= glog --template '{rev}:{node|short} {desc} branch: {branches}\n'

echo
echo '% Branches'
hg branches

echo
echo '% Heads'
hg heads --template '{rev}:{node|short} {desc} branch: {branches}\n'

echo
echo '% Rebase part of branch2 (5-6) onto branch3 (8)'
hg --config extensions.hgext.rebase= rebase --detach -s 5 -d 8 2>&1 | sed 's/\(saving bundle to \).*/\1/'

echo
echo '% Branches'
hg branches

echo
echo '% Heads'
hg heads --template '{rev}:{node|short} {desc} branch: {branches}\n'

echo
hg --config extensions.hgext.graphlog= glog --template '{rev}:{node|short} {desc} branch: {branches}\n'

echo
echo '% Rebase head of branch3 (8) onto branch2 (6)'
createrepo > /dev/null 2>&1
hg --config extensions.hgext.graphlog= glog --template '{rev}:{node|short} {desc} branch: {branches}\n'

hg --config extensions.hgext.rebase= rebase --detach -s 8 -d 6 2>&1 | sed 's/\(saving bundle to \).*/\1/'

echo
echo '% Branches'
hg branches

echo
echo '% Heads'
hg heads --template '{rev}:{node|short} {desc} branch: {branches}\n'

echo
hg --config extensions.hgext.graphlog= glog --template '{rev}:{node|short} {desc} branch: {branches}\n'
hg verify -q

echo
echo '% Rebase entire branch3 (7-8) onto branch2 (6)'
createrepo > /dev/null 2>&1
hg --config extensions.hgext.graphlog= glog --template '{rev}:{node|short} {desc} branch: {branches}\n'

hg --config extensions.hgext.rebase= rebase --detach -s 7 -d 6 2>&1 | sed 's/\(saving bundle to \).*/\1/'

echo
echo '% Branches'
hg branches

echo
echo '% Heads'
hg heads --template '{rev}:{node|short} {desc} branch: {branches}\n'

echo
hg --config extensions.hgext.graphlog= glog --template '{rev}:{node|short} {desc} branch: {branches}\n'
hg verify -q
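The hunk below is the matching expected-output file: Mercurial's old-style shell tests pass only if the script's combined stdout/stderr is byte-identical to the recorded .out file. A minimal sketch of that comparison, with placeholder filenames (the real harness is tests/run-tests.py, which also handles environment setup and diffing):

import subprocess

def outputmatches(script, expected):
    # run the shell test, capturing stdout and stderr together the way
    # the harness does, then compare against the recorded .out file
    p = subprocess.Popen(['sh', script], stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    actual = p.communicate()[0]
    return actual == open(expected, 'rb').read()

# e.g. outputmatches('test-rebase-detach', 'test-rebase-detach.out')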
@@ -0,0 +1,186 b''

@ 8:c11d5b3e9c00 F branch: branch3
|
o 7:33c9da881988 Branch3 branch: branch3
|
| o 6:0e4064ab11a3 E branch: branch2
| |
| o 5:5ac035cb5d8f D branch: branch2
| |
| | o 4:8e66061486ee C branch: branch2
| | |
+---o 3:99567862abbe Branch2 branch: branch2
| |
| o 2:65a26a4d12f6 B branch: branch1
| |
| o 1:0f3f3010ee16 Branch1 branch: branch1
|/
o 0:1994f17a630e A branch:


% Branches
branch3 8:c11d5b3e9c00
branch2 6:0e4064ab11a3
branch1 2:65a26a4d12f6 (inactive)
default 0:1994f17a630e (inactive)

% Heads
8:c11d5b3e9c00 F branch: branch3
6:0e4064ab11a3 E branch: branch2
4:8e66061486ee C branch: branch2
2:65a26a4d12f6 B branch: branch1
0:1994f17a630e A branch:

% Rebase part of branch2 (5-6) onto branch3 (8)
saving bundle to
adding branch
adding changesets
adding manifests
adding file changes
added 4 changesets with 3 changes to 3 files (+1 heads)
rebase completed

% Branches
branch3 8:c9bfa9beb84e
branch2 4:8e66061486ee
branch1 2:65a26a4d12f6
default 0:1994f17a630e (inactive)

% Heads
8:c9bfa9beb84e E branch: branch3
4:8e66061486ee C branch: branch2
2:65a26a4d12f6 B branch: branch1
0:1994f17a630e A branch:

@ 8:c9bfa9beb84e E branch: branch3
|
o 7:bf9037384081 D branch: branch3
|
o 6:c11d5b3e9c00 F branch: branch3
|
o 5:33c9da881988 Branch3 branch: branch3
|
| o 4:8e66061486ee C branch: branch2
| |
| o 3:99567862abbe Branch2 branch: branch2
|/
| o 2:65a26a4d12f6 B branch: branch1
| |
| o 1:0f3f3010ee16 Branch1 branch: branch1
|/
o 0:1994f17a630e A branch:


% Rebase head of branch3 (8) onto branch2 (6)
@ 8:c11d5b3e9c00 F branch: branch3
|
o 7:33c9da881988 Branch3 branch: branch3
|
| o 6:0e4064ab11a3 E branch: branch2
| |
| o 5:5ac035cb5d8f D branch: branch2
| |
| | o 4:8e66061486ee C branch: branch2
| | |
+---o 3:99567862abbe Branch2 branch: branch2
| |
| o 2:65a26a4d12f6 B branch: branch1
| |
| o 1:0f3f3010ee16 Branch1 branch: branch1
|/
o 0:1994f17a630e A branch:

saving bundle to
adding branch
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files
rebase completed

% Branches
branch2 8:b44d3024f247
branch3 7:33c9da881988
branch1 2:65a26a4d12f6 (inactive)
default 0:1994f17a630e (inactive)

% Heads
8:b44d3024f247 F branch: branch2
7:33c9da881988 Branch3 branch: branch3
4:8e66061486ee C branch: branch2
2:65a26a4d12f6 B branch: branch1
0:1994f17a630e A branch:

@ 8:b44d3024f247 F branch: branch2
|
| o 7:33c9da881988 Branch3 branch: branch3
| |
o | 6:0e4064ab11a3 E branch: branch2
| |
o | 5:5ac035cb5d8f D branch: branch2
| |
| | o 4:8e66061486ee C branch: branch2
| | |
| | o 3:99567862abbe Branch2 branch: branch2
| |/
o | 2:65a26a4d12f6 B branch: branch1
| |
o | 1:0f3f3010ee16 Branch1 branch: branch1
|/
o 0:1994f17a630e A branch:


% Rebase entire branch3 (7-8) onto branch2 (6)
@ 8:c11d5b3e9c00 F branch: branch3
|
o 7:33c9da881988 Branch3 branch: branch3
|
| o 6:0e4064ab11a3 E branch: branch2
| |
| o 5:5ac035cb5d8f D branch: branch2
| |
| | o 4:8e66061486ee C branch: branch2
| | |
+---o 3:99567862abbe Branch2 branch: branch2
| |
| o 2:65a26a4d12f6 B branch: branch1
| |
| o 1:0f3f3010ee16 Branch1 branch: branch1
|/
o 0:1994f17a630e A branch:

saving bundle to
adding branch
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files
rebase completed

% Branches
branch2 7:b44d3024f247
branch1 2:65a26a4d12f6 (inactive)
default 0:1994f17a630e (inactive)

% Heads
7:b44d3024f247 F branch: branch2
4:8e66061486ee C branch: branch2
2:65a26a4d12f6 B branch: branch1
0:1994f17a630e A branch:

@ 7:b44d3024f247 F branch: branch2
|
o 6:0e4064ab11a3 E branch: branch2
|
o 5:5ac035cb5d8f D branch: branch2
|
| o 4:8e66061486ee C branch: branch2
| |
| o 3:99567862abbe Branch2 branch: branch2
| |
o | 2:65a26a4d12f6 B branch: branch1
| |
o | 1:0f3f3010ee16 Branch1 branch: branch1
|/
o 0:1994f17a630e A branch:

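Each 'saving bundle to' line above marks a strip: rebase --detach copies the source changesets onto the new parent, then strips the originals after backing them up to a bundle. The '% Branches' and '% Heads' checks that follow only pass if the branch caches are rebuilt after that strip, which is the invalidation this changeset fixes (issue1951). The on-disk cache is the branchheads.cache file written by _writebranchcache in the hunk below; a sketch of a standalone reader, assuming the same layout (a hypothetical helper, not part of Mercurial):

def readbranchheads(path='.hg/branchheads.cache'):
    # first line is "<tip-hex> <tip-rev>"; _readbranchcache below only
    # trusts the rest of the file if this still matches the repo tip
    lines = open(path).read().split('\n')
    tipnode, tiprev = lines.pop(0).split(' ', 1)
    heads = {}
    for line in lines:
        if not line:
            continue
        node, label = line.split(' ', 1)
        heads.setdefault(label.strip(), []).append(node)
    return tipnode, int(tiprev), heads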
@@ -1,2220 +1,2223 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo
10 import repo, changegroup, subrepo
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as match_
14 import match as match_
15 import merge as merge_
15 import merge as merge_
16 import tags as tags_
16 import tags as tags_
17 from lock import release
17 from lock import release
18 import weakref, stat, errno, os, time, inspect
18 import weakref, stat, errno, os, time, inspect
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20
20
21 class localrepository(repo.repository):
21 class localrepository(repo.repository):
22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
23 supported = set('revlogv1 store fncache shared'.split())
23 supported = set('revlogv1 store fncache shared'.split())
24
24
25 def __init__(self, baseui, path=None, create=0):
25 def __init__(self, baseui, path=None, create=0):
26 repo.repository.__init__(self)
26 repo.repository.__init__(self)
27 self.root = os.path.realpath(path)
27 self.root = os.path.realpath(path)
28 self.path = os.path.join(self.root, ".hg")
28 self.path = os.path.join(self.root, ".hg")
29 self.origroot = path
29 self.origroot = path
30 self.opener = util.opener(self.path)
30 self.opener = util.opener(self.path)
31 self.wopener = util.opener(self.root)
31 self.wopener = util.opener(self.root)
32 self.baseui = baseui
32 self.baseui = baseui
33 self.ui = baseui.copy()
33 self.ui = baseui.copy()
34
34
35 try:
35 try:
36 self.ui.readconfig(self.join("hgrc"), self.root)
36 self.ui.readconfig(self.join("hgrc"), self.root)
37 extensions.loadall(self.ui)
37 extensions.loadall(self.ui)
38 except IOError:
38 except IOError:
39 pass
39 pass
40
40
41 if not os.path.isdir(self.path):
41 if not os.path.isdir(self.path):
42 if create:
42 if create:
43 if not os.path.exists(path):
43 if not os.path.exists(path):
44 os.mkdir(path)
44 os.mkdir(path)
45 os.mkdir(self.path)
45 os.mkdir(self.path)
46 requirements = ["revlogv1"]
46 requirements = ["revlogv1"]
47 if self.ui.configbool('format', 'usestore', True):
47 if self.ui.configbool('format', 'usestore', True):
48 os.mkdir(os.path.join(self.path, "store"))
48 os.mkdir(os.path.join(self.path, "store"))
49 requirements.append("store")
49 requirements.append("store")
50 if self.ui.configbool('format', 'usefncache', True):
50 if self.ui.configbool('format', 'usefncache', True):
51 requirements.append("fncache")
51 requirements.append("fncache")
52 # create an invalid changelog
52 # create an invalid changelog
53 self.opener("00changelog.i", "a").write(
53 self.opener("00changelog.i", "a").write(
54 '\0\0\0\2' # represents revlogv2
54 '\0\0\0\2' # represents revlogv2
55 ' dummy changelog to prevent using the old repo layout'
55 ' dummy changelog to prevent using the old repo layout'
56 )
56 )
57 reqfile = self.opener("requires", "w")
57 reqfile = self.opener("requires", "w")
58 for r in requirements:
58 for r in requirements:
59 reqfile.write("%s\n" % r)
59 reqfile.write("%s\n" % r)
60 reqfile.close()
60 reqfile.close()
61 else:
61 else:
62 raise error.RepoError(_("repository %s not found") % path)
62 raise error.RepoError(_("repository %s not found") % path)
63 elif create:
63 elif create:
64 raise error.RepoError(_("repository %s already exists") % path)
64 raise error.RepoError(_("repository %s already exists") % path)
65 else:
65 else:
66 # find requirements
66 # find requirements
67 requirements = set()
67 requirements = set()
68 try:
68 try:
69 requirements = set(self.opener("requires").read().splitlines())
69 requirements = set(self.opener("requires").read().splitlines())
70 except IOError, inst:
70 except IOError, inst:
71 if inst.errno != errno.ENOENT:
71 if inst.errno != errno.ENOENT:
72 raise
72 raise
73 for r in requirements - self.supported:
73 for r in requirements - self.supported:
74 raise error.RepoError(_("requirement '%s' not supported") % r)
74 raise error.RepoError(_("requirement '%s' not supported") % r)
75
75
76 self.sharedpath = self.path
76 self.sharedpath = self.path
77 try:
77 try:
78 s = os.path.realpath(self.opener("sharedpath").read())
78 s = os.path.realpath(self.opener("sharedpath").read())
79 if not os.path.exists(s):
79 if not os.path.exists(s):
80 raise error.RepoError(
80 raise error.RepoError(
81 _('.hg/sharedpath points to nonexistent directory %s') % s)
81 _('.hg/sharedpath points to nonexistent directory %s') % s)
82 self.sharedpath = s
82 self.sharedpath = s
83 except IOError, inst:
83 except IOError, inst:
84 if inst.errno != errno.ENOENT:
84 if inst.errno != errno.ENOENT:
85 raise
85 raise
86
86
87 self.store = store.store(requirements, self.sharedpath, util.opener)
87 self.store = store.store(requirements, self.sharedpath, util.opener)
88 self.spath = self.store.path
88 self.spath = self.store.path
89 self.sopener = self.store.opener
89 self.sopener = self.store.opener
90 self.sjoin = self.store.join
90 self.sjoin = self.store.join
91 self.opener.createmode = self.store.createmode
91 self.opener.createmode = self.store.createmode
92 self.sopener.options = {}
92 self.sopener.options = {}
93
93
94 # These two define the set of tags for this repository. _tags
94 # These two define the set of tags for this repository. _tags
95 # maps tag name to node; _tagtypes maps tag name to 'global' or
95 # maps tag name to node; _tagtypes maps tag name to 'global' or
96 # 'local'. (Global tags are defined by .hgtags across all
96 # 'local'. (Global tags are defined by .hgtags across all
97 # heads, and local tags are defined in .hg/localtags.) They
97 # heads, and local tags are defined in .hg/localtags.) They
98 # constitute the in-memory cache of tags.
98 # constitute the in-memory cache of tags.
99 self._tags = None
99 self._tags = None
100 self._tagtypes = None
100 self._tagtypes = None
101
101
102 self._branchcache = None # in UTF-8
102 self._branchcache = None # in UTF-8
103 self._branchcachetip = None
103 self._branchcachetip = None
104 self.nodetagscache = None
104 self.nodetagscache = None
105 self.filterpats = {}
105 self.filterpats = {}
106 self._datafilters = {}
106 self._datafilters = {}
107 self._transref = self._lockref = self._wlockref = None
107 self._transref = self._lockref = self._wlockref = None
108
108
109 @propertycache
109 @propertycache
110 def changelog(self):
110 def changelog(self):
111 c = changelog.changelog(self.sopener)
111 c = changelog.changelog(self.sopener)
112 if 'HG_PENDING' in os.environ:
112 if 'HG_PENDING' in os.environ:
113 p = os.environ['HG_PENDING']
113 p = os.environ['HG_PENDING']
114 if p.startswith(self.root):
114 if p.startswith(self.root):
115 c.readpending('00changelog.i.a')
115 c.readpending('00changelog.i.a')
116 self.sopener.options['defversion'] = c.version
116 self.sopener.options['defversion'] = c.version
117 return c
117 return c
118
118
119 @propertycache
119 @propertycache
120 def manifest(self):
120 def manifest(self):
121 return manifest.manifest(self.sopener)
121 return manifest.manifest(self.sopener)
122
122
123 @propertycache
123 @propertycache
124 def dirstate(self):
124 def dirstate(self):
125 return dirstate.dirstate(self.opener, self.ui, self.root)
125 return dirstate.dirstate(self.opener, self.ui, self.root)
126
126
127 def __getitem__(self, changeid):
127 def __getitem__(self, changeid):
128 if changeid is None:
128 if changeid is None:
129 return context.workingctx(self)
129 return context.workingctx(self)
130 return context.changectx(self, changeid)
130 return context.changectx(self, changeid)
131
131
132 def __contains__(self, changeid):
132 def __contains__(self, changeid):
133 try:
133 try:
134 return bool(self.lookup(changeid))
134 return bool(self.lookup(changeid))
135 except error.RepoLookupError:
135 except error.RepoLookupError:
136 return False
136 return False
137
137
138 def __nonzero__(self):
138 def __nonzero__(self):
139 return True
139 return True
140
140
141 def __len__(self):
141 def __len__(self):
142 return len(self.changelog)
142 return len(self.changelog)
143
143
144 def __iter__(self):
144 def __iter__(self):
145 for i in xrange(len(self)):
145 for i in xrange(len(self)):
146 yield i
146 yield i
147
147
148 def url(self):
148 def url(self):
149 return 'file:' + self.root
149 return 'file:' + self.root
150
150
151 def hook(self, name, throw=False, **args):
151 def hook(self, name, throw=False, **args):
152 return hook.hook(self.ui, self, name, throw, **args)
152 return hook.hook(self.ui, self, name, throw, **args)
153
153
154 tag_disallowed = ':\r\n'
154 tag_disallowed = ':\r\n'
155
155
156 def _tag(self, names, node, message, local, user, date, extra={}):
156 def _tag(self, names, node, message, local, user, date, extra={}):
157 if isinstance(names, str):
157 if isinstance(names, str):
158 allchars = names
158 allchars = names
159 names = (names,)
159 names = (names,)
160 else:
160 else:
161 allchars = ''.join(names)
161 allchars = ''.join(names)
162 for c in self.tag_disallowed:
162 for c in self.tag_disallowed:
163 if c in allchars:
163 if c in allchars:
164 raise util.Abort(_('%r cannot be used in a tag name') % c)
164 raise util.Abort(_('%r cannot be used in a tag name') % c)
165
165
166 for name in names:
166 for name in names:
167 self.hook('pretag', throw=True, node=hex(node), tag=name,
167 self.hook('pretag', throw=True, node=hex(node), tag=name,
168 local=local)
168 local=local)
169
169
170 def writetags(fp, names, munge, prevtags):
170 def writetags(fp, names, munge, prevtags):
171 fp.seek(0, 2)
171 fp.seek(0, 2)
172 if prevtags and prevtags[-1] != '\n':
172 if prevtags and prevtags[-1] != '\n':
173 fp.write('\n')
173 fp.write('\n')
174 for name in names:
174 for name in names:
175 m = munge and munge(name) or name
175 m = munge and munge(name) or name
176 if self._tagtypes and name in self._tagtypes:
176 if self._tagtypes and name in self._tagtypes:
177 old = self._tags.get(name, nullid)
177 old = self._tags.get(name, nullid)
178 fp.write('%s %s\n' % (hex(old), m))
178 fp.write('%s %s\n' % (hex(old), m))
179 fp.write('%s %s\n' % (hex(node), m))
179 fp.write('%s %s\n' % (hex(node), m))
180 fp.close()
180 fp.close()
181
181
182 prevtags = ''
182 prevtags = ''
183 if local:
183 if local:
184 try:
184 try:
185 fp = self.opener('localtags', 'r+')
185 fp = self.opener('localtags', 'r+')
186 except IOError:
186 except IOError:
187 fp = self.opener('localtags', 'a')
187 fp = self.opener('localtags', 'a')
188 else:
188 else:
189 prevtags = fp.read()
189 prevtags = fp.read()
190
190
191 # local tags are stored in the current charset
191 # local tags are stored in the current charset
192 writetags(fp, names, None, prevtags)
192 writetags(fp, names, None, prevtags)
193 for name in names:
193 for name in names:
194 self.hook('tag', node=hex(node), tag=name, local=local)
194 self.hook('tag', node=hex(node), tag=name, local=local)
195 return
195 return
196
196
197 try:
197 try:
198 fp = self.wfile('.hgtags', 'rb+')
198 fp = self.wfile('.hgtags', 'rb+')
199 except IOError:
199 except IOError:
200 fp = self.wfile('.hgtags', 'ab')
200 fp = self.wfile('.hgtags', 'ab')
201 else:
201 else:
202 prevtags = fp.read()
202 prevtags = fp.read()
203
203
204 # committed tags are stored in UTF-8
204 # committed tags are stored in UTF-8
205 writetags(fp, names, encoding.fromlocal, prevtags)
205 writetags(fp, names, encoding.fromlocal, prevtags)
206
206
207 if '.hgtags' not in self.dirstate:
207 if '.hgtags' not in self.dirstate:
208 self.add(['.hgtags'])
208 self.add(['.hgtags'])
209
209
210 m = match_.exact(self.root, '', ['.hgtags'])
210 m = match_.exact(self.root, '', ['.hgtags'])
211 tagnode = self.commit(message, user, date, extra=extra, match=m)
211 tagnode = self.commit(message, user, date, extra=extra, match=m)
212
212
213 for name in names:
213 for name in names:
214 self.hook('tag', node=hex(node), tag=name, local=local)
214 self.hook('tag', node=hex(node), tag=name, local=local)
215
215
216 return tagnode
216 return tagnode
217
217
218 def tag(self, names, node, message, local, user, date):
218 def tag(self, names, node, message, local, user, date):
219 '''tag a revision with one or more symbolic names.
219 '''tag a revision with one or more symbolic names.
220
220
221 names is a list of strings or, when adding a single tag, names may be a
221 names is a list of strings or, when adding a single tag, names may be a
222 string.
222 string.
223
223
224 if local is True, the tags are stored in a per-repository file.
224 if local is True, the tags are stored in a per-repository file.
225 otherwise, they are stored in the .hgtags file, and a new
225 otherwise, they are stored in the .hgtags file, and a new
226 changeset is committed with the change.
226 changeset is committed with the change.
227
227
228 keyword arguments:
228 keyword arguments:
229
229
230 local: whether to store tags in non-version-controlled file
230 local: whether to store tags in non-version-controlled file
231 (default False)
231 (default False)
232
232
233 message: commit message to use if committing
233 message: commit message to use if committing
234
234
235 user: name of user to use if committing
235 user: name of user to use if committing
236
236
237 date: date tuple to use if committing'''
237 date: date tuple to use if committing'''
238
238
239 for x in self.status()[:5]:
239 for x in self.status()[:5]:
240 if '.hgtags' in x:
240 if '.hgtags' in x:
241 raise util.Abort(_('working copy of .hgtags is changed '
241 raise util.Abort(_('working copy of .hgtags is changed '
242 '(please commit .hgtags manually)'))
242 '(please commit .hgtags manually)'))
243
243
244 self.tags() # instantiate the cache
244 self.tags() # instantiate the cache
245 self._tag(names, node, message, local, user, date)
245 self._tag(names, node, message, local, user, date)
246
246
247 def tags(self):
247 def tags(self):
248 '''return a mapping of tag to node'''
248 '''return a mapping of tag to node'''
249 if self._tags is None:
249 if self._tags is None:
250 (self._tags, self._tagtypes) = self._findtags()
250 (self._tags, self._tagtypes) = self._findtags()
251
251
252 return self._tags
252 return self._tags
253
253
254 def _findtags(self):
254 def _findtags(self):
255 '''Do the hard work of finding tags. Return a pair of dicts
255 '''Do the hard work of finding tags. Return a pair of dicts
256 (tags, tagtypes) where tags maps tag name to node, and tagtypes
256 (tags, tagtypes) where tags maps tag name to node, and tagtypes
257 maps tag name to a string like \'global\' or \'local\'.
257 maps tag name to a string like \'global\' or \'local\'.
258 Subclasses or extensions are free to add their own tags, but
258 Subclasses or extensions are free to add their own tags, but
259 should be aware that the returned dicts will be retained for the
259 should be aware that the returned dicts will be retained for the
260 duration of the localrepo object.'''
260 duration of the localrepo object.'''
261
261
262 # XXX what tagtype should subclasses/extensions use? Currently
262 # XXX what tagtype should subclasses/extensions use? Currently
263 # mq and bookmarks add tags, but do not set the tagtype at all.
263 # mq and bookmarks add tags, but do not set the tagtype at all.
264 # Should each extension invent its own tag type? Should there
264 # Should each extension invent its own tag type? Should there
265 # be one tagtype for all such "virtual" tags? Or is the status
265 # be one tagtype for all such "virtual" tags? Or is the status
266 # quo fine?
266 # quo fine?
267
267
268 alltags = {} # map tag name to (node, hist)
268 alltags = {} # map tag name to (node, hist)
269 tagtypes = {}
269 tagtypes = {}
270
270
271 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
271 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
272 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
272 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
273
273
274 # Build the return dicts. Have to re-encode tag names because
274 # Build the return dicts. Have to re-encode tag names because
275 # the tags module always uses UTF-8 (in order not to lose info
275 # the tags module always uses UTF-8 (in order not to lose info
276 # writing to the cache), but the rest of Mercurial wants them in
276 # writing to the cache), but the rest of Mercurial wants them in
277 # local encoding.
277 # local encoding.
278 tags = {}
278 tags = {}
279 for (name, (node, hist)) in alltags.iteritems():
279 for (name, (node, hist)) in alltags.iteritems():
280 if node != nullid:
280 if node != nullid:
281 tags[encoding.tolocal(name)] = node
281 tags[encoding.tolocal(name)] = node
282 tags['tip'] = self.changelog.tip()
282 tags['tip'] = self.changelog.tip()
283 tagtypes = dict([(encoding.tolocal(name), value)
283 tagtypes = dict([(encoding.tolocal(name), value)
284 for (name, value) in tagtypes.iteritems()])
284 for (name, value) in tagtypes.iteritems()])
285 return (tags, tagtypes)
285 return (tags, tagtypes)
286
286
287 def tagtype(self, tagname):
287 def tagtype(self, tagname):
288 '''
288 '''
289 return the type of the given tag. result can be:
289 return the type of the given tag. result can be:
290
290
291 'local' : a local tag
291 'local' : a local tag
292 'global' : a global tag
292 'global' : a global tag
293 None : tag does not exist
293 None : tag does not exist
294 '''
294 '''
295
295
296 self.tags()
296 self.tags()
297
297
298 return self._tagtypes.get(tagname)
298 return self._tagtypes.get(tagname)
299
299
300 def tagslist(self):
300 def tagslist(self):
301 '''return a list of tags ordered by revision'''
301 '''return a list of tags ordered by revision'''
302 l = []
302 l = []
303 for t, n in self.tags().iteritems():
303 for t, n in self.tags().iteritems():
304 try:
304 try:
305 r = self.changelog.rev(n)
305 r = self.changelog.rev(n)
306 except:
306 except:
307 r = -2 # sort to the beginning of the list if unknown
307 r = -2 # sort to the beginning of the list if unknown
308 l.append((r, t, n))
308 l.append((r, t, n))
309 return [(t, n) for r, t, n in sorted(l)]
309 return [(t, n) for r, t, n in sorted(l)]
310
310
311 def nodetags(self, node):
311 def nodetags(self, node):
312 '''return the tags associated with a node'''
312 '''return the tags associated with a node'''
313 if not self.nodetagscache:
313 if not self.nodetagscache:
314 self.nodetagscache = {}
314 self.nodetagscache = {}
315 for t, n in self.tags().iteritems():
315 for t, n in self.tags().iteritems():
316 self.nodetagscache.setdefault(n, []).append(t)
316 self.nodetagscache.setdefault(n, []).append(t)
317 return self.nodetagscache.get(node, [])
317 return self.nodetagscache.get(node, [])
318
318
319 def _branchtags(self, partial, lrev):
319 def _branchtags(self, partial, lrev):
320 # TODO: rename this function?
320 # TODO: rename this function?
321 tiprev = len(self) - 1
321 tiprev = len(self) - 1
322 if lrev != tiprev:
322 if lrev != tiprev:
323 self._updatebranchcache(partial, lrev + 1, tiprev + 1)
323 self._updatebranchcache(partial, lrev + 1, tiprev + 1)
324 self._writebranchcache(partial, self.changelog.tip(), tiprev)
324 self._writebranchcache(partial, self.changelog.tip(), tiprev)
325
325
326 return partial
326 return partial
327
327
328 def branchmap(self):
328 def branchmap(self):
329 '''returns a dictionary {branch: [branchheads]}'''
329 '''returns a dictionary {branch: [branchheads]}'''
330 tip = self.changelog.tip()
330 tip = self.changelog.tip()
331 if self._branchcache is not None and self._branchcachetip == tip:
331 if self._branchcache is not None and self._branchcachetip == tip:
332 return self._branchcache
332 return self._branchcache
333
333
334 oldtip = self._branchcachetip
334 oldtip = self._branchcachetip
335 self._branchcachetip = tip
335 self._branchcachetip = tip
336 if oldtip is None or oldtip not in self.changelog.nodemap:
336 if oldtip is None or oldtip not in self.changelog.nodemap:
337 partial, last, lrev = self._readbranchcache()
337 partial, last, lrev = self._readbranchcache()
338 else:
338 else:
339 lrev = self.changelog.rev(oldtip)
339 lrev = self.changelog.rev(oldtip)
340 partial = self._branchcache
340 partial = self._branchcache
341
341
342 self._branchtags(partial, lrev)
342 self._branchtags(partial, lrev)
343 # this private cache holds all heads (not just tips)
343 # this private cache holds all heads (not just tips)
344 self._branchcache = partial
344 self._branchcache = partial
345
345
346 return self._branchcache
346 return self._branchcache
347
347
348 def branchtags(self):
348 def branchtags(self):
349 '''return a dict where branch names map to the tipmost head of
349 '''return a dict where branch names map to the tipmost head of
350 the branch, open heads come before closed'''
350 the branch, open heads come before closed'''
351 bt = {}
351 bt = {}
352 for bn, heads in self.branchmap().iteritems():
352 for bn, heads in self.branchmap().iteritems():
353 tip = heads[-1]
353 tip = heads[-1]
354 for h in reversed(heads):
354 for h in reversed(heads):
355 if 'close' not in self.changelog.read(h)[5]:
355 if 'close' not in self.changelog.read(h)[5]:
356 tip = h
356 tip = h
357 break
357 break
358 bt[bn] = tip
358 bt[bn] = tip
359 return bt
359 return bt
360
360
361
361
362 def _readbranchcache(self):
362 def _readbranchcache(self):
363 partial = {}
363 partial = {}
364 try:
364 try:
365 f = self.opener("branchheads.cache")
365 f = self.opener("branchheads.cache")
366 lines = f.read().split('\n')
366 lines = f.read().split('\n')
367 f.close()
367 f.close()
368 except (IOError, OSError):
368 except (IOError, OSError):
369 return {}, nullid, nullrev
369 return {}, nullid, nullrev
370
370
371 try:
371 try:
372 last, lrev = lines.pop(0).split(" ", 1)
372 last, lrev = lines.pop(0).split(" ", 1)
373 last, lrev = bin(last), int(lrev)
373 last, lrev = bin(last), int(lrev)
374 if lrev >= len(self) or self[lrev].node() != last:
374 if lrev >= len(self) or self[lrev].node() != last:
375 # invalidate the cache
375 # invalidate the cache
376 raise ValueError('invalidating branch cache (tip differs)')
376 raise ValueError('invalidating branch cache (tip differs)')
377 for l in lines:
377 for l in lines:
378 if not l:
378 if not l:
379 continue
379 continue
380 node, label = l.split(" ", 1)
380 node, label = l.split(" ", 1)
381 partial.setdefault(label.strip(), []).append(bin(node))
381 partial.setdefault(label.strip(), []).append(bin(node))
382 except KeyboardInterrupt:
382 except KeyboardInterrupt:
383 raise
383 raise
384 except Exception, inst:
384 except Exception, inst:
385 if self.ui.debugflag:
385 if self.ui.debugflag:
386 self.ui.warn(str(inst), '\n')
386 self.ui.warn(str(inst), '\n')
387 partial, last, lrev = {}, nullid, nullrev
387 partial, last, lrev = {}, nullid, nullrev
388 return partial, last, lrev
388 return partial, last, lrev
389
389
390 def _writebranchcache(self, branches, tip, tiprev):
390 def _writebranchcache(self, branches, tip, tiprev):
391 try:
391 try:
392 f = self.opener("branchheads.cache", "w", atomictemp=True)
392 f = self.opener("branchheads.cache", "w", atomictemp=True)
393 f.write("%s %s\n" % (hex(tip), tiprev))
393 f.write("%s %s\n" % (hex(tip), tiprev))
394 for label, nodes in branches.iteritems():
394 for label, nodes in branches.iteritems():
395 for node in nodes:
395 for node in nodes:
396 f.write("%s %s\n" % (hex(node), label))
396 f.write("%s %s\n" % (hex(node), label))
397 f.rename()
397 f.rename()
398 except (IOError, OSError):
398 except (IOError, OSError):
399 pass
399 pass
400
400
401 def _updatebranchcache(self, partial, start, end):
401 def _updatebranchcache(self, partial, start, end):
402 # collect new branch entries
402 # collect new branch entries
403 newbranches = {}
403 newbranches = {}
404 for r in xrange(start, end):
404 for r in xrange(start, end):
405 c = self[r]
405 c = self[r]
406 newbranches.setdefault(c.branch(), []).append(c.node())
406 newbranches.setdefault(c.branch(), []).append(c.node())
407 # if older branchheads are reachable from new ones, they aren't
407 # if older branchheads are reachable from new ones, they aren't
408 # really branchheads. Note checking parents is insufficient:
408 # really branchheads. Note checking parents is insufficient:
409 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
409 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
410 for branch, newnodes in newbranches.iteritems():
410 for branch, newnodes in newbranches.iteritems():
411 bheads = partial.setdefault(branch, [])
411 bheads = partial.setdefault(branch, [])
412 bheads.extend(newnodes)
412 bheads.extend(newnodes)
413 if len(bheads) < 2:
413 if len(bheads) < 2:
414 continue
414 continue
415 newbheads = []
415 newbheads = []
416 # starting from tip means fewer passes over reachable
416 # starting from tip means fewer passes over reachable
417 while newnodes:
417 while newnodes:
418 latest = newnodes.pop()
418 latest = newnodes.pop()
419 if latest not in bheads:
419 if latest not in bheads:
420 continue
420 continue
421 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
421 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
422 reachable = self.changelog.reachable(latest, minbhrev)
422 reachable = self.changelog.reachable(latest, minbhrev)
423 bheads = [b for b in bheads if b not in reachable]
423 bheads = [b for b in bheads if b not in reachable]
424 newbheads.insert(0, latest)
424 newbheads.insert(0, latest)
425 bheads.extend(newbheads)
425 bheads.extend(newbheads)
426 partial[branch] = bheads
426 partial[branch] = bheads
427
427
428 def lookup(self, key):
428 def lookup(self, key):
429 if isinstance(key, int):
429 if isinstance(key, int):
430 return self.changelog.node(key)
430 return self.changelog.node(key)
431 elif key == '.':
431 elif key == '.':
432 return self.dirstate.parents()[0]
432 return self.dirstate.parents()[0]
433 elif key == 'null':
433 elif key == 'null':
434 return nullid
434 return nullid
435 elif key == 'tip':
435 elif key == 'tip':
436 return self.changelog.tip()
436 return self.changelog.tip()
437 n = self.changelog._match(key)
437 n = self.changelog._match(key)
438 if n:
438 if n:
439 return n
439 return n
440 if key in self.tags():
440 if key in self.tags():
441 return self.tags()[key]
441 return self.tags()[key]
442 if key in self.branchtags():
442 if key in self.branchtags():
443 return self.branchtags()[key]
443 return self.branchtags()[key]
444 n = self.changelog._partialmatch(key)
444 n = self.changelog._partialmatch(key)
445 if n:
445 if n:
446 return n
446 return n
447
447
448 # can't find key, check if it might have come from damaged dirstate
448 # can't find key, check if it might have come from damaged dirstate
449 if key in self.dirstate.parents():
449 if key in self.dirstate.parents():
450 raise error.Abort(_("working directory has unknown parent '%s'!")
450 raise error.Abort(_("working directory has unknown parent '%s'!")
451 % short(key))
451 % short(key))
452 try:
452 try:
453 if len(key) == 20:
453 if len(key) == 20:
454 key = hex(key)
454 key = hex(key)
455 except:
455 except:
456 pass
456 pass
457 raise error.RepoLookupError(_("unknown revision '%s'") % key)
457 raise error.RepoLookupError(_("unknown revision '%s'") % key)
458
458
459 def local(self):
459 def local(self):
460 return True
460 return True
461
461
462 def join(self, f):
462 def join(self, f):
463 return os.path.join(self.path, f)
463 return os.path.join(self.path, f)
464
464
465 def wjoin(self, f):
465 def wjoin(self, f):
466 return os.path.join(self.root, f)
466 return os.path.join(self.root, f)
467
467
468 def rjoin(self, f):
468 def rjoin(self, f):
469 return os.path.join(self.root, util.pconvert(f))
469 return os.path.join(self.root, util.pconvert(f))
470
470
471 def file(self, f):
471 def file(self, f):
472 if f[0] == '/':
472 if f[0] == '/':
473 f = f[1:]
473 f = f[1:]
474 return filelog.filelog(self.sopener, f)
474 return filelog.filelog(self.sopener, f)
475
475
476 def changectx(self, changeid):
476 def changectx(self, changeid):
477 return self[changeid]
477 return self[changeid]
478
478
479 def parents(self, changeid=None):
479 def parents(self, changeid=None):
480 '''get list of changectxs for parents of changeid'''
480 '''get list of changectxs for parents of changeid'''
481 return self[changeid].parents()
481 return self[changeid].parents()
482
482
483 def filectx(self, path, changeid=None, fileid=None):
483 def filectx(self, path, changeid=None, fileid=None):
484 """changeid can be a changeset revision, node, or tag.
484 """changeid can be a changeset revision, node, or tag.
485 fileid can be a file revision or node."""
485 fileid can be a file revision or node."""
486 return context.filectx(self, path, changeid, fileid)
486 return context.filectx(self, path, changeid, fileid)
487
487
488 def getcwd(self):
488 def getcwd(self):
489 return self.dirstate.getcwd()
489 return self.dirstate.getcwd()
490
490
491 def pathto(self, f, cwd=None):
491 def pathto(self, f, cwd=None):
492 return self.dirstate.pathto(f, cwd)
492 return self.dirstate.pathto(f, cwd)
493
493
494 def wfile(self, f, mode='r'):
494 def wfile(self, f, mode='r'):
495 return self.wopener(f, mode)
495 return self.wopener(f, mode)
496
496
497 def _link(self, f):
497 def _link(self, f):
498 return os.path.islink(self.wjoin(f))
498 return os.path.islink(self.wjoin(f))
499
499
500 def _filter(self, filter, filename, data):
500 def _filter(self, filter, filename, data):
501 if filter not in self.filterpats:
501 if filter not in self.filterpats:
502 l = []
502 l = []
503 for pat, cmd in self.ui.configitems(filter):
503 for pat, cmd in self.ui.configitems(filter):
504 if cmd == '!':
504 if cmd == '!':
505 continue
505 continue
506 mf = match_.match(self.root, '', [pat])
506 mf = match_.match(self.root, '', [pat])
507 fn = None
507 fn = None
508 params = cmd
508 params = cmd
509 for name, filterfn in self._datafilters.iteritems():
509 for name, filterfn in self._datafilters.iteritems():
510 if cmd.startswith(name):
510 if cmd.startswith(name):
511 fn = filterfn
511 fn = filterfn
512 params = cmd[len(name):].lstrip()
512 params = cmd[len(name):].lstrip()
513 break
513 break
514 if not fn:
514 if not fn:
515 fn = lambda s, c, **kwargs: util.filter(s, c)
515 fn = lambda s, c, **kwargs: util.filter(s, c)
516 # Wrap old filters not supporting keyword arguments
516 # Wrap old filters not supporting keyword arguments
517 if not inspect.getargspec(fn)[2]:
517 if not inspect.getargspec(fn)[2]:
518 oldfn = fn
518 oldfn = fn
519 fn = lambda s, c, **kwargs: oldfn(s, c)
519 fn = lambda s, c, **kwargs: oldfn(s, c)
520 l.append((mf, fn, params))
520 l.append((mf, fn, params))
521 self.filterpats[filter] = l
521 self.filterpats[filter] = l
522
522
523 for mf, fn, cmd in self.filterpats[filter]:
523 for mf, fn, cmd in self.filterpats[filter]:
524 if mf(filename):
524 if mf(filename):
525 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
525 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
526 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
526 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
527 break
527 break
528
528
529 return data
529 return data
530
530
531 def adddatafilter(self, name, filter):
531 def adddatafilter(self, name, filter):
532 self._datafilters[name] = filter
532 self._datafilters[name] = filter
533
533
534 def wread(self, filename):
534 def wread(self, filename):
535 if self._link(filename):
535 if self._link(filename):
536 data = os.readlink(self.wjoin(filename))
536 data = os.readlink(self.wjoin(filename))
537 else:
537 else:
538 data = self.wopener(filename, 'r').read()
538 data = self.wopener(filename, 'r').read()
539 return self._filter("encode", filename, data)
539 return self._filter("encode", filename, data)
540
540
541 def wwrite(self, filename, data, flags):
541 def wwrite(self, filename, data, flags):
542 data = self._filter("decode", filename, data)
542 data = self._filter("decode", filename, data)
543 try:
543 try:
544 os.unlink(self.wjoin(filename))
544 os.unlink(self.wjoin(filename))
545 except OSError:
545 except OSError:
546 pass
546 pass
547 if 'l' in flags:
547 if 'l' in flags:
548 self.wopener.symlink(data, filename)
548 self.wopener.symlink(data, filename)
549 else:
549 else:
550 self.wopener(filename, 'w').write(data)
550 self.wopener(filename, 'w').write(data)
551 if 'x' in flags:
551 if 'x' in flags:
552 util.set_flags(self.wjoin(filename), False, True)
552 util.set_flags(self.wjoin(filename), False, True)
553
553
554 def wwritedata(self, filename, data):
554 def wwritedata(self, filename, data):
555 return self._filter("decode", filename, data)
555 return self._filter("decode", filename, data)
556
556
557 def transaction(self):
557 def transaction(self):
558 tr = self._transref and self._transref() or None
558 tr = self._transref and self._transref() or None
559 if tr and tr.running():
559 if tr and tr.running():
560 return tr.nest()
560 return tr.nest()
561
561
562 # abort here if the journal already exists
562 # abort here if the journal already exists
563 if os.path.exists(self.sjoin("journal")):
563 if os.path.exists(self.sjoin("journal")):
564 raise error.RepoError(
564 raise error.RepoError(
565 _("abandoned transaction found - run hg recover"))
565 _("abandoned transaction found - run hg recover"))
566
566
567 # save dirstate for rollback
567 # save dirstate for rollback
568 try:
568 try:
569 ds = self.opener("dirstate").read()
569 ds = self.opener("dirstate").read()
570 except IOError:
570 except IOError:
571 ds = ""
571 ds = ""
572 self.opener("journal.dirstate", "w").write(ds)
572 self.opener("journal.dirstate", "w").write(ds)
573 self.opener("journal.branch", "w").write(self.dirstate.branch())
573 self.opener("journal.branch", "w").write(self.dirstate.branch())
574
574
575 renames = [(self.sjoin("journal"), self.sjoin("undo")),
575 renames = [(self.sjoin("journal"), self.sjoin("undo")),
576 (self.join("journal.dirstate"), self.join("undo.dirstate")),
576 (self.join("journal.dirstate"), self.join("undo.dirstate")),
577 (self.join("journal.branch"), self.join("undo.branch"))]
577 (self.join("journal.branch"), self.join("undo.branch"))]
578 tr = transaction.transaction(self.ui.warn, self.sopener,
578 tr = transaction.transaction(self.ui.warn, self.sopener,
579 self.sjoin("journal"),
579 self.sjoin("journal"),
580 aftertrans(renames),
580 aftertrans(renames),
581 self.store.createmode)
581 self.store.createmode)
582 self._transref = weakref.ref(tr)
582 self._transref = weakref.ref(tr)
583 return tr
583 return tr
584
584
585 def recover(self):
585 def recover(self):
586 lock = self.lock()
586 lock = self.lock()
587 try:
587 try:
588 if os.path.exists(self.sjoin("journal")):
588 if os.path.exists(self.sjoin("journal")):
589 self.ui.status(_("rolling back interrupted transaction\n"))
589 self.ui.status(_("rolling back interrupted transaction\n"))
590 transaction.rollback(self.sopener, self.sjoin("journal"),
590 transaction.rollback(self.sopener, self.sjoin("journal"),
591 self.ui.warn)
591 self.ui.warn)
592 self.invalidate()
592 self.invalidate()
593 return True
593 return True
594 else:
594 else:
595 self.ui.warn(_("no interrupted transaction available\n"))
595 self.ui.warn(_("no interrupted transaction available\n"))
596 return False
596 return False
597 finally:
597 finally:
598 lock.release()
598 lock.release()
599
599
600 def rollback(self):
600 def rollback(self):
601 wlock = lock = None
601 wlock = lock = None
602 try:
602 try:
603 wlock = self.wlock()
603 wlock = self.wlock()
604 lock = self.lock()
604 lock = self.lock()
605 if os.path.exists(self.sjoin("undo")):
605 if os.path.exists(self.sjoin("undo")):
606 self.ui.status(_("rolling back last transaction\n"))
606 self.ui.status(_("rolling back last transaction\n"))
607 transaction.rollback(self.sopener, self.sjoin("undo"),
607 transaction.rollback(self.sopener, self.sjoin("undo"),
608 self.ui.warn)
608 self.ui.warn)
609 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
609 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
610 try:
610 try:
611 branch = self.opener("undo.branch").read()
611 branch = self.opener("undo.branch").read()
612 self.dirstate.setbranch(branch)
612 self.dirstate.setbranch(branch)
613 except IOError:
613 except IOError:
614 self.ui.warn(_("Named branch could not be reset, "
614 self.ui.warn(_("Named branch could not be reset, "
615 "current branch still is: %s\n")
615 "current branch still is: %s\n")
616 % encoding.tolocal(self.dirstate.branch()))
616 % encoding.tolocal(self.dirstate.branch()))
617 self.invalidate()
617 self.invalidate()
618 self.dirstate.invalidate()
618 self.dirstate.invalidate()
619 self.destroyed()
619 self.destroyed()
620 else:
620 else:
621 self.ui.warn(_("no rollback information available\n"))
621 self.ui.warn(_("no rollback information available\n"))
622 finally:
622 finally:
623 release(lock, wlock)
623 release(lock, wlock)
624
624
625 def invalidate(self):
625 def invalidatecaches(self):
626 for a in "changelog manifest".split():
627 if a in self.__dict__:
628 delattr(self, a)
629 self._tags = None
626 self._tags = None
630 self._tagtypes = None
627 self._tagtypes = None
631 self.nodetagscache = None
628 self.nodetagscache = None
632 self._branchcache = None # in UTF-8
629 self._branchcache = None # in UTF-8
633 self._branchcachetip = None
630 self._branchcachetip = None
634
631
632 def invalidate(self):
633 for a in "changelog manifest".split():
634 if a in self.__dict__:
635 delattr(self, a)
636 self.invalidatecaches()
637
635 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
638 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
636 try:
639 try:
637 l = lock.lock(lockname, 0, releasefn, desc=desc)
640 l = lock.lock(lockname, 0, releasefn, desc=desc)
638 except error.LockHeld, inst:
641 except error.LockHeld, inst:
639 if not wait:
642 if not wait:
640 raise
643 raise
641 self.ui.warn(_("waiting for lock on %s held by %r\n") %
644 self.ui.warn(_("waiting for lock on %s held by %r\n") %
642 (desc, inst.locker))
645 (desc, inst.locker))
643 # default to 600 seconds timeout
646 # default to 600 seconds timeout
644 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
647 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
645 releasefn, desc=desc)
648 releasefn, desc=desc)
646 if acquirefn:
649 if acquirefn:
647 acquirefn()
650 acquirefn()
648 return l
651 return l
649
652
650 def lock(self, wait=True):
653 def lock(self, wait=True):
651 '''Lock the repository store (.hg/store) and return a weak reference
654 '''Lock the repository store (.hg/store) and return a weak reference
652 to the lock. Use this before modifying the store (e.g. committing or
655 to the lock. Use this before modifying the store (e.g. committing or
653 stripping). If you are opening a transaction, get a lock as well.)'''
656 stripping). If you are opening a transaction, get a lock as well.)'''
654 l = self._lockref and self._lockref()
657 l = self._lockref and self._lockref()
655 if l is not None and l.held:
658 if l is not None and l.held:
656 l.lock()
659 l.lock()
657 return l
660 return l
658
661
659 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
662 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
660 _('repository %s') % self.origroot)
663 _('repository %s') % self.origroot)
661 self._lockref = weakref.ref(l)
664 self._lockref = weakref.ref(l)
662 return l
665 return l
663
666
664 def wlock(self, wait=True):
667 def wlock(self, wait=True):
665 '''Lock the non-store parts of the repository (everything under
668 '''Lock the non-store parts of the repository (everything under
666 .hg except .hg/store) and return a weak reference to the lock.
669 .hg except .hg/store) and return a weak reference to the lock.
667 Use this before modifying files in .hg.'''
670 Use this before modifying files in .hg.'''
668 l = self._wlockref and self._wlockref()
671 l = self._wlockref and self._wlockref()
669 if l is not None and l.held:
672 if l is not None and l.held:
670 l.lock()
673 l.lock()
671 return l
674 return l
672
675
673 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
676 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
674 self.dirstate.invalidate, _('working directory of %s') %
677 self.dirstate.invalidate, _('working directory of %s') %
675 self.origroot)
678 self.origroot)
676 self._wlockref = weakref.ref(l)
679 self._wlockref = weakref.ref(l)
677 return l
680 return l
678
681
679 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
682 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
680 """
683 """
681 commit an individual file as part of a larger transaction
684 commit an individual file as part of a larger transaction
682 """
685 """
683
686
684 fname = fctx.path()
687 fname = fctx.path()
685 text = fctx.data()
688 text = fctx.data()
686 flog = self.file(fname)
689 flog = self.file(fname)
687 fparent1 = manifest1.get(fname, nullid)
690 fparent1 = manifest1.get(fname, nullid)
688 fparent2 = fparent2o = manifest2.get(fname, nullid)
691 fparent2 = fparent2o = manifest2.get(fname, nullid)
689
692
690 meta = {}
693 meta = {}
691 copy = fctx.renamed()
694 copy = fctx.renamed()
692 if copy and copy[0] != fname:
695 if copy and copy[0] != fname:
693 # Mark the new revision of this file as a copy of another
696 # Mark the new revision of this file as a copy of another
694 # file. This copy data will effectively act as a parent
697 # file. This copy data will effectively act as a parent
695 # of this new revision. If this is a merge, the first
698 # of this new revision. If this is a merge, the first
696 # parent will be the nullid (meaning "look up the copy data")
699 # parent will be the nullid (meaning "look up the copy data")
697 # and the second one will be the other parent. For example:
700 # and the second one will be the other parent. For example:
698 #
701 #
699 # 0 --- 1 --- 3 rev1 changes file foo
702 # 0 --- 1 --- 3 rev1 changes file foo
700 # \ / rev2 renames foo to bar and changes it
703 # \ / rev2 renames foo to bar and changes it
701 # \- 2 -/ rev3 should have bar with all changes and
704 # \- 2 -/ rev3 should have bar with all changes and
702 # should record that bar descends from
705 # should record that bar descends from
703 # bar in rev2 and foo in rev1
706 # bar in rev2 and foo in rev1
704 #
707 #
705 # this allows this merge to succeed:
708 # this allows this merge to succeed:
706 #
709 #
707 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
710 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
708 # \ / merging rev3 and rev4 should use bar@rev2
711 # \ / merging rev3 and rev4 should use bar@rev2
709 # \- 2 --- 4 as the merge base
712 # \- 2 --- 4 as the merge base
710 #
713 #
711
714
712 cfname = copy[0]
715 cfname = copy[0]
713 crev = manifest1.get(cfname)
716 crev = manifest1.get(cfname)
714 newfparent = fparent2
717 newfparent = fparent2
715
718
716 if manifest2: # branch merge
719 if manifest2: # branch merge
717 if fparent2 == nullid or crev is None: # copied on remote side
720 if fparent2 == nullid or crev is None: # copied on remote side
718 if cfname in manifest2:
721 if cfname in manifest2:
719 crev = manifest2[cfname]
722 crev = manifest2[cfname]
720 newfparent = fparent1
723 newfparent = fparent1
721
724
722 # find source in nearest ancestor if we've lost track
725 # find source in nearest ancestor if we've lost track
723 if not crev:
726 if not crev:
724 self.ui.debug(" %s: searching for copy revision for %s\n" %
727 self.ui.debug(" %s: searching for copy revision for %s\n" %
725 (fname, cfname))
728 (fname, cfname))
726 for ancestor in self['.'].ancestors():
729 for ancestor in self['.'].ancestors():
727 if cfname in ancestor:
730 if cfname in ancestor:
728 crev = ancestor[cfname].filenode()
731 crev = ancestor[cfname].filenode()
729 break
732 break
730
733
731 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
734 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
732 meta["copy"] = cfname
735 meta["copy"] = cfname
733 meta["copyrev"] = hex(crev)
736 meta["copyrev"] = hex(crev)
734 fparent1, fparent2 = nullid, newfparent
737 fparent1, fparent2 = nullid, newfparent
735 elif fparent2 != nullid:
738 elif fparent2 != nullid:
736 # is one parent an ancestor of the other?
739 # is one parent an ancestor of the other?
737 fparentancestor = flog.ancestor(fparent1, fparent2)
740 fparentancestor = flog.ancestor(fparent1, fparent2)
738 if fparentancestor == fparent1:
741 if fparentancestor == fparent1:
739 fparent1, fparent2 = fparent2, nullid
742 fparent1, fparent2 = fparent2, nullid
740 elif fparentancestor == fparent2:
743 elif fparentancestor == fparent2:
741 fparent2 = nullid
744 fparent2 = nullid
742
745
743 # is the file changed?
746 # is the file changed?
744 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
747 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
745 changelist.append(fname)
748 changelist.append(fname)
746 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
749 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
747
750
748 # are just the flags changed during merge?
751 # are just the flags changed during merge?
749 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
752 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
750 changelist.append(fname)
753 changelist.append(fname)
751
754
752 return fparent1
755 return fparent1
753
756
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = match_.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            p1, p2 = self.dirstate.parents()
            wctx = self[None]

            if (not force and p2 != nullid and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs) and '.hgsubstate' not in changes[0]:
                changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and p2 == nullid
                and not (changes[0] or changes[1] or changes[2])
                and self[None].branch() == self['.'].branch()):
                return None

            ms = merge_.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in subs:
                    self.ui.status(_('committing subrepository %s\n') % s)
                    sr = wctx.sub(s).commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            try:
                hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

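    # A minimal usage sketch for commit() (illustrative only; assumes an
    # existing repository with a dirty working copy -- the path, message,
    # and user below are hypothetical):
    #
    #   from mercurial import hg, ui as uimod
    #   repo = hg.repository(uimod.ui(), '/path/to/repo')
    #   node = repo.commit(text='fix a bug', user='Jane <jane@example.com>')
    #   # node is the new changeset id, or None if there was nothing to commit
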
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.branchtags()
            return n
        finally:
            del tr
            lock.release()

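    # commitctx() can also be driven without a working directory, via an
    # in-memory context. A hedged sketch, assuming the context.memctx and
    # context.memfilectx signatures of this codebase (file name, contents,
    # and message are hypothetical):
    #
    #   from mercurial import context
    #   def getfile(repo, memctx, path):
    #       return context.memfilectx(path, 'contents\n', False, False, None)
    #   mctx = context.memctx(repo, (repo['.'].node(), None), 'message',
    #                         ['a.txt'], getfile, user='test')
    #   node = repo.commitctx(mctx)
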
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

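    # destroyed() is the common hook point after history removal. A sketch
    # of the expected flow (assumes the repair module's strip() helper and
    # a node already known to the caller):
    #
    #   from mercurial import repair
    #   repair.strip(repo.ui, repo, node)  # strip ends by calling destroyed()
    #   repo.branchmap()                   # caches are rebuilt on next access
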
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r

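    # The status() return value is a 7-tuple of sorted file lists; the
    # ignored, clean, and unknown lists stay empty unless requested via the
    # matching parameters above. A usage sketch:
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(unknown=True, ignored=True, clean=True)
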
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                   % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(p)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            wlock.release()

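    # add() returns the subset of names it refused to schedule. A sketch
    # (file names are hypothetical):
    #
    #   rejected = repo.add(['newfile.txt', 'missing.txt'])
    #   # 'missing.txt' comes back in rejected if it does not exist on disk
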
    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            wlock.release()

    def remove(self, list, unlink=False):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        wlock = self.wlock()
        try:
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            wlock.release()

    def undelete(self, list):
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            try:
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in sorted(heads)]

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

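    # A usage sketch for branchheads() (the branch name is hypothetical):
    #
    #   heads = repo.branchheads('default', closed=False)
    #   # topologically newest head first, closed heads filtered out
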
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

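    # between() samples each top->bottom chain at exponentially growing
    # distances from the top (1, 2, 4, 8, ...), which is what lets the
    # discovery code in findcommonincoming() below narrow long runs of
    # history with only a handful of round trips. For example, on a chain
    # ten changesets deep, a single query reports the nodes at distances
    # 1, 2, 4, and 8 rather than all ten.
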
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist in
        self and remote but whose children do not exist in both self and
        remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]

    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist in
        self and remote but whose children do not exist in both self and
        remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = set()
        seen = set()
        seenbranch = set()
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            return base.keys(), [], []

        req = set(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug("examining %s:%s\n"
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug("branch already found\n")
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug("found incomplete branch %s:%s\n"
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug("found new changeset %s\n" %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                reqcnt += 1
                self.ui.progress(_('searching'), reqcnt, unit=_('queries'))
                self.ui.debug("request %d: %s\n" %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p + 10]):
                        self.ui.debug("received %s:%s\n" %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            self.ui.progress(_('searching'), reqcnt, unit=_('queries'))
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug("found new branch changeset %s\n" %
                                          short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            self.ui.debug("narrowed branch search to %s:%s\n"
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug("found new changesets starting at " +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.progress(_('searching'), None, unit=_('queries'))
        self.ui.debug("%d total queries\n" % reqcnt)

        return base.keys(), list(fetch), heads

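    # A usage sketch for findcommonincoming() (the remote URL is
    # hypothetical; any repository/peer object exposing heads(), branches(),
    # and between() works):
    #
    #   other = hg.repository(repo.ui, 'http://hg.example.com/repo')
    #   common, fetch, rheads = repo.findcommonincoming(other)
    #   # fetch holds the roots of the changesets this repo is missing
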
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug("common changesets up to "
                      + " ".join(map(short, base.keys())) + "\n")

        remain = set(self.changelog.nodemap)

        # prune everything remote has from the tree
        remain.remove(nullid)
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                remain.remove(n)
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = set()
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads.add(p1)
                if p2 in heads:
                    updated_heads.add(p2)

        # this is the set of all roots we have to push
        if heads:
            return subset, list(updated_heads)
        else:
            return subset

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            lock.release()

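    # A usage sketch for pull() (the remote path is hypothetical):
    #
    #   other = hg.repository(repo.ui, 'http://hg.example.com/repo')
    #   repo.pull(other)             # pull everything missing
    #   repo.pull(other, heads=[h])  # only ancestors of h, where supported
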
    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        '''Analyze the local and remote repositories and determine which
        changesets need to be pushed to the remote. Return a tuple
        (changegroup, remoteheads). changegroup is a readable file-like
        object whose read() returns successive changegroup chunks ready to
        be sent over the wire. remoteheads is the list of remote heads.
        '''
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)

        def checkbranch(lheads, rheads, updatelb, branchname=None):
            '''
            check whether there are more local heads than remote heads on
            a specific branch.

            lheads: local branch heads
            rheads: remote branch heads
            updatelb: outgoing local branch bases
            '''

            warn = 0

            if not revs and len(lheads) > len(rheads):
                warn = 1
            else:
                # add local heads involved in the push
                updatelheads = [self.changelog.heads(x, lheads)
                                for x in updatelb]
                newheads = set(sum(updatelheads, [])) & set(lheads)

                if not newheads:
                    return True

                # add heads we don't have or that are not involved in the push
                for r in rheads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.add(r)
                    else:
                        newheads.add(r)
                if len(newheads) > len(rheads):
                    warn = 1

            if warn:
                if branchname is not None:
                    msg = _("abort: push creates new remote heads"
                            " on branch '%s'!\n") % branchname
                else:
                    msg = _("abort: push creates new remote heads!\n")
                self.ui.warn(msg)
                if len(lheads) > len(rheads):
                    self.ui.status(_("(did you forget to merge?"
                                     " use push -f to force)\n"))
                else:
                    self.ui.status(_("(you should pull and merge or"
                                     " use push -f to force)\n"))
                return False
            return True

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # Check for each named branch if we're creating new remote heads.
            # To be a remote head after push, node must be either:
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            #
            # New named branches cannot be created without --force.

            if remote_heads != [nullid]:
                if remote.capable('branchmap'):
                    remotebrheads = remote.branchmap()

                    if not revs:
                        localbrheads = self.branchmap()
                    else:
                        localbrheads = {}
                        for n in heads:
                            branch = self[n].branch()
                            localbrheads.setdefault(branch, []).append(n)

                    newbranches = list(set(localbrheads) - set(remotebrheads))
                    if newbranches: # new branch requires --force
                        branchnames = ', '.join("%s" % b for b in newbranches)
                        self.ui.warn(_("abort: push creates "
                                       "new remote branches: %s!\n")
                                     % branchnames)
                        # propose 'push -b .' in the msg too?
                        self.ui.status(_("(use 'hg push -f' to force)\n"))
                        return None, 0
                    for branch, lheads in localbrheads.iteritems():
                        if branch in remotebrheads:
                            rheads = remotebrheads[branch]
                            if not checkbranch(lheads, rheads, update, branch):
                                return None, 0
                else:
                    if not checkbranch(heads, remote_heads, update):
                        return None, 0

            if inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            # use the fast path, no race possible on push
            nodes = self.changelog.findmissing(common.keys())
            cg = self._changegroup(nodes, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

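    # prepush() either yields a changegroup to send or an early-exit status.
    # A sketch of how the push_* methods below consume its return value:
    #
    #   ret = repo.prepush(other, force=False, revs=None)
    #   if ret[0] is not None:
    #       cg, remote_heads = ret   # stream cg to the remote
    #   else:
    #       status = ret[1]          # 1: nothing to push, 0: push aborted
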
    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            lock.release()

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

1641 def changegroupsubset(self, bases, heads, source, extranodes=None):
1644 def changegroupsubset(self, bases, heads, source, extranodes=None):
1642 """Compute a changegroup consisting of all the nodes that are
1645 """Compute a changegroup consisting of all the nodes that are
1643 descendents of any of the bases and ancestors of any of the heads.
1646 descendents of any of the bases and ancestors of any of the heads.
1644 Return a chunkbuffer object whose read() method will return
1647 Return a chunkbuffer object whose read() method will return
1645 successive changegroup chunks.
1648 successive changegroup chunks.
1646
1649
1647 It is fairly complex as determining which filenodes and which
1650 It is fairly complex as determining which filenodes and which
1648 manifest nodes need to be included for the changeset to be complete
1651 manifest nodes need to be included for the changeset to be complete
1649 is non-trivial.
1652 is non-trivial.
1650
1653
1651 Another wrinkle is doing the reverse, figuring out which changeset in
1654 Another wrinkle is doing the reverse, figuring out which changeset in
1652 the changegroup a particular filenode or manifestnode belongs to.
1655 the changegroup a particular filenode or manifestnode belongs to.
1653
1656
1654 The caller can specify some nodes that must be included in the
1657 The caller can specify some nodes that must be included in the
1655 changegroup using the extranodes argument. It should be a dict
1658 changegroup using the extranodes argument. It should be a dict
1656 where the keys are the filenames (or 1 for the manifest), and the
1659 where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        if not bases:
            bases = [nullid]
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)

        if extranodes is None:
            # can we go through the fast path?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                return self._changegroup(msng_cl_lst, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)

        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = set()
        # We assume that all parents of bases are known heads.
        for n in bases:
            knownheads.update(cl.parents(n))
        knownheads.discard(nullid)
        knownheads = list(knownheads)
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into a set.
            has_cl_set = set(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = set()

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
                msngset.pop(revlog.node(r), None)
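
        # A small sketch of what the pruning buys us (hypothetical node
        # names, not part of this module): once a node is known to be on
        # the recipient's side, every strict ancestor of it can be dropped
        # from the "missing" dict as well.
        #
        #   hasset = set([known_node])
        #   prune_parents(filerevlog, hasset, msngset)
        #   # every strict ancestor of known_node is now gone from msngset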

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function-generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # of the first manifest that references it.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r - 1 in mnfst.parentrevs(r):
                    # If the previous rev is one of the parents,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
            return collect_msng_filenodes
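
        # filenode_collector() is a closure factory: calling it binds one
        # particular changedfiles dict and hands back a collector that the
        # revlog group generator invokes once per manifest node, filling
        # msng_filenode_set as a side effect. This is the call shape used
        # further down in gengroup():
        #
        #   group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
        #                       filenode_collector(changedfiles))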

        # We have a list of filenodes we think we need for a file; let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
                if clnode in has_cl_set:
                    hasset.add(n)
            prune_parents(filerevlog, hasset, msngset)

        # A function-generating function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Look up the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link
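
        # The linkrev round trip used in prune_filenodes() maps a filenode
        # back to the changeset that introduced it. A step-by-step sketch
        # (hypothetical 'foo' filelog; the names are illustrative only):
        #
        #   fl = self.file('foo')     # revlog holding revisions of 'foo'
        #   r = fl.rev(n)             # filelog revision for filenode n
        #   clrev = fl.linkrev(r)     # changelog revision it links to
        #   clnode = cl.node(clrev)   # binary changelog node for that rev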

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode
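
        # As the code in gengroup() below shows, extranodes is keyed by
        # revlog name, with the integer 1 standing in for the manifest.
        # A hypothetical example (node values are illustrative):
        #
        #   extranodes = {
        #       1: [(mnode, clnode)],        # extra manifest nodes
        #       'foo': [(fnode, clnode)],    # extra filenodes for 'foo'
        #   }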

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)

            # Create a changenode group generator that will call our functions
            # back to look up the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity, collect)
            cnt = 0
            for chnk in group:
                yield chnk
                self.ui.progress(_('bundle changes'), cnt, unit=_('chunks'))
                cnt += 1
            self.ui.progress(_('bundle changes'), None, unit=_('chunks'))

            # Figure out which manifest nodes (of the ones we think might be
            # part of the changegroup) the recipient must know about and
            # remove them from the changegroup.
            has_mnfst_set = set()
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
                if linknode in has_cl_set:
                    has_mnfst_set.add(n)
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            cnt = 0
            for chnk in group:
                yield chnk
                self.ui.progress(_('bundle manifests'), cnt, unit=_('chunks'))
                cnt += 1
            self.ui.progress(_('bundle manifests'), None, unit=_('chunks'))

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        self.ui.progress(
                            _('bundle files'), cnt, item=fname, unit=_('chunks'))
                        cnt += 1
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()
            self.ui.progress(_('bundle files'), None, unit=_('chunks'))

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
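
    # The chunkbuffer returned above behaves like a file: callers stream
    # the group out with read(). A minimal consumption sketch (the
    # caller-side names here are hypothetical):
    #
    #   cg = repo.changegroupsubset(bases, repo.heads(), 'pull')
    #   while True:
    #       data = cg.read(4096)
    #       if not data:
    #           break
    #       out.write(data)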

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)
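
        # gennodelst() filters a revlog down to the nodes whose linkrev
        # falls inside the set of changesets being sent; an equivalent
        # one-liner, for illustration only:
        #
        #   wanted = [log.node(r) for r in log if log.linkrev(r) in revset]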

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = {}
            mmfs = {}
            collect = changegroup.collector(cl, mmfs, changedfiles)

            cnt = 0
            for chnk in cl.group(nodes, identity, collect):
                self.ui.progress(_('bundle changes'), cnt, unit=_('chunks'))
                cnt += 1
                yield chnk
            self.ui.progress(_('bundle changes'), None, unit=_('chunks'))

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            cnt = 0
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                self.ui.progress(_('bundle manifests'), cnt, unit=_('chunks'))
                cnt += 1
                yield chnk
            self.ui.progress(_('bundle manifests'), None, unit=_('chunks'))

            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        self.ui.progress(
                            _('bundle files'), cnt, item=fname, unit=_('chunks'))
                        cnt += 1
                        yield chnk
            self.ui.progress(_('bundle files'), None, unit=_('chunks'))

            yield changegroup.closechunk()

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
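        # Worked examples of the return values above: going from 1 head to
        # 3 heads returns 1 + 2 = 3; an unchanged head count returns 1;
        # dropping from 3 heads to 1 returns -1 - 2 = -3 (see the final
        # newheads/oldheads computation at the end of this method).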
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'))
                    self.count += 1
            pr = prog()
            chunkiter = changegroup.chunkiter(source, progress=pr)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            chunkiter = changegroup.chunkiter(source, progress=pr)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

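            # Shape of needfiles at this point (hypothetical values): a map
            # from file name to the set of filenodes the incoming changesets
            # claim to touch, e.g. {'foo': set([n1, n2]), 'bar': set([n3])}.
            # Each revision received below is checked off, and anything left
            # over triggers an abort once all file groups are in.
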
            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = 'files'
            pr.count = 1
            while True:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug("adding %s revisions\n" % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source, progress=pr)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.branchtags()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1

    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1
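
    # Wire framing parsed by stream_in() above, reconstructed from the
    # reads it performs (illustrative, not a normative spec): one status
    # code line, one "<total_files> <total_bytes>" line, then per file a
    # "<name>\0<size>" header followed by exactly <size> raw bytes, e.g.
    #
    #   0
    #   2 8192
    #   data/foo.i<NUL>8000<8000 raw bytes>...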

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)
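
    # Illustrative call patterns (hypothetical caller code): a streaming
    # clone is only attempted when no specific heads were requested and
    # the server advertises the 'stream' capability; otherwise we pull.
    #
    #   repo.clone(remote, stream=True)       # stream if server allows it
    #   repo.clone(remote, heads=[h1, h2])    # partial clone, forces pull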

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
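
# A sketch of the aftertrans() pattern (the file pair below is
# hypothetical): the returned closure captures plain tuples only, so it
# holds no reference back to the transaction and destructors can run.
#
#   undo = aftertrans([('journal', 'undo')])
#   undo()   # renames journal -> undo once the transaction is done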

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True