##// END OF EJS Templates
discovery: simplify branchmap construction against legacy server...
Pierre-Yves David -
r17056:30853f4b default
parent child Browse files
Show More
@@ -1,268 +1,266 b''
1 # discovery.py - protocol changeset discovery functions
1 # discovery.py - protocol changeset discovery functions
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid, short
8 from node import nullid, short
9 from i18n import _
9 from i18n import _
10 import util, setdiscovery, treediscovery, phases
10 import util, setdiscovery, treediscovery, phases
11
11
12 def findcommonincoming(repo, remote, heads=None, force=False):
12 def findcommonincoming(repo, remote, heads=None, force=False):
13 """Return a tuple (common, anyincoming, heads) used to identify the common
13 """Return a tuple (common, anyincoming, heads) used to identify the common
14 subset of nodes between repo and remote.
14 subset of nodes between repo and remote.
15
15
16 "common" is a list of (at least) the heads of the common subset.
16 "common" is a list of (at least) the heads of the common subset.
17 "anyincoming" is testable as a boolean indicating if any nodes are missing
17 "anyincoming" is testable as a boolean indicating if any nodes are missing
18 locally. If remote does not support getbundle, this actually is a list of
18 locally. If remote does not support getbundle, this actually is a list of
19 roots of the nodes that would be incoming, to be supplied to
19 roots of the nodes that would be incoming, to be supplied to
20 changegroupsubset. No code except for pull should be relying on this fact
20 changegroupsubset. No code except for pull should be relying on this fact
21 any longer.
21 any longer.
22 "heads" is either the supplied heads, or else the remote's heads.
22 "heads" is either the supplied heads, or else the remote's heads.
23
23
24 If you pass heads and they are all known locally, the response lists just
24 If you pass heads and they are all known locally, the response lists just
25 these heads in "common" and in "heads".
25 these heads in "common" and in "heads".
26
26
27 Please use findcommonoutgoing to compute the set of outgoing nodes to give
27 Please use findcommonoutgoing to compute the set of outgoing nodes to give
28 extensions a good hook into outgoing.
28 extensions a good hook into outgoing.
29 """
29 """
30
30
31 if not remote.capable('getbundle'):
31 if not remote.capable('getbundle'):
32 return treediscovery.findcommonincoming(repo, remote, heads, force)
32 return treediscovery.findcommonincoming(repo, remote, heads, force)
33
33
34 if heads:
34 if heads:
35 allknown = True
35 allknown = True
36 nm = repo.changelog.nodemap
36 nm = repo.changelog.nodemap
37 for h in heads:
37 for h in heads:
38 if nm.get(h) is None:
38 if nm.get(h) is None:
39 allknown = False
39 allknown = False
40 break
40 break
41 if allknown:
41 if allknown:
42 return (heads, False, heads)
42 return (heads, False, heads)
43
43
44 res = setdiscovery.findcommonheads(repo.ui, repo, remote,
44 res = setdiscovery.findcommonheads(repo.ui, repo, remote,
45 abortwhenunrelated=not force)
45 abortwhenunrelated=not force)
46 common, anyinc, srvheads = res
46 common, anyinc, srvheads = res
47 return (list(common), anyinc, heads or list(srvheads))
47 return (list(common), anyinc, heads or list(srvheads))
48
48
49 class outgoing(object):
49 class outgoing(object):
50 '''Represents the set of nodes present in a local repo but not in a
50 '''Represents the set of nodes present in a local repo but not in a
51 (possibly) remote one.
51 (possibly) remote one.
52
52
53 Members:
53 Members:
54
54
55 missing is a list of all nodes present in local but not in remote.
55 missing is a list of all nodes present in local but not in remote.
56 common is a list of all nodes shared between the two repos.
56 common is a list of all nodes shared between the two repos.
57 excluded is the list of missing changeset that shouldn't be sent remotely.
57 excluded is the list of missing changeset that shouldn't be sent remotely.
58 missingheads is the list of heads of missing.
58 missingheads is the list of heads of missing.
59 commonheads is the list of heads of common.
59 commonheads is the list of heads of common.
60
60
61 The sets are computed on demand from the heads, unless provided upfront
61 The sets are computed on demand from the heads, unless provided upfront
62 by discovery.'''
62 by discovery.'''
63
63
64 def __init__(self, revlog, commonheads, missingheads):
64 def __init__(self, revlog, commonheads, missingheads):
65 self.commonheads = commonheads
65 self.commonheads = commonheads
66 self.missingheads = missingheads
66 self.missingheads = missingheads
67 self._revlog = revlog
67 self._revlog = revlog
68 self._common = None
68 self._common = None
69 self._missing = None
69 self._missing = None
70 self.excluded = []
70 self.excluded = []
71
71
72 def _computecommonmissing(self):
72 def _computecommonmissing(self):
73 sets = self._revlog.findcommonmissing(self.commonheads,
73 sets = self._revlog.findcommonmissing(self.commonheads,
74 self.missingheads)
74 self.missingheads)
75 self._common, self._missing = sets
75 self._common, self._missing = sets
76
76
77 @util.propertycache
77 @util.propertycache
78 def common(self):
78 def common(self):
79 if self._common is None:
79 if self._common is None:
80 self._computecommonmissing()
80 self._computecommonmissing()
81 return self._common
81 return self._common
82
82
83 @util.propertycache
83 @util.propertycache
84 def missing(self):
84 def missing(self):
85 if self._missing is None:
85 if self._missing is None:
86 self._computecommonmissing()
86 self._computecommonmissing()
87 return self._missing
87 return self._missing
88
88
89 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
89 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
90 commoninc=None, portable=False):
90 commoninc=None, portable=False):
91 '''Return an outgoing instance to identify the nodes present in repo but
91 '''Return an outgoing instance to identify the nodes present in repo but
92 not in other.
92 not in other.
93
93
94 If onlyheads is given, only nodes ancestral to nodes in onlyheads
94 If onlyheads is given, only nodes ancestral to nodes in onlyheads
95 (inclusive) are included. If you already know the local repo's heads,
95 (inclusive) are included. If you already know the local repo's heads,
96 passing them in onlyheads is faster than letting them be recomputed here.
96 passing them in onlyheads is faster than letting them be recomputed here.
97
97
98 If commoninc is given, it must be the result of a prior call to
98 If commoninc is given, it must be the result of a prior call to
99 findcommonincoming(repo, other, force) to avoid recomputing it here.
99 findcommonincoming(repo, other, force) to avoid recomputing it here.
100
100
101 If portable is given, compute more conservative common and missingheads,
101 If portable is given, compute more conservative common and missingheads,
102 to make bundles created from the instance more portable.'''
102 to make bundles created from the instance more portable.'''
103 # declare an empty outgoing object to be filled later
103 # declare an empty outgoing object to be filled later
104 og = outgoing(repo.changelog, None, None)
104 og = outgoing(repo.changelog, None, None)
105
105
106 # get common set if not provided
106 # get common set if not provided
107 if commoninc is None:
107 if commoninc is None:
108 commoninc = findcommonincoming(repo, other, force=force)
108 commoninc = findcommonincoming(repo, other, force=force)
109 og.commonheads, _any, _hds = commoninc
109 og.commonheads, _any, _hds = commoninc
110
110
111 # compute outgoing
111 # compute outgoing
112 if not repo._phasecache.phaseroots[phases.secret]:
112 if not repo._phasecache.phaseroots[phases.secret]:
113 og.missingheads = onlyheads or repo.heads()
113 og.missingheads = onlyheads or repo.heads()
114 elif onlyheads is None:
114 elif onlyheads is None:
115 # use visible heads as it should be cached
115 # use visible heads as it should be cached
116 og.missingheads = phases.visibleheads(repo)
116 og.missingheads = phases.visibleheads(repo)
117 og.excluded = [ctx.node() for ctx in repo.set('secret()')]
117 og.excluded = [ctx.node() for ctx in repo.set('secret()')]
118 else:
118 else:
119 # compute common, missing and exclude secret stuff
119 # compute common, missing and exclude secret stuff
120 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
120 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
121 og._common, allmissing = sets
121 og._common, allmissing = sets
122 og._missing = missing = []
122 og._missing = missing = []
123 og.excluded = excluded = []
123 og.excluded = excluded = []
124 for node in allmissing:
124 for node in allmissing:
125 if repo[node].phase() >= phases.secret:
125 if repo[node].phase() >= phases.secret:
126 excluded.append(node)
126 excluded.append(node)
127 else:
127 else:
128 missing.append(node)
128 missing.append(node)
129 if excluded:
129 if excluded:
130 # update missing heads
130 # update missing heads
131 missingheads = phases.newheads(repo, onlyheads, excluded)
131 missingheads = phases.newheads(repo, onlyheads, excluded)
132 else:
132 else:
133 missingheads = onlyheads
133 missingheads = onlyheads
134 og.missingheads = missingheads
134 og.missingheads = missingheads
135
135
136 if portable:
136 if portable:
137 # recompute common and missingheads as if -r<rev> had been given for
137 # recompute common and missingheads as if -r<rev> had been given for
138 # each head of missing, and --base <rev> for each head of the proper
138 # each head of missing, and --base <rev> for each head of the proper
139 # ancestors of missing
139 # ancestors of missing
140 og._computecommonmissing()
140 og._computecommonmissing()
141 cl = repo.changelog
141 cl = repo.changelog
142 missingrevs = set(cl.rev(n) for n in og._missing)
142 missingrevs = set(cl.rev(n) for n in og._missing)
143 og._common = set(cl.ancestors(missingrevs)) - missingrevs
143 og._common = set(cl.ancestors(missingrevs)) - missingrevs
144 commonheads = set(og.commonheads)
144 commonheads = set(og.commonheads)
145 og.missingheads = [h for h in og.missingheads if h not in commonheads]
145 og.missingheads = [h for h in og.missingheads if h not in commonheads]
146
146
147 return og
147 return og
148
148
149 def checkheads(repo, remote, outgoing, remoteheads, newbranch=False, inc=False):
149 def checkheads(repo, remote, outgoing, remoteheads, newbranch=False, inc=False):
150 """Check that a push won't add any outgoing head
150 """Check that a push won't add any outgoing head
151
151
152 raise Abort error and display ui message as needed.
152 raise Abort error and display ui message as needed.
153 """
153 """
154 if remoteheads == [nullid]:
154 if remoteheads == [nullid]:
155 # remote is empty, nothing to check.
155 # remote is empty, nothing to check.
156 return
156 return
157
157
158 cl = repo.changelog
158 cl = repo.changelog
159 if remote.capable('branchmap'):
159 if remote.capable('branchmap'):
160 # Check for each named branch if we're creating new remote heads.
160 # Check for each named branch if we're creating new remote heads.
161 # To be a remote head after push, node must be either:
161 # To be a remote head after push, node must be either:
162 # - unknown locally
162 # - unknown locally
163 # - a local outgoing head descended from update
163 # - a local outgoing head descended from update
164 # - a remote head that's known locally and not
164 # - a remote head that's known locally and not
165 # ancestral to an outgoing head
165 # ancestral to an outgoing head
166
166
167 # 1. Create set of branches involved in the push.
167 # 1. Create set of branches involved in the push.
168 branches = set(repo[n].branch() for n in outgoing.missing)
168 branches = set(repo[n].branch() for n in outgoing.missing)
169
169
170 # 2. Check for new branches on the remote.
170 # 2. Check for new branches on the remote.
171 if remote.local():
171 if remote.local():
172 remotemap = phases.visiblebranchmap(remote)
172 remotemap = phases.visiblebranchmap(remote)
173 else:
173 else:
174 remotemap = remote.branchmap()
174 remotemap = remote.branchmap()
175 newbranches = branches - set(remotemap)
175 newbranches = branches - set(remotemap)
176 if newbranches and not newbranch: # new branch requires --new-branch
176 if newbranches and not newbranch: # new branch requires --new-branch
177 branchnames = ', '.join(sorted(newbranches))
177 branchnames = ', '.join(sorted(newbranches))
178 raise util.Abort(_("push creates new remote branches: %s!")
178 raise util.Abort(_("push creates new remote branches: %s!")
179 % branchnames,
179 % branchnames,
180 hint=_("use 'hg push --new-branch' to create"
180 hint=_("use 'hg push --new-branch' to create"
181 " new remote branches"))
181 " new remote branches"))
182 branches.difference_update(newbranches)
182 branches.difference_update(newbranches)
183
183
184 # 3. Construct the initial oldmap and newmap dicts.
184 # 3. Construct the initial oldmap and newmap dicts.
185 # They contain information about the remote heads before and
185 # They contain information about the remote heads before and
186 # after the push, respectively.
186 # after the push, respectively.
187 # Heads not found locally are not included in either dict,
187 # Heads not found locally are not included in either dict,
188 # since they won't be affected by the push.
188 # since they won't be affected by the push.
189 # unsynced contains all branches with incoming changesets.
189 # unsynced contains all branches with incoming changesets.
190 oldmap = {}
190 oldmap = {}
191 newmap = {}
191 newmap = {}
192 unsynced = set()
192 unsynced = set()
193 for branch in branches:
193 for branch in branches:
194 remotebrheads = remotemap[branch]
194 remotebrheads = remotemap[branch]
195 prunedbrheads = [h for h in remotebrheads if h in cl.nodemap]
195 prunedbrheads = [h for h in remotebrheads if h in cl.nodemap]
196 oldmap[branch] = prunedbrheads
196 oldmap[branch] = prunedbrheads
197 newmap[branch] = list(prunedbrheads)
197 newmap[branch] = list(prunedbrheads)
198 if len(remotebrheads) > len(prunedbrheads):
198 if len(remotebrheads) > len(prunedbrheads):
199 unsynced.add(branch)
199 unsynced.add(branch)
200
200
201 # 4. Update newmap with outgoing changes.
201 # 4. Update newmap with outgoing changes.
202 # This will possibly add new heads and remove existing ones.
202 # This will possibly add new heads and remove existing ones.
203 ctxgen = (repo[n] for n in outgoing.missing)
203 ctxgen = (repo[n] for n in outgoing.missing)
204 repo._updatebranchcache(newmap, ctxgen)
204 repo._updatebranchcache(newmap, ctxgen)
205
205
206 else:
206 else:
207 # 1-4b. old servers: Check for new topological heads.
207 # 1-4b. old servers: Check for new topological heads.
208 # Construct {old,new}map with branch = None (topological branch).
208 # Construct {old,new}map with branch = None (topological branch).
209 # (code based on _updatebranchcache)
209 # (code based on _updatebranchcache)
210 oldheadrevs = set(cl.rev(h) for h in remoteheads if h in cl.nodemap)
210 oldheads = set(h for h in remoteheads if h in cl.nodemap)
211 missingrevs = [cl.rev(node) for node in outgoing.missing]
211 # all nodes in outgoing.missing are children of either:
212 newheadrevs = oldheadrevs.union(missingrevs)
212 # - an element of oldheads
213 if len(newheadrevs) > 1:
213 # - another element of outgoing.missing
214 for latest in sorted(missingrevs, reverse=True):
214 # - nullrev
215 if latest not in newheadrevs:
215 # This explains why the new head are very simple to compute.
216 continue
216 r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
217 reachable = cl.ancestors([latest], min(newheadrevs))
218 newheadrevs.difference_update(reachable)
219 branches = set([None])
217 branches = set([None])
220 newmap = {None: [cl.node(rev) for rev in newheadrevs]}
218 newmap = {None: list(c.node() for c in r)}
221 oldmap = {None: [cl.node(rev) for rev in oldheadrevs]}
219 oldmap = {None: oldheads}
222 unsynced = inc and branches or set()
220 unsynced = inc and branches or set()
223
221
224 # 5. Check for new heads.
222 # 5. Check for new heads.
225 # If there are more heads after the push than before, a suitable
223 # If there are more heads after the push than before, a suitable
226 # error message, depending on unsynced status, is displayed.
224 # error message, depending on unsynced status, is displayed.
227 error = None
225 error = None
228 localbookmarks = repo._bookmarks
226 localbookmarks = repo._bookmarks
229
227
230 for branch in branches:
228 for branch in branches:
231 newhs = set(newmap[branch])
229 newhs = set(newmap[branch])
232 oldhs = set(oldmap[branch])
230 oldhs = set(oldmap[branch])
233 dhs = None
231 dhs = None
234 if len(newhs) > len(oldhs):
232 if len(newhs) > len(oldhs):
235 # strip updates to existing remote heads from the new heads list
233 # strip updates to existing remote heads from the new heads list
236 remotebookmarks = remote.listkeys('bookmarks')
234 remotebookmarks = remote.listkeys('bookmarks')
237 bookmarkedheads = set()
235 bookmarkedheads = set()
238 for bm in localbookmarks:
236 for bm in localbookmarks:
239 rnode = remotebookmarks.get(bm)
237 rnode = remotebookmarks.get(bm)
240 if rnode and rnode in repo:
238 if rnode and rnode in repo:
241 lctx, rctx = repo[bm], repo[rnode]
239 lctx, rctx = repo[bm], repo[rnode]
242 if rctx == lctx.ancestor(rctx):
240 if rctx == lctx.ancestor(rctx):
243 bookmarkedheads.add(lctx.node())
241 bookmarkedheads.add(lctx.node())
244 dhs = list(newhs - bookmarkedheads - oldhs)
242 dhs = list(newhs - bookmarkedheads - oldhs)
245 if dhs:
243 if dhs:
246 if error is None:
244 if error is None:
247 if branch not in ('default', None):
245 if branch not in ('default', None):
248 error = _("push creates new remote head %s "
246 error = _("push creates new remote head %s "
249 "on branch '%s'!") % (short(dhs[0]), branch)
247 "on branch '%s'!") % (short(dhs[0]), branch)
250 else:
248 else:
251 error = _("push creates new remote head %s!"
249 error = _("push creates new remote head %s!"
252 ) % short(dhs[0])
250 ) % short(dhs[0])
253 if branch in unsynced:
251 if branch in unsynced:
254 hint = _("you should pull and merge or "
252 hint = _("you should pull and merge or "
255 "use push -f to force")
253 "use push -f to force")
256 else:
254 else:
257 hint = _("did you forget to merge? "
255 hint = _("did you forget to merge? "
258 "use push -f to force")
256 "use push -f to force")
259 if branch is not None:
257 if branch is not None:
260 repo.ui.note(_("new remote heads on branch '%s'\n") % branch)
258 repo.ui.note(_("new remote heads on branch '%s'\n") % branch)
261 for h in dhs:
259 for h in dhs:
262 repo.ui.note(_("new remote head %s\n") % short(h))
260 repo.ui.note(_("new remote head %s\n") % short(h))
263 if error:
261 if error:
264 raise util.Abort(error, hint=hint)
262 raise util.Abort(error, hint=hint)
265
263
266 # 6. Check for unsynced changes on involved branches.
264 # 6. Check for unsynced changes on involved branches.
267 if unsynced:
265 if unsynced:
268 repo.ui.warn(_("note: unsynced remote changes!\n"))
266 repo.ui.warn(_("note: unsynced remote changes!\n"))
General Comments 0
You need to be logged in to leave comments. Login now