discovery: simplify branchmap construction against legacy server...
Pierre-Yves David
r17056:30853f4b default
@@ -1,268 +1,266 b''
1 1 # discovery.py - protocol changeset discovery functions
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, short
9 9 from i18n import _
10 10 import util, setdiscovery, treediscovery, phases
11 11
12 12 def findcommonincoming(repo, remote, heads=None, force=False):
13 13 """Return a tuple (common, anyincoming, heads) used to identify the common
14 14 subset of nodes between repo and remote.
15 15
16 16 "common" is a list of (at least) the heads of the common subset.
17 17 "anyincoming" is testable as a boolean indicating if any nodes are missing
18 18 locally. If remote does not support getbundle, this actually is a list of
19 19 roots of the nodes that would be incoming, to be supplied to
20 20 changegroupsubset. No code except for pull should be relying on this fact
21 21 any longer.
22 22 "heads" is either the supplied heads, or else the remote's heads.
23 23
24 24 If you pass heads and they are all known locally, the response lists just
25 25 these heads in "common" and in "heads".
26 26
27 27 Please use findcommonoutgoing to compute the set of outgoing nodes to give
28 28 extensions a good hook into outgoing.
29 29 """
30 30
31 31 if not remote.capable('getbundle'):
32 32 return treediscovery.findcommonincoming(repo, remote, heads, force)
33 33
34 34 if heads:
35 35 allknown = True
36 36 nm = repo.changelog.nodemap
37 37 for h in heads:
38 38 if nm.get(h) is None:
39 39 allknown = False
40 40 break
41 41 if allknown:
42 42 return (heads, False, heads)
43 43
44 44 res = setdiscovery.findcommonheads(repo.ui, repo, remote,
45 45 abortwhenunrelated=not force)
46 46 common, anyinc, srvheads = res
47 47 return (list(common), anyinc, heads or list(srvheads))
48 48
49 49 class outgoing(object):
50 50 '''Represents the set of nodes present in a local repo but not in a
51 51 (possibly) remote one.
52 52
53 53 Members:
54 54
55 55 missing is a list of all nodes present in local but not in remote.
56 56 common is a list of all nodes shared between the two repos.
57 57 excluded is the list of missing changesets that shouldn't be sent remotely.
58 58 missingheads is the list of heads of missing.
59 59 commonheads is the list of heads of common.
60 60
61 61 The sets are computed on demand from the heads, unless provided upfront
62 62 by discovery.'''
63 63
64 64 def __init__(self, revlog, commonheads, missingheads):
65 65 self.commonheads = commonheads
66 66 self.missingheads = missingheads
67 67 self._revlog = revlog
68 68 self._common = None
69 69 self._missing = None
70 70 self.excluded = []
71 71
72 72 def _computecommonmissing(self):
73 73 sets = self._revlog.findcommonmissing(self.commonheads,
74 74 self.missingheads)
75 75 self._common, self._missing = sets
76 76
77 77 @util.propertycache
78 78 def common(self):
79 79 if self._common is None:
80 80 self._computecommonmissing()
81 81 return self._common
82 82
83 83 @util.propertycache
84 84 def missing(self):
85 85 if self._missing is None:
86 86 self._computecommonmissing()
87 87 return self._missing
88 88
89 89 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
90 90 commoninc=None, portable=False):
91 91 '''Return an outgoing instance to identify the nodes present in repo but
92 92 not in other.
93 93
94 94 If onlyheads is given, only nodes ancestral to nodes in onlyheads
95 95 (inclusive) are included. If you already know the local repo's heads,
96 96 passing them in onlyheads is faster than letting them be recomputed here.
97 97
98 98 If commoninc is given, it must be the result of a prior call to
99 99 findcommonincoming(repo, other, force) to avoid recomputing it here.
100 100
101 101 If portable is given, compute more conservative common and missingheads,
102 102 to make bundles created from the instance more portable.'''
103 103 # declare an empty outgoing object to be filled later
104 104 og = outgoing(repo.changelog, None, None)
105 105
106 106 # get common set if not provided
107 107 if commoninc is None:
108 108 commoninc = findcommonincoming(repo, other, force=force)
109 109 og.commonheads, _any, _hds = commoninc
110 110
111 111 # compute outgoing
112 112 if not repo._phasecache.phaseroots[phases.secret]:
113 113 og.missingheads = onlyheads or repo.heads()
114 114 elif onlyheads is None:
115 115 # use visible heads as it should be cached
116 116 og.missingheads = phases.visibleheads(repo)
117 117 og.excluded = [ctx.node() for ctx in repo.set('secret()')]
118 118 else:
119 119 # compute common, missing and exclude secret stuff
120 120 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
121 121 og._common, allmissing = sets
122 122 og._missing = missing = []
123 123 og.excluded = excluded = []
124 124 for node in allmissing:
125 125 if repo[node].phase() >= phases.secret:
126 126 excluded.append(node)
127 127 else:
128 128 missing.append(node)
129 129 if excluded:
130 130 # update missing heads
131 131 missingheads = phases.newheads(repo, onlyheads, excluded)
132 132 else:
133 133 missingheads = onlyheads
134 134 og.missingheads = missingheads
135 135
136 136 if portable:
137 137 # recompute common and missingheads as if -r<rev> had been given for
138 138 # each head of missing, and --base <rev> for each head of the proper
139 139 # ancestors of missing
140 140 og._computecommonmissing()
141 141 cl = repo.changelog
142 142 missingrevs = set(cl.rev(n) for n in og._missing)
143 143 og._common = set(cl.ancestors(missingrevs)) - missingrevs
144 144 commonheads = set(og.commonheads)
145 145 og.missingheads = [h for h in og.missingheads if h not in commonheads]
146 146
147 147 return og
148 148
149 149 def checkheads(repo, remote, outgoing, remoteheads, newbranch=False, inc=False):
150 150 """Check that a push won't add any outgoing head
151 151
152 152 raise Abort error and display ui message as needed.
153 153 """
154 154 if remoteheads == [nullid]:
155 155 # remote is empty, nothing to check.
156 156 return
157 157
158 158 cl = repo.changelog
159 159 if remote.capable('branchmap'):
160 160 # Check for each named branch if we're creating new remote heads.
161 161 # To be a remote head after push, node must be either:
162 162 # - unknown locally
163 163 # - a local outgoing head descended from update
164 164 # - a remote head that's known locally and not
165 165 # ancestral to an outgoing head
166 166
167 167 # 1. Create set of branches involved in the push.
168 168 branches = set(repo[n].branch() for n in outgoing.missing)
169 169
170 170 # 2. Check for new branches on the remote.
171 171 if remote.local():
172 172 remotemap = phases.visiblebranchmap(remote)
173 173 else:
174 174 remotemap = remote.branchmap()
175 175 newbranches = branches - set(remotemap)
176 176 if newbranches and not newbranch: # new branch requires --new-branch
177 177 branchnames = ', '.join(sorted(newbranches))
178 178 raise util.Abort(_("push creates new remote branches: %s!")
179 179 % branchnames,
180 180 hint=_("use 'hg push --new-branch' to create"
181 181 " new remote branches"))
182 182 branches.difference_update(newbranches)
183 183
184 184 # 3. Construct the initial oldmap and newmap dicts.
185 185 # They contain information about the remote heads before and
186 186 # after the push, respectively.
187 187 # Heads not found locally are not included in either dict,
188 188 # since they won't be affected by the push.
189 189 # unsynced contains all branches with incoming changesets.
190 190 oldmap = {}
191 191 newmap = {}
192 192 unsynced = set()
193 193 for branch in branches:
194 194 remotebrheads = remotemap[branch]
195 195 prunedbrheads = [h for h in remotebrheads if h in cl.nodemap]
196 196 oldmap[branch] = prunedbrheads
197 197 newmap[branch] = list(prunedbrheads)
198 198 if len(remotebrheads) > len(prunedbrheads):
199 199 unsynced.add(branch)
200 200
201 201 # 4. Update newmap with outgoing changes.
202 202 # This will possibly add new heads and remove existing ones.
203 203 ctxgen = (repo[n] for n in outgoing.missing)
204 204 repo._updatebranchcache(newmap, ctxgen)
205 205
206 206 else:
207 207 # 1-4b. old servers: Check for new topological heads.
208 208 # Construct {old,new}map with branch = None (topological branch).
209 209 # (code based on _updatebranchcache)
210 oldheadrevs = set(cl.rev(h) for h in remoteheads if h in cl.nodemap)
211 missingrevs = [cl.rev(node) for node in outgoing.missing]
212 newheadrevs = oldheadrevs.union(missingrevs)
213 if len(newheadrevs) > 1:
214 for latest in sorted(missingrevs, reverse=True):
215 if latest not in newheadrevs:
216 continue
217 reachable = cl.ancestors([latest], min(newheadrevs))
218 newheadrevs.difference_update(reachable)
210 oldheads = set(h for h in remoteheads if h in cl.nodemap)
211 # all nodes in outgoing.missing are children of either:
212 # - an element of oldheads
213 # - another element of outgoing.missing
214 # - nullrev
215 # This explains why the new heads are very simple to compute.
216 r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
219 217 branches = set([None])
220 newmap = {None: [cl.node(rev) for rev in newheadrevs]}
221 oldmap = {None: [cl.node(rev) for rev in oldheadrevs]}
218 newmap = {None: list(c.node() for c in r)}
219 oldmap = {None: oldheads}
222 220 unsynced = inc and branches or set()
223 221
224 222 # 5. Check for new heads.
225 223 # If there are more heads after the push than before, a suitable
226 224 # error message, depending on unsynced status, is displayed.
227 225 error = None
228 226 localbookmarks = repo._bookmarks
229 227
230 228 for branch in branches:
231 229 newhs = set(newmap[branch])
232 230 oldhs = set(oldmap[branch])
233 231 dhs = None
234 232 if len(newhs) > len(oldhs):
235 233 # strip updates to existing remote heads from the new heads list
236 234 remotebookmarks = remote.listkeys('bookmarks')
237 235 bookmarkedheads = set()
238 236 for bm in localbookmarks:
239 237 rnode = remotebookmarks.get(bm)
240 238 if rnode and rnode in repo:
241 239 lctx, rctx = repo[bm], repo[rnode]
242 240 if rctx == lctx.ancestor(rctx):
243 241 bookmarkedheads.add(lctx.node())
244 242 dhs = list(newhs - bookmarkedheads - oldhs)
245 243 if dhs:
246 244 if error is None:
247 245 if branch not in ('default', None):
248 246 error = _("push creates new remote head %s "
249 247 "on branch '%s'!") % (short(dhs[0]), branch)
250 248 else:
251 249 error = _("push creates new remote head %s!"
252 250 ) % short(dhs[0])
253 251 if branch in unsynced:
254 252 hint = _("you should pull and merge or "
255 253 "use push -f to force")
256 254 else:
257 255 hint = _("did you forget to merge? "
258 256 "use push -f to force")
259 257 if branch is not None:
260 258 repo.ui.note(_("new remote heads on branch '%s'\n") % branch)
261 259 for h in dhs:
262 260 repo.ui.note(_("new remote head %s\n") % short(h))
263 261 if error:
264 262 raise util.Abort(error, hint=hint)
265 263
266 264 # 6. Check for unsynced changes on involved branches.
267 265 if unsynced:
268 266 repo.ui.warn(_("note: unsynced remote changes!\n"))
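
The hunk replacing old lines 210-218 with new lines 210-216 is the heart of this change: against a legacy server without the branchmap capability, the old code rebuilt the topological heads by pruning ancestors of every outgoing revision, while the new code simply asks for the heads of the union of the old remote heads and the outgoing changesets. Below is a minimal, self-contained sketch of why that is sufficient, using a toy parent map instead of Mercurial's changelog; all names are made up for illustration and this is not Mercurial code.

    def heads(nodes, parents):
        # Members of `nodes` that no other member of `nodes` lists as a parent,
        # i.e. the same notion as the revset heads(set).
        nodes = set(nodes)
        nonheads = set()
        for n in nodes:
            for p in parents.get(n, ()):
                if p in nodes:
                    nonheads.add(p)
        return nodes - nonheads

    # Toy history: a <- b <- c and b <- d, so pushing c and d on top of b
    # would leave the remote with two topological heads.
    parents = {'a': (), 'b': ('a',), 'c': ('b',), 'd': ('b',)}
    oldheads = {'b'}        # what the remote reported before the push
    missing = {'c', 'd'}    # outgoing.missing
    print(sorted(heads(oldheads | missing, parents)))   # ['c', 'd']

Because every parent of an outgoing node is either another outgoing node, an old remote head, or null (the invariant stated in the new comment), looking only inside that union cannot miss or invent a head, which is what lets the single revset heads(%ln + %ln) replace the old ancestor-pruning loop.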
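Step 5 in the unchanged part of the function then reduces to plain set arithmetic once the per-branch head lists are built: a branch only triggers the "push creates new remote head" abort when it ends up with more heads than before, and heads that merely advance a bookmark the remote already tracks are discounted first. A tiny sketch with made-up node names, assuming the sets have already been computed as in the code above:

    oldhs = {'old1', 'old2'}                 # remote heads on the branch before the push
    newhs = {'old1', 'bmhead', 'feature'}    # heads after applying outgoing changes
    bookmarkedheads = {'bmhead'}             # local heads that update a remote bookmark

    dhs = None
    if len(newhs) > len(oldhs):
        dhs = list(newhs - bookmarkedheads - oldhs)
    print(dhs)   # ['feature'] -- the head the abort message would name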