checkheads: extract branchmap preprocessing...
Pierre-Yves David
r17209:5cd3e526 default
@@ -1,306 +1,325 @@
 # discovery.py - protocol changeset discovery functions
 #
 # Copyright 2010 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from node import nullid, short
 from i18n import _
 import util, setdiscovery, treediscovery, phases

 def findcommonincoming(repo, remote, heads=None, force=False):
     """Return a tuple (common, anyincoming, heads) used to identify the common
     subset of nodes between repo and remote.

     "common" is a list of (at least) the heads of the common subset.
     "anyincoming" is testable as a boolean indicating if any nodes are missing
     locally. If remote does not support getbundle, this actually is a list of
     roots of the nodes that would be incoming, to be supplied to
     changegroupsubset. No code except for pull should be relying on this fact
     any longer.
     "heads" is either the supplied heads, or else the remote's heads.

     If you pass heads and they are all known locally, the response lists just
     these heads in "common" and in "heads".

     Please use findcommonoutgoing to compute the set of outgoing nodes to give
     extensions a good hook into outgoing.
     """

     if not remote.capable('getbundle'):
         return treediscovery.findcommonincoming(repo, remote, heads, force)

     if heads:
         allknown = True
         nm = repo.changelog.nodemap
         for h in heads:
             if nm.get(h) is None:
                 allknown = False
                 break
         if allknown:
             return (heads, False, heads)

     res = setdiscovery.findcommonheads(repo.ui, repo, remote,
                                        abortwhenunrelated=not force)
     common, anyinc, srvheads = res
     return (list(common), anyinc, heads or list(srvheads))

 class outgoing(object):
     '''Represents the set of nodes present in a local repo but not in a
     (possibly) remote one.

     Members:

     missing is a list of all nodes present in local but not in remote.
     common is a list of all nodes shared between the two repos.
     excluded is the list of missing changesets that shouldn't be sent remotely.
     missingheads is the list of heads of missing.
     commonheads is the list of heads of common.

     The sets are computed on demand from the heads, unless provided upfront
     by discovery.'''

     def __init__(self, revlog, commonheads, missingheads):
         self.commonheads = commonheads
         self.missingheads = missingheads
         self._revlog = revlog
         self._common = None
         self._missing = None
         self.excluded = []

     def _computecommonmissing(self):
         sets = self._revlog.findcommonmissing(self.commonheads,
                                               self.missingheads)
         self._common, self._missing = sets

     @util.propertycache
     def common(self):
         if self._common is None:
             self._computecommonmissing()
         return self._common

     @util.propertycache
     def missing(self):
         if self._missing is None:
             self._computecommonmissing()
         return self._missing

 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
                        commoninc=None, portable=False):
     '''Return an outgoing instance to identify the nodes present in repo but
     not in other.

     If onlyheads is given, only nodes ancestral to nodes in onlyheads
     (inclusive) are included. If you already know the local repo's heads,
     passing them in onlyheads is faster than letting them be recomputed here.

     If commoninc is given, it must be the result of a prior call to
     findcommonincoming(repo, other, force) to avoid recomputing it here.

     If portable is given, compute more conservative common and missingheads,
     to make bundles created from the instance more portable.'''
     # declare an empty outgoing object to be filled later
     og = outgoing(repo.changelog, None, None)

     # get common set if not provided
     if commoninc is None:
         commoninc = findcommonincoming(repo, other, force=force)
     og.commonheads, _any, _hds = commoninc

     # compute outgoing
     mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
     if not mayexclude:
         og.missingheads = onlyheads or repo.heads()
     elif onlyheads is None:
         # use visible heads as it should be cached
         og.missingheads = visibleheads(repo)
         # extinct changesets are silently ignored
         og.excluded = [ctx.node() for ctx in repo.set('secret()')]
     else:
         # compute common, missing and exclude secret stuff
         sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
         og._common, allmissing = sets
         og._missing = missing = []
         og.excluded = excluded = []
         for node in allmissing:
             ctx = repo[node]
             if not ctx.extinct():
                 # extinct changesets are silently ignored
                 if ctx.phase() >= phases.secret:
                     excluded.append(node)
                 else:
                     missing.append(node)
         if len(missing) == len(allmissing):
             missingheads = onlyheads
         else: # update missing heads
             missingheads = phases.newheads(repo, onlyheads, excluded)
         og.missingheads = missingheads
     if portable:
         # recompute common and missingheads as if -r<rev> had been given for
         # each head of missing, and --base <rev> for each head of the proper
         # ancestors of missing
         og._computecommonmissing()
         cl = repo.changelog
         missingrevs = set(cl.rev(n) for n in og._missing)
         og._common = set(cl.ancestors(missingrevs)) - missingrevs
         commonheads = set(og.commonheads)
         og.missingheads = [h for h in og.missingheads if h not in commonheads]

     return og

+def _branchmapsummary(repo, remote, outgoing):
+    """compute a summary of branch and heads status before and after push
+
+    - oldmap: {'branch': [heads]} mapping for remote
+    - newmap: {'branch': [heads]} mapping for local
+    - unsynced: set of branches that have unsynced remote changes
+    - branches: set of all common branches pushed
+    - newbranches: list of entirely new branches pushed
+    """
+    cl = repo.changelog
+
+    # A. Create set of branches involved in the push.
+    branches = set(repo[n].branch() for n in outgoing.missing)
+    remotemap = remote.branchmap()
+    newbranches = branches - set(remotemap)
+    branches.difference_update(newbranches)
+
+    # B. Construct the initial oldmap and newmap dicts.
+    # They contain information about the remote heads before and
+    # after the push, respectively.
+    # Heads not found locally are not included in either dict,
+    # since they won't be affected by the push.
+    # unsynced contains all branches with incoming changesets.
+    oldmap = {}
+    newmap = {}
+    unsynced = set()
+    for branch in branches:
+        remotebrheads = remotemap[branch]
+
+        prunedbrheads = [h for h in remotebrheads if h in cl.nodemap]
+        oldmap[branch] = prunedbrheads
+        newmap[branch] = list(prunedbrheads)
+        if len(remotebrheads) > len(prunedbrheads):
+            unsynced.add(branch)
+
+    # C. Update newmap with outgoing changes.
+    # This will possibly add new heads and remove existing ones.
+    ctxgen = (repo[n] for n in outgoing.missing)
+    repo._updatebranchcache(newmap, ctxgen)
+    return oldmap, newmap, unsynced, branches, newbranches
+
+def _oldbranchmapsummary(repo, remoteheads, outgoing, inc=False):
+    """Compute the branchmap summary for a repo without branchmap support"""
+
+    cl = repo.changelog
+    # 1-4b. old servers: Check for new topological heads.
+    # Construct {old,new}map with branch = None (topological branch).
+    # (code based on _updatebranchcache)
+    oldheads = set(h for h in remoteheads if h in cl.nodemap)
+    # all nodes in outgoing.missing are children of either:
+    # - an element of oldheads
+    # - another element of outgoing.missing
+    # - nullrev
+    # This explains why the new heads are very simple to compute.
+    r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
+    branches = set([None])
+    newmap = {None: list(c.node() for c in r)}
+    oldmap = {None: oldheads}
+    unsynced = inc and branches or set()
+    return oldmap, newmap, unsynced, branches, set()
+
 def checkheads(repo, remote, outgoing, remoteheads, newbranch=False, inc=False):
     """Check that a push won't add any outgoing head

     Raise an Abort error and display a ui message as needed.
     """
+    # Check for each named branch if we're creating new remote heads.
+    # To be a remote head after push, node must be either:
+    # - unknown locally
+    # - a local outgoing head descended from update
+    # - a remote head that's known locally and not
+    #   ancestral to an outgoing head
     if remoteheads == [nullid]:
         # remote is empty, nothing to check.
         return

-    cl = repo.changelog
     if remote.capable('branchmap'):
-        # Check for each named branch if we're creating new remote heads.
-        # To be a remote head after push, node must be either:
-        # - unknown locally
-        # - a local outgoing head descended from update
-        # - a remote head that's known locally and not
-        #   ancestral to an outgoing head
-
-        # 1. Create set of branches involved in the push.
-        branches = set(repo[n].branch() for n in outgoing.missing)
-
-        # 2. Check for new branches on the remote.
-        remotemap = remote.branchmap()
-        newbranches = branches - set(remotemap)
-        if newbranches and not newbranch: # new branch requires --new-branch
-            branchnames = ', '.join(sorted(newbranches))
-            raise util.Abort(_("push creates new remote branches: %s!")
-                             % branchnames,
-                             hint=_("use 'hg push --new-branch' to create"
-                                    " new remote branches"))
-        branches.difference_update(newbranches)
-
-        # 3. Construct the initial oldmap and newmap dicts.
-        # They contain information about the remote heads before and
-        # after the push, respectively.
-        # Heads not found locally are not included in either dict,
-        # since they won't be affected by the push.
-        # unsynced contains all branches with incoming changesets.
-        oldmap = {}
-        newmap = {}
-        unsynced = set()
-        for branch in branches:
-            remotebrheads = remotemap[branch]
-            prunedbrheads = [h for h in remotebrheads if h in cl.nodemap]
-            oldmap[branch] = prunedbrheads
-            newmap[branch] = list(prunedbrheads)
-            if len(remotebrheads) > len(prunedbrheads):
-                unsynced.add(branch)
-
-        # 4. Update newmap with outgoing changes.
-        # This will possibly add new heads and remove existing ones.
-        ctxgen = (repo[n] for n in outgoing.missing)
-        repo._updatebranchcache(newmap, ctxgen)
-
-    else:
-        # 1-4b. old servers: Check for new topological heads.
-        # Construct {old,new}map with branch = None (topological branch).
-        # (code based on _updatebranchcache)
-        oldheads = set(h for h in remoteheads if h in cl.nodemap)
-        # all nodes in outgoing.missing are children of either:
-        # - an element of oldheads
-        # - another element of outgoing.missing
-        # - nullrev
-        # This explains why the new head are very simple to compute.
-        r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
-        branches = set([None])
-        newmap = {None: list(c.node() for c in r)}
-        oldmap = {None: oldheads}
-        unsynced = inc and branches or set()
-
-    # 5. Check for new heads.
+        bms = _branchmapsummary(repo, remote, outgoing)
+    else:
+        bms = _oldbranchmapsummary(repo, remoteheads, outgoing, inc)
+    oldmap, newmap, unsynced, branches, newbranches = bms
+    # 1. Check for new branches on the remote.
+    if newbranches and not newbranch: # new branch requires --new-branch
+        branchnames = ', '.join(sorted(newbranches))
+        raise util.Abort(_("push creates new remote branches: %s!")
+                         % branchnames,
+                         hint=_("use 'hg push --new-branch' to create"
+                                " new remote branches"))
+
+    # 2. Check for new heads.
     # If there are more heads after the push than before, a suitable
     # error message, depending on unsynced status, is displayed.
     error = None
     localbookmarks = repo._bookmarks

     for branch in branches:
         newhs = set(newmap[branch])
         oldhs = set(oldmap[branch])
         dhs = None
         if len(newhs) > len(oldhs):
             # strip updates to existing remote heads from the new heads list
             remotebookmarks = remote.listkeys('bookmarks')
             bookmarkedheads = set()
             for bm in localbookmarks:
                 rnode = remotebookmarks.get(bm)
                 if rnode and rnode in repo:
                     lctx, rctx = repo[bm], repo[rnode]
                     if rctx == lctx.ancestor(rctx):
                         bookmarkedheads.add(lctx.node())
             dhs = list(newhs - bookmarkedheads - oldhs)
         if dhs:
             if error is None:
                 if branch not in ('default', None):
                     error = _("push creates new remote head %s "
                               "on branch '%s'!") % (short(dhs[0]), branch)
                 else:
                     error = _("push creates new remote head %s!"
                              ) % short(dhs[0])
                 if branch in unsynced:
                     hint = _("you should pull and merge or "
                              "use push -f to force")
                 else:
                     hint = _("did you forget to merge? "
                              "use push -f to force")
             if branch is not None:
                 repo.ui.note(_("new remote heads on branch '%s'\n") % branch)
             for h in dhs:
                 repo.ui.note(_("new remote head %s\n") % short(h))
     if error:
         raise util.Abort(error, hint=hint)

     # 6. Check for unsynced changes on involved branches.
     if unsynced:
         repo.ui.warn(_("note: unsynced remote changes!\n"))

 def visibleheads(repo):
     """return the set of visible heads of this repo"""
     # XXX we want a cache on this
     sroots = repo._phasecache.phaseroots[phases.secret]
     if sroots or repo.obsstore:
         # XXX very slow revset. storing heads or secret "boundary"
         # would help.
         revset = repo.set('heads(not (%ln:: + extinct()))', sroots)

         vheads = [ctx.node() for ctx in revset]
         if not vheads:
             vheads.append(nullid)
     else:
         vheads = repo.heads()
     return vheads


 def visiblebranchmap(repo):
     """return a branchmap for the visible set"""
     # XXX Recomputing this data on the fly is very slow. We should build a
     # XXX cached version while computing the standard branchmap version.
     sroots = repo._phasecache.phaseroots[phases.secret]
     if sroots or repo.obsstore:
         vbranchmap = {}
         for branch, nodes in repo.branchmap().iteritems():
             # search for secret heads.
             for n in nodes:
                 if repo[n].phase() >= phases.secret:
                     nodes = None
                     break
             # if secret heads were found we must compute them again
             if nodes is None:
                 s = repo.set('heads(branch(%s) - secret() - extinct())',
                              branch)
                 nodes = [c.node() for c in s]
             vbranchmap[branch] = nodes
     else:
         vbranchmap = repo.branchmap()
     return vbranchmap
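
To make the extracted preprocessing concrete, here is a minimal, self-contained sketch (plain Python, not Mercurial internals; the node ids and branch names are made up) of the check that checkheads() now runs on the (oldmap, newmap, unsynced, branches, newbranches) tuple returned by _branchmapsummary and _oldbranchmapsummary: a push is refused when a common branch would end up with more heads than it started with.

def wouldcreatenewheads(oldmap, newmap, branches):
    """Return (branch, gained heads) pairs for branches whose head count grows."""
    offenders = []
    for branch in branches:
        oldhs = set(oldmap[branch])
        newhs = set(newmap[branch])
        # More heads after the push than before means the push would
        # create a new remote head on this branch.
        if len(newhs) > len(oldhs):
            offenders.append((branch, sorted(newhs - oldhs)))
    return offenders

# Pushing a second head to 'default' while fast-forwarding 'stable':
oldmap = {'default': ['a1'], 'stable': ['b1']}
newmap = {'default': ['a1', 'a2'], 'stable': ['b2']}
print(wouldcreatenewheads(oldmap, newmap, {'default', 'stable'}))
# -> [('default', ['a2'])]: 'stable' swapped its head in place, so it is fine.

The real function additionally subtracts remote heads that merely move a bookmark forward, and turns any remaining surplus head into the "push creates new remote head" abort seen in the diff.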
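Similarly, the outgoing class in this file derives missing and common lazily from commonheads and missingheads via revlog.findcommonmissing. Conceptually, missing is everything reachable from missingheads but not from commonheads. The toy sketch below (a plain {node: parents} dict with hypothetical node names stands in for the changelog) shows that set relation without any revlog machinery.

def ancestors(parents, heads):
    """All nodes reachable from heads (inclusive) in a {node: [parents]} DAG."""
    seen, stack = set(), list(heads)
    while stack:
        n = stack.pop()
        if n not in seen:
            seen.add(n)
            stack.extend(parents.get(n, []))
    return seen

# Toy history: a0 <- a1 <- a2, with b1 branching off a1.
parents = {'a0': [], 'a1': ['a0'], 'a2': ['a1'], 'b1': ['a1']}
common = ancestors(parents, ['a1'])                  # what the remote has
missing = ancestors(parents, ['a2', 'b1']) - common  # what a push would send
print(sorted(missing))                               # -> ['a2', 'b1']

The real implementation works on revision numbers and caches the split in og._common and og._missing, but the containment relation is the same.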