##// END OF EJS Templates
checkheads: simplify the structure built by preprocessing...
Pierre-Yves David -
r17211:4f321eec default
parent child Browse files
Show More
@@ -1,325 +1,343 b''
1 # discovery.py - protocol changeset discovery functions
1 # discovery.py - protocol changeset discovery functions
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid, short
8 from node import nullid, short
9 from i18n import _
9 from i18n import _
10 import util, setdiscovery, treediscovery, phases
10 import util, setdiscovery, treediscovery, phases
11
11
12 def findcommonincoming(repo, remote, heads=None, force=False):
12 def findcommonincoming(repo, remote, heads=None, force=False):
13 """Return a tuple (common, anyincoming, heads) used to identify the common
13 """Return a tuple (common, anyincoming, heads) used to identify the common
14 subset of nodes between repo and remote.
14 subset of nodes between repo and remote.
15
15
16 "common" is a list of (at least) the heads of the common subset.
16 "common" is a list of (at least) the heads of the common subset.
17 "anyincoming" is testable as a boolean indicating if any nodes are missing
17 "anyincoming" is testable as a boolean indicating if any nodes are missing
18 locally. If remote does not support getbundle, this actually is a list of
18 locally. If remote does not support getbundle, this actually is a list of
19 roots of the nodes that would be incoming, to be supplied to
19 roots of the nodes that would be incoming, to be supplied to
20 changegroupsubset. No code except for pull should be relying on this fact
20 changegroupsubset. No code except for pull should be relying on this fact
21 any longer.
21 any longer.
22 "heads" is either the supplied heads, or else the remote's heads.
22 "heads" is either the supplied heads, or else the remote's heads.
23
23
24 If you pass heads and they are all known locally, the response lists just
24 If you pass heads and they are all known locally, the response lists just
25 these heads in "common" and in "heads".
25 these heads in "common" and in "heads".
26
26
27 Please use findcommonoutgoing to compute the set of outgoing nodes to give
27 Please use findcommonoutgoing to compute the set of outgoing nodes to give
28 extensions a good hook into outgoing.
28 extensions a good hook into outgoing.
29 """
29 """
30
30
31 if not remote.capable('getbundle'):
31 if not remote.capable('getbundle'):
32 return treediscovery.findcommonincoming(repo, remote, heads, force)
32 return treediscovery.findcommonincoming(repo, remote, heads, force)
33
33
34 if heads:
34 if heads:
35 allknown = True
35 allknown = True
36 nm = repo.changelog.nodemap
36 nm = repo.changelog.nodemap
37 for h in heads:
37 for h in heads:
38 if nm.get(h) is None:
38 if nm.get(h) is None:
39 allknown = False
39 allknown = False
40 break
40 break
41 if allknown:
41 if allknown:
42 return (heads, False, heads)
42 return (heads, False, heads)
43
43
44 res = setdiscovery.findcommonheads(repo.ui, repo, remote,
44 res = setdiscovery.findcommonheads(repo.ui, repo, remote,
45 abortwhenunrelated=not force)
45 abortwhenunrelated=not force)
46 common, anyinc, srvheads = res
46 common, anyinc, srvheads = res
47 return (list(common), anyinc, heads or list(srvheads))
47 return (list(common), anyinc, heads or list(srvheads))
48
48
49 class outgoing(object):
49 class outgoing(object):
50 '''Represents the set of nodes present in a local repo but not in a
50 '''Represents the set of nodes present in a local repo but not in a
51 (possibly) remote one.
51 (possibly) remote one.
52
52
53 Members:
53 Members:
54
54
55 missing is a list of all nodes present in local but not in remote.
55 missing is a list of all nodes present in local but not in remote.
56 common is a list of all nodes shared between the two repos.
56 common is a list of all nodes shared between the two repos.
57 excluded is the list of missing changeset that shouldn't be sent remotely.
57 excluded is the list of missing changeset that shouldn't be sent remotely.
58 missingheads is the list of heads of missing.
58 missingheads is the list of heads of missing.
59 commonheads is the list of heads of common.
59 commonheads is the list of heads of common.
60
60
61 The sets are computed on demand from the heads, unless provided upfront
61 The sets are computed on demand from the heads, unless provided upfront
62 by discovery.'''
62 by discovery.'''
63
63
64 def __init__(self, revlog, commonheads, missingheads):
64 def __init__(self, revlog, commonheads, missingheads):
65 self.commonheads = commonheads
65 self.commonheads = commonheads
66 self.missingheads = missingheads
66 self.missingheads = missingheads
67 self._revlog = revlog
67 self._revlog = revlog
68 self._common = None
68 self._common = None
69 self._missing = None
69 self._missing = None
70 self.excluded = []
70 self.excluded = []
71
71
72 def _computecommonmissing(self):
72 def _computecommonmissing(self):
73 sets = self._revlog.findcommonmissing(self.commonheads,
73 sets = self._revlog.findcommonmissing(self.commonheads,
74 self.missingheads)
74 self.missingheads)
75 self._common, self._missing = sets
75 self._common, self._missing = sets
76
76
77 @util.propertycache
77 @util.propertycache
78 def common(self):
78 def common(self):
79 if self._common is None:
79 if self._common is None:
80 self._computecommonmissing()
80 self._computecommonmissing()
81 return self._common
81 return self._common
82
82
83 @util.propertycache
83 @util.propertycache
84 def missing(self):
84 def missing(self):
85 if self._missing is None:
85 if self._missing is None:
86 self._computecommonmissing()
86 self._computecommonmissing()
87 return self._missing
87 return self._missing
88
88
89 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
89 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
90 commoninc=None, portable=False):
90 commoninc=None, portable=False):
91 '''Return an outgoing instance to identify the nodes present in repo but
91 '''Return an outgoing instance to identify the nodes present in repo but
92 not in other.
92 not in other.
93
93
94 If onlyheads is given, only nodes ancestral to nodes in onlyheads
94 If onlyheads is given, only nodes ancestral to nodes in onlyheads
95 (inclusive) are included. If you already know the local repo's heads,
95 (inclusive) are included. If you already know the local repo's heads,
96 passing them in onlyheads is faster than letting them be recomputed here.
96 passing them in onlyheads is faster than letting them be recomputed here.
97
97
98 If commoninc is given, it must be the result of a prior call to
98 If commoninc is given, it must be the result of a prior call to
99 findcommonincoming(repo, other, force) to avoid recomputing it here.
99 findcommonincoming(repo, other, force) to avoid recomputing it here.
100
100
101 If portable is given, compute more conservative common and missingheads,
101 If portable is given, compute more conservative common and missingheads,
102 to make bundles created from the instance more portable.'''
102 to make bundles created from the instance more portable.'''
103 # declare an empty outgoing object to be filled later
103 # declare an empty outgoing object to be filled later
104 og = outgoing(repo.changelog, None, None)
104 og = outgoing(repo.changelog, None, None)
105
105
106 # get common set if not provided
106 # get common set if not provided
107 if commoninc is None:
107 if commoninc is None:
108 commoninc = findcommonincoming(repo, other, force=force)
108 commoninc = findcommonincoming(repo, other, force=force)
109 og.commonheads, _any, _hds = commoninc
109 og.commonheads, _any, _hds = commoninc
110
110
111 # compute outgoing
111 # compute outgoing
112 mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
112 mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
113 if not mayexclude:
113 if not mayexclude:
114 og.missingheads = onlyheads or repo.heads()
114 og.missingheads = onlyheads or repo.heads()
115 elif onlyheads is None:
115 elif onlyheads is None:
116 # use visible heads as it should be cached
116 # use visible heads as it should be cached
117 og.missingheads = visibleheads(repo)
117 og.missingheads = visibleheads(repo)
118 # extinct changesets are silently ignored
118 # extinct changesets are silently ignored
119 og.excluded = [ctx.node() for ctx in repo.set('secret()')]
119 og.excluded = [ctx.node() for ctx in repo.set('secret()')]
120 else:
120 else:
121 # compute common, missing and exclude secret stuff
121 # compute common, missing and exclude secret stuff
122 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
122 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
123 og._common, allmissing = sets
123 og._common, allmissing = sets
124 og._missing = missing = []
124 og._missing = missing = []
125 og.excluded = excluded = []
125 og.excluded = excluded = []
126 for node in allmissing:
126 for node in allmissing:
127 ctx = repo[node]
127 ctx = repo[node]
128 if not ctx.extinct():
128 if not ctx.extinct():
129 # extinct changesets are silently ignored
129 # extinct changesets are silently ignored
130 if ctx.phase() >= phases.secret:
130 if ctx.phase() >= phases.secret:
131 excluded.append(node)
131 excluded.append(node)
132 else:
132 else:
133 missing.append(node)
133 missing.append(node)
134 if len(missing) == len(allmissing):
134 if len(missing) == len(allmissing):
135 missingheads = onlyheads
135 missingheads = onlyheads
136 else: # update missing heads
136 else: # update missing heads
137 missingheads = phases.newheads(repo, onlyheads, excluded)
137 missingheads = phases.newheads(repo, onlyheads, excluded)
138 og.missingheads = missingheads
138 og.missingheads = missingheads
139 if portable:
139 if portable:
140 # recompute common and missingheads as if -r<rev> had been given for
140 # recompute common and missingheads as if -r<rev> had been given for
141 # each head of missing, and --base <rev> for each head of the proper
141 # each head of missing, and --base <rev> for each head of the proper
142 # ancestors of missing
142 # ancestors of missing
143 og._computecommonmissing()
143 og._computecommonmissing()
144 cl = repo.changelog
144 cl = repo.changelog
145 missingrevs = set(cl.rev(n) for n in og._missing)
145 missingrevs = set(cl.rev(n) for n in og._missing)
146 og._common = set(cl.ancestors(missingrevs)) - missingrevs
146 og._common = set(cl.ancestors(missingrevs)) - missingrevs
147 commonheads = set(og.commonheads)
147 commonheads = set(og.commonheads)
148 og.missingheads = [h for h in og.missingheads if h not in commonheads]
148 og.missingheads = [h for h in og.missingheads if h not in commonheads]
149
149
150 return og
150 return og
151
151
152 def _branchmapsummary(repo, remote, outgoing):
152 def _headssummary(repo, remote, outgoing):
153 """compute a summary of branch and heads status before and after push
153 """compute a summary of branch and heads status before and after push
154
154
155 - oldmap: {'branch': [heads]} mapping for remote
155 return {'branch': ([remoteheads], [newheads], [unsyncedheads])} mapping
156 - newmap: {'branch': [heads]} mapping for local
156
157 - unsynced: set of branch that have unsynced remote changes
157 - branch: the branch name
158 - branches: set of all common branch pushed
158 - remoteheads: the list of remote heads known locally
159 - newbranches: list of plain new pushed branch
159 None if the branch is new
160 - newheads: the new remote heads (known locally) with outgoing pushed
161 - unsyncedheads: the list of remote heads unknown locally.
160 """
162 """
161 cl = repo.changelog
163 cl = repo.changelog
162
164 headssum = {}
163 # A. Create set of branches involved in the push.
165 # A. Create set of branches involved in the push.
164 branches = set(repo[n].branch() for n in outgoing.missing)
166 branches = set(repo[n].branch() for n in outgoing.missing)
165 remotemap = remote.branchmap()
167 remotemap = remote.branchmap()
166 newbranches = branches - set(remotemap)
168 newbranches = branches - set(remotemap)
167 branches.difference_update(newbranches)
169 branches.difference_update(newbranches)
168
170
169 # B. Construct the initial oldmap and newmap dicts.
171 # A. register remote heads
170 # They contain information about the remote heads before and
172 remotebranches = set()
171 # after the push, respectively.
173 for branch, heads in remote.branchmap().iteritems():
172 # Heads not found locally are not included in either dict,
174 remotebranches.add(branch)
173 # since they won't be affected by the push.
175 known = []
174 # unsynced contains all branches with incoming changesets.
176 unsynced = []
175 oldmap = {}
177 for h in heads:
176 newmap = {}
178 if h in cl.nodemap:
177 unsynced = set()
179 known.append(h)
178 for branch in branches:
180 else:
179 remotebrheads = remotemap[branch]
181 unsynced.append(h)
182 headssum[branch] = (known, list(known), unsynced)
183 # B. add new branch data
184 missingctx = list(repo[n] for n in outgoing.missing)
185 touchedbranches = set()
186 for ctx in missingctx:
187 branch = ctx.branch()
188 touchedbranches.add(branch)
189 if branch not in headssum:
190 headssum[branch] = (None, [], [])
180
191
181 prunedbrheads = [h for h in remotebrheads if h in cl.nodemap]
192 # C drop data about untouched branches:
182 oldmap[branch] = prunedbrheads
193 for branch in remotebranches - touchedbranches:
183 newmap[branch] = list(prunedbrheads)
194 del headssum[branch]
184 if len(remotebrheads) > len(prunedbrheads):
185 unsynced.add(branch)
186
195
187 # C. Update newmap with outgoing changes.
196 # D. Update newmap with outgoing changes.
188 # This will possibly add new heads and remove existing ones.
197 # This will possibly add new heads and remove existing ones.
189 ctxgen = (repo[n] for n in outgoing.missing)
198 newmap = dict((branch, heads[1]) for branch, heads in headssum.iteritems()
190 repo._updatebranchcache(newmap, ctxgen)
199 if heads[0] is not None)
191 return oldmap, newmap, unsynced, branches, newbranches
200 repo._updatebranchcache(newmap, missingctx)
201 for branch, newheads in newmap.iteritems():
202 headssum[branch][1][:] = newheads
203 return headssum
192
204
193 def _oldbranchmapsummary(repo, remoteheads, outgoing, inc=False):
205 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
194 """Compute branchmapsummary for repo without branchmap support"""
206 """Compute branchmapsummary for repo without branchmap support"""
195
207
196 cl = repo.changelog
208 cl = repo.changelog
197 # 1-4b. old servers: Check for new topological heads.
209 # 1-4b. old servers: Check for new topological heads.
198 # Construct {old,new}map with branch = None (topological branch).
210 # Construct {old,new}map with branch = None (topological branch).
199 # (code based on _updatebranchcache)
211 # (code based on _updatebranchcache)
200 oldheads = set(h for h in remoteheads if h in cl.nodemap)
212 oldheads = set(h for h in remoteheads if h in cl.nodemap)
201 # all nodes in outgoing.missing are children of either:
213 # all nodes in outgoing.missing are children of either:
202 # - an element of oldheads
214 # - an element of oldheads
203 # - another element of outgoing.missing
215 # - another element of outgoing.missing
204 # - nullrev
216 # - nullrev
205 # This explains why the new heads are very simple to compute.
217 # This explains why the new heads are very simple to compute.
206 r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
218 r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
207 branches = set([None])
219 newheads = list(c.node() for c in r)
208 newmap = {None: list(c.node() for c in r)}
220 unsynced = inc and set([None]) or set()
209 oldmap = {None: oldheads}
221 return {None: (oldheads, newheads, unsynced)}
210 unsynced = inc and branches or set()
211 return oldmap, newmap, unsynced, branches, set()
212
222
213 def checkheads(repo, remote, outgoing, remoteheads, newbranch=False, inc=False):
223 def checkheads(repo, remote, outgoing, remoteheads, newbranch=False, inc=False):
214 """Check that a push won't add any outgoing head
224 """Check that a push won't add any outgoing head
215
225
216 raise Abort error and display ui message as needed.
226 raise Abort error and display ui message as needed.
217 """
227 """
218 # Check for each named branch if we're creating new remote heads.
228 # Check for each named branch if we're creating new remote heads.
219 # To be a remote head after push, node must be either:
229 # To be a remote head after push, node must be either:
220 # - unknown locally
230 # - unknown locally
221 # - a local outgoing head descended from update
231 # - a local outgoing head descended from update
222 # - a remote head that's known locally and not
232 # - a remote head that's known locally and not
223 # ancestral to an outgoing head
233 # ancestral to an outgoing head
224 if remoteheads == [nullid]:
234 if remoteheads == [nullid]:
225 # remote is empty, nothing to check.
235 # remote is empty, nothing to check.
226 return
236 return
227
237
228 if remote.capable('branchmap'):
238 if remote.capable('branchmap'):
229 bms = _branchmapsummary(repo, remote, outgoing)
239 headssum = _headssummary(repo, remote, outgoing)
230 else:
240 else:
231 bms = _oldbranchmapsummary(repo, remoteheads, outgoing, inc)
241 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
232 oldmap, newmap, unsynced, branches, newbranches = bms
242 newbranches = [branch for branch, heads in headssum.iteritems()
243 if heads[0] is None]
233 # 1. Check for new branches on the remote.
244 # 1. Check for new branches on the remote.
234 if newbranches and not newbranch: # new branch requires --new-branch
245 if newbranches and not newbranch: # new branch requires --new-branch
235 branchnames = ', '.join(sorted(newbranches))
246 branchnames = ', '.join(sorted(newbranches))
236 raise util.Abort(_("push creates new remote branches: %s!")
247 raise util.Abort(_("push creates new remote branches: %s!")
237 % branchnames,
248 % branchnames,
238 hint=_("use 'hg push --new-branch' to create"
249 hint=_("use 'hg push --new-branch' to create"
239 " new remote branches"))
250 " new remote branches"))
240
251
241 # 2. Check for new heads.
252 # 2. Check for new heads.
242 # If there are more heads after the push than before, a suitable
253 # If there are more heads after the push than before, a suitable
243 # error message, depending on unsynced status, is displayed.
254 # error message, depending on unsynced status, is displayed.
244 error = None
255 error = None
245 localbookmarks = repo._bookmarks
256 localbookmarks = repo._bookmarks
246
257
247 for branch in branches:
258 unsynced = False
248 newhs = set(newmap[branch])
259 for branch, heads in headssum.iteritems():
249 oldhs = set(oldmap[branch])
260 if heads[0] is None:
261 # Maybe we should abort if we push more than one head
262 # for new branches ?
263 continue
264 if heads[2]:
265 unsynced = True
266 oldhs = set(heads[0])
267 newhs = set(heads[1])
250 dhs = None
268 dhs = None
251 if len(newhs) > len(oldhs):
269 if len(newhs) > len(oldhs):
252 # strip updates to existing remote heads from the new heads list
253 remotebookmarks = remote.listkeys('bookmarks')
270 remotebookmarks = remote.listkeys('bookmarks')
254 bookmarkedheads = set()
271 bookmarkedheads = set()
255 for bm in localbookmarks:
272 for bm in localbookmarks:
256 rnode = remotebookmarks.get(bm)
273 rnode = remotebookmarks.get(bm)
257 if rnode and rnode in repo:
274 if rnode and rnode in repo:
258 lctx, rctx = repo[bm], repo[rnode]
275 lctx, rctx = repo[bm], repo[rnode]
259 if rctx == lctx.ancestor(rctx):
276 if rctx == lctx.ancestor(rctx):
260 bookmarkedheads.add(lctx.node())
277 bookmarkedheads.add(lctx.node())
278 # strip updates to existing remote heads from the new heads list
261 dhs = list(newhs - bookmarkedheads - oldhs)
279 dhs = list(newhs - bookmarkedheads - oldhs)
262 if dhs:
280 if dhs:
263 if error is None:
281 if error is None:
264 if branch not in ('default', None):
282 if branch not in ('default', None):
265 error = _("push creates new remote head %s "
283 error = _("push creates new remote head %s "
266 "on branch '%s'!") % (short(dhs[0]), branch)
284 "on branch '%s'!") % (short(dhs[0]), branch)
267 else:
285 else:
268 error = _("push creates new remote head %s!"
286 error = _("push creates new remote head %s!"
269 ) % short(dhs[0])
287 ) % short(dhs[0])
270 if branch in unsynced:
288 if heads[2]: # unsynced
271 hint = _("you should pull and merge or "
289 hint = _("you should pull and merge or "
272 "use push -f to force")
290 "use push -f to force")
273 else:
291 else:
274 hint = _("did you forget to merge? "
292 hint = _("did you forget to merge? "
275 "use push -f to force")
293 "use push -f to force")
276 if branch is not None:
294 if branch is not None:
277 repo.ui.note(_("new remote heads on branch '%s'\n") % branch)
295 repo.ui.note(_("new remote heads on branch '%s'\n") % branch)
278 for h in dhs:
296 for h in dhs:
279 repo.ui.note(_("new remote head %s\n") % short(h))
297 repo.ui.note(_("new remote head %s\n") % short(h))
280 if error:
298 if error:
281 raise util.Abort(error, hint=hint)
299 raise util.Abort(error, hint=hint)
282
300
283 # 6. Check for unsynced changes on involved branches.
301 # 6. Check for unsynced changes on involved branches.
284 if unsynced:
302 if unsynced:
285 repo.ui.warn(_("note: unsynced remote changes!\n"))
303 repo.ui.warn(_("note: unsynced remote changes!\n"))
286
304
287 def visibleheads(repo):
305 def visibleheads(repo):
288 """return the set of visible heads of this repo"""
306 """return the set of visible heads of this repo"""
289 # XXX we want a cache on this
307 # XXX we want a cache on this
290 sroots = repo._phasecache.phaseroots[phases.secret]
308 sroots = repo._phasecache.phaseroots[phases.secret]
291 if sroots or repo.obsstore:
309 if sroots or repo.obsstore:
292 # XXX very slow revset. storing heads or secret "boundary"
310 # XXX very slow revset. storing heads or secret "boundary"
293 # would help.
311 # would help.
294 revset = repo.set('heads(not (%ln:: + extinct()))', sroots)
312 revset = repo.set('heads(not (%ln:: + extinct()))', sroots)
295
313
296 vheads = [ctx.node() for ctx in revset]
314 vheads = [ctx.node() for ctx in revset]
297 if not vheads:
315 if not vheads:
298 vheads.append(nullid)
316 vheads.append(nullid)
299 else:
317 else:
300 vheads = repo.heads()
318 vheads = repo.heads()
301 return vheads
319 return vheads
302
320
303
321
304 def visiblebranchmap(repo):
322 def visiblebranchmap(repo):
305 """return a branchmap for the visible set"""
323 """return a branchmap for the visible set"""
306 # XXX Recomputing this data on the fly is very slow. We should build a
324 # XXX Recomputing this data on the fly is very slow. We should build a
307 # XXX cached version while computing the standard branchmap version.
325 # XXX cached version while computing the standard branchmap version.
308 sroots = repo._phasecache.phaseroots[phases.secret]
326 sroots = repo._phasecache.phaseroots[phases.secret]
309 if sroots or repo.obsstore:
327 if sroots or repo.obsstore:
310 vbranchmap = {}
328 vbranchmap = {}
311 for branch, nodes in repo.branchmap().iteritems():
329 for branch, nodes in repo.branchmap().iteritems():
312 # search for secret heads.
330 # search for secret heads.
313 for n in nodes:
331 for n in nodes:
314 if repo[n].phase() >= phases.secret:
332 if repo[n].phase() >= phases.secret:
315 nodes = None
333 nodes = None
316 break
334 break
317 # if secret heads were found we must compute them again
335 # if secret heads were found we must compute them again
318 if nodes is None:
336 if nodes is None:
319 s = repo.set('heads(branch(%s) - secret() - extinct())',
337 s = repo.set('heads(branch(%s) - secret() - extinct())',
320 branch)
338 branch)
321 nodes = [c.node() for c in s]
339 nodes = [c.node() for c in s]
322 vbranchmap[branch] = nodes
340 vbranchmap[branch] = nodes
323 else:
341 else:
324 vbranchmap = repo.branchmap()
342 vbranchmap = repo.branchmap()
325 return vbranchmap
343 return vbranchmap
General Comments 0
You need to be logged in to leave comments. Login now