setdiscovery: don't call "heads" wire command when heads specified...
Martin von Zweigbergk
r35867:5cfdf613 default
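For readers skimming the changeset: findcommonincoming in discovery.py now forwards any caller-supplied heads into setdiscovery.findcommonheads, and findcommonheads uses those heads instead of asking the server for them, so the "heads" wire command is only issued when no heads were specified. The sketch below is an illustrative paraphrase of that new control flow, not the actual Mercurial code; the initial_queries name and samplehashes argument are invented for the illustration, while remote.known and the iterbatch calls follow the peer API as it appears in the hunks further down.

    # Illustrative paraphrase of the initial wire queries in
    # setdiscovery.findcommonheads after this change (sampling, empty-repo
    # handling, and the rest of discovery are omitted).
    def initial_queries(remote, samplehashes, heads=None):
        if heads:
            # caller already knows which remote heads it cares about
            # (e.g. "hg pull -r REV"): skip the "heads" command entirely
            srvheadhashes = heads
            yesno = remote.known(samplehashes)
        else:
            # no heads given: batch "heads" and "known" into one round trip
            batch = remote.iterbatch()
            batch.heads()
            batch.known(samplehashes)
            batch.submit()
            srvheadhashes, yesno = batch.results()
        return srvheadhashes, yesno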
mercurial/discovery.py
@@ -1,530 +1,530 @@
1 # discovery.py - protocol changeset discovery functions
1 # discovery.py - protocol changeset discovery functions
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import functools
10 import functools
11
11
12 from .i18n import _
12 from .i18n import _
13 from .node import (
13 from .node import (
14 hex,
14 hex,
15 nullid,
15 nullid,
16 short,
16 short,
17 )
17 )
18
18
19 from . import (
19 from . import (
20 bookmarks,
20 bookmarks,
21 branchmap,
21 branchmap,
22 error,
22 error,
23 phases,
23 phases,
24 scmutil,
24 scmutil,
25 setdiscovery,
25 setdiscovery,
26 treediscovery,
26 treediscovery,
27 util,
27 util,
28 )
28 )
29
29
30 def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
30 def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
31 """Return a tuple (common, anyincoming, heads) used to identify the common
31 """Return a tuple (common, anyincoming, heads) used to identify the common
32 subset of nodes between repo and remote.
32 subset of nodes between repo and remote.
33
33
34 "common" is a list of (at least) the heads of the common subset.
34 "common" is a list of (at least) the heads of the common subset.
35 "anyincoming" is testable as a boolean indicating if any nodes are missing
35 "anyincoming" is testable as a boolean indicating if any nodes are missing
36 locally. If remote does not support getbundle, this actually is a list of
36 locally. If remote does not support getbundle, this actually is a list of
37 roots of the nodes that would be incoming, to be supplied to
37 roots of the nodes that would be incoming, to be supplied to
38 changegroupsubset. No code except for pull should be relying on this fact
38 changegroupsubset. No code except for pull should be relying on this fact
39 any longer.
39 any longer.
40 "heads" is either the supplied heads, or else the remote's heads.
40 "heads" is either the supplied heads, or else the remote's heads.
41 "ancestorsof" if not None, restrict the discovery to a subset defined by
41 "ancestorsof" if not None, restrict the discovery to a subset defined by
42 these nodes. Changeset outside of this set won't be considered (and
42 these nodes. Changeset outside of this set won't be considered (and
43 won't appears in "common")
43 won't appears in "common")
44
44
45 If you pass heads and they are all known locally, the response lists just
45 If you pass heads and they are all known locally, the response lists just
46 these heads in "common" and in "heads".
46 these heads in "common" and in "heads".
47
47
48 Please use findcommonoutgoing to compute the set of outgoing nodes to give
48 Please use findcommonoutgoing to compute the set of outgoing nodes to give
49 extensions a good hook into outgoing.
49 extensions a good hook into outgoing.
50 """
50 """
51
51
52 if not remote.capable('getbundle'):
52 if not remote.capable('getbundle'):
53 return treediscovery.findcommonincoming(repo, remote, heads, force)
53 return treediscovery.findcommonincoming(repo, remote, heads, force)
54
54
55 if heads:
55 if heads:
56 allknown = True
56 allknown = True
57 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
57 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
58 for h in heads:
58 for h in heads:
59 if not knownnode(h):
59 if not knownnode(h):
60 allknown = False
60 allknown = False
61 break
61 break
62 if allknown:
62 if allknown:
63 return (heads, False, heads)
63 return (heads, False, heads)
64
64
-    res = setdiscovery.findcommonheads(repo.ui, repo, remote,
+    res = setdiscovery.findcommonheads(repo.ui, repo, remote, heads,
                                        abortwhenunrelated=not force,
                                        ancestorsof=ancestorsof)
68 common, anyinc, srvheads = res
68 common, anyinc, srvheads = res
69 return (list(common), anyinc, heads or list(srvheads))
69 return (list(common), anyinc, heads or list(srvheads))
70
70
71 class outgoing(object):
71 class outgoing(object):
    '''Represents the set of nodes present in a local repo but not in a
    (possibly) remote one.

    Members:

    missing is a list of all nodes present in local but not in remote.
    common is a list of all nodes shared between the two repos.
    excluded is the list of missing changesets that shouldn't be sent remotely.
    missingheads is the list of heads of missing.
    commonheads is the list of heads of common.

    The sets are computed on demand from the heads, unless provided upfront
    by discovery.'''
85
85
86 def __init__(self, repo, commonheads=None, missingheads=None,
86 def __init__(self, repo, commonheads=None, missingheads=None,
87 missingroots=None):
87 missingroots=None):
88 # at least one of them must not be set
88 # at least one of them must not be set
89 assert None in (commonheads, missingroots)
89 assert None in (commonheads, missingroots)
90 cl = repo.changelog
90 cl = repo.changelog
91 if missingheads is None:
91 if missingheads is None:
92 missingheads = cl.heads()
92 missingheads = cl.heads()
93 if missingroots:
93 if missingroots:
94 discbases = []
94 discbases = []
95 for n in missingroots:
95 for n in missingroots:
96 discbases.extend([p for p in cl.parents(n) if p != nullid])
96 discbases.extend([p for p in cl.parents(n) if p != nullid])
97 # TODO remove call to nodesbetween.
97 # TODO remove call to nodesbetween.
98 # TODO populate attributes on outgoing instance instead of setting
98 # TODO populate attributes on outgoing instance instead of setting
99 # discbases.
99 # discbases.
100 csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
100 csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
101 included = set(csets)
101 included = set(csets)
102 missingheads = heads
102 missingheads = heads
103 commonheads = [n for n in discbases if n not in included]
103 commonheads = [n for n in discbases if n not in included]
104 elif not commonheads:
104 elif not commonheads:
105 commonheads = [nullid]
105 commonheads = [nullid]
106 self.commonheads = commonheads
106 self.commonheads = commonheads
107 self.missingheads = missingheads
107 self.missingheads = missingheads
108 self._revlog = cl
108 self._revlog = cl
109 self._common = None
109 self._common = None
110 self._missing = None
110 self._missing = None
111 self.excluded = []
111 self.excluded = []
112
112
113 def _computecommonmissing(self):
113 def _computecommonmissing(self):
114 sets = self._revlog.findcommonmissing(self.commonheads,
114 sets = self._revlog.findcommonmissing(self.commonheads,
115 self.missingheads)
115 self.missingheads)
116 self._common, self._missing = sets
116 self._common, self._missing = sets
117
117
118 @util.propertycache
118 @util.propertycache
119 def common(self):
119 def common(self):
120 if self._common is None:
120 if self._common is None:
121 self._computecommonmissing()
121 self._computecommonmissing()
122 return self._common
122 return self._common
123
123
124 @util.propertycache
124 @util.propertycache
125 def missing(self):
125 def missing(self):
126 if self._missing is None:
126 if self._missing is None:
127 self._computecommonmissing()
127 self._computecommonmissing()
128 return self._missing
128 return self._missing
129
129
130 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
130 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
131 commoninc=None, portable=False):
131 commoninc=None, portable=False):
132 '''Return an outgoing instance to identify the nodes present in repo but
132 '''Return an outgoing instance to identify the nodes present in repo but
133 not in other.
133 not in other.
134
134
135 If onlyheads is given, only nodes ancestral to nodes in onlyheads
135 If onlyheads is given, only nodes ancestral to nodes in onlyheads
136 (inclusive) are included. If you already know the local repo's heads,
136 (inclusive) are included. If you already know the local repo's heads,
137 passing them in onlyheads is faster than letting them be recomputed here.
137 passing them in onlyheads is faster than letting them be recomputed here.
138
138
139 If commoninc is given, it must be the result of a prior call to
139 If commoninc is given, it must be the result of a prior call to
140 findcommonincoming(repo, other, force) to avoid recomputing it here.
140 findcommonincoming(repo, other, force) to avoid recomputing it here.
141
141
142 If portable is given, compute more conservative common and missingheads,
142 If portable is given, compute more conservative common and missingheads,
143 to make bundles created from the instance more portable.'''
143 to make bundles created from the instance more portable.'''
144 # declare an empty outgoing object to be filled later
144 # declare an empty outgoing object to be filled later
145 og = outgoing(repo, None, None)
145 og = outgoing(repo, None, None)
146
146
147 # get common set if not provided
147 # get common set if not provided
148 if commoninc is None:
148 if commoninc is None:
149 commoninc = findcommonincoming(repo, other, force=force,
149 commoninc = findcommonincoming(repo, other, force=force,
150 ancestorsof=onlyheads)
150 ancestorsof=onlyheads)
151 og.commonheads, _any, _hds = commoninc
151 og.commonheads, _any, _hds = commoninc
152
152
153 # compute outgoing
153 # compute outgoing
154 mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
154 mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
155 if not mayexclude:
155 if not mayexclude:
156 og.missingheads = onlyheads or repo.heads()
156 og.missingheads = onlyheads or repo.heads()
157 elif onlyheads is None:
157 elif onlyheads is None:
158 # use visible heads as it should be cached
158 # use visible heads as it should be cached
159 og.missingheads = repo.filtered("served").heads()
159 og.missingheads = repo.filtered("served").heads()
160 og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
160 og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
161 else:
161 else:
162 # compute common, missing and exclude secret stuff
162 # compute common, missing and exclude secret stuff
163 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
163 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
164 og._common, allmissing = sets
164 og._common, allmissing = sets
165 og._missing = missing = []
165 og._missing = missing = []
166 og.excluded = excluded = []
166 og.excluded = excluded = []
167 for node in allmissing:
167 for node in allmissing:
168 ctx = repo[node]
168 ctx = repo[node]
169 if ctx.phase() >= phases.secret or ctx.extinct():
169 if ctx.phase() >= phases.secret or ctx.extinct():
170 excluded.append(node)
170 excluded.append(node)
171 else:
171 else:
172 missing.append(node)
172 missing.append(node)
173 if len(missing) == len(allmissing):
173 if len(missing) == len(allmissing):
174 missingheads = onlyheads
174 missingheads = onlyheads
175 else: # update missing heads
175 else: # update missing heads
176 missingheads = phases.newheads(repo, onlyheads, excluded)
176 missingheads = phases.newheads(repo, onlyheads, excluded)
177 og.missingheads = missingheads
177 og.missingheads = missingheads
178 if portable:
178 if portable:
179 # recompute common and missingheads as if -r<rev> had been given for
179 # recompute common and missingheads as if -r<rev> had been given for
180 # each head of missing, and --base <rev> for each head of the proper
180 # each head of missing, and --base <rev> for each head of the proper
181 # ancestors of missing
181 # ancestors of missing
182 og._computecommonmissing()
182 og._computecommonmissing()
183 cl = repo.changelog
183 cl = repo.changelog
184 missingrevs = set(cl.rev(n) for n in og._missing)
184 missingrevs = set(cl.rev(n) for n in og._missing)
185 og._common = set(cl.ancestors(missingrevs)) - missingrevs
185 og._common = set(cl.ancestors(missingrevs)) - missingrevs
186 commonheads = set(og.commonheads)
186 commonheads = set(og.commonheads)
187 og.missingheads = [h for h in og.missingheads if h not in commonheads]
187 og.missingheads = [h for h in og.missingheads if h not in commonheads]
188
188
189 return og
189 return og
190
190
191 def _headssummary(pushop):
191 def _headssummary(pushop):
192 """compute a summary of branch and heads status before and after push
192 """compute a summary of branch and heads status before and after push
193
193
194 return {'branch': ([remoteheads], [newheads],
194 return {'branch': ([remoteheads], [newheads],
195 [unsyncedheads], [discardedheads])} mapping
195 [unsyncedheads], [discardedheads])} mapping
196
196
197 - branch: the branch name,
197 - branch: the branch name,
198 - remoteheads: the list of remote heads known locally
198 - remoteheads: the list of remote heads known locally
199 None if the branch is new,
199 None if the branch is new,
200 - newheads: the new remote heads (known locally) with outgoing pushed,
200 - newheads: the new remote heads (known locally) with outgoing pushed,
201 - unsyncedheads: the list of remote heads unknown locally,
201 - unsyncedheads: the list of remote heads unknown locally,
202 - discardedheads: the list of heads made obsolete by the push.
202 - discardedheads: the list of heads made obsolete by the push.
203 """
203 """
204 repo = pushop.repo.unfiltered()
204 repo = pushop.repo.unfiltered()
205 remote = pushop.remote
205 remote = pushop.remote
206 outgoing = pushop.outgoing
206 outgoing = pushop.outgoing
207 cl = repo.changelog
207 cl = repo.changelog
208 headssum = {}
208 headssum = {}
209 # A. Create set of branches involved in the push.
209 # A. Create set of branches involved in the push.
210 branches = set(repo[n].branch() for n in outgoing.missing)
210 branches = set(repo[n].branch() for n in outgoing.missing)
211 remotemap = remote.branchmap()
211 remotemap = remote.branchmap()
212 newbranches = branches - set(remotemap)
212 newbranches = branches - set(remotemap)
213 branches.difference_update(newbranches)
213 branches.difference_update(newbranches)
214
214
215 # A. register remote heads
215 # A. register remote heads
216 remotebranches = set()
216 remotebranches = set()
217 for branch, heads in remote.branchmap().iteritems():
217 for branch, heads in remote.branchmap().iteritems():
218 remotebranches.add(branch)
218 remotebranches.add(branch)
219 known = []
219 known = []
220 unsynced = []
220 unsynced = []
221 knownnode = cl.hasnode # do not use nodemap until it is filtered
221 knownnode = cl.hasnode # do not use nodemap until it is filtered
222 for h in heads:
222 for h in heads:
223 if knownnode(h):
223 if knownnode(h):
224 known.append(h)
224 known.append(h)
225 else:
225 else:
226 unsynced.append(h)
226 unsynced.append(h)
227 headssum[branch] = (known, list(known), unsynced)
227 headssum[branch] = (known, list(known), unsynced)
228 # B. add new branch data
228 # B. add new branch data
229 missingctx = list(repo[n] for n in outgoing.missing)
229 missingctx = list(repo[n] for n in outgoing.missing)
230 touchedbranches = set()
230 touchedbranches = set()
231 for ctx in missingctx:
231 for ctx in missingctx:
232 branch = ctx.branch()
232 branch = ctx.branch()
233 touchedbranches.add(branch)
233 touchedbranches.add(branch)
234 if branch not in headssum:
234 if branch not in headssum:
235 headssum[branch] = (None, [], [])
235 headssum[branch] = (None, [], [])
236
236
237 # C drop data about untouched branches:
237 # C drop data about untouched branches:
238 for branch in remotebranches - touchedbranches:
238 for branch in remotebranches - touchedbranches:
239 del headssum[branch]
239 del headssum[branch]
240
240
241 # D. Update newmap with outgoing changes.
241 # D. Update newmap with outgoing changes.
242 # This will possibly add new heads and remove existing ones.
242 # This will possibly add new heads and remove existing ones.
243 newmap = branchmap.branchcache((branch, heads[1])
243 newmap = branchmap.branchcache((branch, heads[1])
244 for branch, heads in headssum.iteritems()
244 for branch, heads in headssum.iteritems()
245 if heads[0] is not None)
245 if heads[0] is not None)
246 newmap.update(repo, (ctx.rev() for ctx in missingctx))
246 newmap.update(repo, (ctx.rev() for ctx in missingctx))
247 for branch, newheads in newmap.iteritems():
247 for branch, newheads in newmap.iteritems():
248 headssum[branch][1][:] = newheads
248 headssum[branch][1][:] = newheads
249 for branch, items in headssum.iteritems():
249 for branch, items in headssum.iteritems():
250 for l in items:
250 for l in items:
251 if l is not None:
251 if l is not None:
252 l.sort()
252 l.sort()
253 headssum[branch] = items + ([],)
253 headssum[branch] = items + ([],)
254
254
    # If there is no obsstore, no post processing is needed.
256 if repo.obsstore:
256 if repo.obsstore:
257 torev = repo.changelog.rev
257 torev = repo.changelog.rev
258 futureheads = set(torev(h) for h in outgoing.missingheads)
258 futureheads = set(torev(h) for h in outgoing.missingheads)
259 futureheads |= set(torev(h) for h in outgoing.commonheads)
259 futureheads |= set(torev(h) for h in outgoing.commonheads)
260 allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
260 allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
261 for branch, heads in sorted(headssum.iteritems()):
261 for branch, heads in sorted(headssum.iteritems()):
262 remoteheads, newheads, unsyncedheads, placeholder = heads
262 remoteheads, newheads, unsyncedheads, placeholder = heads
263 result = _postprocessobsolete(pushop, allfuturecommon, newheads)
263 result = _postprocessobsolete(pushop, allfuturecommon, newheads)
264 headssum[branch] = (remoteheads, sorted(result[0]), unsyncedheads,
264 headssum[branch] = (remoteheads, sorted(result[0]), unsyncedheads,
265 sorted(result[1]))
265 sorted(result[1]))
266 return headssum
266 return headssum
267
267
268 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
268 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
269 """Compute branchmapsummary for repo without branchmap support"""
269 """Compute branchmapsummary for repo without branchmap support"""
270
270
271 # 1-4b. old servers: Check for new topological heads.
271 # 1-4b. old servers: Check for new topological heads.
272 # Construct {old,new}map with branch = None (topological branch).
272 # Construct {old,new}map with branch = None (topological branch).
273 # (code based on update)
273 # (code based on update)
274 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
274 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
275 oldheads = sorted(h for h in remoteheads if knownnode(h))
275 oldheads = sorted(h for h in remoteheads if knownnode(h))
276 # all nodes in outgoing.missing are children of either:
276 # all nodes in outgoing.missing are children of either:
277 # - an element of oldheads
277 # - an element of oldheads
278 # - another element of outgoing.missing
278 # - another element of outgoing.missing
279 # - nullrev
279 # - nullrev
    # This explains why the new heads are very simple to compute.
281 r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
281 r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
282 newheads = sorted(c.node() for c in r)
282 newheads = sorted(c.node() for c in r)
283 # set some unsynced head to issue the "unsynced changes" warning
283 # set some unsynced head to issue the "unsynced changes" warning
284 if inc:
284 if inc:
285 unsynced = [None]
285 unsynced = [None]
286 else:
286 else:
287 unsynced = []
287 unsynced = []
288 return {None: (oldheads, newheads, unsynced, [])}
288 return {None: (oldheads, newheads, unsynced, [])}
289
289
290 def _nowarnheads(pushop):
290 def _nowarnheads(pushop):
291 # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
291 # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
292 repo = pushop.repo.unfiltered()
292 repo = pushop.repo.unfiltered()
293 remote = pushop.remote
293 remote = pushop.remote
294 localbookmarks = repo._bookmarks
294 localbookmarks = repo._bookmarks
295 remotebookmarks = remote.listkeys('bookmarks')
295 remotebookmarks = remote.listkeys('bookmarks')
296 bookmarkedheads = set()
296 bookmarkedheads = set()
297
297
298 # internal config: bookmarks.pushing
298 # internal config: bookmarks.pushing
299 newbookmarks = [localbookmarks.expandname(b)
299 newbookmarks = [localbookmarks.expandname(b)
300 for b in pushop.ui.configlist('bookmarks', 'pushing')]
300 for b in pushop.ui.configlist('bookmarks', 'pushing')]
301
301
302 for bm in localbookmarks:
302 for bm in localbookmarks:
303 rnode = remotebookmarks.get(bm)
303 rnode = remotebookmarks.get(bm)
304 if rnode and rnode in repo:
304 if rnode and rnode in repo:
305 lctx, rctx = repo[bm], repo[rnode]
305 lctx, rctx = repo[bm], repo[rnode]
306 if bookmarks.validdest(repo, rctx, lctx):
306 if bookmarks.validdest(repo, rctx, lctx):
307 bookmarkedheads.add(lctx.node())
307 bookmarkedheads.add(lctx.node())
308 else:
308 else:
309 if bm in newbookmarks and bm not in remotebookmarks:
309 if bm in newbookmarks and bm not in remotebookmarks:
310 bookmarkedheads.add(repo[bm].node())
310 bookmarkedheads.add(repo[bm].node())
311
311
312 return bookmarkedheads
312 return bookmarkedheads
313
313
314 def checkheads(pushop):
314 def checkheads(pushop):
315 """Check that a push won't add any outgoing head
315 """Check that a push won't add any outgoing head
316
316
317 raise Abort error and display ui message as needed.
317 raise Abort error and display ui message as needed.
318 """
318 """
319
319
320 repo = pushop.repo.unfiltered()
320 repo = pushop.repo.unfiltered()
321 remote = pushop.remote
321 remote = pushop.remote
322 outgoing = pushop.outgoing
322 outgoing = pushop.outgoing
323 remoteheads = pushop.remoteheads
323 remoteheads = pushop.remoteheads
324 newbranch = pushop.newbranch
324 newbranch = pushop.newbranch
325 inc = bool(pushop.incoming)
325 inc = bool(pushop.incoming)
326
326
327 # Check for each named branch if we're creating new remote heads.
327 # Check for each named branch if we're creating new remote heads.
328 # To be a remote head after push, node must be either:
328 # To be a remote head after push, node must be either:
329 # - unknown locally
329 # - unknown locally
330 # - a local outgoing head descended from update
330 # - a local outgoing head descended from update
331 # - a remote head that's known locally and not
331 # - a remote head that's known locally and not
332 # ancestral to an outgoing head
332 # ancestral to an outgoing head
333 if remoteheads == [nullid]:
333 if remoteheads == [nullid]:
334 # remote is empty, nothing to check.
334 # remote is empty, nothing to check.
335 return
335 return
336
336
337 if remote.capable('branchmap'):
337 if remote.capable('branchmap'):
338 headssum = _headssummary(pushop)
338 headssum = _headssummary(pushop)
339 else:
339 else:
340 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
340 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
341 pushop.pushbranchmap = headssum
341 pushop.pushbranchmap = headssum
342 newbranches = [branch for branch, heads in headssum.iteritems()
342 newbranches = [branch for branch, heads in headssum.iteritems()
343 if heads[0] is None]
343 if heads[0] is None]
344 # 1. Check for new branches on the remote.
344 # 1. Check for new branches on the remote.
345 if newbranches and not newbranch: # new branch requires --new-branch
345 if newbranches and not newbranch: # new branch requires --new-branch
346 branchnames = ', '.join(sorted(newbranches))
346 branchnames = ', '.join(sorted(newbranches))
347 raise error.Abort(_("push creates new remote branches: %s!")
347 raise error.Abort(_("push creates new remote branches: %s!")
348 % branchnames,
348 % branchnames,
349 hint=_("use 'hg push --new-branch' to create"
349 hint=_("use 'hg push --new-branch' to create"
350 " new remote branches"))
350 " new remote branches"))
351
351
352 # 2. Find heads that we need not warn about
352 # 2. Find heads that we need not warn about
353 nowarnheads = _nowarnheads(pushop)
353 nowarnheads = _nowarnheads(pushop)
354
354
355 # 3. Check for new heads.
355 # 3. Check for new heads.
356 # If there are more heads after the push than before, a suitable
356 # If there are more heads after the push than before, a suitable
357 # error message, depending on unsynced status, is displayed.
357 # error message, depending on unsynced status, is displayed.
358 errormsg = None
358 errormsg = None
359 for branch, heads in sorted(headssum.iteritems()):
359 for branch, heads in sorted(headssum.iteritems()):
360 remoteheads, newheads, unsyncedheads, discardedheads = heads
360 remoteheads, newheads, unsyncedheads, discardedheads = heads
361 # add unsynced data
361 # add unsynced data
362 if remoteheads is None:
362 if remoteheads is None:
363 oldhs = set()
363 oldhs = set()
364 else:
364 else:
365 oldhs = set(remoteheads)
365 oldhs = set(remoteheads)
366 oldhs.update(unsyncedheads)
366 oldhs.update(unsyncedheads)
367 dhs = None # delta heads, the new heads on branch
367 dhs = None # delta heads, the new heads on branch
368 newhs = set(newheads)
368 newhs = set(newheads)
369 newhs.update(unsyncedheads)
369 newhs.update(unsyncedheads)
370 if unsyncedheads:
370 if unsyncedheads:
371 if None in unsyncedheads:
371 if None in unsyncedheads:
372 # old remote, no heads data
372 # old remote, no heads data
373 heads = None
373 heads = None
374 else:
374 else:
375 heads = scmutil.nodesummaries(repo, unsyncedheads)
375 heads = scmutil.nodesummaries(repo, unsyncedheads)
376 if heads is None:
376 if heads is None:
377 repo.ui.status(_("remote has heads that are "
377 repo.ui.status(_("remote has heads that are "
378 "not known locally\n"))
378 "not known locally\n"))
379 elif branch is None:
379 elif branch is None:
380 repo.ui.status(_("remote has heads that are "
380 repo.ui.status(_("remote has heads that are "
381 "not known locally: %s\n") % heads)
381 "not known locally: %s\n") % heads)
382 else:
382 else:
383 repo.ui.status(_("remote has heads on branch '%s' that are "
383 repo.ui.status(_("remote has heads on branch '%s' that are "
384 "not known locally: %s\n") % (branch, heads))
384 "not known locally: %s\n") % (branch, heads))
385 if remoteheads is None:
385 if remoteheads is None:
386 if len(newhs) > 1:
386 if len(newhs) > 1:
387 dhs = list(newhs)
387 dhs = list(newhs)
388 if errormsg is None:
388 if errormsg is None:
389 errormsg = (_("push creates new branch '%s' "
389 errormsg = (_("push creates new branch '%s' "
390 "with multiple heads") % (branch))
390 "with multiple heads") % (branch))
391 hint = _("merge or"
391 hint = _("merge or"
392 " see 'hg help push' for details about"
392 " see 'hg help push' for details about"
393 " pushing new heads")
393 " pushing new heads")
394 elif len(newhs) > len(oldhs):
394 elif len(newhs) > len(oldhs):
395 # remove bookmarked or existing remote heads from the new heads list
395 # remove bookmarked or existing remote heads from the new heads list
396 dhs = sorted(newhs - nowarnheads - oldhs)
396 dhs = sorted(newhs - nowarnheads - oldhs)
397 if dhs:
397 if dhs:
398 if errormsg is None:
398 if errormsg is None:
399 if branch not in ('default', None):
399 if branch not in ('default', None):
400 errormsg = _("push creates new remote head %s "
400 errormsg = _("push creates new remote head %s "
401 "on branch '%s'!") % (short(dhs[0]), branch)
401 "on branch '%s'!") % (short(dhs[0]), branch)
402 elif repo[dhs[0]].bookmarks():
402 elif repo[dhs[0]].bookmarks():
403 errormsg = _("push creates new remote head %s "
403 errormsg = _("push creates new remote head %s "
404 "with bookmark '%s'!") % (
404 "with bookmark '%s'!") % (
405 short(dhs[0]), repo[dhs[0]].bookmarks()[0])
405 short(dhs[0]), repo[dhs[0]].bookmarks()[0])
406 else:
406 else:
407 errormsg = _("push creates new remote head %s!"
407 errormsg = _("push creates new remote head %s!"
408 ) % short(dhs[0])
408 ) % short(dhs[0])
409 if unsyncedheads:
409 if unsyncedheads:
410 hint = _("pull and merge or"
410 hint = _("pull and merge or"
411 " see 'hg help push' for details about"
411 " see 'hg help push' for details about"
412 " pushing new heads")
412 " pushing new heads")
413 else:
413 else:
414 hint = _("merge or"
414 hint = _("merge or"
415 " see 'hg help push' for details about"
415 " see 'hg help push' for details about"
416 " pushing new heads")
416 " pushing new heads")
417 if branch is None:
417 if branch is None:
418 repo.ui.note(_("new remote heads:\n"))
418 repo.ui.note(_("new remote heads:\n"))
419 else:
419 else:
420 repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
420 repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
421 for h in dhs:
421 for h in dhs:
422 repo.ui.note((" %s\n") % short(h))
422 repo.ui.note((" %s\n") % short(h))
423 if errormsg:
423 if errormsg:
424 raise error.Abort(errormsg, hint=hint)
424 raise error.Abort(errormsg, hint=hint)
425
425
426 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
426 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
427 """post process the list of new heads with obsolescence information
427 """post process the list of new heads with obsolescence information
428
428
429 Exists as a sub-function to contain the complexity and allow extensions to
429 Exists as a sub-function to contain the complexity and allow extensions to
430 experiment with smarter logic.
430 experiment with smarter logic.
431
431
432 Returns (newheads, discarded_heads) tuple
432 Returns (newheads, discarded_heads) tuple
433 """
433 """
    # known issues
    #
    # * We "silently" skip processing on all changesets unknown locally
    #
    # * if <nh> is public on the remote, it won't be affected by obsolete
    #   markers and a new head is created
440
440
441 # define various utilities and containers
441 # define various utilities and containers
442 repo = pushop.repo
442 repo = pushop.repo
443 unfi = repo.unfiltered()
443 unfi = repo.unfiltered()
444 tonode = unfi.changelog.node
444 tonode = unfi.changelog.node
445 torev = unfi.changelog.nodemap.get
445 torev = unfi.changelog.nodemap.get
446 public = phases.public
446 public = phases.public
447 getphase = unfi._phasecache.phase
447 getphase = unfi._phasecache.phase
448 ispublic = (lambda r: getphase(unfi, r) == public)
448 ispublic = (lambda r: getphase(unfi, r) == public)
449 ispushed = (lambda n: torev(n) in futurecommon)
449 ispushed = (lambda n: torev(n) in futurecommon)
450 hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
450 hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
451 successorsmarkers = unfi.obsstore.successors
451 successorsmarkers = unfi.obsstore.successors
452 newhs = set() # final set of new heads
452 newhs = set() # final set of new heads
453 discarded = set() # new head of fully replaced branch
453 discarded = set() # new head of fully replaced branch
454
454
455 localcandidate = set() # candidate heads known locally
455 localcandidate = set() # candidate heads known locally
456 unknownheads = set() # candidate heads unknown locally
456 unknownheads = set() # candidate heads unknown locally
457 for h in candidate_newhs:
457 for h in candidate_newhs:
458 if h in unfi:
458 if h in unfi:
459 localcandidate.add(h)
459 localcandidate.add(h)
460 else:
460 else:
461 if successorsmarkers.get(h) is not None:
461 if successorsmarkers.get(h) is not None:
462 msg = ('checkheads: remote head unknown locally has'
462 msg = ('checkheads: remote head unknown locally has'
463 ' local marker: %s\n')
463 ' local marker: %s\n')
464 repo.ui.debug(msg % hex(h))
464 repo.ui.debug(msg % hex(h))
465 unknownheads.add(h)
465 unknownheads.add(h)
466
466
467 # fast path the simple case
467 # fast path the simple case
468 if len(localcandidate) == 1:
468 if len(localcandidate) == 1:
469 return unknownheads | set(candidate_newhs), set()
469 return unknownheads | set(candidate_newhs), set()
470
470
471 # actually process branch replacement
471 # actually process branch replacement
472 while localcandidate:
472 while localcandidate:
473 nh = localcandidate.pop()
473 nh = localcandidate.pop()
474 # run this check early to skip the evaluation of the whole branch
474 # run this check early to skip the evaluation of the whole branch
475 if (torev(nh) in futurecommon or ispublic(torev(nh))):
475 if (torev(nh) in futurecommon or ispublic(torev(nh))):
476 newhs.add(nh)
476 newhs.add(nh)
477 continue
477 continue
478
478
479 # Get all revs/nodes on the branch exclusive to this head
479 # Get all revs/nodes on the branch exclusive to this head
480 # (already filtered heads are "ignored"))
480 # (already filtered heads are "ignored"))
481 branchrevs = unfi.revs('only(%n, (%ln+%ln))',
481 branchrevs = unfi.revs('only(%n, (%ln+%ln))',
482 nh, localcandidate, newhs)
482 nh, localcandidate, newhs)
483 branchnodes = [tonode(r) for r in branchrevs]
483 branchnodes = [tonode(r) for r in branchrevs]
484
484
485 # The branch won't be hidden on the remote if
485 # The branch won't be hidden on the remote if
486 # * any part of it is public,
486 # * any part of it is public,
487 # * any part of it is considered part of the result by previous logic,
487 # * any part of it is considered part of the result by previous logic,
488 # * if we have no markers to push to obsolete it.
488 # * if we have no markers to push to obsolete it.
489 if (any(ispublic(r) for r in branchrevs)
489 if (any(ispublic(r) for r in branchrevs)
490 or any(torev(n) in futurecommon for n in branchnodes)
490 or any(torev(n) in futurecommon for n in branchnodes)
491 or any(not hasoutmarker(n) for n in branchnodes)):
491 or any(not hasoutmarker(n) for n in branchnodes)):
492 newhs.add(nh)
492 newhs.add(nh)
493 else:
493 else:
            # note: there is a corner case if there is a merge in the branch.
            # we might end up with -more- heads. However, these heads are not
            # "added" by the push, but more by the "removal" on the remote, so
            # I think it is okay to ignore them.
498 discarded.add(nh)
498 discarded.add(nh)
499 newhs |= unknownheads
499 newhs |= unknownheads
500 return newhs, discarded
500 return newhs, discarded
501
501
502 def pushingmarkerfor(obsstore, ispushed, node):
502 def pushingmarkerfor(obsstore, ispushed, node):
503 """true if some markers are to be pushed for node
503 """true if some markers are to be pushed for node
504
504
505 We cannot just look in to the pushed obsmarkers from the pushop because
505 We cannot just look in to the pushed obsmarkers from the pushop because
506 discovery might have filtered relevant markers. In addition listing all
506 discovery might have filtered relevant markers. In addition listing all
507 markers relevant to all changesets in the pushed set would be too expensive
507 markers relevant to all changesets in the pushed set would be too expensive
508 (O(len(repo)))
508 (O(len(repo)))
509
509
510 (note: There are cache opportunity in this function. but it would requires
510 (note: There are cache opportunity in this function. but it would requires
511 a two dimensional stack.)
511 a two dimensional stack.)
512 """
512 """
513 successorsmarkers = obsstore.successors
513 successorsmarkers = obsstore.successors
514 stack = [node]
514 stack = [node]
515 seen = set(stack)
515 seen = set(stack)
516 while stack:
516 while stack:
517 current = stack.pop()
517 current = stack.pop()
518 if ispushed(current):
518 if ispushed(current):
519 return True
519 return True
520 markers = successorsmarkers.get(current, ())
520 markers = successorsmarkers.get(current, ())
521 # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
521 # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
522 for m in markers:
522 for m in markers:
523 nexts = m[1] # successors
523 nexts = m[1] # successors
524 if not nexts: # this is a prune marker
524 if not nexts: # this is a prune marker
525 nexts = m[5] or () # parents
525 nexts = m[5] or () # parents
526 for n in nexts:
526 for n in nexts:
527 if n not in seen:
527 if n not in seen:
528 seen.add(n)
528 seen.add(n)
529 stack.append(n)
529 stack.append(n)
530 return False
530 return False
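The discovery.py hunk above is the caller side of the change: findcommonincoming already short-circuits when every requested head is known locally, and it now also forwards the requested heads into setdiscovery.findcommonheads. The following is a minimal sketch of that caller-side flow only; the function name, and the knownnode and rundiscovery callables standing in for repo.changelog.hasnode and setdiscovery.findcommonheads, are illustrative assumptions rather than real Mercurial APIs.

    # Sketch only; argument names and the rundiscovery callable are
    # illustrative stand-ins, not real Mercurial APIs.
    def findcommonincoming_sketch(knownnode, rundiscovery, remote, heads=None):
        if heads and all(knownnode(h) for h in heads):
            # every requested head exists locally: no wire traffic needed
            return (heads, False, heads)
        # forward heads so set discovery can skip the remote "heads" command
        common, anyinc, srvheads = rundiscovery(remote, heads=heads)
        return (list(common), anyinc, heads or list(srvheads))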
mercurial/setdiscovery.py
@@ -1,263 +1,267 @@
1 # setdiscovery.py - improved discovery of common nodeset for mercurial
1 # setdiscovery.py - improved discovery of common nodeset for mercurial
2 #
2 #
3 # Copyright 2010 Benoit Boissinot <bboissin@gmail.com>
3 # Copyright 2010 Benoit Boissinot <bboissin@gmail.com>
4 # and Peter Arrenbrecht <peter@arrenbrecht.ch>
4 # and Peter Arrenbrecht <peter@arrenbrecht.ch>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8 """
8 """
9 Algorithm works in the following way. You have two repository: local and
9 Algorithm works in the following way. You have two repository: local and
10 remote. They both contains a DAG of changelists.
10 remote. They both contains a DAG of changelists.
11
11
12 The goal of the discovery protocol is to find one set of node *common*,
12 The goal of the discovery protocol is to find one set of node *common*,
13 the set of nodes shared by local and remote.
13 the set of nodes shared by local and remote.
14
14
15 One of the issue with the original protocol was latency, it could
15 One of the issue with the original protocol was latency, it could
16 potentially require lots of roundtrips to discover that the local repo was a
16 potentially require lots of roundtrips to discover that the local repo was a
17 subset of remote (which is a very common case, you usually have few changes
17 subset of remote (which is a very common case, you usually have few changes
18 compared to upstream, while upstream probably had lots of development).
18 compared to upstream, while upstream probably had lots of development).
19
19
20 The new protocol only requires one interface for the remote repo: `known()`,
20 The new protocol only requires one interface for the remote repo: `known()`,
21 which given a set of changelists tells you if they are present in the DAG.
21 which given a set of changelists tells you if they are present in the DAG.
22
22
23 The algorithm then works as follow:
23 The algorithm then works as follow:
24
24
25 - We will be using three sets, `common`, `missing`, `unknown`. Originally
25 - We will be using three sets, `common`, `missing`, `unknown`. Originally
26 all nodes are in `unknown`.
26 all nodes are in `unknown`.
27 - Take a sample from `unknown`, call `remote.known(sample)`
27 - Take a sample from `unknown`, call `remote.known(sample)`
28 - For each node that remote knows, move it and all its ancestors to `common`
28 - For each node that remote knows, move it and all its ancestors to `common`
29 - For each node that remote doesn't know, move it and all its descendants
29 - For each node that remote doesn't know, move it and all its descendants
30 to `missing`
30 to `missing`
31 - Iterate until `unknown` is empty
31 - Iterate until `unknown` is empty
32
32
33 There are a couple optimizations, first is instead of starting with a random
33 There are a couple optimizations, first is instead of starting with a random
34 sample of missing, start by sending all heads, in the case where the local
34 sample of missing, start by sending all heads, in the case where the local
35 repo is a subset, you computed the answer in one round trip.
35 repo is a subset, you computed the answer in one round trip.
36
36
37 Then you can do something similar to the bisecting strategy used when
37 Then you can do something similar to the bisecting strategy used when
38 finding faulty changesets. Instead of random samples, you can try picking
38 finding faulty changesets. Instead of random samples, you can try picking
39 nodes that will maximize the number of nodes that will be
39 nodes that will maximize the number of nodes that will be
40 classified with it (since all ancestors or descendants will be marked as well).
40 classified with it (since all ancestors or descendants will be marked as well).
41 """
41 """
42
42
43 from __future__ import absolute_import
43 from __future__ import absolute_import
44
44
45 import collections
45 import collections
46 import random
46 import random
47
47
48 from .i18n import _
48 from .i18n import _
49 from .node import (
49 from .node import (
50 nullid,
50 nullid,
51 nullrev,
51 nullrev,
52 )
52 )
53 from . import (
53 from . import (
54 dagutil,
54 dagutil,
55 error,
55 error,
56 util,
56 util,
57 )
57 )
58
58
59 def _updatesample(dag, nodes, sample, quicksamplesize=0):
59 def _updatesample(dag, nodes, sample, quicksamplesize=0):
60 """update an existing sample to match the expected size
60 """update an existing sample to match the expected size
61
61
62 The sample is updated with nodes exponentially distant from each head of the
62 The sample is updated with nodes exponentially distant from each head of the
63 <nodes> set. (H~1, H~2, H~4, H~8, etc).
63 <nodes> set. (H~1, H~2, H~4, H~8, etc).
64
64
65 If a target size is specified, the sampling will stop once this size is
65 If a target size is specified, the sampling will stop once this size is
66 reached. Otherwise sampling will happen until roots of the <nodes> set are
66 reached. Otherwise sampling will happen until roots of the <nodes> set are
67 reached.
67 reached.
68
68
69 :dag: a dag object from dagutil
69 :dag: a dag object from dagutil
70 :nodes: set of nodes we want to discover (if None, assume the whole dag)
70 :nodes: set of nodes we want to discover (if None, assume the whole dag)
71 :sample: a sample to update
71 :sample: a sample to update
72 :quicksamplesize: optional target size of the sample"""
72 :quicksamplesize: optional target size of the sample"""
73 # if nodes is empty we scan the entire graph
73 # if nodes is empty we scan the entire graph
74 if nodes:
74 if nodes:
75 heads = dag.headsetofconnecteds(nodes)
75 heads = dag.headsetofconnecteds(nodes)
76 else:
76 else:
77 heads = dag.heads()
77 heads = dag.heads()
78 dist = {}
78 dist = {}
79 visit = collections.deque(heads)
79 visit = collections.deque(heads)
80 seen = set()
80 seen = set()
81 factor = 1
81 factor = 1
82 while visit:
82 while visit:
83 curr = visit.popleft()
83 curr = visit.popleft()
84 if curr in seen:
84 if curr in seen:
85 continue
85 continue
86 d = dist.setdefault(curr, 1)
86 d = dist.setdefault(curr, 1)
87 if d > factor:
87 if d > factor:
88 factor *= 2
88 factor *= 2
89 if d == factor:
89 if d == factor:
90 sample.add(curr)
90 sample.add(curr)
91 if quicksamplesize and (len(sample) >= quicksamplesize):
91 if quicksamplesize and (len(sample) >= quicksamplesize):
92 return
92 return
93 seen.add(curr)
93 seen.add(curr)
94 for p in dag.parents(curr):
94 for p in dag.parents(curr):
95 if not nodes or p in nodes:
95 if not nodes or p in nodes:
96 dist.setdefault(p, d + 1)
96 dist.setdefault(p, d + 1)
97 visit.append(p)
97 visit.append(p)
98
98
99 def _takequicksample(dag, nodes, size):
99 def _takequicksample(dag, nodes, size):
100 """takes a quick sample of size <size>
100 """takes a quick sample of size <size>
101
101
102 It is meant for initial sampling and focuses on querying heads and close
102 It is meant for initial sampling and focuses on querying heads and close
103 ancestors of heads.
103 ancestors of heads.
104
104
105 :dag: a dag object
105 :dag: a dag object
106 :nodes: set of nodes to discover
106 :nodes: set of nodes to discover
107 :size: the maximum size of the sample"""
107 :size: the maximum size of the sample"""
108 sample = dag.headsetofconnecteds(nodes)
108 sample = dag.headsetofconnecteds(nodes)
109 if size <= len(sample):
109 if size <= len(sample):
110 return _limitsample(sample, size)
110 return _limitsample(sample, size)
111 _updatesample(dag, None, sample, quicksamplesize=size)
111 _updatesample(dag, None, sample, quicksamplesize=size)
112 return sample
112 return sample
113
113
114 def _takefullsample(dag, nodes, size):
114 def _takefullsample(dag, nodes, size):
115 sample = dag.headsetofconnecteds(nodes)
115 sample = dag.headsetofconnecteds(nodes)
116 # update from heads
116 # update from heads
117 _updatesample(dag, nodes, sample)
117 _updatesample(dag, nodes, sample)
118 # update from roots
118 # update from roots
119 _updatesample(dag.inverse(), nodes, sample)
119 _updatesample(dag.inverse(), nodes, sample)
120 assert sample
120 assert sample
121 sample = _limitsample(sample, size)
121 sample = _limitsample(sample, size)
122 if len(sample) < size:
122 if len(sample) < size:
123 more = size - len(sample)
123 more = size - len(sample)
124 sample.update(random.sample(list(nodes - sample), more))
124 sample.update(random.sample(list(nodes - sample), more))
125 return sample
125 return sample
126
126
127 def _limitsample(sample, desiredlen):
127 def _limitsample(sample, desiredlen):
128 """return a random subset of sample of at most desiredlen item"""
128 """return a random subset of sample of at most desiredlen item"""
129 if len(sample) > desiredlen:
129 if len(sample) > desiredlen:
130 sample = set(random.sample(sample, desiredlen))
130 sample = set(random.sample(sample, desiredlen))
131 return sample
131 return sample
132
132
-def findcommonheads(ui, local, remote,
+def findcommonheads(ui, local, remote, heads=None,
                     initialsamplesize=100,
                     fullsamplesize=200,
                     abortwhenunrelated=True,
                     ancestorsof=None):
138 '''Return a tuple (common, anyincoming, remoteheads) used to identify
138 '''Return a tuple (common, anyincoming, remoteheads) used to identify
139 missing nodes from or in remote.
139 missing nodes from or in remote.
140 '''
140 '''
141 start = util.timer()
141 start = util.timer()
142
142
143 roundtrips = 0
143 roundtrips = 0
144 cl = local.changelog
144 cl = local.changelog
145 localsubset = None
145 localsubset = None
146 if ancestorsof is not None:
146 if ancestorsof is not None:
147 rev = local.changelog.rev
147 rev = local.changelog.rev
148 localsubset = [rev(n) for n in ancestorsof]
148 localsubset = [rev(n) for n in ancestorsof]
149 dag = dagutil.revlogdag(cl, localsubset=localsubset)
149 dag = dagutil.revlogdag(cl, localsubset=localsubset)
150
150
151 # early exit if we know all the specified remote heads already
151 # early exit if we know all the specified remote heads already
152 ui.debug("query 1; heads\n")
152 ui.debug("query 1; heads\n")
153 roundtrips += 1
153 roundtrips += 1
154 ownheads = dag.heads()
154 ownheads = dag.heads()
155 sample = _limitsample(ownheads, initialsamplesize)
155 sample = _limitsample(ownheads, initialsamplesize)
156 # indices between sample and externalized version must match
156 # indices between sample and externalized version must match
157 sample = list(sample)
157 sample = list(sample)
-    batch = remote.iterbatch()
-    batch.heads()
-    batch.known(dag.externalizeall(sample))
-    batch.submit()
-    srvheadhashes, yesno = batch.results()
+    if heads:
+        srvheadhashes = heads
+        yesno = remote.known(dag.externalizeall(sample))
+    else:
+        batch = remote.iterbatch()
+        batch.heads()
+        batch.known(dag.externalizeall(sample))
+        batch.submit()
+        srvheadhashes, yesno = batch.results()
163
167
164 if cl.tip() == nullid:
168 if cl.tip() == nullid:
165 if srvheadhashes != [nullid]:
169 if srvheadhashes != [nullid]:
166 return [nullid], True, srvheadhashes
170 return [nullid], True, srvheadhashes
167 return [nullid], False, []
171 return [nullid], False, []
168
172
169 # start actual discovery (we note this before the next "if" for
173 # start actual discovery (we note this before the next "if" for
170 # compatibility reasons)
174 # compatibility reasons)
171 ui.status(_("searching for changes\n"))
175 ui.status(_("searching for changes\n"))
172
176
173 srvheads = dag.internalizeall(srvheadhashes, filterunknown=True)
177 srvheads = dag.internalizeall(srvheadhashes, filterunknown=True)
174 if len(srvheads) == len(srvheadhashes):
178 if len(srvheads) == len(srvheadhashes):
175 ui.debug("all remote heads known locally\n")
179 ui.debug("all remote heads known locally\n")
176 return (srvheadhashes, False, srvheadhashes,)
180 return (srvheadhashes, False, srvheadhashes,)
177
181
178 if sample and len(ownheads) <= initialsamplesize and all(yesno):
182 if sample and len(ownheads) <= initialsamplesize and all(yesno):
179 ui.note(_("all local heads known remotely\n"))
183 ui.note(_("all local heads known remotely\n"))
180 ownheadhashes = dag.externalizeall(ownheads)
184 ownheadhashes = dag.externalizeall(ownheads)
181 return (ownheadhashes, True, srvheadhashes,)
185 return (ownheadhashes, True, srvheadhashes,)
182
186
183 # full blown discovery
187 # full blown discovery
184
188
185 # own nodes I know we both know
189 # own nodes I know we both know
186 # treat remote heads (and maybe own heads) as a first implicit sample
190 # treat remote heads (and maybe own heads) as a first implicit sample
187 # response
191 # response
188 common = cl.incrementalmissingrevs(srvheads)
192 common = cl.incrementalmissingrevs(srvheads)
189 commoninsample = set(n for i, n in enumerate(sample) if yesno[i])
193 commoninsample = set(n for i, n in enumerate(sample) if yesno[i])
190 common.addbases(commoninsample)
194 common.addbases(commoninsample)
191 # own nodes where I don't know if remote knows them
195 # own nodes where I don't know if remote knows them
192 undecided = set(common.missingancestors(ownheads))
196 undecided = set(common.missingancestors(ownheads))
193 # own nodes I know remote lacks
197 # own nodes I know remote lacks
194 missing = set()
198 missing = set()
195
199
196 full = False
200 full = False
197 while undecided:
201 while undecided:
198
202
199 if sample:
203 if sample:
200 missinginsample = [n for i, n in enumerate(sample) if not yesno[i]]
204 missinginsample = [n for i, n in enumerate(sample) if not yesno[i]]
201 missing.update(dag.descendantset(missinginsample, missing))
205 missing.update(dag.descendantset(missinginsample, missing))
202
206
203 undecided.difference_update(missing)
207 undecided.difference_update(missing)
204
208
205 if not undecided:
209 if not undecided:
206 break
210 break
207
211
208 if full or common.hasbases():
212 if full or common.hasbases():
209 if full:
213 if full:
210 ui.note(_("sampling from both directions\n"))
214 ui.note(_("sampling from both directions\n"))
211 else:
215 else:
212 ui.debug("taking initial sample\n")
216 ui.debug("taking initial sample\n")
213 samplefunc = _takefullsample
217 samplefunc = _takefullsample
214 targetsize = fullsamplesize
218 targetsize = fullsamplesize
215 else:
219 else:
216 # use even cheaper initial sample
220 # use even cheaper initial sample
217 ui.debug("taking quick initial sample\n")
221 ui.debug("taking quick initial sample\n")
218 samplefunc = _takequicksample
222 samplefunc = _takequicksample
219 targetsize = initialsamplesize
223 targetsize = initialsamplesize
220 if len(undecided) < targetsize:
224 if len(undecided) < targetsize:
221 sample = list(undecided)
225 sample = list(undecided)
222 else:
226 else:
223 sample = samplefunc(dag, undecided, targetsize)
227 sample = samplefunc(dag, undecided, targetsize)
224 sample = _limitsample(sample, targetsize)
228 sample = _limitsample(sample, targetsize)
225
229
226 roundtrips += 1
230 roundtrips += 1
227 ui.progress(_('searching'), roundtrips, unit=_('queries'))
231 ui.progress(_('searching'), roundtrips, unit=_('queries'))
228 ui.debug("query %i; still undecided: %i, sample size is: %i\n"
232 ui.debug("query %i; still undecided: %i, sample size is: %i\n"
229 % (roundtrips, len(undecided), len(sample)))
233 % (roundtrips, len(undecided), len(sample)))
230 # indices between sample and externalized version must match
234 # indices between sample and externalized version must match
231 sample = list(sample)
235 sample = list(sample)
232 yesno = remote.known(dag.externalizeall(sample))
236 yesno = remote.known(dag.externalizeall(sample))
233 full = True
237 full = True
234
238
235 if sample:
239 if sample:
236 commoninsample = set(n for i, n in enumerate(sample) if yesno[i])
240 commoninsample = set(n for i, n in enumerate(sample) if yesno[i])
237 common.addbases(commoninsample)
241 common.addbases(commoninsample)
238 common.removeancestorsfrom(undecided)
242 common.removeancestorsfrom(undecided)
239
243
240 # heads(common) == heads(common.bases) since common represents common.bases
244 # heads(common) == heads(common.bases) since common represents common.bases
241 # and all its ancestors
245 # and all its ancestors
242 result = dag.headsetofconnecteds(common.bases)
246 result = dag.headsetofconnecteds(common.bases)
243 # common.bases can include nullrev, but our contract requires us to not
247 # common.bases can include nullrev, but our contract requires us to not
244 # return any heads in that case, so discard that
248 # return any heads in that case, so discard that
245 result.discard(nullrev)
249 result.discard(nullrev)
246 elapsed = util.timer() - start
250 elapsed = util.timer() - start
247 ui.progress(_('searching'), None)
251 ui.progress(_('searching'), None)
248 ui.debug("%d total queries in %.4fs\n" % (roundtrips, elapsed))
252 ui.debug("%d total queries in %.4fs\n" % (roundtrips, elapsed))
249 msg = ('found %d common and %d unknown server heads,'
253 msg = ('found %d common and %d unknown server heads,'
250 ' %d roundtrips in %.4fs\n')
254 ' %d roundtrips in %.4fs\n')
251 missing = set(result) - set(srvheads)
255 missing = set(result) - set(srvheads)
252 ui.log('discovery', msg, len(result), len(missing), roundtrips,
256 ui.log('discovery', msg, len(result), len(missing), roundtrips,
253 elapsed)
257 elapsed)
254
258
255 if not result and srvheadhashes != [nullid]:
259 if not result and srvheadhashes != [nullid]:
256 if abortwhenunrelated:
260 if abortwhenunrelated:
257 raise error.Abort(_("repository is unrelated"))
261 raise error.Abort(_("repository is unrelated"))
258 else:
262 else:
259 ui.warn(_("warning: repository is unrelated\n"))
263 ui.warn(_("warning: repository is unrelated\n"))
260 return ({nullid}, True, srvheadhashes,)
264 return ({nullid}, True, srvheadhashes,)
261
265
262 anyincoming = (srvheadhashes != [nullid])
266 anyincoming = (srvheadhashes != [nullid])
263 return dag.externalizeall(result), anyincoming, srvheadhashes
267 return dag.externalizeall(result), anyincoming, srvheadhashes
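The module docstring at the top of setdiscovery.py describes the discovery loop in terms of three sets (common, missing, unknown). As a rough, self-contained illustration of that idea only, here is a toy classification loop over a DAG given as a child-to-parents mapping; toy_discovery and remote_known are invented names for the illustration (remote_known plays the role of the known() wire command), and the real implementation's sampling heuristics (_takequicksample/_takefullsample), batching, and the heads shortcut introduced by this changeset are all omitted.

    import random

    def toy_discovery(parents, nodes, remote_known, samplesize=2):
        """Toy version of the set-discovery loop described in the docstring.

        parents: dict mapping each node to a list of its parent nodes.
        nodes: iterable of all local nodes.
        remote_known: callable taking a list of nodes, returning booleans.
        Returns the set of local nodes believed to exist remotely.
        """
        nodes = set(nodes)

        def ancestors(node):
            # node plus everything reachable through parent links
            seen, stack = set(), [node]
            while stack:
                n = stack.pop()
                if n not in seen:
                    seen.add(n)
                    stack.extend(parents.get(n, ()))
            return seen

        def descendants(node):
            # node plus every local node that has it as an ancestor
            return {n for n in nodes if node in ancestors(n)}

        common, missing, unknown = set(), set(), set(nodes)
        while unknown:
            sample = random.sample(sorted(unknown), min(samplesize, len(unknown)))
            for node, known in zip(sample, remote_known(sample)):
                if known:
                    common |= ancestors(node)     # remote has it and its ancestors
                else:
                    missing |= descendants(node)  # remote lacks it and its descendants
            unknown -= common | missing
        return common

The production code biases the sample toward heads first (so the common "local is a subset of remote" case resolves in one round trip) and then toward nodes that best split the undecided set, which is what _takequicksample and _takefullsample above implement.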