discovery: diet discovery.prepush from non-discovery code...
Pierre-Yves David
r15932:4154338f default
@@ -132,146 +132,108 @@ def findcommonoutgoing(repo, other, only
 
     return og
 
-def prepush(repo, remote, force, revs, newbranch):
-    '''Analyze the local and remote repositories and determine which
-    changesets need to be pushed to the remote. Return value depends
-    on circumstances:
-
-    If we are not going to push anything, return a tuple (None, 1,
-    common) The third element "common" is the list of heads of the
-    common set between local and remote.
-
-    Otherwise, return a tuple (changegroup, remoteheads, futureheads),
-    where changegroup is a readable file-like object whose read()
-    returns successive changegroup chunks ready to be sent over the
-    wire, remoteheads is the list of remote heads and futureheads is
-    the list of heads of the common set between local and remote to
-    be after push completion.
-    '''
-    commoninc = findcommonincoming(repo, remote, force=force)
-    outgoing = findcommonoutgoing(repo, remote, onlyheads=revs,
-                                  commoninc=commoninc, force=force)
-    _common, inc, remoteheads = commoninc
-
-    cl = repo.changelog
-    outg = outgoing.missing
-    common = outgoing.commonheads
-
-    if not outg:
-        if outgoing.excluded:
-            repo.ui.status(_("no changes to push but %i secret changesets\n")
-                           % len(outgoing.excluded))
-        else:
-            repo.ui.status(_("no changes found\n"))
-        return None, 1, common
-
-    if not force and remoteheads != [nullid]:
-        if remote.capable('branchmap'):
-            # Check for each named branch if we're creating new remote heads.
-            # To be a remote head after push, node must be either:
-            # - unknown locally
-            # - a local outgoing head descended from update
-            # - a remote head that's known locally and not
-            #   ancestral to an outgoing head
-
-            # 1. Create set of branches involved in the push.
-            branches = set(repo[n].branch() for n in outg)
-
-            # 2. Check for new branches on the remote.
-            remotemap = remote.branchmap()
-            newbranches = branches - set(remotemap)
-            if newbranches and not newbranch: # new branch requires --new-branch
-                branchnames = ', '.join(sorted(newbranches))
-                raise util.Abort(_("push creates new remote branches: %s!")
-                                   % branchnames,
-                                 hint=_("use 'hg push --new-branch' to create"
-                                        " new remote branches"))
-            branches.difference_update(newbranches)
-
-            # 3. Construct the initial oldmap and newmap dicts.
-            # They contain information about the remote heads before and
-            # after the push, respectively.
-            # Heads not found locally are not included in either dict,
-            # since they won't be affected by the push.
-            # unsynced contains all branches with incoming changesets.
-            oldmap = {}
-            newmap = {}
-            unsynced = set()
-            for branch in branches:
-                remotebrheads = remotemap[branch]
-                prunedbrheads = [h for h in remotebrheads if h in cl.nodemap]
-                oldmap[branch] = prunedbrheads
-                newmap[branch] = list(prunedbrheads)
-                if len(remotebrheads) > len(prunedbrheads):
-                    unsynced.add(branch)
-
-            # 4. Update newmap with outgoing changes.
-            # This will possibly add new heads and remove existing ones.
-            ctxgen = (repo[n] for n in outg)
-            repo._updatebranchcache(newmap, ctxgen)
-
-        else:
-            # 1-4b. old servers: Check for new topological heads.
-            # Construct {old,new}map with branch = None (topological branch).
-            # (code based on _updatebranchcache)
-            oldheads = set(h for h in remoteheads if h in cl.nodemap)
-            newheads = oldheads.union(outg)
-            if len(newheads) > 1:
-                for latest in reversed(outg):
-                    if latest not in newheads:
-                        continue
-                    minhrev = min(cl.rev(h) for h in newheads)
-                    reachable = cl.reachable(latest, cl.node(minhrev))
-                    reachable.remove(latest)
-                    newheads.difference_update(reachable)
-            branches = set([None])
-            newmap = {None: newheads}
-            oldmap = {None: oldheads}
-            unsynced = inc and branches or set()
-
-        # 5. Check for new heads.
-        # If there are more heads after the push than before, a suitable
-        # error message, depending on unsynced status, is displayed.
-        error = None
-        for branch in branches:
-            newhs = set(newmap[branch])
-            oldhs = set(oldmap[branch])
-            if len(newhs) > len(oldhs):
-                dhs = list(newhs - oldhs)
-                if error is None:
-                    if branch not in ('default', None):
-                        error = _("push creates new remote head %s "
-                                  "on branch '%s'!") % (short(dhs[0]), branch)
-                    else:
-                        error = _("push creates new remote head %s!"
-                                  ) % short(dhs[0])
-                    if branch in unsynced:
-                        hint = _("you should pull and merge or "
-                                 "use push -f to force")
-                    else:
-                        hint = _("did you forget to merge? "
-                                 "use push -f to force")
-                if branch is not None:
-                    repo.ui.note(_("new remote heads on branch '%s'\n") % branch)
-                for h in dhs:
-                    repo.ui.note(_("new remote head %s\n") % short(h))
-        if error:
-            raise util.Abort(error, hint=hint)
-
-        # 6. Check for unsynced changes on involved branches.
-        if unsynced:
-            repo.ui.warn(_("note: unsynced remote changes!\n"))
-
-    if revs is None and not outgoing.excluded:
-        # push everything,
-        # use the fast path, no race possible on push
-        cg = repo._changegroup(outg, 'push')
-    else:
-        cg = repo.getlocalbundle('push', outgoing)
-    # no need to compute outg ancestor. All node in outg have either:
-    # - parents in outg
-    # - parents in common
-    # - nullid parent
-    rset = repo.set('heads(%ln + %ln)', common, outg)
-    futureheads = [ctx.node() for ctx in rset]
-    return cg, remoteheads, futureheads
+def checkheads(repo, remote, outgoing, remoteheads, newbranch=False):
+    """Check that a push won't add any outgoing head
+
+    raise Abort error and display ui message as needed.
+    """
+    if remoteheads == [nullid]:
+        # remote is empty, nothing to check.
+        return
+
+    cl = repo.changelog
+    if remote.capable('branchmap'):
+        # Check for each named branch if we're creating new remote heads.
+        # To be a remote head after push, node must be either:
+        # - unknown locally
+        # - a local outgoing head descended from update
+        # - a remote head that's known locally and not
+        #   ancestral to an outgoing head
+
+        # 1. Create set of branches involved in the push.
+        branches = set(repo[n].branch() for n in outgoing.missing)
+
+        # 2. Check for new branches on the remote.
+        remotemap = remote.branchmap()
+        newbranches = branches - set(remotemap)
+        if newbranches and not newbranch: # new branch requires --new-branch
+            branchnames = ', '.join(sorted(newbranches))
+            raise util.Abort(_("push creates new remote branches: %s!")
+                               % branchnames,
+                             hint=_("use 'hg push --new-branch' to create"
+                                    " new remote branches"))
+        branches.difference_update(newbranches)
+
+        # 3. Construct the initial oldmap and newmap dicts.
+        # They contain information about the remote heads before and
+        # after the push, respectively.
+        # Heads not found locally are not included in either dict,
+        # since they won't be affected by the push.
+        # unsynced contains all branches with incoming changesets.
+        oldmap = {}
+        newmap = {}
+        unsynced = set()
+        for branch in branches:
+            remotebrheads = remotemap[branch]
+            prunedbrheads = [h for h in remotebrheads if h in cl.nodemap]
+            oldmap[branch] = prunedbrheads
+            newmap[branch] = list(prunedbrheads)
+            if len(remotebrheads) > len(prunedbrheads):
+                unsynced.add(branch)
+
+        # 4. Update newmap with outgoing changes.
+        # This will possibly add new heads and remove existing ones.
+        ctxgen = (repo[n] for n in outgoing.missing)
+        repo._updatebranchcache(newmap, ctxgen)
+
+    else:
+        # 1-4b. old servers: Check for new topological heads.
+        # Construct {old,new}map with branch = None (topological branch).
+        # (code based on _updatebranchcache)
+        oldheads = set(h for h in remoteheads if h in cl.nodemap)
+        newheads = oldheads.union(outg)
+        if len(newheads) > 1:
+            for latest in reversed(outg):
+                if latest not in newheads:
+                    continue
+                minhrev = min(cl.rev(h) for h in newheads)
+                reachable = cl.reachable(latest, cl.node(minhrev))
+                reachable.remove(latest)
+                newheads.difference_update(reachable)
+        branches = set([None])
+        newmap = {None: newheads}
+        oldmap = {None: oldheads}
+        unsynced = inc and branches or set()
+
+    # 5. Check for new heads.
+    # If there are more heads after the push than before, a suitable
+    # error message, depending on unsynced status, is displayed.
+    error = None
+    for branch in branches:
+        newhs = set(newmap[branch])
+        oldhs = set(oldmap[branch])
+        if len(newhs) > len(oldhs):
+            dhs = list(newhs - oldhs)
+            if error is None:
+                if branch not in ('default', None):
+                    error = _("push creates new remote head %s "
+                              "on branch '%s'!") % (short(dhs[0]), branch)
+                else:
+                    error = _("push creates new remote head %s!"
+                              ) % short(dhs[0])
+                if branch in unsynced:
+                    hint = _("you should pull and merge or "
+                             "use push -f to force")
+                else:
+                    hint = _("did you forget to merge? "
+                             "use push -f to force")
+            if branch is not None:
+                repo.ui.note(_("new remote heads on branch '%s'\n") % branch)
+            for h in dhs:
+                repo.ui.note(_("new remote head %s\n") % short(h))
+    if error:
+        raise util.Abort(error, hint=hint)
+
+    # 6. Check for unsynced changes on involved branches.
+    if unsynced:
+        repo.ui.warn(_("note: unsynced remote changes!\n"))
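Taken together with the localrepo.py hunks below, the refactoring moves every non-discovery concern (changegroup creation, status messages, return values) out of discovery.py: checkheads now only validates the outgoing set and raises util.Abort when a problem is found. Note that, as rendered above, the old-server "else:" branch still refers to outg and inc, names that checkheads itself no longer defines; only the branchmap-capable path is self-contained in this hunk. A minimal caller-side sketch of how the pieces are meant to be combined after this change; it mirrors the new localrepository.push body shown below, and the wrapper function and its name are illustrative only, not part of the patch.

# Sketch only: driving discovery from push-side code after this change.
# The helper name _pushdiscovery is illustrative, not part of the patch.
from mercurial import discovery

def _pushdiscovery(repo, remote, revs=None, force=False, newbranch=False):
    commoninc = discovery.findcommonincoming(repo, remote, force=force)
    common, inc, remoteheads = commoninc
    outgoing = discovery.findcommonoutgoing(repo, remote, onlyheads=revs,
                                            commoninc=commoninc, force=force)
    if outgoing.missing and not force:
        # checkheads only validates now; it raises util.Abort instead of
        # returning a changegroup the way the old prepush did
        discovery.checkheads(repo, remote, outgoing, remoteheads, newbranch)
    return outgoing, remoteheads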
@@ -1606,26 +1606,59 @@ class localrepository(repo.repository):
         # get local lock as we might write phase data
         locallock = self.lock()
         try:
-            cg, remote_heads, fut = discovery.prepush(self, remote, force,
-                                                      revs, newbranch)
-            ret = remote_heads
-            # create a callback for addchangegroup.
-            # If will be used branch of the conditionnal too.
-            if cg is not None:
+            # discovery
+            fci = discovery.findcommonincoming
+            commoninc = fci(self, remote, force=force)
+            common, inc, remoteheads = commoninc
+            fco = discovery.findcommonoutgoing
+            outgoing = fco(self, remote, onlyheads=revs,
+                           commoninc=commoninc, force=force)
+
+
+            if not outgoing.missing:
+                # nothing to push
+                if outgoing.excluded:
+                    msg = "no changes to push but %i secret changesets\n"
+                    self.ui.status(_(msg) % len(outgoing.excluded))
+                else:
+                    self.ui.status(_("no changes found\n"))
+                fut = outgoing.common
+                ret = 1
+            else:
+                # something to push
+                if not force:
+                    discovery.checkheads(self, remote, outgoing,
+                                         remoteheads, newbranch)
+
+                # create a changegroup from local
+                if revs is None and not outgoing.excluded:
+                    # push everything,
+                    # use the fast path, no race possible on push
+                    cg = self._changegroup(outgoing.missing, 'push')
+                else:
+                    cg = self.getlocalbundle('push', outgoing)
+
+                # apply changegroup to remote
                 if unbundle:
                     # local repo finds heads on server, finds out what
                     # revs it must push. once revs transferred, if server
                     # finds it has different heads (someone else won
                     # commit/push race), server aborts.
                     if force:
-                        remote_heads = ['force']
+                        remoteheads = ['force']
                     # ssh: return remote's addchangegroup()
                     # http: return remote's addchangegroup() or 0 for error
-                    ret = remote.unbundle(cg, remote_heads, 'push')
+                    ret = remote.unbundle(cg, remoteheads, 'push')
                 else:
                     # we return an integer indicating remote head count change
                     ret = remote.addchangegroup(cg, 'push', self.url())
 
+                # compute what should be the now common
+                #
+                # XXX If push failed we should use strict common and not
+                # future to avoid pushing phase data on unknown changeset.
+                # This is to done later.
+                fut = outgoing.commonheads + outgoing.missingheads
             # even when we don't push, exchanging phase data is useful
             remotephases = remote.listkeys('phases')
             if not remotephases: # old server or public only repo
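Condensing the new push body above into a hedged sketch of its two key decisions (the helper names are illustrative, not part of the patch): the fast changegroup path is only taken when the whole repository is pushed and nothing is excluded as secret, and the expected post-push common set is now simply the old common heads plus everything that was just sent.

# Sketch only: bundle selection and "future common" computation, mirroring
# the code added to localrepository.push above; helpers are illustrative.
def _buildpushcg(repo, outgoing, revs):
    if revs is None and not outgoing.excluded:
        # pushing everything: no race possible, take the fast path
        return repo._changegroup(outgoing.missing, 'push')
    # partial push, or secret changesets excluded: bundle only the
    # outgoing set
    return repo.getlocalbundle('push', outgoing)

def _futurecommon(outgoing):
    # once the push succeeds, everything we sent becomes common as well
    return outgoing.commonheads + outgoing.missingheads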
@@ -1641,10 +1674,6 @@ class localrepository(repo.repository):
             phases.advanceboundary(self, phases.public, pheads)
             phases.advanceboundary(self, phases.draft, fut)
             ### Apply local phase on remote
-            #
-            # XXX If push failed we should use strict common and not
-            # future to avoid pushing phase data on unknown changeset.
-            # This is to done later.
 
             # Get the list of all revs draft on remote by public here.
             # XXX Beware that revset break if droots is not strictly
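For context, a hedged sketch of how the heads computed during push feed the phase exchange in this hunk; pheads (heads the remote reports as public) and fut (the expected post-push common) keep the names used above, and the wrapper is illustrative only.

# Sketch only: local phase bookkeeping after a push, mirroring the
# advanceboundary calls shown in the hunk above.
from mercurial import phases

def _advancelocalphases(repo, pheads, fut):
    # heads the remote reports as public become public locally too
    phases.advanceboundary(repo, phases.public, pheads)
    # the pushed changesets and their common ancestry are at most draft
    phases.advanceboundary(repo, phases.draft, fut)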
@@ -438,9 +438,6 @@
   >   if not st is None and not getkind(st.st_mode) in (regkind, lnkkind):
   warning: line over 80 characters
   mercurial/discovery.py:0:
-  >                     repo.ui.note(_("new remote heads on branch '%s'\n") % branch)
-  warning: line over 80 characters
-  mercurial/discovery.py:0:
   >     If onlyheads is given, only nodes ancestral to nodes in onlyheads (inclusive)
   warning: line over 80 characters
   mercurial/discovery.py:0: