exchange: pass pushop to discovery.checkheads...
Ryan McElroy
r26935:c4a7bbc7 default
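This refactor collapses checkheads' argument list into the single pushoperation object. The call site lives in exchange.py outside the hunks shown below, but the change amounts to roughly this (a hedged sketch, not the verbatim caller):

    # before: the caller unpacked the push state itself
    #   discovery.checkheads(unfi, pushop.remote, pushop.outgoing,
    #                        pushop.remoteheads, pushop.newbranch,
    #                        bool(pushop.incoming), newbookmarks)
    # after: the whole operation object is handed over and unpacked inside
    discovery.checkheads(pushop)
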
@@ -1,403 +1,413 @@
# discovery.py - protocol changeset discovery functions
#
# Copyright 2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from .i18n import _
from .node import (
    nullid,
    short,
)

from . import (
    bookmarks,
    branchmap,
    error,
    obsolete,
    phases,
    setdiscovery,
    treediscovery,
    util,
)

def findcommonincoming(repo, remote, heads=None, force=False):
    """Return a tuple (common, anyincoming, heads) used to identify the common
    subset of nodes between repo and remote.

    "common" is a list of (at least) the heads of the common subset.
    "anyincoming" is testable as a boolean indicating if any nodes are missing
    locally. If remote does not support getbundle, this actually is a list of
    roots of the nodes that would be incoming, to be supplied to
    changegroupsubset. No code except for pull should be relying on this fact
    any longer.
    "heads" is either the supplied heads, or else the remote's heads.

    If you pass heads and they are all known locally, the response lists just
    these heads in "common" and in "heads".

    Please use findcommonoutgoing to compute the set of outgoing nodes to give
    extensions a good hook into outgoing.
    """

    if not remote.capable('getbundle'):
        return treediscovery.findcommonincoming(repo, remote, heads, force)

    if heads:
        allknown = True
        knownnode = repo.changelog.hasnode # no nodemap until it is filtered
        for h in heads:
            if not knownnode(h):
                allknown = False
                break
        if allknown:
            return (heads, False, heads)

    res = setdiscovery.findcommonheads(repo.ui, repo, remote,
                                       abortwhenunrelated=not force)
    common, anyinc, srvheads = res
    return (list(common), anyinc, heads or list(srvheads))

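A minimal usage sketch of the tuple returned above (hypothetical helper; hg.peer and the ui/repo objects are assumed to be set up by the caller):

    from mercurial import discovery, hg

    def anyincoming(ui, repo, remoteurl):
        remote = hg.peer(repo, {}, remoteurl)
        common, anyinc, heads = discovery.findcommonincoming(repo, remote)
        # "anyinc" is only guaranteed to be testable as a boolean
        return bool(anyinc)
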
class outgoing(object):
    '''Represents the set of nodes present in a local repo but not in a
    (possibly) remote one.

    Members:

    missing is a list of all nodes present in local but not in remote.
    common is a list of all nodes shared between the two repos.
    excluded is the list of missing changesets that shouldn't be sent remotely.
    missingheads is the list of heads of missing.
    commonheads is the list of heads of common.

    The sets are computed on demand from the heads, unless provided upfront
    by discovery.'''

    def __init__(self, revlog, commonheads, missingheads):
        self.commonheads = commonheads
        self.missingheads = missingheads
        self._revlog = revlog
        self._common = None
        self._missing = None
        self.excluded = []

    def _computecommonmissing(self):
        sets = self._revlog.findcommonmissing(self.commonheads,
                                              self.missingheads)
        self._common, self._missing = sets

    @util.propertycache
    def common(self):
        if self._common is None:
            self._computecommonmissing()
        return self._common

    @util.propertycache
    def missing(self):
        if self._missing is None:
            self._computecommonmissing()
        return self._missing

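A quick illustration of the lazy computation above (hypothetical sketch; the heads would normally come from discovery):

    from mercurial import discovery

    def countmissing(repo, commonheads, localheads):
        og = discovery.outgoing(repo.changelog, commonheads, localheads)
        # accessing og.missing triggers _computecommonmissing() on demand
        return len(og.missing)
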
def findcommonoutgoing(repo, other, onlyheads=None, force=False,
                       commoninc=None, portable=False):
    '''Return an outgoing instance to identify the nodes present in repo but
    not in other.

    If onlyheads is given, only nodes ancestral to nodes in onlyheads
    (inclusive) are included. If you already know the local repo's heads,
    passing them in onlyheads is faster than letting them be recomputed here.

    If commoninc is given, it must be the result of a prior call to
    findcommonincoming(repo, other, force) to avoid recomputing it here.

    If portable is given, compute more conservative common and missingheads,
    to make bundles created from the instance more portable.'''
    # declare an empty outgoing object to be filled later
    og = outgoing(repo.changelog, None, None)

    # get common set if not provided
    if commoninc is None:
        commoninc = findcommonincoming(repo, other, force=force)
    og.commonheads, _any, _hds = commoninc

    # compute outgoing
    mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
    if not mayexclude:
        og.missingheads = onlyheads or repo.heads()
    elif onlyheads is None:
        # use visible heads as it should be cached
        og.missingheads = repo.filtered("served").heads()
        og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
    else:
        # compute common, missing and exclude secret stuff
        sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
        og._common, allmissing = sets
        og._missing = missing = []
        og.excluded = excluded = []
        for node in allmissing:
            ctx = repo[node]
            if ctx.phase() >= phases.secret or ctx.extinct():
                excluded.append(node)
            else:
                missing.append(node)
        if len(missing) == len(allmissing):
            missingheads = onlyheads
        else: # update missing heads
            missingheads = phases.newheads(repo, onlyheads, excluded)
        og.missingheads = missingheads
    if portable:
        # recompute common and missingheads as if -r<rev> had been given for
        # each head of missing, and --base <rev> for each head of the proper
        # ancestors of missing
        og._computecommonmissing()
        cl = repo.changelog
        missingrevs = set(cl.rev(n) for n in og._missing)
        og._common = set(cl.ancestors(missingrevs)) - missingrevs
        commonheads = set(og.commonheads)
        og.missingheads = [h for h in og.missingheads if h not in commonheads]

    return og

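A hedged sketch of the intended call pattern, reusing a prior findcommonincoming result as the docstring recommends:

    from mercurial import discovery

    def outgoingnodes(repo, remote, force=False):
        commoninc = discovery.findcommonincoming(repo, remote, force=force)
        og = discovery.findcommonoutgoing(repo, remote, commoninc=commoninc,
                                          force=force)
        return og.missing  # the nodes a push would send
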
def _headssummary(repo, remote, outgoing):
    """compute a summary of branch and heads status before and after push

    return {'branch': ([remoteheads], [newheads], [unsyncedheads])} mapping

    - branch: the branch name
    - remoteheads: the list of remote heads known locally
                   None if the branch is new
    - newheads: the new remote heads (known locally) with outgoing pushed
    - unsyncedheads: the list of remote heads unknown locally.
    """
    cl = repo.changelog
    headssum = {}
    # A. Create set of branches involved in the push.
    branches = set(repo[n].branch() for n in outgoing.missing)
    remotemap = remote.branchmap()
    newbranches = branches - set(remotemap)
    branches.difference_update(newbranches)

    # A. register remote heads
    remotebranches = set()
    for branch, heads in remote.branchmap().iteritems():
        remotebranches.add(branch)
        known = []
        unsynced = []
        knownnode = cl.hasnode # do not use nodemap until it is filtered
        for h in heads:
            if knownnode(h):
                known.append(h)
            else:
                unsynced.append(h)
        headssum[branch] = (known, list(known), unsynced)
    # B. add new branch data
    missingctx = list(repo[n] for n in outgoing.missing)
    touchedbranches = set()
    for ctx in missingctx:
        branch = ctx.branch()
        touchedbranches.add(branch)
        if branch not in headssum:
            headssum[branch] = (None, [], [])

    # C drop data about untouched branches:
    for branch in remotebranches - touchedbranches:
        del headssum[branch]

    # D. Update newmap with outgoing changes.
    # This will possibly add new heads and remove existing ones.
    newmap = branchmap.branchcache((branch, heads[1])
                                   for branch, heads in headssum.iteritems()
                                   if heads[0] is not None)
    newmap.update(repo, (ctx.rev() for ctx in missingctx))
    for branch, newheads in newmap.iteritems():
        headssum[branch][1][:] = newheads
    return headssum

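To make the returned mapping concrete, a small hypothetical consumer mirroring the head-count comparison that checkheads performs below:

    def branchesgainingheads(headssum):
        gaining = []
        for branch, (remoteheads, newheads, unsynced) in headssum.items():
            if remoteheads is None:
                continue  # brand new branch on the remote, checked separately
            if len(newheads) > len(remoteheads):
                gaining.append(branch)  # push would add a head here
        return gaining
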
def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
    """Compute branchmapsummary for repo without branchmap support"""

    # 1-4b. old servers: Check for new topological heads.
    # Construct {old,new}map with branch = None (topological branch).
    # (code based on update)
    knownnode = repo.changelog.hasnode # no nodemap until it is filtered
    oldheads = set(h for h in remoteheads if knownnode(h))
    # all nodes in outgoing.missing are children of either:
    # - an element of oldheads
    # - another element of outgoing.missing
    # - nullrev
    # This explains why the new heads are very simple to compute.
    r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
    newheads = list(c.node() for c in r)
    # set some unsynced head to issue the "unsynced changes" warning
    if inc:
        unsynced = set([None])
    else:
        unsynced = set()
    return {None: (oldheads, newheads, unsynced)}

def _nowarnheads(repo, remote, newbookmarks):
    # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
    localbookmarks = repo._bookmarks
    remotebookmarks = remote.listkeys('bookmarks')
    bookmarkedheads = set()
    for bm in localbookmarks:
        rnode = remotebookmarks.get(bm)
        if rnode and rnode in repo:
            lctx, rctx = repo[bm], repo[rnode]
            if bookmarks.validdest(repo, rctx, lctx):
                bookmarkedheads.add(lctx.node())
        else:
            if bm in newbookmarks and bm not in remotebookmarks:
                bookmarkedheads.add(repo[bm].node())

    return bookmarkedheads

-def checkheads(repo, remote, outgoing, remoteheads, newbranch=False, inc=False,
-               newbookmarks=[]):
+def checkheads(pushop):
    """Check that a push won't add any outgoing head

    raise Abort error and display ui message as needed.
    """
+    repo = pushop.repo.unfiltered()
+    remote = pushop.remote
+    outgoing = pushop.outgoing
+    remoteheads = pushop.remoteheads
+    newbranch = pushop.newbranch
+    inc = bool(pushop.incoming)
+
+    # internal config: bookmarks.pushing
+    newbookmarks = pushop.ui.configlist('bookmarks', 'pushing')
+
    # Check for each named branch if we're creating new remote heads.
    # To be a remote head after push, node must be either:
    # - unknown locally
    # - a local outgoing head descended from update
    # - a remote head that's known locally and not
    #   ancestral to an outgoing head
    if remoteheads == [nullid]:
        # remote is empty, nothing to check.
        return

    if remote.capable('branchmap'):
        headssum = _headssummary(repo, remote, outgoing)
    else:
        headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
    newbranches = [branch for branch, heads in headssum.iteritems()
                   if heads[0] is None]
    # 1. Check for new branches on the remote.
    if newbranches and not newbranch: # new branch requires --new-branch
        branchnames = ', '.join(sorted(newbranches))
        raise error.Abort(_("push creates new remote branches: %s!")
                          % branchnames,
                          hint=_("use 'hg push --new-branch' to create"
                                 " new remote branches"))

    # 2. Find heads that we need not warn about
    nowarnheads = _nowarnheads(repo, remote, newbookmarks)

    # 3. Check for new heads.
    # If there are more heads after the push than before, a suitable
    # error message, depending on unsynced status, is displayed.
    errormsg = None
    # If there is no obsstore, allfuturecommon won't be used, so no
    # need to compute it.
    if repo.obsstore:
        allmissing = set(outgoing.missing)
        cctx = repo.set('%ld', outgoing.common)
        allfuturecommon = set(c.node() for c in cctx)
        allfuturecommon.update(allmissing)
    for branch, heads in sorted(headssum.iteritems()):
        remoteheads, newheads, unsyncedheads = heads
        candidate_newhs = set(newheads)
        # add unsynced data
        if remoteheads is None:
            oldhs = set()
        else:
            oldhs = set(remoteheads)
        oldhs.update(unsyncedheads)
        candidate_newhs.update(unsyncedheads)
        dhs = None # delta heads, the new heads on branch
        discardedheads = set()
        if not repo.obsstore:
            newhs = candidate_newhs
        else:
            # remove future heads which are actually obsoleted by another
            # pushed element:
            #
            # XXX as above, There are several cases this code does not handle
            # XXX properly
            #
            # (1) if <nh> is public, it won't be affected by obsolete marker
            #     and a new is created
            #
            # (2) if the new heads have ancestors which are not obsolete and
            #     not ancestors of any other heads we will have a new head too.
            #
            # These two cases will be easy to handle for known changeset but
            # much more tricky for unsynced changes.
            #
            # In addition, this code is confused by prune as it only looks for
            # successors of the heads (none if pruned) leading to issue4354
            newhs = set()
            for nh in candidate_newhs:
                if nh in repo and repo[nh].phase() <= phases.public:
                    newhs.add(nh)
                else:
                    for suc in obsolete.allsuccessors(repo.obsstore, [nh]):
                        if suc != nh and suc in allfuturecommon:
                            discardedheads.add(nh)
                            break
                    else:
                        newhs.add(nh)
        unsynced = sorted(h for h in unsyncedheads if h not in discardedheads)
        if unsynced:
            if None in unsynced:
                # old remote, no heads data
                heads = None
            elif len(unsynced) <= 4 or repo.ui.verbose:
                heads = ' '.join(short(h) for h in unsynced)
            else:
                heads = (' '.join(short(h) for h in unsynced[:4]) +
                         ' ' + _("and %s others") % (len(unsynced) - 4))
            if heads is None:
                repo.ui.status(_("remote has heads that are "
                                 "not known locally\n"))
            elif branch is None:
                repo.ui.status(_("remote has heads that are "
                                 "not known locally: %s\n") % heads)
            else:
                repo.ui.status(_("remote has heads on branch '%s' that are "
                                 "not known locally: %s\n") % (branch, heads))
        if remoteheads is None:
            if len(newhs) > 1:
                dhs = list(newhs)
                if errormsg is None:
                    errormsg = (_("push creates new branch '%s' "
                                  "with multiple heads") % (branch))
                    hint = _("merge or"
                             " see \"hg help push\" for details about"
                             " pushing new heads")
        elif len(newhs) > len(oldhs):
            # remove bookmarked or existing remote heads from the new heads list
            dhs = sorted(newhs - nowarnheads - oldhs)
        if dhs:
            if errormsg is None:
                if branch not in ('default', None):
                    errormsg = _("push creates new remote head %s "
                                 "on branch '%s'!") % (short(dhs[0]), branch)
                elif repo[dhs[0]].bookmarks():
                    errormsg = _("push creates new remote head %s "
                                 "with bookmark '%s'!") % (
                                 short(dhs[0]), repo[dhs[0]].bookmarks()[0])
                else:
                    errormsg = _("push creates new remote head %s!"
                                 ) % short(dhs[0])
                if unsyncedheads:
                    hint = _("pull and merge or"
                             " see \"hg help push\" for details about"
                             " pushing new heads")
                else:
                    hint = _("merge or"
                             " see \"hg help push\" for details about"
                             " pushing new heads")
            if branch is None:
                repo.ui.note(_("new remote heads:\n"))
            else:
                repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
            for h in dhs:
                repo.ui.note((" %s\n") % short(h))
    if errormsg:
        raise error.Abort(errormsg, hint=hint)
@@ -1,1857 +1,1851 @@
# exchange.py - utility to exchange data between repos.
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
from node import hex, nullid
import errno, urllib, urllib2
import util, scmutil, changegroup, base85, error
import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
import lock as lockmod
import streamclone
import sslutil
import tags
import url as urlmod

# Maps bundle compression human names to internal representation.
_bundlespeccompressions = {'none': None,
                           'bzip2': 'BZ',
                           'gzip': 'GZ',
                          }

# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {'v1': '01',
                         'v2': '02',
                         'packed1': 's1',
                         'bundle2': '02', #legacy
                        }

def parsebundlespec(repo, spec, strict=True, externalnames=False):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    If ``externalnames`` is False (the default), the human-centric names will
    be converted to their internal representation.

    Returns a 3-tuple of (compression, version, parameters). Compression will
    be ``None`` if not in strict mode and a compression isn't defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            key = urllib.unquote(key)
            value = urllib.unquote(value)
            params[key] = value

        return version, params


    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
                _('invalid bundle specification; '
                  'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        compression, version = spec.split('-', 1)

        if compression not in _bundlespeccompressions:
            raise error.UnsupportedBundleSpecification(
                    _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in _bundlespeccompressions:
            compression = spec
            version = 'v1'
            if 'generaldelta' in repo.requirements:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle specification') % spec)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                    _('missing support for repository features: %s') %
                    ', '.join(sorted(missingreqs)))

    if not externalnames:
        compression = _bundlespeccompressions[compression]
        version = _bundlespeccgversions[version]
    return compression, version, params

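A brief illustration of the mapping performed above; the expected results follow directly from the two tables at the top of this file (repo is any local repository object):

    compression, cgversion, params = parsebundlespec(repo, 'gzip-v2')
    assert (compression, cgversion, params) == ('GZ', '02', {})

    # parameters ride along after ';' and stay URI-decoded strings
    compression, cgversion, params = parsebundlespec(
        repo, 'none-packed1;requirements=generaldelta')
    assert (compression, cgversion) == (None, 's1')
    assert params == {'requirements': 'generaldelta'}
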
def readbundle(ui, fh, fname, vfs=None):
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    elif version == 'S1':
        return streamclone.streamcloneapplier(fh)
    else:
        raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))

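A short usage sketch of the dispatch above (hypothetical helper): the four-byte magic selects the unpacker, 'HG10' for changegroup1, 'HG2x' for bundle2, and 'HGS1' for stream clones.

    def openbundle(ui, path):
        fh = open(path, 'rb')
        return readbundle(ui, fh, path)
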
def buildobsmarkerspart(bundler, markers):
    """add an obsmarker part to the bundler with <markers>

    No part is created if markers is empty.
    Raises ValueError if the bundler doesn't support any known obsmarker format.
    """
    if markers:
        remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
        version = obsolete.commonversion(remoteversions)
        if version is None:
            raise ValueError('bundler does not support common obsmarker format')
        stream = obsolete.encodemarkers(markers, True, version=version)
        return bundler.newpart('obsmarkers', data=stream)
    return None

def _canusebundle2(op):
    """return true if a pull/push can use bundle2

    Feel free to nuke this function when we drop the experimental option"""
    return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
            and op.remote.capable('bundle2'))

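The bundle2 check above is gated on an experimental knob; for illustration, the corresponding hgrc setting can force the legacy exchange path even when the remote advertises bundle2:

    [experimental]
    # default is True; set to False to disable bundle2 negotiation
    bundle2-exp = False
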
class pushoperation(object):
    """An object that represents a single push operation

    Its purpose is to carry push-related state and very common operations.

    A new one should be created at the beginning of each push and discarded
    afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discovery.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed alongside the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads)
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

# mapping of message used when pushing bookmark
bookmsgmap = {'update': (_("updating bookmark %s\n"),
                         _('updating bookmark %s failed!\n')),
              'export': (_("exporting bookmark %s\n"),
                         _('exporting bookmark %s failed!\n')),
              'delete': (_("deleting remote bookmark %s\n"),
                         _('deleting remote bookmark %s failed!\n')),
              }


def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return the pushoperation; its cgresult
    attribute holds an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **opargs)
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    localwlock = locallock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if _canusebundle2(pushop) and maypushback:
            localwlock = pushop.repo.wlock()
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError as err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        if pushop.locallocked:
            pushop.trmanager = transactionmanager(pushop.repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if _canusebundle2(pushop):
                _pushbundle2(pushop)
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
        if pushop.trmanager:
            pushop.trmanager.close()
    finally:
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()
        if localwlock is not None:
            localwlock.release()

    return pushop

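A hedged end-to-end sketch (hypothetical helper; error handling elided): the attribute names come from pushoperation above.

    from mercurial import exchange, hg

    def pushall(ui, repo, remoteurl):
        remote = hg.peer(repo, {}, remoteurl)
        pushop = exchange.push(repo, remote, newbranch=True)
        if pushop.cgresult is None:
            ui.status('nothing to push\n')
        return pushop.cgresult
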
# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator for functions performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, change the pushdiscoverymapping dictionary directly."""
    def dec(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return dec
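
# Illustrative sketch only: an extension could register an extra discovery
# step through the decorator above. The step name and the debug message are
# hypothetical; the step runs after the built-in ones, in registration order.
#
#     @pushdiscovery('countoutgoing')
#     def _pushdiscoverycountoutgoing(pushop):
#         # 'changeset' ran first, so pushop.outgoing is already populated
#         pushop.ui.debug('%d changesets selected for push\n'
#                         % len(pushop.outgoing.missing))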

def _pushdiscovery(pushop):
    """Run all discovery steps"""
    for stepname in pushdiscoveryorder:
        step = pushdiscoverymapping[stepname]
        step(pushop)

@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    fci = discovery.findcommonincoming
    commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc

@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both the success and failure cases of the changesets
    push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and publishing):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changesets are to be pushed
        # - and the remote is publishing
        # We may be in issue 3871 case!
        # We drop the phase synchronisation usually done as a courtesy,
        # since it could publish changesets on the remote that are still
        # draft locally.
        remotephases = {'publishing': 'True'}
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    extracond = ''
    if not publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs that are draft on the remote but public here.
    # XXX Beware that the revset breaks if droots are not strictly roots;
    # XXX we may want to ensure they are, but that is costly.
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # add the changesets we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback

@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
        and pushop.repo.obsstore
        and 'obsolete' in pushop.remote.listkeys('namespaces')):
        repo = pushop.repo
        # very naive computation, that can be quite expensive on big repos.
        # However: evolution is currently slow on them anyway.
        nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
        pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)

@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    explicit = set(pushop.bookmarks)

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search for added bookmarks
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmarks
    for b, scid, dcid in advdst + diverge + differ:
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmarks to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
        # treat as "deleted locally"
        pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()

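# Summary of the encoding used above (no extra behavior): each entry of
# pushop.outbookmarks is a (bookmark, old, new) triple where
#     (b, dcid, scid) advances or overwrites the remote bookmark,
#     (b, '',   scid) creates it on the remote, and
#     (b, dcid, ''  ) deletes it there,
# matching the pushkey('bookmarks', b, old, new) calls issued later.
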
def _pushcheckoutgoing(pushop):
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are defined here to respect the 80-char limit
            mso = _("push includes obsolete changeset: %s!")
            mst = {"unstable": _("push includes unstable changeset: %s!"),
                   "bumped": _("push includes bumped changeset: %s!"),
                   "divergent": _("push includes divergent changeset: %s!")}
            # If we are pushing and there is at least one obsolete or
            # unstable changeset in missing, then at least one of the
            # missing heads will be obsolete or unstable. So checking heads
            # only is ok.
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.troubled():
                    raise error.Abort(mst[ctx.troubles()[0]] % ctx)
-        # internal config: bookmarks.pushing
-        newbm = pushop.ui.configlist('bookmarks', 'pushing')
-        discovery.checkheads(unfi, pushop.remote, outgoing,
-                             pushop.remoteheads,
-                             pushop.newbranch,
-                             bool(pushop.incoming),
-                             newbm)
+        discovery.checkheads(pushop)
    return True

# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator for functions generating bundle2 parts

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, change the b2partsgenmapping dictionary directly."""
    def dec(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return dec
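
# Illustrative sketch only: a part generator receives (pushop, bundler), may
# add parts, and may return a callable that will process the server reply.
# The part name and payload below are hypothetical.
#
#     @b2partsgenerator('mydata')
#     def _pushb2mydata(pushop, bundler):
#         if 'mydata' in pushop.stepsdone:
#             return
#         pushop.stepsdone.add('mydata')
#         part = bundler.newpart('mydata', data='payload')
#         def handlereply(op):
#             # op.records.getreplies(part.id) holds any server response
#             pass
#         return handlereply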

def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    if not pushop.force:
        bundler.newpart('check:heads', data=iter(pushop.remoteheads))

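# Note (behavior assumed from the bundle2 protocol, not shown here): the
# server compares the heads carried by 'check:heads' with its current heads
# and fails the unbundle as a push race if they differ, mirroring the
# remoteheads check done by the plain unbundle call in _pushchangeset below.
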
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = None
    cgversions = b2caps.get('changegroup')
    if not cgversions: # 3.1 and 3.2 ship with an empty value
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing)
    else:
        cgversions = [v for v in cgversions if v in changegroup.packermap]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing,
                                                version=version)
    cgpart = bundler.newpart('changegroup', data=cg)
    if version is not None:
        cgpart.addparam('version', version)
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply

@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('phases')
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply

@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        markers = sorted(pushop.outobsmarkers)
        buildobsmarkerspart(bundler, markers)

@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for parts we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply


def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(stream, ['force'], 'push')
        except error.BundleValueError as exc:
            raise error.Abort('missing support for %s' % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort('missing support for %s' % exc)
    except bundle2.AbortFromPart as exc:
        pushop.ui.status(_('remote: %s\n') % exc)
        raise error.Abort(_('push failed on remote'), hint=exc.hint)
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)

def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # The local repo finds heads on the server and works out which
        # revs it must push. Once the revs are transferred, if the server
        # finds it has different heads (someone else won a commit/push
        # race), the server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())

def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changeset was pushed
        # - and the remote is publishing
        # We may be in issue 3871 case!
        # We drop the phase synchronisation usually done as a courtesy,
        # since it could publish changesets on the remote that are still
        # draft locally.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server, or public-only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed through bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)

def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
    else:
        # repo is not locked, do not change any phases!
        # Inform the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)

def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug('try to push obsolete markers to remote\n')
        rslts = []
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)

def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery may have set the value from an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1

class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revisions we try to pull (None is "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = bookmarks
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled everything possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        return _canusebundle2(self)

    @util.propertycache
    def remotebundle2caps(self):
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()

class transactionmanager(object):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""
    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()

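# A minimal usage sketch (mirroring how pull() below drives it): create the
# manager up front, let code request the transaction lazily, and always
# release in a finally block so an open transaction is rolled back on error.
#
#     trmanager = transactionmanager(repo, 'pull', remote.url())
#     try:
#         tr = trmanager.transaction()   # created on first use
#         # ... apply incoming data under tr ...
#         trmanager.close()              # commits only if one was created
#     finally:
#         trmanager.release()            # rolls back if still open
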
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested, **opargs)
    if pullop.remote.local():
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    lock = pullop.repo.lock()
    try:
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        streamclone.maybeperformlegacystreamclone(pullop)
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _pullbundle2(pullop)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        pullop.trmanager.release()
        lock.release()

    return pullop

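# A minimal usage sketch (arguments taken from the docstring above; the node
# value is hypothetical):
#
#     pullop = exchange.pull(repo, remote)              # pull everything
#     pullop = exchange.pull(repo, remote,
#                            heads=[somenode],
#                            bookmarks=['@'])           # partial pull
#     # pullop.cgresult is 0 when no changes were found
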
# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}

def pulldiscovery(stepname):
    """decorator for functions performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, change the pulldiscoverymapping dictionary directly."""
    def dec(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return dec

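# Illustrative sketch only: as the docstring above says, wrapping an existing
# step goes through the mapping rather than the decorator. The wrapper name
# is hypothetical.
#
#     origstep = pulldiscoverymapping['changegroup']
#     def verbosestep(pullop):
#         pullop.repo.ui.debug('running changegroup discovery\n')
#         origstep(pullop)
#     pulldiscoverymapping['changegroup'] = verbosestep
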
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    for stepname in pulldiscoveryorder:
        step = pulldiscoverymapping[stepname]
        step(pullop)

@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but let's be nice
        # with new implementations.
        return
    pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')


@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will change to handle all
    discovery at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, let's drop it from the
        # unknown remote heads and put it back in common.
        #
        # This is a hackish solution to catch most of the "common but
        # locally hidden" situations. We do not perform discovery on the
        # unfiltered repository because it ends up doing a pathological
        # amount of round trips for a huge amount of changesets we do not
        # care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we will not be able to
        # detect it.
        scommon = set(common)
        filteredrheads = []
        for n in rheads:
            if n in nm:
                if n not in scommon:
                    common.append(n)
            else:
                filteredrheads.append(n)
        if not filteredrheads:
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads

def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data is the changegroup."""
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}

    streaming, streamreqs = streamclone.canperformstreamclone(pullop)

    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch
    if 'listkeys' in pullop.remotebundle2caps:
        kwargs['listkeys'] = ['phase']
        if pullop.remotebookmarks is None:
            # make sure to always include bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            kwargs['listkeys'].append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError as exc:
        raise error.Abort('missing support for %s' % exc)

    if pullop.fetch:
        results = [cg['return'] for cg in op.records['changegroup']]
        pullop.cgresult = changegroup.combineresults(results)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value

    # bookmark data was either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)

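# Illustrative sketch (not part of the original module): the ``kwargs``
# assembled by _pullbundle2() for a full pull from a bundle2 server might
# look roughly like the following; all concrete values are hypothetical.
#
#   kwargs = {'bundlecaps': set(['HG20', 'bundle2=...']),
#             'common': [nullid],
#             'heads': [<20-byte binary node>, ...],
#             'cg': True,
#             'listkeys': ['phase', 'bookmarks'],
#             'cbattempted': False}
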
def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""
    pass

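# A minimal sketch of how an extension could extend the getbundle call by
# wrapping the hook above. The extension and argument names are hypothetical;
# extensions.wrapfunction() is the real wrapping API.
#
#   from mercurial import exchange, extensions
#
#   def _myextraprepare(orig, pullop, kwargs):
#       kwargs['myext_flag'] = True  # hypothetical extra getbundle argument
#       return orig(pullop, kwargs)
#
#   def uisetup(ui):
#       extensions.wrapfunction(exchange, '_pullbundle2extraprepare',
#                               _myextraprepare)
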
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay opening the transaction as late as possible so we don't open
    # a transaction for nothing and don't break a future useful rollback
    # call.
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = cg.apply(pullop.repo, 'pull', pullop.remote.url())

def _pullphase(pullop):
    # Get remote phases data from remote
    if 'phases' in pullop.stepsdone:
        return
    remotephases = pullop.remote.listkeys('phases')
    _pullapplyphases(pullop, remotephases)

def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing: all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)

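# For illustration (hypothetical node): the ``remotephases`` dict consumed
# above comes from the 'phases' pushkey namespace. A publishing server sends
# only the flag, while a non-publishing server also lists its draft roots:
#
#   {'publishing': 'True'}
#   {'cf9fe039dfd67e829edf6522a45de057b5c86519': '1'}
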
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)

def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is a function that returns the pull transaction,
    creating one if necessary. We return the transaction to inform the
    calling code that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = base85.b85decode(remoteobs[key])
                    pullop.repo.obsstore.mergemarkers(tr, data)
            pullop.repo.invalidatevolatilesets()
    return tr

def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = set(['HG20'])
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    caps.add('bundle2=' + urllib.quote(capsblob))
    return caps

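# For illustration (hypothetical capability blob): the set returned by
# caps20to10() looks roughly like
#
#   set(['HG20', 'bundle2=HG20%0Achangegroup%3D01%2C02%0A...'])
#
# i.e. 'HG20' plus the URL-quoted, encoded bundle2 capabilities of the repo.
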
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname, idx=None):
    """decorator for functions generating bundle2 parts for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, modify the getbundle2partsmapping dictionary directly."""
    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return dec

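# A minimal sketch of registering a new getbundle part with the decorator
# above. The step and part names are hypothetical; a real part would also
# need a matching bundle2 part handler on the receiving side.
#
#   @getbundle2partsgenerator('example-extra')
#   def _getbundleexamplepart(bundler, repo, source, bundlecaps=None,
#                             b2caps=None, **kwargs):
#       bundler.newpart('example:extra', data='payload', mandatory=False)
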
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kinds of parts)

    Could be a bundle HG10 or a bundle HG20 depending on the bundlecaps
    passed. For now, the bundle can contain only a changegroup, but this
    will change when more part types become available for bundle2.

    This is different from changegroup.getchangegroup, which only returns an
    HG10 changegroup bundle. They may eventually get reunited in the future
    when we have a clearer idea of the API we want for querying different
    data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    # bundle10 case
    usebundle2 = False
    if bundlecaps is not None:
        usebundle2 = any((cap.startswith('HG2') for cap in bundlecaps))
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return changegroup.getchangegroup(repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps)

    # bundle20 case
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **kwargs)

    return util.chunkbuffer(bundler.getchunks())

@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = None
        cgversions = b2caps.get('changegroup')
        getcgkwargs = {}
        if cgversions:  # 3.1 and 3.2 ship with an empty value
            cgversions = [v for v in cgversions if v in changegroup.packermap]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = getcgkwargs['version'] = max(cgversions)
        outgoing = changegroup.computeoutgoing(repo, heads, common)
        cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
                                                bundlecaps=bundlecaps,
                                                **getcgkwargs)

    if cg:
        part = bundler.newpart('changegroup', data=cg)
        if version is not None:
            part.addparam('version', version)
        part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)

@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get('listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)

@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get('obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = sorted(markers)
        buildobsmarkerspart(bundler, markers)

@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset nodes and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = changegroup.computeoutgoing(repo, heads, common)

    if not outgoing.missingheads:
        return

    cache = tags.hgtagsfnodescache(repo.unfiltered())
    chunks = []

    # .hgtags fnodes are only relevant for head changesets. While we could
    # transfer values for all known nodes, there will likely be little to
    # no benefit.
    #
    # We don't bother using a generator to produce output data because
    # a) we only have 40 bytes per head and even esoteric numbers of heads
    # consume little memory (1M heads is 40MB), and b) we don't want to send
    # the part if we don't have entries, and knowing whether we have entries
    # requires cache lookups.
    for node in outgoing.missingheads:
        # Don't compute missing, as this may slow down serving.
        fnode = cache.getfnode(node, computemissing=False)
        if fnode is not None:
            chunks.extend([node, fnode])

    if chunks:
        bundler.newpart('hgtagsfnodes', data=''.join(chunks))

def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = util.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)

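# Illustrative sketch (not part of the original module): how a pushing client
# can build the ``their_heads`` value accepted above without sending every
# head, by hashing them the same way check_heads() does:
#
#   heads = remote_heads_seen_at_discovery_time  # hypothetical variable
#   their_heads = ['hashed', util.sha1(''.join(sorted(heads))).digest()]
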
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
                                       False)
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            r = None
            try:
                def gettransaction():
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
        else:
            lockandtr[1] = repo.lock()
            r = cg.apply(repo, source, url)
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r

def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('experimental', 'clonebundles', False):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    res = remote._call('clonebundles')

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(repo, entries)
    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback', False):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config experimental.clonebundles=false"'))

def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. Each dict has a ``URL`` key corresponding
    to the URL; the other keys are the attributes for the entry.
    """
    m = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urllib.unquote(key)
            value = urllib.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    comp, version, params = parsebundlespec(repo, value,
                                                            externalnames=True)
                    attrs['COMPRESSION'] = comp
                    attrs['VERSION'] = version
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        m.append(attrs)

    return m

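# For illustration (hypothetical URL): given this one-line manifest
#
#   https://example.com/full.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true
#
# parseclonebundlesmanifest() would return
#
#   [{'URL': 'https://example.com/full.hg',
#     'BUNDLESPEC': 'gzip-v2',
#     'COMPRESSION': 'gzip',
#     'VERSION': 'v2',
#     'REQUIRESNI': 'true'}]
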
def filterclonebundleentries(repo, entries):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                parsebundlespec(repo, spec, strict=True)
            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (entry['URL'], str(e)))
                continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries

def sortclonebundleentries(ui, entries):
    # experimental config: experimental.clonebundleprefers
    prefers = ui.configlist('experimental', 'clonebundleprefers', default=[])
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    # Our sort function.
    def compareentry(a, b):
        for prefkey, prefvalue in prefers:
            avalue = a.get(prefkey)
            bvalue = b.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    return sorted(entries, cmp=compareentry)

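# For illustration (hypothetical preferences): with the config
#
#   [experimental]
#   clonebundleprefers = VERSION=v2, COMPRESSION=gzip
#
# entries advertising VERSION=v2 sort first, ties are broken by preferring
# COMPRESSION=gzip, and any remaining ties keep their manifest order.
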
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL."""
    lock = repo.lock()
    try:
        tr = repo.transaction('bundleurl')
        try:
            try:
                fh = urlmod.open(ui, url)
                cg = readbundle(ui, fh, 'stream')

                if isinstance(cg, bundle2.unbundle20):
                    bundle2.processbundle(repo, cg, lambda: tr)
                elif isinstance(cg, streamclone.streamcloneapplier):
                    cg.apply(repo)
                else:
                    cg.apply(repo, 'clonebundles', url)
                tr.close()
                return True
            except urllib2.HTTPError as e:
                ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
            except urllib2.URLError as e:
                ui.warn(_('error fetching bundle: %s\n') % e.reason[1])

            return False
        finally:
            tr.release()
    finally:
        lock.release()