##// END OF EJS Templates
push: restrict common discovery to the pushed set...
Boris Feld -
r35306:483b5dd0 default
parent child Browse files
Show More
@@ -1,525 +1,530
1 # discovery.py - protocol changeset discovery functions
1 # discovery.py - protocol changeset discovery functions
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import functools
10 import functools
11
11
12 from .i18n import _
12 from .i18n import _
13 from .node import (
13 from .node import (
14 hex,
14 hex,
15 nullid,
15 nullid,
16 short,
16 short,
17 )
17 )
18
18
19 from . import (
19 from . import (
20 bookmarks,
20 bookmarks,
21 branchmap,
21 branchmap,
22 error,
22 error,
23 phases,
23 phases,
24 scmutil,
24 scmutil,
25 setdiscovery,
25 setdiscovery,
26 treediscovery,
26 treediscovery,
27 util,
27 util,
28 )
28 )
29
29
def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
    """Return a tuple (common, anyincoming, heads) used to identify the common
    subset of nodes between repo and remote.

    "common" is a list of (at least) the heads of the common subset.
    "anyincoming" is testable as a boolean indicating if any nodes are missing
    locally. If remote does not support getbundle, this actually is a list of
    roots of the nodes that would be incoming, to be supplied to
    changegroupsubset. No code except for pull should be relying on this fact
    any longer.
    "heads" is either the supplied heads, or else the remote's heads.
    "ancestorsof" if not None, restricts the discovery to a subset defined by
      these nodes. Changesets outside of this set won't be considered (and
      won't appear in "common").

    If you pass heads and they are all known locally, the response lists just
    these heads in "common" and in "heads".

    Please use findcommonoutgoing to compute the set of outgoing nodes to give
    extensions a good hook into outgoing.
    """

    # Old servers without getbundle use the legacy tree-walking protocol.
    if not remote.capable('getbundle'):
        return treediscovery.findcommonincoming(repo, remote, heads, force)

    if heads:
        # Fast path: if every requested head is already known locally there
        # is nothing to discover.
        allknown = True
        knownnode = repo.changelog.hasnode # no nodemap until it is filtered
        for h in heads:
            if not knownnode(h):
                allknown = False
                break
        if allknown:
            return (heads, False, heads)

    res = setdiscovery.findcommonheads(repo.ui, repo, remote,
                                       abortwhenunrelated=not force,
                                       ancestorsof=ancestorsof)
    common, anyinc, srvheads = res
    return (list(common), anyinc, heads or list(srvheads))
66
70
class outgoing(object):
    '''Represents the set of nodes present in a local repo but not in a
    (possibly) remote one.

    Members:

    missing is a list of all nodes present in local but not in remote.
    common is a list of all nodes shared between the two repos.
    excluded is the list of missing changesets that shouldn't be sent remotely.
    missingheads is the list of heads of missing.
    commonheads is the list of heads of common.

    The sets are computed on demand from the heads, unless provided upfront
    by discovery.'''

    def __init__(self, repo, commonheads=None, missingheads=None,
                 missingroots=None):
        # at least one of them must not be set
        assert None in (commonheads, missingroots)
        cl = repo.changelog
        if missingheads is None:
            missingheads = cl.heads()
        if missingroots:
            # Derive the common heads from the parents of the missing roots
            # that are not themselves part of the missing set.
            discbases = []
            for n in missingroots:
                discbases.extend([p for p in cl.parents(n) if p != nullid])
            # TODO remove call to nodesbetween.
            # TODO populate attributes on outgoing instance instead of setting
            # discbases.
            csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
            included = set(csets)
            missingheads = heads
            commonheads = [n for n in discbases if n not in included]
        elif not commonheads:
            commonheads = [nullid]
        self.commonheads = commonheads
        self.missingheads = missingheads
        self._revlog = cl
        # _common/_missing are computed lazily, see common/missing below.
        self._common = None
        self._missing = None
        self.excluded = []

    def _computecommonmissing(self):
        # Fill the lazily-computed _common/_missing caches in one pass.
        sets = self._revlog.findcommonmissing(self.commonheads,
                                              self.missingheads)
        self._common, self._missing = sets

    @util.propertycache
    def common(self):
        if self._common is None:
            self._computecommonmissing()
        return self._common

    @util.propertycache
    def missing(self):
        if self._missing is None:
            self._computecommonmissing()
        return self._missing
125
129
def findcommonoutgoing(repo, other, onlyheads=None, force=False,
                       commoninc=None, portable=False):
    '''Return an outgoing instance to identify the nodes present in repo but
    not in other.

    If onlyheads is given, only nodes ancestral to nodes in onlyheads
    (inclusive) are included. If you already know the local repo's heads,
    passing them in onlyheads is faster than letting them be recomputed here.

    If commoninc is given, it must be the result of a prior call to
    findcommonincoming(repo, other, force) to avoid recomputing it here.

    If portable is given, compute more conservative common and missingheads,
    to make bundles created from the instance more portable.'''
    # declare an empty outgoing object to be filled later
    og = outgoing(repo, None, None)

    # get common set if not provided
    if commoninc is None:
        # Restrict discovery to the pushed set: only ancestors of onlyheads
        # need to be considered when computing the common set.
        commoninc = findcommonincoming(repo, other, force=force,
                                       ancestorsof=onlyheads)
    og.commonheads, _any, _hds = commoninc

    # compute outgoing
    mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
    if not mayexclude:
        og.missingheads = onlyheads or repo.heads()
    elif onlyheads is None:
        # use visible heads as it should be cached
        og.missingheads = repo.filtered("served").heads()
        og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
    else:
        # compute common, missing and exclude secret stuff
        sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
        og._common, allmissing = sets
        og._missing = missing = []
        og.excluded = excluded = []
        for node in allmissing:
            ctx = repo[node]
            if ctx.phase() >= phases.secret or ctx.extinct():
                excluded.append(node)
            else:
                missing.append(node)
        if len(missing) == len(allmissing):
            missingheads = onlyheads
        else: # update missing heads
            missingheads = phases.newheads(repo, onlyheads, excluded)
        og.missingheads = missingheads
    if portable:
        # recompute common and missingheads as if -r<rev> had been given for
        # each head of missing, and --base <rev> for each head of the proper
        # ancestors of missing
        og._computecommonmissing()
        cl = repo.changelog
        missingrevs = set(cl.rev(n) for n in og._missing)
        og._common = set(cl.ancestors(missingrevs)) - missingrevs
        commonheads = set(og.commonheads)
        og.missingheads = [h for h in og.missingheads if h not in commonheads]

    return og
185
190
def _headssummary(pushop):
    """compute a summary of branch and heads status before and after push

    return {'branch': ([remoteheads], [newheads],
                       [unsyncedheads], [discardedheads])} mapping

    - branch: the branch name,
    - remoteheads: the list of remote heads known locally
                   None if the branch is new,
    - newheads: the new remote heads (known locally) with outgoing pushed,
    - unsyncedheads: the list of remote heads unknown locally,
    - discardedheads: the list of heads made obsolete by the push.
    """
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    outgoing = pushop.outgoing
    cl = repo.changelog
    headssum = {}
    # A. Create set of branches involved in the push.
    branches = set(repo[n].branch() for n in outgoing.missing)
    remotemap = remote.branchmap()
    newbranches = branches - set(remotemap)
    branches.difference_update(newbranches)

    # A. register remote heads
    remotebranches = set()
    for branch, heads in remote.branchmap().iteritems():
        remotebranches.add(branch)
        known = []
        unsynced = []
        knownnode = cl.hasnode # do not use nodemap until it is filtered
        for h in heads:
            if knownnode(h):
                known.append(h)
            else:
                unsynced.append(h)
        headssum[branch] = (known, list(known), unsynced)
    # B. add new branch data
    missingctx = list(repo[n] for n in outgoing.missing)
    touchedbranches = set()
    for ctx in missingctx:
        branch = ctx.branch()
        touchedbranches.add(branch)
        if branch not in headssum:
            headssum[branch] = (None, [], [])

    # C drop data about untouched branches:
    for branch in remotebranches - touchedbranches:
        del headssum[branch]

    # D. Update newmap with outgoing changes.
    # This will possibly add new heads and remove existing ones.
    newmap = branchmap.branchcache((branch, heads[1])
                                   for branch, heads in headssum.iteritems()
                                   if heads[0] is not None)
    newmap.update(repo, (ctx.rev() for ctx in missingctx))
    for branch, newheads in newmap.iteritems():
        headssum[branch][1][:] = newheads
    for branch, items in headssum.iteritems():
        for l in items:
            if l is not None:
                l.sort()
        # append the (initially empty) discardedheads slot
        headssum[branch] = items + ([],)

    # If there are no obsstore, no post processing are needed.
    if repo.obsstore:
        torev = repo.changelog.rev
        futureheads = set(torev(h) for h in outgoing.missingheads)
        futureheads |= set(torev(h) for h in outgoing.commonheads)
        allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
        for branch, heads in sorted(headssum.iteritems()):
            remoteheads, newheads, unsyncedheads, placeholder = heads
            result = _postprocessobsolete(pushop, allfuturecommon, newheads)
            headssum[branch] = (remoteheads, sorted(result[0]), unsyncedheads,
                                sorted(result[1]))
    return headssum
262
267
def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
    """Compute branchmapsummary for repo without branchmap support"""

    # 1-4b. old servers: Check for new topological heads.
    # Construct {old,new}map with branch = None (topological branch).
    # (code based on update)
    knownnode = repo.changelog.hasnode # no nodemap until it is filtered
    oldheads = sorted(h for h in remoteheads if knownnode(h))
    # all nodes in outgoing.missing are children of either:
    # - an element of oldheads
    # - another element of outgoing.missing
    # - nullrev
    # This explains why the new head are very simple to compute.
    r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
    newheads = sorted(c.node() for c in r)
    # set some unsynced head to issue the "unsynced changes" warning
    if inc:
        unsynced = [None]
    else:
        unsynced = []
    return {None: (oldheads, newheads, unsynced, [])}
284
289
def _nowarnheads(pushop):
    """Return the set of nodes that should not trigger a "new head" warning.

    Compute newly pushed bookmarks. We don't warn about bookmarked heads.
    """
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    localbookmarks = repo._bookmarks
    remotebookmarks = remote.listkeys('bookmarks')
    bookmarkedheads = set()

    # internal config: bookmarks.pushing
    newbookmarks = [localbookmarks.expandname(b)
                    for b in pushop.ui.configlist('bookmarks', 'pushing')]

    for bm in localbookmarks:
        rnode = remotebookmarks.get(bm)
        if rnode and rnode in repo:
            # the bookmark exists remotely and we know that changeset: only
            # exempt the local head if the move is a valid fast-forward
            lctx, rctx = repo[bm], repo[rnode]
            if bookmarks.validdest(repo, rctx, lctx):
                bookmarkedheads.add(lctx.node())
        else:
            if bm in newbookmarks and bm not in remotebookmarks:
                bookmarkedheads.add(repo[bm].node())

    return bookmarkedheads
308
313
def checkheads(pushop):
    """Check that a push won't add any outgoing head

    raise Abort error and display ui message as needed.
    """

    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    outgoing = pushop.outgoing
    remoteheads = pushop.remoteheads
    newbranch = pushop.newbranch
    inc = bool(pushop.incoming)

    # Check for each named branch if we're creating new remote heads.
    # To be a remote head after push, node must be either:
    # - unknown locally
    # - a local outgoing head descended from update
    # - a remote head that's known locally and not
    #   ancestral to an outgoing head
    if remoteheads == [nullid]:
        # remote is empty, nothing to check.
        return

    if remote.capable('branchmap'):
        headssum = _headssummary(pushop)
    else:
        headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
    pushop.pushbranchmap = headssum
    newbranches = [branch for branch, heads in headssum.iteritems()
                   if heads[0] is None]
    # 1. Check for new branches on the remote.
    if newbranches and not newbranch:  # new branch requires --new-branch
        branchnames = ', '.join(sorted(newbranches))
        raise error.Abort(_("push creates new remote branches: %s!")
                          % branchnames,
                          hint=_("use 'hg push --new-branch' to create"
                                 " new remote branches"))

    # 2. Find heads that we need not warn about
    nowarnheads = _nowarnheads(pushop)

    # 3. Check for new heads.
    # If there are more heads after the push than before, a suitable
    # error message, depending on unsynced status, is displayed.
    errormsg = None
    for branch, heads in sorted(headssum.iteritems()):
        remoteheads, newheads, unsyncedheads, discardedheads = heads
        # add unsynced data
        if remoteheads is None:
            oldhs = set()
        else:
            oldhs = set(remoteheads)
        oldhs.update(unsyncedheads)
        dhs = None  # delta heads, the new heads on branch
        newhs = set(newheads)
        newhs.update(unsyncedheads)
        if unsyncedheads:
            if None in unsyncedheads:
                # old remote, no heads data
                heads = None
            else:
                heads = scmutil.nodesummaries(repo, unsyncedheads)
            if heads is None:
                repo.ui.status(_("remote has heads that are "
                                 "not known locally\n"))
            elif branch is None:
                repo.ui.status(_("remote has heads that are "
                                 "not known locally: %s\n") % heads)
            else:
                repo.ui.status(_("remote has heads on branch '%s' that are "
                                 "not known locally: %s\n") % (branch, heads))
        if remoteheads is None:
            # remote branch is new: more than one head is a problem
            if len(newhs) > 1:
                dhs = list(newhs)
                if errormsg is None:
                    errormsg = (_("push creates new branch '%s' "
                                  "with multiple heads") % (branch))
                    hint = _("merge or"
                             " see 'hg help push' for details about"
                             " pushing new heads")
        elif len(newhs) > len(oldhs):
            # remove bookmarked or existing remote heads from the new heads list
            dhs = sorted(newhs - nowarnheads - oldhs)
        if dhs:
            if errormsg is None:
                if branch not in ('default', None):
                    errormsg = _("push creates new remote head %s "
                                 "on branch '%s'!") % (short(dhs[0]), branch)
                elif repo[dhs[0]].bookmarks():
                    errormsg = _("push creates new remote head %s "
                                 "with bookmark '%s'!") % (
                                 short(dhs[0]), repo[dhs[0]].bookmarks()[0])
                else:
                    errormsg = _("push creates new remote head %s!"
                                 ) % short(dhs[0])
                if unsyncedheads:
                    hint = _("pull and merge or"
                             " see 'hg help push' for details about"
                             " pushing new heads")
                else:
                    hint = _("merge or"
                             " see 'hg help push' for details about"
                             " pushing new heads")
            if branch is None:
                repo.ui.note(_("new remote heads:\n"))
            else:
                repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
            for h in dhs:
                repo.ui.note((" %s\n") % short(h))
    if errormsg:
        raise error.Abort(errormsg, hint=hint)
420
425
def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
    """post process the list of new heads with obsolescence information

    Exists as a sub-function to contain the complexity and allow extensions to
    experiment with smarter logic.

    ``pushop`` is the in-flight push operation (provides repo and ui).
    ``futurecommon`` is a container of revision numbers expected to be common
    with the remote after the push (membership is tested with local revs).
    ``candidate_newhs`` is the iterable of candidate new head nodes computed
    by the caller's head checking logic.

    Returns (newheads, discarded_heads) tuple
    """
    # known issue
    #
    # * We "silently" skip processing on all changeset unknown locally
    #
    # * if <nh> is public on the remote, it won't be affected by obsolete
    #     marker and a new is created

    # define various utilities and containers
    repo = pushop.repo
    unfi = repo.unfiltered()
    tonode = unfi.changelog.node
    torev = unfi.changelog.nodemap.get
    public = phases.public
    getphase = unfi._phasecache.phase
    ispublic = (lambda r: getphase(unfi, r) == public)
    ispushed = (lambda n: torev(n) in futurecommon)
    hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
    successorsmarkers = unfi.obsstore.successors
    newhs = set() # final set of new heads
    discarded = set() # new head of fully replaced branch

    # Split candidates into heads we know about locally and heads we do not;
    # only the locally-known ones can be analyzed for branch replacement.
    localcandidate = set() # candidate heads known locally
    unknownheads = set() # candidate heads unknown locally
    for h in candidate_newhs:
        if h in unfi:
            localcandidate.add(h)
        else:
            if successorsmarkers.get(h) is not None:
                msg = ('checkheads: remote head unknown locally has'
                       ' local marker: %s\n')
                repo.ui.debug(msg % hex(h))
            unknownheads.add(h)

    # fast path the simple case
    # NOTE(review): with a single locally-known candidate there is no other
    # candidate branch to compare against, so all candidates are kept as-is.
    if len(localcandidate) == 1:
        return unknownheads | set(candidate_newhs), set()

    # actually process branch replacement
    # The loop pops candidates as it goes; the revset below deliberately uses
    # the *remaining* candidates plus the already-accepted heads as the
    # exclusion set, so processing order matters.
    while localcandidate:
        nh = localcandidate.pop()
        # run this check early to skip the evaluation of the whole branch
        if (torev(nh) in futurecommon or ispublic(torev(nh))):
            newhs.add(nh)
            continue

        # Get all revs/nodes on the branch exclusive to this head
        # (already filtered heads are "ignored"))
        branchrevs = unfi.revs('only(%n, (%ln+%ln))',
                               nh, localcandidate, newhs)
        branchnodes = [tonode(r) for r in branchrevs]

        # The branch won't be hidden on the remote if
        # * any part of it is public,
        # * any part of it is considered part of the result by previous logic,
        # * if we have no markers to push to obsolete it.
        if (any(ispublic(r) for r in branchrevs)
                or any(torev(n) in futurecommon for n in branchnodes)
                or any(not hasoutmarker(n) for n in branchnodes)):
            newhs.add(nh)
        else:
            # note: there is a corner case if there is a merge in the branch.
            # we might end up with -more- heads.  However, these heads are not
            # "added" by the push, but more by the "removal" on the remote so I
            # think is a okay to ignore them,
            discarded.add(nh)
    newhs |= unknownheads
    return newhs, discarded
496
501
def pushingmarkerfor(obsstore, ispushed, node):
    """Return True if some obsolescence markers are to be pushed for ``node``.

    We cannot simply inspect the obsmarkers selected by the push operation,
    because discovery might have filtered markers that are still relevant.
    Listing every marker relevant to the whole pushed set would also be too
    expensive (O(len(repo))), so instead we do a targeted reachability walk:
    follow successors (or parents, for prune markers) from ``node`` and stop
    as soon as we reach a changeset that is part of the pushed set
    (according to the ``ispushed`` predicate).

    (note: there is a caching opportunity in this function, but it would
    require a two dimensional stack.)
    """
    getmarkers = obsstore.successors.get
    visited = {node}
    pending = [node]
    while pending:
        candidate = pending.pop()
        if ispushed(candidate):
            return True
        # marker fields: ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
        for marker in getmarkers(candidate, ()):
            followups = marker[1] or (marker[5] or ())  # succs, else parents
            for nxt in followups:
                if nxt not in visited:
                    visited.add(nxt)
                    pending.append(nxt)
    return False
@@ -1,2210 +1,2214
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import (
15 from .node import (
16 bin,
16 bin,
17 hex,
17 hex,
18 nullid,
18 nullid,
19 )
19 )
20 from . import (
20 from . import (
21 bookmarks as bookmod,
21 bookmarks as bookmod,
22 bundle2,
22 bundle2,
23 changegroup,
23 changegroup,
24 discovery,
24 discovery,
25 error,
25 error,
26 lock as lockmod,
26 lock as lockmod,
27 obsolete,
27 obsolete,
28 phases,
28 phases,
29 pushkey,
29 pushkey,
30 pycompat,
30 pycompat,
31 remotenames,
31 remotenames,
32 scmutil,
32 scmutil,
33 sslutil,
33 sslutil,
34 streamclone,
34 streamclone,
35 url as urlmod,
35 url as urlmod,
36 util,
36 util,
37 )
37 )
38
38
39 urlerr = util.urlerr
39 urlerr = util.urlerr
40 urlreq = util.urlreq
40 urlreq = util.urlreq
41
41
# Maps bundle version human names to changegroup versions.
# Keys are the <type> part of a bundlespec string (see parsebundlespec);
# values are the wire changegroup version they correspond to.
_bundlespeccgversions = {'v1': '01',
                         'v2': '02',
                         'packed1': 's1',
                         'bundle2': '02', #legacy
                        }

# Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
_bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
51
51
def parsebundlespec(repo, spec, strict=True, externalnames=False):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    If ``externalnames`` is False (the default), the human-centric names will
    be converted to their internal representation.

    Returns a 3-tuple of (compression, version, parameters). Compression will
    be ``None`` if not in strict mode and a compression isn't defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        # Split "<version>;k0=v0;k1=v1" into (version, {params}); the
        # key/value pairs are URI-encoded.
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            params[key] = value

        return version, params


    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
                _('invalid bundle specification; '
                  'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        # Fully qualified form: "<compression>-<type>[;params]".
        compression, version = spec.split('-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                    _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = 'v1'
            # Generaldelta repos require v2.
            if 'generaldelta' in repo.requirements:
                version = 'v2'
            # Modern compression engines require v2.
            if compression not in _bundlespecv1compengines:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle specification') % spec)

    # Bundle version 1 only supports a known set of compression engines.
    if version == 'v1' and compression not in _bundlespecv1compengines:
        raise error.UnsupportedBundleSpecification(
            _('compression engine %s is not supported on v1 bundles') %
            compression)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                _('missing support for repository features: %s') %
                ', '.join(sorted(missingreqs)))

    if not externalnames:
        # Convert human-centric names to internal identifiers.
        engine = util.compengines.forbundlename(compression)
        compression = engine.bundletype()[1]
        version = _bundlespeccgversions[version]
    return compression, version, params
173
173
def readbundle(ui, fh, fname, vfs=None):
    """Sniff the 4-byte header of a bundle stream and return an unpacker.

    ``fname`` is used for error reporting (and joined against ``vfs`` when
    provided); an empty ``fname`` means the data comes from a raw stream.
    Returns a cg1unpacker, a bundle2 unbundler, or a streamclone applier
    depending on the detected format; raises ``error.Abort`` for anything
    unrecognized.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        # Headerless changegroup data: push the consumed bytes back onto the
        # stream and treat it as an uncompressed HG10 bundle.
        headerless = not header.startswith('HG') and header.startswith('\0')
        if headerless:
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic = header[0:2]
    version = header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        # HG10 carries a 2-byte compression tag unless already known.
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    if version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    if version == 'S1':
        return streamclone.streamcloneapplier(fh)
    raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
201
201
def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.

    Returns a "<compression>-<version>[;params]" string matching the format
    accepted by parsebundlespec, or raises ``error.Abort`` when the bundle
    type, compression, or changegroup version cannot be mapped back to a
    known spec.
    """
    def speccompression(alg):
        # Map an internal compression tag back to its bundlespec name,
        # or None when the engine is unknown.
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == '_truncatedBZ':
            # internal marker for a BZ stream whose magic was consumed
            alg = 'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_('unknown compression algorithm: %s') % alg)
        return '%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if 'Compression' in b.params:
            comp = speccompression(b.params['Compression'])
            if not comp:
                raise error.Abort(_('unknown compression algorithm: %s') % comp)
        else:
            comp = 'none'

        version = None
        # Derive the spec version from the changegroup part(s) carried by
        # the bundle2 stream.
        for part in b.iterparts():
            if part.type == 'changegroup':
                version = part.params['version']
                if version in ('01', '02'):
                    version = 'v2'
                else:
                    raise error.Abort(_('changegroup version %s does not have '
                                        'a known bundlespec') % version,
                                      hint=_('try upgrading your Mercurial '
                                             'client'))

        if not version:
            raise error.Abort(_('could not identify changegroup version in '
                                'bundle'))

        return '%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        params = 'requirements=%s' % ','.join(sorted(requirements))
        return 'none-packed1;%s' % urlreq.quote(params)
    else:
        raise error.Abort(_('unknown bundle type: %s') % b)
254
254
def _computeoutgoing(repo, heads, common):
    """Build a discovery.outgoing object from ``common`` and ``heads``.

    Kept as a standalone helper so extensions can reuse the logic.

    Nodes unknown to the local changelog are dropped from ``common``; an
    empty ``common`` falls back to the null revision, and empty ``heads``
    falls back to every local head.

    Returns a discovery.outgoing object.
    """
    changelog = repo.changelog
    if common:
        known = [n for n in common if changelog.hasnode(n)]
    else:
        known = [nullid]
    return discovery.outgoing(repo, known, heads or changelog.heads())
273
273
274 def _forcebundle1(op):
274 def _forcebundle1(op):
275 """return true if a pull/push must use bundle1
275 """return true if a pull/push must use bundle1
276
276
277 This function is used to allow testing of the older bundle version"""
277 This function is used to allow testing of the older bundle version"""
278 ui = op.repo.ui
278 ui = op.repo.ui
279 forcebundle1 = False
279 forcebundle1 = False
280 # The goal is this config is to allow developer to choose the bundle
280 # The goal is this config is to allow developer to choose the bundle
281 # version used during exchanged. This is especially handy during test.
281 # version used during exchanged. This is especially handy during test.
282 # Value is a list of bundle version to be picked from, highest version
282 # Value is a list of bundle version to be picked from, highest version
283 # should be used.
283 # should be used.
284 #
284 #
285 # developer config: devel.legacy.exchange
285 # developer config: devel.legacy.exchange
286 exchange = ui.configlist('devel', 'legacy.exchange')
286 exchange = ui.configlist('devel', 'legacy.exchange')
287 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
287 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
288 return forcebundle1 or not op.remote.capable('bundle2')
288 return forcebundle1 or not op.remote.capable('bundle2')
289
289
290 class pushoperation(object):
290 class pushoperation(object):
291 """A object that represent a single push operation
291 """A object that represent a single push operation
292
292
293 Its purpose is to carry push related state and very common operations.
293 Its purpose is to carry push related state and very common operations.
294
294
295 A new pushoperation should be created at the beginning of each push and
295 A new pushoperation should be created at the beginning of each push and
296 discarded afterward.
296 discarded afterward.
297 """
297 """
298
298
299 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
299 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
300 bookmarks=(), pushvars=None):
300 bookmarks=(), pushvars=None):
301 # repo we push from
301 # repo we push from
302 self.repo = repo
302 self.repo = repo
303 self.ui = repo.ui
303 self.ui = repo.ui
304 # repo we push to
304 # repo we push to
305 self.remote = remote
305 self.remote = remote
306 # force option provided
306 # force option provided
307 self.force = force
307 self.force = force
308 # revs to be pushed (None is "all")
308 # revs to be pushed (None is "all")
309 self.revs = revs
309 self.revs = revs
310 # bookmark explicitly pushed
310 # bookmark explicitly pushed
311 self.bookmarks = bookmarks
311 self.bookmarks = bookmarks
312 # allow push of new branch
312 # allow push of new branch
313 self.newbranch = newbranch
313 self.newbranch = newbranch
314 # step already performed
314 # step already performed
315 # (used to check what steps have been already performed through bundle2)
315 # (used to check what steps have been already performed through bundle2)
316 self.stepsdone = set()
316 self.stepsdone = set()
317 # Integer version of the changegroup push result
317 # Integer version of the changegroup push result
318 # - None means nothing to push
318 # - None means nothing to push
319 # - 0 means HTTP error
319 # - 0 means HTTP error
320 # - 1 means we pushed and remote head count is unchanged *or*
320 # - 1 means we pushed and remote head count is unchanged *or*
321 # we have outgoing changesets but refused to push
321 # we have outgoing changesets but refused to push
322 # - other values as described by addchangegroup()
322 # - other values as described by addchangegroup()
323 self.cgresult = None
323 self.cgresult = None
324 # Boolean value for the bookmark push
324 # Boolean value for the bookmark push
325 self.bkresult = None
325 self.bkresult = None
326 # discover.outgoing object (contains common and outgoing data)
326 # discover.outgoing object (contains common and outgoing data)
327 self.outgoing = None
327 self.outgoing = None
328 # all remote topological heads before the push
328 # all remote topological heads before the push
329 self.remoteheads = None
329 self.remoteheads = None
330 # Details of the remote branch pre and post push
330 # Details of the remote branch pre and post push
331 #
331 #
332 # mapping: {'branch': ([remoteheads],
332 # mapping: {'branch': ([remoteheads],
333 # [newheads],
333 # [newheads],
334 # [unsyncedheads],
334 # [unsyncedheads],
335 # [discardedheads])}
335 # [discardedheads])}
336 # - branch: the branch name
336 # - branch: the branch name
337 # - remoteheads: the list of remote heads known locally
337 # - remoteheads: the list of remote heads known locally
338 # None if the branch is new
338 # None if the branch is new
339 # - newheads: the new remote heads (known locally) with outgoing pushed
339 # - newheads: the new remote heads (known locally) with outgoing pushed
340 # - unsyncedheads: the list of remote heads unknown locally.
340 # - unsyncedheads: the list of remote heads unknown locally.
341 # - discardedheads: the list of remote heads made obsolete by the push
341 # - discardedheads: the list of remote heads made obsolete by the push
342 self.pushbranchmap = None
342 self.pushbranchmap = None
343 # testable as a boolean indicating if any nodes are missing locally.
343 # testable as a boolean indicating if any nodes are missing locally.
344 self.incoming = None
344 self.incoming = None
345 # summary of the remote phase situation
345 # summary of the remote phase situation
346 self.remotephases = None
346 self.remotephases = None
347 # phases changes that must be pushed along side the changesets
347 # phases changes that must be pushed along side the changesets
348 self.outdatedphases = None
348 self.outdatedphases = None
349 # phases changes that must be pushed if changeset push fails
349 # phases changes that must be pushed if changeset push fails
350 self.fallbackoutdatedphases = None
350 self.fallbackoutdatedphases = None
351 # outgoing obsmarkers
351 # outgoing obsmarkers
352 self.outobsmarkers = set()
352 self.outobsmarkers = set()
353 # outgoing bookmarks
353 # outgoing bookmarks
354 self.outbookmarks = []
354 self.outbookmarks = []
355 # transaction manager
355 # transaction manager
356 self.trmanager = None
356 self.trmanager = None
357 # map { pushkey partid -> callback handling failure}
357 # map { pushkey partid -> callback handling failure}
358 # used to handle exception from mandatory pushkey part failure
358 # used to handle exception from mandatory pushkey part failure
359 self.pkfailcb = {}
359 self.pkfailcb = {}
360 # an iterable of pushvars or None
360 # an iterable of pushvars or None
361 self.pushvars = pushvars
361 self.pushvars = pushvars
362
362
363 @util.propertycache
363 @util.propertycache
364 def futureheads(self):
364 def futureheads(self):
365 """future remote heads if the changeset push succeeds"""
365 """future remote heads if the changeset push succeeds"""
366 return self.outgoing.missingheads
366 return self.outgoing.missingheads
367
367
368 @util.propertycache
368 @util.propertycache
369 def fallbackheads(self):
369 def fallbackheads(self):
370 """future remote heads if the changeset push fails"""
370 """future remote heads if the changeset push fails"""
371 if self.revs is None:
371 if self.revs is None:
372 # not target to push, all common are relevant
372 # not target to push, all common are relevant
373 return self.outgoing.commonheads
373 return self.outgoing.commonheads
374 unfi = self.repo.unfiltered()
374 unfi = self.repo.unfiltered()
375 # I want cheads = heads(::missingheads and ::commonheads)
375 # I want cheads = heads(::missingheads and ::commonheads)
376 # (missingheads is revs with secret changeset filtered out)
376 # (missingheads is revs with secret changeset filtered out)
377 #
377 #
378 # This can be expressed as:
378 # This can be expressed as:
379 # cheads = ( (missingheads and ::commonheads)
379 # cheads = ( (missingheads and ::commonheads)
380 # + (commonheads and ::missingheads))"
380 # + (commonheads and ::missingheads))"
381 # )
381 # )
382 #
382 #
383 # while trying to push we already computed the following:
383 # while trying to push we already computed the following:
384 # common = (::commonheads)
384 # common = (::commonheads)
385 # missing = ((commonheads::missingheads) - commonheads)
385 # missing = ((commonheads::missingheads) - commonheads)
386 #
386 #
387 # We can pick:
387 # We can pick:
388 # * missingheads part of common (::commonheads)
388 # * missingheads part of common (::commonheads)
389 common = self.outgoing.common
389 common = self.outgoing.common
390 nm = self.repo.changelog.nodemap
390 nm = self.repo.changelog.nodemap
391 cheads = [node for node in self.revs if nm[node] in common]
391 cheads = [node for node in self.revs if nm[node] in common]
392 # and
392 # and
393 # * commonheads parents on missing
393 # * commonheads parents on missing
394 revset = unfi.set('%ln and parents(roots(%ln))',
394 revset = unfi.set('%ln and parents(roots(%ln))',
395 self.outgoing.commonheads,
395 self.outgoing.commonheads,
396 self.outgoing.missing)
396 self.outgoing.missing)
397 cheads.extend(c.node() for c in revset)
397 cheads.extend(c.node() for c in revset)
398 return cheads
398 return cheads
399
399
    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push

        If the changegroup was applied successfully (``cgresult`` is
        truthy) the optimistic ``futureheads`` are the new common heads;
        otherwise only the conservative ``fallbackheads`` are known to
        be common with the remote.
        """
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads
407
407
# mapping of message used when pushing bookmark
# Each action name maps to a (success, failure) pair of translated
# message templates, each taking the bookmark name as its sole argument.
bookmsgmap = {'update': (_("updating bookmark %s\n"),
                         _('updating bookmark %s failed!\n')),
              'export': (_("exporting bookmark %s\n"),
                         _('exporting bookmark %s failed!\n')),
              'delete': (_("deleting remote bookmark %s\n"),
                         _('deleting remote bookmark %s failed!\n')),
              }
416
416
417
417
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Returns the pushoperation object; its
    ``cgresult`` attribute holds an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **pycompat.strkwargs(opargs))
    if pushop.remote.local():
        # local destination: it must understand every requirement of the
        # source repository, otherwise refuse up front
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))

    if not pushop.remote.capable('unbundle'):
        raise error.Abort(_('cannot push: destination does not support the '
                            'unbundle wire protocol command'))

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if (not _forcebundle1(pushop)) and maypushback:
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(pushop.repo,
                                              'push-response',
                                              pushop.remote.url())
    except IOError as err:
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)

    # run all push steps inside whichever locks/transaction we managed to
    # acquire above (nullcontextmanager stands in for the ones we did not)
    with wlock or util.nullcontextmanager(), \
            lock or util.nullcontextmanager(), \
            pushop.trmanager or util.nullcontextmanager():
        pushop.repo.checkpush(pushop)
        _pushdiscovery(pushop)
        if not _forcebundle1(pushop):
            _pushbundle2(pushop)
        _pushchangeset(pushop)
        _pushsyncphase(pushop)
        _pushobsolete(pushop)
        _pushbookmark(pushop)

    return pushop
482
482
# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}
490
490
def pushdiscovery(stepname):
    """decorator registering a discovery step to run before push

    The decorated function is recorded in the step -> function mapping and
    its name appended to the ordered step list, so registration order is
    execution order (this may matter).

    Only use this decorator for a brand new step; to wrap a step defined
    by an extension, change the pushdiscovery dictionary directly."""
    def register(func):
        # a step must not be registered twice under the same name
        assert stepname not in pushdiscoverymapping
        pushdiscoveryorder.append(stepname)
        pushdiscoverymapping[stepname] = func
        return func
    return register
506
506
507 def _pushdiscovery(pushop):
507 def _pushdiscovery(pushop):
508 """Run all discovery steps"""
508 """Run all discovery steps"""
509 for stepname in pushdiscoveryorder:
509 for stepname in pushdiscoveryorder:
510 step = pushdiscoverymapping[stepname]
510 step = pushdiscoverymapping[stepname]
511 step(pushop)
511 step(pushop)
512
512
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changeset that need to be pushed"""
    fci = discovery.findcommonincoming
    if pushop.revs:
        # restrict common discovery to the ancestors of the pushed set:
        # common changesets outside their ancestry are irrelevant here
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
                        ancestorsof=pushop.revs)
    else:
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    # record the discovery results on the push operation for later steps
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
525
529
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3781 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        pushop.outdatedphases = []
        pushop.fallbackoutdatedphases = []
        return

    pushop.remotephases = phases.remotephasessummary(pushop.repo,
                                                     pushop.fallbackheads,
                                                     remotephases)
    droots = pushop.remotephases.draftroots

    extracond = ''
    if not pushop.remotephases.publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        # nothing pushed: the fallback computation is also the success one
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                       outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    # phases to update if the changegroup push succeeds / fails
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
577
581
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """Collect the obsolescence markers relevant to the pushed set.

    Does nothing unless marker exchange is enabled, the local obsstore is
    non-empty and the remote advertises the 'obsolete' namespace.
    """
    repo = pushop.repo
    if not obsolete.isenabled(repo, obsolete.exchangeopt):
        return
    if not repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (ctx.node() for ctx in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
588
592
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """discover the bookmark moves to send to the remote

    Fills ``pushop.outbookmarks`` with (name, remote-id, local-id) hex
    tuples and warns about explicitly requested bookmarks that exist on
    neither side (setting ``pushop.bkresult`` to 2).
    """
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # only bookmarks within the pushed set may be moved
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    explicit = set([repo._bookmarks.expandname(bookmark)
                    for bookmark in pushop.bookmarks])

    remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)

    def safehex(x):
        # hex() chokes on None (bookmark absent on one side)
        if x is None:
            return x
        return hex(x)

    def hexifycompbookmarks(bookmarks):
        for b, scid, dcid in bookmarks:
            yield b, safehex(scid), safehex(dcid)

    comp = [hexifycompbookmarks(marks) for marks in comp]
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    # bookmarks that advanced locally (remote can be fast-forwarded)
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
        # treat as "deleted locally"
        pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
653
657
def _pushcheckoutgoing(pushop):
    """Validate the outgoing set; return True when there is work to do.

    Unless the push is forced, aborts when the push would propagate an
    obsolete or unstable changeset, then delegates remote-head checking
    to discovery.checkheads().
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mspd = _("push includes phase-divergent changeset: %s!")
            mscd = _("push includes content-divergent changeset: %s!")
            mst = {"orphan": _("push includes orphan changeset: %s!"),
                   "phase-divergent": mspd,
                   "content-divergent": mscd}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

    discovery.checkheads(pushop)
    return True
688
692
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}
696
700
def b2partsgenerator(stepname, idx=None):
    """decorator registering a bundle2 part generation step

    The decorated function is recorded in the step -> function mapping
    and its name inserted into the ordered step list: at position ``idx``
    when given, at the end otherwise. Registration order matters.

    Only use this decorator for new steps; to wrap a step defined by an
    extension, modify the b2partsgenmapping dictionary directly."""
    def register(func):
        # refuse duplicated step names
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is not None:
            b2partsgenorder.insert(idx, stepname)
        else:
            b2partsgenorder.append(stepname)
        return func
    return register
715
719
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # * 'force' do not check for push race,
    # * if we don't push anything, there are nothing to check.
    if not pushop.force and pushop.outgoing.missingheads:
        allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
        emptyremote = pushop.pushbranchmap is None
        if not allowunrelated or emptyremote:
            # coarse check: send the full list of known remote heads
            bundler.newpart('check:heads', data=iter(pushop.remoteheads))
        else:
            # finer check: only send the remote heads this push actually
            # interacts with (discarded or superseded by new heads)
            affected = set()
            for branch, heads in pushop.pushbranchmap.iteritems():
                remoteheads, newheads, unsyncedheads, discardedheads = heads
                if remoteheads is not None:
                    remote = set(remoteheads)
                    affected |= set(discardedheads) & remote
                    affected |= remote - set(newheads)
            if affected:
                data = iter(sorted(affected))
                bundler.newpart('check:updated-heads', data=data)
739
743
740 def _pushing(pushop):
744 def _pushing(pushop):
741 """return True if we are pushing anything"""
745 """return True if we are pushing anything"""
742 return bool(pushop.outgoing.missing
746 return bool(pushop.outgoing.missing
743 or pushop.outdatedphases
747 or pushop.outdatedphases
744 or pushop.outobsmarkers
748 or pushop.outobsmarkers
745 or pushop.outbookmarks)
749 or pushop.outbookmarks)
746
750
@b2partsgenerator('check-bookmarks')
def _pushb2checkbookmarks(pushop, bundler):
    """insert bookmark move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasbookmarkcheck = 'bookmarks' in b2caps
    if not (pushop.outbookmarks and hasbookmarkcheck):
        return
    # encode the (name, expected remote value) pairs; presumably the
    # server aborts if a bookmark no longer points to 'old' -- confirm
    # against the bundle2 'check:bookmarks' handler
    data = []
    for book, old, new in pushop.outbookmarks:
        old = bin(old)
        data.append((book, old))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart('check:bookmarks', data=checkdata)
762
766
@b2partsgenerator('check-phases')
def _pushb2checkphases(pushop, bundler):
    """insert phase move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasphaseheads = 'heads' in b2caps.get('phases', ())
    if pushop.remotephases is not None and hasphaseheads:
        # check that the remote phase has not changed
        checks = [[] for p in phases.allphases]
        checks[phases.public].extend(pushop.remotephases.publicheads)
        checks[phases.draft].extend(pushop.remotephases.draftroots)
        if any(checks):
            # sort for a deterministic binary encoding
            for nodes in checks:
                nodes.sort()
            checkdata = phases.binaryencode(checks)
            bundler.newpart('check:phases', data=checkdata)
780
784
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    # negotiate the highest changegroup version both sides support
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
                                      'push')
    cgpart = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
820
824
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui

    legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
    haspushkey = 'pushkey' in b2caps
    hasphaseheads = 'heads' in b2caps.get('phases', ())

    # prefer the binary phase-heads part unless legacy exchange is
    # requested; otherwise fall back to the pushkey-based protocol
    if hasphaseheads and not legacyphase:
        return _pushb2phaseheads(pushop, bundler)
    elif haspushkey:
        return _pushb2phasespushkey(pushop, bundler)
837
841
838 def _pushb2phaseheads(pushop, bundler):
842 def _pushb2phaseheads(pushop, bundler):
839 """push phase information through a bundle2 - binary part"""
843 """push phase information through a bundle2 - binary part"""
840 pushop.stepsdone.add('phases')
844 pushop.stepsdone.add('phases')
841 if pushop.outdatedphases:
845 if pushop.outdatedphases:
842 updates = [[] for p in phases.allphases]
846 updates = [[] for p in phases.allphases]
843 updates[0].extend(h.node() for h in pushop.outdatedphases)
847 updates[0].extend(h.node() for h in pushop.outdatedphases)
844 phasedata = phases.binaryencode(updates)
848 phasedata = phases.binaryencode(updates)
845 bundler.newpart('phase-heads', data=phasedata)
849 bundler.newpart('phase-heads', data=phasedata)
846
850
847 def _pushb2phasespushkey(pushop, bundler):
851 def _pushb2phasespushkey(pushop, bundler):
848 """push phase information through a bundle2 - pushkey part"""
852 """push phase information through a bundle2 - pushkey part"""
849 pushop.stepsdone.add('phases')
853 pushop.stepsdone.add('phases')
850 part2node = []
854 part2node = []
851
855
852 def handlefailure(pushop, exc):
856 def handlefailure(pushop, exc):
853 targetid = int(exc.partid)
857 targetid = int(exc.partid)
854 for partid, node in part2node:
858 for partid, node in part2node:
855 if partid == targetid:
859 if partid == targetid:
856 raise error.Abort(_('updating %s to public failed') % node)
860 raise error.Abort(_('updating %s to public failed') % node)
857
861
858 enc = pushkey.encode
862 enc = pushkey.encode
859 for newremotehead in pushop.outdatedphases:
863 for newremotehead in pushop.outdatedphases:
860 part = bundler.newpart('pushkey')
864 part = bundler.newpart('pushkey')
861 part.addparam('namespace', enc('phases'))
865 part.addparam('namespace', enc('phases'))
862 part.addparam('key', enc(newremotehead.hex()))
866 part.addparam('key', enc(newremotehead.hex()))
863 part.addparam('old', enc('%d' % phases.draft))
867 part.addparam('old', enc('%d' % phases.draft))
864 part.addparam('new', enc('%d' % phases.public))
868 part.addparam('new', enc('%d' % phases.public))
865 part2node.append((part.id, newremotehead))
869 part2node.append((part.id, newremotehead))
866 pushop.pkfailcb[part.id] = handlefailure
870 pushop.pkfailcb[part.id] = handlefailure
867
871
868 def handlereply(op):
872 def handlereply(op):
869 for partid, node in part2node:
873 for partid, node in part2node:
870 partrep = op.records.getreplies(partid)
874 partrep = op.records.getreplies(partid)
871 results = partrep['pushkey']
875 results = partrep['pushkey']
872 assert len(results) <= 1
876 assert len(results) <= 1
873 msg = None
877 msg = None
874 if not results:
878 if not results:
875 msg = _('server ignored update of %s to public!\n') % node
879 msg = _('server ignored update of %s to public!\n') % node
876 elif not int(results[0]['return']):
880 elif not int(results[0]['return']):
877 msg = _('updating %s to public failed!\n') % node
881 msg = _('updating %s to public failed!\n') % node
878 if msg is not None:
882 if msg is not None:
879 pushop.ui.warn(msg)
883 pushop.ui.warn(msg)
880 return handlereply
884 return handlereply
881
885
882 @b2partsgenerator('obsmarkers')
886 @b2partsgenerator('obsmarkers')
883 def _pushb2obsmarkers(pushop, bundler):
887 def _pushb2obsmarkers(pushop, bundler):
884 if 'obsmarkers' in pushop.stepsdone:
888 if 'obsmarkers' in pushop.stepsdone:
885 return
889 return
886 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
890 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
887 if obsolete.commonversion(remoteversions) is None:
891 if obsolete.commonversion(remoteversions) is None:
888 return
892 return
889 pushop.stepsdone.add('obsmarkers')
893 pushop.stepsdone.add('obsmarkers')
890 if pushop.outobsmarkers:
894 if pushop.outobsmarkers:
891 markers = sorted(pushop.outobsmarkers)
895 markers = sorted(pushop.outobsmarkers)
892 bundle2.buildobsmarkerspart(bundler, markers)
896 bundle2.buildobsmarkerspart(bundler, markers)
893
897
894 @b2partsgenerator('bookmarks')
898 @b2partsgenerator('bookmarks')
895 def _pushb2bookmarks(pushop, bundler):
899 def _pushb2bookmarks(pushop, bundler):
896 """handle bookmark push through bundle2"""
900 """handle bookmark push through bundle2"""
897 if 'bookmarks' in pushop.stepsdone:
901 if 'bookmarks' in pushop.stepsdone:
898 return
902 return
899 b2caps = bundle2.bundle2caps(pushop.remote)
903 b2caps = bundle2.bundle2caps(pushop.remote)
900
904
901 legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
905 legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
902 legacybooks = 'bookmarks' in legacy
906 legacybooks = 'bookmarks' in legacy
903
907
904 if not legacybooks and 'bookmarks' in b2caps:
908 if not legacybooks and 'bookmarks' in b2caps:
905 return _pushb2bookmarkspart(pushop, bundler)
909 return _pushb2bookmarkspart(pushop, bundler)
906 elif 'pushkey' in b2caps:
910 elif 'pushkey' in b2caps:
907 return _pushb2bookmarkspushkey(pushop, bundler)
911 return _pushb2bookmarkspushkey(pushop, bundler)
908
912
909 def _bmaction(old, new):
913 def _bmaction(old, new):
910 """small utility for bookmark pushing"""
914 """small utility for bookmark pushing"""
911 if not old:
915 if not old:
912 return 'export'
916 return 'export'
913 elif not new:
917 elif not new:
914 return 'delete'
918 return 'delete'
915 return 'update'
919 return 'update'
916
920
917 def _pushb2bookmarkspart(pushop, bundler):
921 def _pushb2bookmarkspart(pushop, bundler):
918 pushop.stepsdone.add('bookmarks')
922 pushop.stepsdone.add('bookmarks')
919 if not pushop.outbookmarks:
923 if not pushop.outbookmarks:
920 return
924 return
921
925
922 allactions = []
926 allactions = []
923 data = []
927 data = []
924 for book, old, new in pushop.outbookmarks:
928 for book, old, new in pushop.outbookmarks:
925 new = bin(new)
929 new = bin(new)
926 data.append((book, new))
930 data.append((book, new))
927 allactions.append((book, _bmaction(old, new)))
931 allactions.append((book, _bmaction(old, new)))
928 checkdata = bookmod.binaryencode(data)
932 checkdata = bookmod.binaryencode(data)
929 bundler.newpart('bookmarks', data=checkdata)
933 bundler.newpart('bookmarks', data=checkdata)
930
934
931 def handlereply(op):
935 def handlereply(op):
932 ui = pushop.ui
936 ui = pushop.ui
933 # if success
937 # if success
934 for book, action in allactions:
938 for book, action in allactions:
935 ui.status(bookmsgmap[action][0] % book)
939 ui.status(bookmsgmap[action][0] % book)
936
940
937 return handlereply
941 return handlereply
938
942
939 def _pushb2bookmarkspushkey(pushop, bundler):
943 def _pushb2bookmarkspushkey(pushop, bundler):
940 pushop.stepsdone.add('bookmarks')
944 pushop.stepsdone.add('bookmarks')
941 part2book = []
945 part2book = []
942 enc = pushkey.encode
946 enc = pushkey.encode
943
947
944 def handlefailure(pushop, exc):
948 def handlefailure(pushop, exc):
945 targetid = int(exc.partid)
949 targetid = int(exc.partid)
946 for partid, book, action in part2book:
950 for partid, book, action in part2book:
947 if partid == targetid:
951 if partid == targetid:
948 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
952 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
949 # we should not be called for part we did not generated
953 # we should not be called for part we did not generated
950 assert False
954 assert False
951
955
952 for book, old, new in pushop.outbookmarks:
956 for book, old, new in pushop.outbookmarks:
953 part = bundler.newpart('pushkey')
957 part = bundler.newpart('pushkey')
954 part.addparam('namespace', enc('bookmarks'))
958 part.addparam('namespace', enc('bookmarks'))
955 part.addparam('key', enc(book))
959 part.addparam('key', enc(book))
956 part.addparam('old', enc(old))
960 part.addparam('old', enc(old))
957 part.addparam('new', enc(new))
961 part.addparam('new', enc(new))
958 action = 'update'
962 action = 'update'
959 if not old:
963 if not old:
960 action = 'export'
964 action = 'export'
961 elif not new:
965 elif not new:
962 action = 'delete'
966 action = 'delete'
963 part2book.append((part.id, book, action))
967 part2book.append((part.id, book, action))
964 pushop.pkfailcb[part.id] = handlefailure
968 pushop.pkfailcb[part.id] = handlefailure
965
969
966 def handlereply(op):
970 def handlereply(op):
967 ui = pushop.ui
971 ui = pushop.ui
968 for partid, book, action in part2book:
972 for partid, book, action in part2book:
969 partrep = op.records.getreplies(partid)
973 partrep = op.records.getreplies(partid)
970 results = partrep['pushkey']
974 results = partrep['pushkey']
971 assert len(results) <= 1
975 assert len(results) <= 1
972 if not results:
976 if not results:
973 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
977 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
974 else:
978 else:
975 ret = int(results[0]['return'])
979 ret = int(results[0]['return'])
976 if ret:
980 if ret:
977 ui.status(bookmsgmap[action][0] % book)
981 ui.status(bookmsgmap[action][0] % book)
978 else:
982 else:
979 ui.warn(bookmsgmap[action][1] % book)
983 ui.warn(bookmsgmap[action][1] % book)
980 if pushop.bkresult is not None:
984 if pushop.bkresult is not None:
981 pushop.bkresult = 1
985 pushop.bkresult = 1
982 return handlereply
986 return handlereply
983
987
984 @b2partsgenerator('pushvars', idx=0)
988 @b2partsgenerator('pushvars', idx=0)
985 def _getbundlesendvars(pushop, bundler):
989 def _getbundlesendvars(pushop, bundler):
986 '''send shellvars via bundle2'''
990 '''send shellvars via bundle2'''
987 pushvars = pushop.pushvars
991 pushvars = pushop.pushvars
988 if pushvars:
992 if pushvars:
989 shellvars = {}
993 shellvars = {}
990 for raw in pushvars:
994 for raw in pushvars:
991 if '=' not in raw:
995 if '=' not in raw:
992 msg = ("unable to parse variable '%s', should follow "
996 msg = ("unable to parse variable '%s', should follow "
993 "'KEY=VALUE' or 'KEY=' format")
997 "'KEY=VALUE' or 'KEY=' format")
994 raise error.Abort(msg % raw)
998 raise error.Abort(msg % raw)
995 k, v = raw.split('=', 1)
999 k, v = raw.split('=', 1)
996 shellvars[k] = v
1000 shellvars[k] = v
997
1001
998 part = bundler.newpart('pushvars')
1002 part = bundler.newpart('pushvars')
999
1003
1000 for key, value in shellvars.iteritems():
1004 for key, value in shellvars.iteritems():
1001 part.addparam(key, value, mandatory=False)
1005 part.addparam(key, value, mandatory=False)
1002
1006
1003 def _pushbundle2(pushop):
1007 def _pushbundle2(pushop):
1004 """push data to the remote using bundle2
1008 """push data to the remote using bundle2
1005
1009
1006 The only currently supported type of data is changegroup but this will
1010 The only currently supported type of data is changegroup but this will
1007 evolve in the future."""
1011 evolve in the future."""
1008 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1012 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1009 pushback = (pushop.trmanager
1013 pushback = (pushop.trmanager
1010 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
1014 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
1011
1015
1012 # create reply capability
1016 # create reply capability
1013 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
1017 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
1014 allowpushback=pushback))
1018 allowpushback=pushback))
1015 bundler.newpart('replycaps', data=capsblob)
1019 bundler.newpart('replycaps', data=capsblob)
1016 replyhandlers = []
1020 replyhandlers = []
1017 for partgenname in b2partsgenorder:
1021 for partgenname in b2partsgenorder:
1018 partgen = b2partsgenmapping[partgenname]
1022 partgen = b2partsgenmapping[partgenname]
1019 ret = partgen(pushop, bundler)
1023 ret = partgen(pushop, bundler)
1020 if callable(ret):
1024 if callable(ret):
1021 replyhandlers.append(ret)
1025 replyhandlers.append(ret)
1022 # do not push if nothing to push
1026 # do not push if nothing to push
1023 if bundler.nbparts <= 1:
1027 if bundler.nbparts <= 1:
1024 return
1028 return
1025 stream = util.chunkbuffer(bundler.getchunks())
1029 stream = util.chunkbuffer(bundler.getchunks())
1026 try:
1030 try:
1027 try:
1031 try:
1028 reply = pushop.remote.unbundle(
1032 reply = pushop.remote.unbundle(
1029 stream, ['force'], pushop.remote.url())
1033 stream, ['force'], pushop.remote.url())
1030 except error.BundleValueError as exc:
1034 except error.BundleValueError as exc:
1031 raise error.Abort(_('missing support for %s') % exc)
1035 raise error.Abort(_('missing support for %s') % exc)
1032 try:
1036 try:
1033 trgetter = None
1037 trgetter = None
1034 if pushback:
1038 if pushback:
1035 trgetter = pushop.trmanager.transaction
1039 trgetter = pushop.trmanager.transaction
1036 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1040 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1037 except error.BundleValueError as exc:
1041 except error.BundleValueError as exc:
1038 raise error.Abort(_('missing support for %s') % exc)
1042 raise error.Abort(_('missing support for %s') % exc)
1039 except bundle2.AbortFromPart as exc:
1043 except bundle2.AbortFromPart as exc:
1040 pushop.ui.status(_('remote: %s\n') % exc)
1044 pushop.ui.status(_('remote: %s\n') % exc)
1041 if exc.hint is not None:
1045 if exc.hint is not None:
1042 pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
1046 pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
1043 raise error.Abort(_('push failed on remote'))
1047 raise error.Abort(_('push failed on remote'))
1044 except error.PushkeyFailed as exc:
1048 except error.PushkeyFailed as exc:
1045 partid = int(exc.partid)
1049 partid = int(exc.partid)
1046 if partid not in pushop.pkfailcb:
1050 if partid not in pushop.pkfailcb:
1047 raise
1051 raise
1048 pushop.pkfailcb[partid](pushop, exc)
1052 pushop.pkfailcb[partid](pushop, exc)
1049 for rephand in replyhandlers:
1053 for rephand in replyhandlers:
1050 rephand(op)
1054 rephand(op)
1051
1055
1052 def _pushchangeset(pushop):
1056 def _pushchangeset(pushop):
1053 """Make the actual push of changeset bundle to remote repo"""
1057 """Make the actual push of changeset bundle to remote repo"""
1054 if 'changesets' in pushop.stepsdone:
1058 if 'changesets' in pushop.stepsdone:
1055 return
1059 return
1056 pushop.stepsdone.add('changesets')
1060 pushop.stepsdone.add('changesets')
1057 if not _pushcheckoutgoing(pushop):
1061 if not _pushcheckoutgoing(pushop):
1058 return
1062 return
1059
1063
1060 # Should have verified this in push().
1064 # Should have verified this in push().
1061 assert pushop.remote.capable('unbundle')
1065 assert pushop.remote.capable('unbundle')
1062
1066
1063 pushop.repo.prepushoutgoinghooks(pushop)
1067 pushop.repo.prepushoutgoinghooks(pushop)
1064 outgoing = pushop.outgoing
1068 outgoing = pushop.outgoing
1065 # TODO: get bundlecaps from remote
1069 # TODO: get bundlecaps from remote
1066 bundlecaps = None
1070 bundlecaps = None
1067 # create a changegroup from local
1071 # create a changegroup from local
1068 if pushop.revs is None and not (outgoing.excluded
1072 if pushop.revs is None and not (outgoing.excluded
1069 or pushop.repo.changelog.filteredrevs):
1073 or pushop.repo.changelog.filteredrevs):
1070 # push everything,
1074 # push everything,
1071 # use the fast path, no race possible on push
1075 # use the fast path, no race possible on push
1072 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
1076 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
1073 fastpath=True, bundlecaps=bundlecaps)
1077 fastpath=True, bundlecaps=bundlecaps)
1074 else:
1078 else:
1075 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
1079 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
1076 'push', bundlecaps=bundlecaps)
1080 'push', bundlecaps=bundlecaps)
1077
1081
1078 # apply changegroup to remote
1082 # apply changegroup to remote
1079 # local repo finds heads on server, finds out what
1083 # local repo finds heads on server, finds out what
1080 # revs it must push. once revs transferred, if server
1084 # revs it must push. once revs transferred, if server
1081 # finds it has different heads (someone else won
1085 # finds it has different heads (someone else won
1082 # commit/push race), server aborts.
1086 # commit/push race), server aborts.
1083 if pushop.force:
1087 if pushop.force:
1084 remoteheads = ['force']
1088 remoteheads = ['force']
1085 else:
1089 else:
1086 remoteheads = pushop.remoteheads
1090 remoteheads = pushop.remoteheads
1087 # ssh: return remote's addchangegroup()
1091 # ssh: return remote's addchangegroup()
1088 # http: return remote's addchangegroup() or 0 for error
1092 # http: return remote's addchangegroup() or 0 for error
1089 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
1093 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
1090 pushop.repo.url())
1094 pushop.repo.url())
1091
1095
1092 def _pushsyncphase(pushop):
1096 def _pushsyncphase(pushop):
1093 """synchronise phase information locally and remotely"""
1097 """synchronise phase information locally and remotely"""
1094 cheads = pushop.commonheads
1098 cheads = pushop.commonheads
1095 # even when we don't push, exchanging phase data is useful
1099 # even when we don't push, exchanging phase data is useful
1096 remotephases = pushop.remote.listkeys('phases')
1100 remotephases = pushop.remote.listkeys('phases')
1097 if (pushop.ui.configbool('ui', '_usedassubrepo')
1101 if (pushop.ui.configbool('ui', '_usedassubrepo')
1098 and remotephases # server supports phases
1102 and remotephases # server supports phases
1099 and pushop.cgresult is None # nothing was pushed
1103 and pushop.cgresult is None # nothing was pushed
1100 and remotephases.get('publishing', False)):
1104 and remotephases.get('publishing', False)):
1101 # When:
1105 # When:
1102 # - this is a subrepo push
1106 # - this is a subrepo push
1103 # - and remote support phase
1107 # - and remote support phase
1104 # - and no changeset was pushed
1108 # - and no changeset was pushed
1105 # - and remote is publishing
1109 # - and remote is publishing
1106 # We may be in issue 3871 case!
1110 # We may be in issue 3871 case!
1107 # We drop the possible phase synchronisation done by
1111 # We drop the possible phase synchronisation done by
1108 # courtesy to publish changesets possibly locally draft
1112 # courtesy to publish changesets possibly locally draft
1109 # on the remote.
1113 # on the remote.
1110 remotephases = {'publishing': 'True'}
1114 remotephases = {'publishing': 'True'}
1111 if not remotephases: # old server or public only reply from non-publishing
1115 if not remotephases: # old server or public only reply from non-publishing
1112 _localphasemove(pushop, cheads)
1116 _localphasemove(pushop, cheads)
1113 # don't push any phase data as there is nothing to push
1117 # don't push any phase data as there is nothing to push
1114 else:
1118 else:
1115 ana = phases.analyzeremotephases(pushop.repo, cheads,
1119 ana = phases.analyzeremotephases(pushop.repo, cheads,
1116 remotephases)
1120 remotephases)
1117 pheads, droots = ana
1121 pheads, droots = ana
1118 ### Apply remote phase on local
1122 ### Apply remote phase on local
1119 if remotephases.get('publishing', False):
1123 if remotephases.get('publishing', False):
1120 _localphasemove(pushop, cheads)
1124 _localphasemove(pushop, cheads)
1121 else: # publish = False
1125 else: # publish = False
1122 _localphasemove(pushop, pheads)
1126 _localphasemove(pushop, pheads)
1123 _localphasemove(pushop, cheads, phases.draft)
1127 _localphasemove(pushop, cheads, phases.draft)
1124 ### Apply local phase on remote
1128 ### Apply local phase on remote
1125
1129
1126 if pushop.cgresult:
1130 if pushop.cgresult:
1127 if 'phases' in pushop.stepsdone:
1131 if 'phases' in pushop.stepsdone:
1128 # phases already pushed though bundle2
1132 # phases already pushed though bundle2
1129 return
1133 return
1130 outdated = pushop.outdatedphases
1134 outdated = pushop.outdatedphases
1131 else:
1135 else:
1132 outdated = pushop.fallbackoutdatedphases
1136 outdated = pushop.fallbackoutdatedphases
1133
1137
1134 pushop.stepsdone.add('phases')
1138 pushop.stepsdone.add('phases')
1135
1139
1136 # filter heads already turned public by the push
1140 # filter heads already turned public by the push
1137 outdated = [c for c in outdated if c.node() not in pheads]
1141 outdated = [c for c in outdated if c.node() not in pheads]
1138 # fallback to independent pushkey command
1142 # fallback to independent pushkey command
1139 for newremotehead in outdated:
1143 for newremotehead in outdated:
1140 r = pushop.remote.pushkey('phases',
1144 r = pushop.remote.pushkey('phases',
1141 newremotehead.hex(),
1145 newremotehead.hex(),
1142 str(phases.draft),
1146 str(phases.draft),
1143 str(phases.public))
1147 str(phases.public))
1144 if not r:
1148 if not r:
1145 pushop.ui.warn(_('updating %s to public failed!\n')
1149 pushop.ui.warn(_('updating %s to public failed!\n')
1146 % newremotehead)
1150 % newremotehead)
1147
1151
1148 def _localphasemove(pushop, nodes, phase=phases.public):
1152 def _localphasemove(pushop, nodes, phase=phases.public):
1149 """move <nodes> to <phase> in the local source repo"""
1153 """move <nodes> to <phase> in the local source repo"""
1150 if pushop.trmanager:
1154 if pushop.trmanager:
1151 phases.advanceboundary(pushop.repo,
1155 phases.advanceboundary(pushop.repo,
1152 pushop.trmanager.transaction(),
1156 pushop.trmanager.transaction(),
1153 phase,
1157 phase,
1154 nodes)
1158 nodes)
1155 else:
1159 else:
1156 # repo is not locked, do not change any phases!
1160 # repo is not locked, do not change any phases!
1157 # Informs the user that phases should have been moved when
1161 # Informs the user that phases should have been moved when
1158 # applicable.
1162 # applicable.
1159 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1163 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1160 phasestr = phases.phasenames[phase]
1164 phasestr = phases.phasenames[phase]
1161 if actualmoves:
1165 if actualmoves:
1162 pushop.ui.status(_('cannot lock source repo, skipping '
1166 pushop.ui.status(_('cannot lock source repo, skipping '
1163 'local %s phase update\n') % phasestr)
1167 'local %s phase update\n') % phasestr)
1164
1168
1165 def _pushobsolete(pushop):
1169 def _pushobsolete(pushop):
1166 """utility function to push obsolete markers to a remote"""
1170 """utility function to push obsolete markers to a remote"""
1167 if 'obsmarkers' in pushop.stepsdone:
1171 if 'obsmarkers' in pushop.stepsdone:
1168 return
1172 return
1169 repo = pushop.repo
1173 repo = pushop.repo
1170 remote = pushop.remote
1174 remote = pushop.remote
1171 pushop.stepsdone.add('obsmarkers')
1175 pushop.stepsdone.add('obsmarkers')
1172 if pushop.outobsmarkers:
1176 if pushop.outobsmarkers:
1173 pushop.ui.debug('try to push obsolete markers to remote\n')
1177 pushop.ui.debug('try to push obsolete markers to remote\n')
1174 rslts = []
1178 rslts = []
1175 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1179 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1176 for key in sorted(remotedata, reverse=True):
1180 for key in sorted(remotedata, reverse=True):
1177 # reverse sort to ensure we end with dump0
1181 # reverse sort to ensure we end with dump0
1178 data = remotedata[key]
1182 data = remotedata[key]
1179 rslts.append(remote.pushkey('obsolete', key, '', data))
1183 rslts.append(remote.pushkey('obsolete', key, '', data))
1180 if [r for r in rslts if not r]:
1184 if [r for r in rslts if not r]:
1181 msg = _('failed to push some obsolete markers!\n')
1185 msg = _('failed to push some obsolete markers!\n')
1182 repo.ui.warn(msg)
1186 repo.ui.warn(msg)
1183
1187
1184 def _pushbookmark(pushop):
1188 def _pushbookmark(pushop):
1185 """Update bookmark position on remote"""
1189 """Update bookmark position on remote"""
1186 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1190 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1187 return
1191 return
1188 pushop.stepsdone.add('bookmarks')
1192 pushop.stepsdone.add('bookmarks')
1189 ui = pushop.ui
1193 ui = pushop.ui
1190 remote = pushop.remote
1194 remote = pushop.remote
1191
1195
1192 for b, old, new in pushop.outbookmarks:
1196 for b, old, new in pushop.outbookmarks:
1193 action = 'update'
1197 action = 'update'
1194 if not old:
1198 if not old:
1195 action = 'export'
1199 action = 'export'
1196 elif not new:
1200 elif not new:
1197 action = 'delete'
1201 action = 'delete'
1198 if remote.pushkey('bookmarks', b, old, new):
1202 if remote.pushkey('bookmarks', b, old, new):
1199 ui.status(bookmsgmap[action][0] % b)
1203 ui.status(bookmsgmap[action][0] % b)
1200 else:
1204 else:
1201 ui.warn(bookmsgmap[action][1] % b)
1205 ui.warn(bookmsgmap[action][1] % b)
1202 # discovery can have set the value form invalid entry
1206 # discovery can have set the value form invalid entry
1203 if pushop.bkresult is not None:
1207 if pushop.bkresult is not None:
1204 pushop.bkresult = 1
1208 pushop.bkresult = 1
1205
1209
1206 class pulloperation(object):
1210 class pulloperation(object):
1207 """A object that represent a single pull operation
1211 """A object that represent a single pull operation
1208
1212
1209 It purpose is to carry pull related state and very common operation.
1213 It purpose is to carry pull related state and very common operation.
1210
1214
1211 A new should be created at the beginning of each pull and discarded
1215 A new should be created at the beginning of each pull and discarded
1212 afterward.
1216 afterward.
1213 """
1217 """
1214
1218
1215 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1219 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1216 remotebookmarks=None, streamclonerequested=None):
1220 remotebookmarks=None, streamclonerequested=None):
1217 # repo we pull into
1221 # repo we pull into
1218 self.repo = repo
1222 self.repo = repo
1219 # repo we pull from
1223 # repo we pull from
1220 self.remote = remote
1224 self.remote = remote
1221 # revision we try to pull (None is "all")
1225 # revision we try to pull (None is "all")
1222 self.heads = heads
1226 self.heads = heads
1223 # bookmark pulled explicitly
1227 # bookmark pulled explicitly
1224 self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
1228 self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
1225 for bookmark in bookmarks]
1229 for bookmark in bookmarks]
1226 # do we force pull?
1230 # do we force pull?
1227 self.force = force
1231 self.force = force
1228 # whether a streaming clone was requested
1232 # whether a streaming clone was requested
1229 self.streamclonerequested = streamclonerequested
1233 self.streamclonerequested = streamclonerequested
1230 # transaction manager
1234 # transaction manager
1231 self.trmanager = None
1235 self.trmanager = None
1232 # set of common changeset between local and remote before pull
1236 # set of common changeset between local and remote before pull
1233 self.common = None
1237 self.common = None
1234 # set of pulled head
1238 # set of pulled head
1235 self.rheads = None
1239 self.rheads = None
1236 # list of missing changeset to fetch remotely
1240 # list of missing changeset to fetch remotely
1237 self.fetch = None
1241 self.fetch = None
1238 # remote bookmarks data
1242 # remote bookmarks data
1239 self.remotebookmarks = remotebookmarks
1243 self.remotebookmarks = remotebookmarks
1240 # result of changegroup pulling (used as return code by pull)
1244 # result of changegroup pulling (used as return code by pull)
1241 self.cgresult = None
1245 self.cgresult = None
1242 # list of step already done
1246 # list of step already done
1243 self.stepsdone = set()
1247 self.stepsdone = set()
1244 # Whether we attempted a clone from pre-generated bundles.
1248 # Whether we attempted a clone from pre-generated bundles.
1245 self.clonebundleattempted = False
1249 self.clonebundleattempted = False
1246
1250
1247 @util.propertycache
1251 @util.propertycache
1248 def pulledsubset(self):
1252 def pulledsubset(self):
1249 """heads of the set of changeset target by the pull"""
1253 """heads of the set of changeset target by the pull"""
1250 # compute target subset
1254 # compute target subset
1251 if self.heads is None:
1255 if self.heads is None:
1252 # We pulled every thing possible
1256 # We pulled every thing possible
1253 # sync on everything common
1257 # sync on everything common
1254 c = set(self.common)
1258 c = set(self.common)
1255 ret = list(self.common)
1259 ret = list(self.common)
1256 for n in self.rheads:
1260 for n in self.rheads:
1257 if n not in c:
1261 if n not in c:
1258 ret.append(n)
1262 ret.append(n)
1259 return ret
1263 return ret
1260 else:
1264 else:
1261 # We pulled a specific subset
1265 # We pulled a specific subset
1262 # sync on this subset
1266 # sync on this subset
1263 return self.heads
1267 return self.heads
1264
1268
1265 @util.propertycache
1269 @util.propertycache
1266 def canusebundle2(self):
1270 def canusebundle2(self):
1267 return not _forcebundle1(self)
1271 return not _forcebundle1(self)
1268
1272
1269 @util.propertycache
1273 @util.propertycache
1270 def remotebundle2caps(self):
1274 def remotebundle2caps(self):
1271 return bundle2.bundle2caps(self.remote)
1275 return bundle2.bundle2caps(self.remote)
1272
1276
1273 def gettransaction(self):
1277 def gettransaction(self):
1274 # deprecated; talk to trmanager directly
1278 # deprecated; talk to trmanager directly
1275 return self.trmanager.transaction()
1279 return self.trmanager.transaction()
1276
1280
1277 class transactionmanager(util.transactional):
1281 class transactionmanager(util.transactional):
1278 """An object to manage the life cycle of a transaction
1282 """An object to manage the life cycle of a transaction
1279
1283
1280 It creates the transaction on demand and calls the appropriate hooks when
1284 It creates the transaction on demand and calls the appropriate hooks when
1281 closing the transaction."""
1285 closing the transaction."""
1282 def __init__(self, repo, source, url):
1286 def __init__(self, repo, source, url):
1283 self.repo = repo
1287 self.repo = repo
1284 self.source = source
1288 self.source = source
1285 self.url = url
1289 self.url = url
1286 self._tr = None
1290 self._tr = None
1287
1291
1288 def transaction(self):
1292 def transaction(self):
1289 """Return an open transaction object, constructing if necessary"""
1293 """Return an open transaction object, constructing if necessary"""
1290 if not self._tr:
1294 if not self._tr:
1291 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1295 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1292 self._tr = self.repo.transaction(trname)
1296 self._tr = self.repo.transaction(trname)
1293 self._tr.hookargs['source'] = self.source
1297 self._tr.hookargs['source'] = self.source
1294 self._tr.hookargs['url'] = self.url
1298 self._tr.hookargs['url'] = self.url
1295 return self._tr
1299 return self._tr
1296
1300
1297 def close(self):
1301 def close(self):
1298 """close transaction if created"""
1302 """close transaction if created"""
1299 if self._tr is not None:
1303 if self._tr is not None:
1300 self._tr.close()
1304 self._tr.close()
1301
1305
1302 def release(self):
1306 def release(self):
1303 """release transaction if created"""
1307 """release transaction if created"""
1304 if self._tr is not None:
1308 if self._tr is not None:
1305 self._tr.release()
1309 self._tr.release()
1306
1310
1307 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1311 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1308 streamclonerequested=None):
1312 streamclonerequested=None):
1309 """Fetch repository data from a remote.
1313 """Fetch repository data from a remote.
1310
1314
1311 This is the main function used to retrieve data from a remote repository.
1315 This is the main function used to retrieve data from a remote repository.
1312
1316
1313 ``repo`` is the local repository to clone into.
1317 ``repo`` is the local repository to clone into.
1314 ``remote`` is a peer instance.
1318 ``remote`` is a peer instance.
1315 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1319 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1316 default) means to pull everything from the remote.
1320 default) means to pull everything from the remote.
1317 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1321 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1318 default, all remote bookmarks are pulled.
1322 default, all remote bookmarks are pulled.
1319 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1323 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1320 initialization.
1324 initialization.
1321 ``streamclonerequested`` is a boolean indicating whether a "streaming
1325 ``streamclonerequested`` is a boolean indicating whether a "streaming
1322 clone" is requested. A "streaming clone" is essentially a raw file copy
1326 clone" is requested. A "streaming clone" is essentially a raw file copy
1323 of revlogs from the server. This only works when the local repository is
1327 of revlogs from the server. This only works when the local repository is
1324 empty. The default value of ``None`` means to respect the server
1328 empty. The default value of ``None`` means to respect the server
1325 configuration for preferring stream clones.
1329 configuration for preferring stream clones.
1326
1330
1327 Returns the ``pulloperation`` created for this pull.
1331 Returns the ``pulloperation`` created for this pull.
1328 """
1332 """
1329 if opargs is None:
1333 if opargs is None:
1330 opargs = {}
1334 opargs = {}
1331 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1335 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1332 streamclonerequested=streamclonerequested, **opargs)
1336 streamclonerequested=streamclonerequested, **opargs)
1333
1337
1334 peerlocal = pullop.remote.local()
1338 peerlocal = pullop.remote.local()
1335 if peerlocal:
1339 if peerlocal:
1336 missing = set(peerlocal.requirements) - pullop.repo.supported
1340 missing = set(peerlocal.requirements) - pullop.repo.supported
1337 if missing:
1341 if missing:
1338 msg = _("required features are not"
1342 msg = _("required features are not"
1339 " supported in the destination:"
1343 " supported in the destination:"
1340 " %s") % (', '.join(sorted(missing)))
1344 " %s") % (', '.join(sorted(missing)))
1341 raise error.Abort(msg)
1345 raise error.Abort(msg)
1342
1346
1343 wlock = lock = None
1347 wlock = lock = None
1344 try:
1348 try:
1345 wlock = pullop.repo.wlock()
1349 wlock = pullop.repo.wlock()
1346 lock = pullop.repo.lock()
1350 lock = pullop.repo.lock()
1347 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1351 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1348 # This should ideally be in _pullbundle2(). However, it needs to run
1352 # This should ideally be in _pullbundle2(). However, it needs to run
1349 # before discovery to avoid extra work.
1353 # before discovery to avoid extra work.
1350 _maybeapplyclonebundle(pullop)
1354 _maybeapplyclonebundle(pullop)
1351 streamclone.maybeperformlegacystreamclone(pullop)
1355 streamclone.maybeperformlegacystreamclone(pullop)
1352 _pulldiscovery(pullop)
1356 _pulldiscovery(pullop)
1353 if pullop.canusebundle2:
1357 if pullop.canusebundle2:
1354 _pullbundle2(pullop)
1358 _pullbundle2(pullop)
1355 _pullchangeset(pullop)
1359 _pullchangeset(pullop)
1356 _pullphase(pullop)
1360 _pullphase(pullop)
1357 _pullbookmarks(pullop)
1361 _pullbookmarks(pullop)
1358 _pullobsolete(pullop)
1362 _pullobsolete(pullop)
1359 pullop.trmanager.close()
1363 pullop.trmanager.close()
1360 finally:
1364 finally:
1361 lockmod.release(pullop.trmanager, lock, wlock)
1365 lockmod.release(pullop.trmanager, lock, wlock)
1362
1366
1363 # storing remotenames
1367 # storing remotenames
1364 if repo.ui.configbool('experimental', 'remotenames'):
1368 if repo.ui.configbool('experimental', 'remotenames'):
1365 remotenames.pullremotenames(repo, remote)
1369 remotenames.pullremotenames(repo, remote)
1366
1370
1367 return pullop
1371 return pullop
1368
1372
1369 # list of steps to perform discovery before pull
1373 # list of steps to perform discovery before pull
1370 pulldiscoveryorder = []
1374 pulldiscoveryorder = []
1371
1375
1372 # Mapping between step name and function
1376 # Mapping between step name and function
1373 #
1377 #
1374 # This exists to help extensions wrap steps if necessary
1378 # This exists to help extensions wrap steps if necessary
1375 pulldiscoverymapping = {}
1379 pulldiscoverymapping = {}
1376
1380
1377 def pulldiscovery(stepname):
1381 def pulldiscovery(stepname):
1378 """decorator for function performing discovery before pull
1382 """decorator for function performing discovery before pull
1379
1383
1380 The function is added to the step -> function mapping and appended to the
1384 The function is added to the step -> function mapping and appended to the
1381 list of steps. Beware that decorated function will be added in order (this
1385 list of steps. Beware that decorated function will be added in order (this
1382 may matter).
1386 may matter).
1383
1387
1384 You can only use this decorator for a new step, if you want to wrap a step
1388 You can only use this decorator for a new step, if you want to wrap a step
1385 from an extension, change the pulldiscovery dictionary directly."""
1389 from an extension, change the pulldiscovery dictionary directly."""
1386 def dec(func):
1390 def dec(func):
1387 assert stepname not in pulldiscoverymapping
1391 assert stepname not in pulldiscoverymapping
1388 pulldiscoverymapping[stepname] = func
1392 pulldiscoverymapping[stepname] = func
1389 pulldiscoveryorder.append(stepname)
1393 pulldiscoveryorder.append(stepname)
1390 return func
1394 return func
1391 return dec
1395 return dec
1392
1396
1393 def _pulldiscovery(pullop):
1397 def _pulldiscovery(pullop):
1394 """Run all discovery steps"""
1398 """Run all discovery steps"""
1395 for stepname in pulldiscoveryorder:
1399 for stepname in pulldiscoveryorder:
1396 step = pulldiscoverymapping[stepname]
1400 step = pulldiscoverymapping[stepname]
1397 step(pullop)
1401 step(pullop)
1398
1402
1399 @pulldiscovery('b1:bookmarks')
1403 @pulldiscovery('b1:bookmarks')
1400 def _pullbookmarkbundle1(pullop):
1404 def _pullbookmarkbundle1(pullop):
1401 """fetch bookmark data in bundle1 case
1405 """fetch bookmark data in bundle1 case
1402
1406
1403 If not using bundle2, we have to fetch bookmarks before changeset
1407 If not using bundle2, we have to fetch bookmarks before changeset
1404 discovery to reduce the chance and impact of race conditions."""
1408 discovery to reduce the chance and impact of race conditions."""
1405 if pullop.remotebookmarks is not None:
1409 if pullop.remotebookmarks is not None:
1406 return
1410 return
1407 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1411 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1408 # all known bundle2 servers now support listkeys, but lets be nice with
1412 # all known bundle2 servers now support listkeys, but lets be nice with
1409 # new implementation.
1413 # new implementation.
1410 return
1414 return
1411 books = pullop.remote.listkeys('bookmarks')
1415 books = pullop.remote.listkeys('bookmarks')
1412 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1416 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1413
1417
1414
1418
1415 @pulldiscovery('changegroup')
1419 @pulldiscovery('changegroup')
1416 def _pulldiscoverychangegroup(pullop):
1420 def _pulldiscoverychangegroup(pullop):
1417 """discovery phase for the pull
1421 """discovery phase for the pull
1418
1422
1419 Current handle changeset discovery only, will change handle all discovery
1423 Current handle changeset discovery only, will change handle all discovery
1420 at some point."""
1424 at some point."""
1421 tmp = discovery.findcommonincoming(pullop.repo,
1425 tmp = discovery.findcommonincoming(pullop.repo,
1422 pullop.remote,
1426 pullop.remote,
1423 heads=pullop.heads,
1427 heads=pullop.heads,
1424 force=pullop.force)
1428 force=pullop.force)
1425 common, fetch, rheads = tmp
1429 common, fetch, rheads = tmp
1426 nm = pullop.repo.unfiltered().changelog.nodemap
1430 nm = pullop.repo.unfiltered().changelog.nodemap
1427 if fetch and rheads:
1431 if fetch and rheads:
1428 # If a remote heads is filtered locally, put in back in common.
1432 # If a remote heads is filtered locally, put in back in common.
1429 #
1433 #
1430 # This is a hackish solution to catch most of "common but locally
1434 # This is a hackish solution to catch most of "common but locally
1431 # hidden situation". We do not performs discovery on unfiltered
1435 # hidden situation". We do not performs discovery on unfiltered
1432 # repository because it end up doing a pathological amount of round
1436 # repository because it end up doing a pathological amount of round
1433 # trip for w huge amount of changeset we do not care about.
1437 # trip for w huge amount of changeset we do not care about.
1434 #
1438 #
1435 # If a set of such "common but filtered" changeset exist on the server
1439 # If a set of such "common but filtered" changeset exist on the server
1436 # but are not including a remote heads, we'll not be able to detect it,
1440 # but are not including a remote heads, we'll not be able to detect it,
1437 scommon = set(common)
1441 scommon = set(common)
1438 for n in rheads:
1442 for n in rheads:
1439 if n in nm:
1443 if n in nm:
1440 if n not in scommon:
1444 if n not in scommon:
1441 common.append(n)
1445 common.append(n)
1442 if set(rheads).issubset(set(common)):
1446 if set(rheads).issubset(set(common)):
1443 fetch = []
1447 fetch = []
1444 pullop.common = common
1448 pullop.common = common
1445 pullop.fetch = fetch
1449 pullop.fetch = fetch
1446 pullop.rheads = rheads
1450 pullop.rheads = rheads
1447
1451
1448 def _pullbundle2(pullop):
1452 def _pullbundle2(pullop):
1449 """pull data using bundle2
1453 """pull data using bundle2
1450
1454
1451 For now, the only supported data are changegroup."""
1455 For now, the only supported data are changegroup."""
1452 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1456 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1453
1457
1454 # At the moment we don't do stream clones over bundle2. If that is
1458 # At the moment we don't do stream clones over bundle2. If that is
1455 # implemented then here's where the check for that will go.
1459 # implemented then here's where the check for that will go.
1456 streaming = False
1460 streaming = False
1457
1461
1458 # pulling changegroup
1462 # pulling changegroup
1459 pullop.stepsdone.add('changegroup')
1463 pullop.stepsdone.add('changegroup')
1460
1464
1461 kwargs['common'] = pullop.common
1465 kwargs['common'] = pullop.common
1462 kwargs['heads'] = pullop.heads or pullop.rheads
1466 kwargs['heads'] = pullop.heads or pullop.rheads
1463 kwargs['cg'] = pullop.fetch
1467 kwargs['cg'] = pullop.fetch
1464
1468
1465 ui = pullop.repo.ui
1469 ui = pullop.repo.ui
1466 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
1470 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
1467 hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
1471 hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
1468 if (not legacyphase and hasbinaryphase):
1472 if (not legacyphase and hasbinaryphase):
1469 kwargs['phases'] = True
1473 kwargs['phases'] = True
1470 pullop.stepsdone.add('phases')
1474 pullop.stepsdone.add('phases')
1471
1475
1472 bookmarksrequested = False
1476 bookmarksrequested = False
1473 legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
1477 legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
1474 hasbinarybook = 'bookmarks' in pullop.remotebundle2caps
1478 hasbinarybook = 'bookmarks' in pullop.remotebundle2caps
1475
1479
1476 if pullop.remotebookmarks is not None:
1480 if pullop.remotebookmarks is not None:
1477 pullop.stepsdone.add('request-bookmarks')
1481 pullop.stepsdone.add('request-bookmarks')
1478
1482
1479 if ('request-bookmarks' not in pullop.stepsdone
1483 if ('request-bookmarks' not in pullop.stepsdone
1480 and pullop.remotebookmarks is None
1484 and pullop.remotebookmarks is None
1481 and not legacybookmark and hasbinarybook):
1485 and not legacybookmark and hasbinarybook):
1482 kwargs['bookmarks'] = True
1486 kwargs['bookmarks'] = True
1483 bookmarksrequested = True
1487 bookmarksrequested = True
1484
1488
1485 if 'listkeys' in pullop.remotebundle2caps:
1489 if 'listkeys' in pullop.remotebundle2caps:
1486 if 'phases' not in pullop.stepsdone:
1490 if 'phases' not in pullop.stepsdone:
1487 kwargs['listkeys'] = ['phases']
1491 kwargs['listkeys'] = ['phases']
1488 if 'request-bookmarks' not in pullop.stepsdone:
1492 if 'request-bookmarks' not in pullop.stepsdone:
1489 # make sure to always includes bookmark data when migrating
1493 # make sure to always includes bookmark data when migrating
1490 # `hg incoming --bundle` to using this function.
1494 # `hg incoming --bundle` to using this function.
1491 pullop.stepsdone.add('request-bookmarks')
1495 pullop.stepsdone.add('request-bookmarks')
1492 kwargs.setdefault('listkeys', []).append('bookmarks')
1496 kwargs.setdefault('listkeys', []).append('bookmarks')
1493
1497
1494 # If this is a full pull / clone and the server supports the clone bundles
1498 # If this is a full pull / clone and the server supports the clone bundles
1495 # feature, tell the server whether we attempted a clone bundle. The
1499 # feature, tell the server whether we attempted a clone bundle. The
1496 # presence of this flag indicates the client supports clone bundles. This
1500 # presence of this flag indicates the client supports clone bundles. This
1497 # will enable the server to treat clients that support clone bundles
1501 # will enable the server to treat clients that support clone bundles
1498 # differently from those that don't.
1502 # differently from those that don't.
1499 if (pullop.remote.capable('clonebundles')
1503 if (pullop.remote.capable('clonebundles')
1500 and pullop.heads is None and list(pullop.common) == [nullid]):
1504 and pullop.heads is None and list(pullop.common) == [nullid]):
1501 kwargs['cbattempted'] = pullop.clonebundleattempted
1505 kwargs['cbattempted'] = pullop.clonebundleattempted
1502
1506
1503 if streaming:
1507 if streaming:
1504 pullop.repo.ui.status(_('streaming all changes\n'))
1508 pullop.repo.ui.status(_('streaming all changes\n'))
1505 elif not pullop.fetch:
1509 elif not pullop.fetch:
1506 pullop.repo.ui.status(_("no changes found\n"))
1510 pullop.repo.ui.status(_("no changes found\n"))
1507 pullop.cgresult = 0
1511 pullop.cgresult = 0
1508 else:
1512 else:
1509 if pullop.heads is None and list(pullop.common) == [nullid]:
1513 if pullop.heads is None and list(pullop.common) == [nullid]:
1510 pullop.repo.ui.status(_("requesting all changes\n"))
1514 pullop.repo.ui.status(_("requesting all changes\n"))
1511 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1515 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1512 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1516 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1513 if obsolete.commonversion(remoteversions) is not None:
1517 if obsolete.commonversion(remoteversions) is not None:
1514 kwargs['obsmarkers'] = True
1518 kwargs['obsmarkers'] = True
1515 pullop.stepsdone.add('obsmarkers')
1519 pullop.stepsdone.add('obsmarkers')
1516 _pullbundle2extraprepare(pullop, kwargs)
1520 _pullbundle2extraprepare(pullop, kwargs)
1517 bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
1521 bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
1518 try:
1522 try:
1519 op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction)
1523 op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction)
1520 op.modes['bookmarks'] = 'records'
1524 op.modes['bookmarks'] = 'records'
1521 bundle2.processbundle(pullop.repo, bundle, op=op)
1525 bundle2.processbundle(pullop.repo, bundle, op=op)
1522 except bundle2.AbortFromPart as exc:
1526 except bundle2.AbortFromPart as exc:
1523 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1527 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1524 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1528 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1525 except error.BundleValueError as exc:
1529 except error.BundleValueError as exc:
1526 raise error.Abort(_('missing support for %s') % exc)
1530 raise error.Abort(_('missing support for %s') % exc)
1527
1531
1528 if pullop.fetch:
1532 if pullop.fetch:
1529 pullop.cgresult = bundle2.combinechangegroupresults(op)
1533 pullop.cgresult = bundle2.combinechangegroupresults(op)
1530
1534
1531 # processing phases change
1535 # processing phases change
1532 for namespace, value in op.records['listkeys']:
1536 for namespace, value in op.records['listkeys']:
1533 if namespace == 'phases':
1537 if namespace == 'phases':
1534 _pullapplyphases(pullop, value)
1538 _pullapplyphases(pullop, value)
1535
1539
1536 # processing bookmark update
1540 # processing bookmark update
1537 if bookmarksrequested:
1541 if bookmarksrequested:
1538 books = {}
1542 books = {}
1539 for record in op.records['bookmarks']:
1543 for record in op.records['bookmarks']:
1540 books[record['bookmark']] = record["node"]
1544 books[record['bookmark']] = record["node"]
1541 pullop.remotebookmarks = books
1545 pullop.remotebookmarks = books
1542 else:
1546 else:
1543 for namespace, value in op.records['listkeys']:
1547 for namespace, value in op.records['listkeys']:
1544 if namespace == 'bookmarks':
1548 if namespace == 'bookmarks':
1545 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1549 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1546
1550
1547 # bookmark data were either already there or pulled in the bundle
1551 # bookmark data were either already there or pulled in the bundle
1548 if pullop.remotebookmarks is not None:
1552 if pullop.remotebookmarks is not None:
1549 _pullbookmarks(pullop)
1553 _pullbookmarks(pullop)
1550
1554
1551 def _pullbundle2extraprepare(pullop, kwargs):
1555 def _pullbundle2extraprepare(pullop, kwargs):
1552 """hook function so that extensions can extend the getbundle call"""
1556 """hook function so that extensions can extend the getbundle call"""
1553
1557
1554 def _pullchangeset(pullop):
1558 def _pullchangeset(pullop):
1555 """pull changeset from unbundle into the local repo"""
1559 """pull changeset from unbundle into the local repo"""
1556 # We delay the open of the transaction as late as possible so we
1560 # We delay the open of the transaction as late as possible so we
1557 # don't open transaction for nothing or you break future useful
1561 # don't open transaction for nothing or you break future useful
1558 # rollback call
1562 # rollback call
1559 if 'changegroup' in pullop.stepsdone:
1563 if 'changegroup' in pullop.stepsdone:
1560 return
1564 return
1561 pullop.stepsdone.add('changegroup')
1565 pullop.stepsdone.add('changegroup')
1562 if not pullop.fetch:
1566 if not pullop.fetch:
1563 pullop.repo.ui.status(_("no changes found\n"))
1567 pullop.repo.ui.status(_("no changes found\n"))
1564 pullop.cgresult = 0
1568 pullop.cgresult = 0
1565 return
1569 return
1566 tr = pullop.gettransaction()
1570 tr = pullop.gettransaction()
1567 if pullop.heads is None and list(pullop.common) == [nullid]:
1571 if pullop.heads is None and list(pullop.common) == [nullid]:
1568 pullop.repo.ui.status(_("requesting all changes\n"))
1572 pullop.repo.ui.status(_("requesting all changes\n"))
1569 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1573 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1570 # issue1320, avoid a race if remote changed after discovery
1574 # issue1320, avoid a race if remote changed after discovery
1571 pullop.heads = pullop.rheads
1575 pullop.heads = pullop.rheads
1572
1576
1573 if pullop.remote.capable('getbundle'):
1577 if pullop.remote.capable('getbundle'):
1574 # TODO: get bundlecaps from remote
1578 # TODO: get bundlecaps from remote
1575 cg = pullop.remote.getbundle('pull', common=pullop.common,
1579 cg = pullop.remote.getbundle('pull', common=pullop.common,
1576 heads=pullop.heads or pullop.rheads)
1580 heads=pullop.heads or pullop.rheads)
1577 elif pullop.heads is None:
1581 elif pullop.heads is None:
1578 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1582 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1579 elif not pullop.remote.capable('changegroupsubset'):
1583 elif not pullop.remote.capable('changegroupsubset'):
1580 raise error.Abort(_("partial pull cannot be done because "
1584 raise error.Abort(_("partial pull cannot be done because "
1581 "other repository doesn't support "
1585 "other repository doesn't support "
1582 "changegroupsubset."))
1586 "changegroupsubset."))
1583 else:
1587 else:
1584 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1588 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1585 bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
1589 bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
1586 pullop.remote.url())
1590 pullop.remote.url())
1587 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1591 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1588
1592
1589 def _pullphase(pullop):
1593 def _pullphase(pullop):
1590 # Get remote phases data from remote
1594 # Get remote phases data from remote
1591 if 'phases' in pullop.stepsdone:
1595 if 'phases' in pullop.stepsdone:
1592 return
1596 return
1593 remotephases = pullop.remote.listkeys('phases')
1597 remotephases = pullop.remote.listkeys('phases')
1594 _pullapplyphases(pullop, remotephases)
1598 _pullapplyphases(pullop, remotephases)
1595
1599
1596 def _pullapplyphases(pullop, remotephases):
1600 def _pullapplyphases(pullop, remotephases):
1597 """apply phase movement from observed remote state"""
1601 """apply phase movement from observed remote state"""
1598 if 'phases' in pullop.stepsdone:
1602 if 'phases' in pullop.stepsdone:
1599 return
1603 return
1600 pullop.stepsdone.add('phases')
1604 pullop.stepsdone.add('phases')
1601 publishing = bool(remotephases.get('publishing', False))
1605 publishing = bool(remotephases.get('publishing', False))
1602 if remotephases and not publishing:
1606 if remotephases and not publishing:
1603 # remote is new and non-publishing
1607 # remote is new and non-publishing
1604 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1608 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1605 pullop.pulledsubset,
1609 pullop.pulledsubset,
1606 remotephases)
1610 remotephases)
1607 dheads = pullop.pulledsubset
1611 dheads = pullop.pulledsubset
1608 else:
1612 else:
1609 # Remote is old or publishing all common changesets
1613 # Remote is old or publishing all common changesets
1610 # should be seen as public
1614 # should be seen as public
1611 pheads = pullop.pulledsubset
1615 pheads = pullop.pulledsubset
1612 dheads = []
1616 dheads = []
1613 unfi = pullop.repo.unfiltered()
1617 unfi = pullop.repo.unfiltered()
1614 phase = unfi._phasecache.phase
1618 phase = unfi._phasecache.phase
1615 rev = unfi.changelog.nodemap.get
1619 rev = unfi.changelog.nodemap.get
1616 public = phases.public
1620 public = phases.public
1617 draft = phases.draft
1621 draft = phases.draft
1618
1622
1619 # exclude changesets already public locally and update the others
1623 # exclude changesets already public locally and update the others
1620 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1624 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1621 if pheads:
1625 if pheads:
1622 tr = pullop.gettransaction()
1626 tr = pullop.gettransaction()
1623 phases.advanceboundary(pullop.repo, tr, public, pheads)
1627 phases.advanceboundary(pullop.repo, tr, public, pheads)
1624
1628
1625 # exclude changesets already draft locally and update the others
1629 # exclude changesets already draft locally and update the others
1626 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1630 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1627 if dheads:
1631 if dheads:
1628 tr = pullop.gettransaction()
1632 tr = pullop.gettransaction()
1629 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1633 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1630
1634
1631 def _pullbookmarks(pullop):
1635 def _pullbookmarks(pullop):
1632 """process the remote bookmark information to update the local one"""
1636 """process the remote bookmark information to update the local one"""
1633 if 'bookmarks' in pullop.stepsdone:
1637 if 'bookmarks' in pullop.stepsdone:
1634 return
1638 return
1635 pullop.stepsdone.add('bookmarks')
1639 pullop.stepsdone.add('bookmarks')
1636 repo = pullop.repo
1640 repo = pullop.repo
1637 remotebookmarks = pullop.remotebookmarks
1641 remotebookmarks = pullop.remotebookmarks
1638 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1642 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1639 pullop.remote.url(),
1643 pullop.remote.url(),
1640 pullop.gettransaction,
1644 pullop.gettransaction,
1641 explicit=pullop.explicitbookmarks)
1645 explicit=pullop.explicitbookmarks)
1642
1646
1643 def _pullobsolete(pullop):
1647 def _pullobsolete(pullop):
1644 """utility function to pull obsolete markers from a remote
1648 """utility function to pull obsolete markers from a remote
1645
1649
1646 The `gettransaction` is function that return the pull transaction, creating
1650 The `gettransaction` is function that return the pull transaction, creating
1647 one if necessary. We return the transaction to inform the calling code that
1651 one if necessary. We return the transaction to inform the calling code that
1648 a new transaction have been created (when applicable).
1652 a new transaction have been created (when applicable).
1649
1653
1650 Exists mostly to allow overriding for experimentation purpose"""
1654 Exists mostly to allow overriding for experimentation purpose"""
1651 if 'obsmarkers' in pullop.stepsdone:
1655 if 'obsmarkers' in pullop.stepsdone:
1652 return
1656 return
1653 pullop.stepsdone.add('obsmarkers')
1657 pullop.stepsdone.add('obsmarkers')
1654 tr = None
1658 tr = None
1655 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1659 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1656 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1660 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1657 remoteobs = pullop.remote.listkeys('obsolete')
1661 remoteobs = pullop.remote.listkeys('obsolete')
1658 if 'dump0' in remoteobs:
1662 if 'dump0' in remoteobs:
1659 tr = pullop.gettransaction()
1663 tr = pullop.gettransaction()
1660 markers = []
1664 markers = []
1661 for key in sorted(remoteobs, reverse=True):
1665 for key in sorted(remoteobs, reverse=True):
1662 if key.startswith('dump'):
1666 if key.startswith('dump'):
1663 data = util.b85decode(remoteobs[key])
1667 data = util.b85decode(remoteobs[key])
1664 version, newmarks = obsolete._readmarkers(data)
1668 version, newmarks = obsolete._readmarkers(data)
1665 markers += newmarks
1669 markers += newmarks
1666 if markers:
1670 if markers:
1667 pullop.repo.obsstore.add(tr, markers)
1671 pullop.repo.obsstore.add(tr, markers)
1668 pullop.repo.invalidatevolatilesets()
1672 pullop.repo.invalidatevolatilesets()
1669 return tr
1673 return tr
1670
1674
1671 def caps20to10(repo):
1675 def caps20to10(repo):
1672 """return a set with appropriate options to use bundle20 during getbundle"""
1676 """return a set with appropriate options to use bundle20 during getbundle"""
1673 caps = {'HG20'}
1677 caps = {'HG20'}
1674 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1678 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1675 caps.add('bundle2=' + urlreq.quote(capsblob))
1679 caps.add('bundle2=' + urlreq.quote(capsblob))
1676 return caps
1680 return caps
1677
1681
1678 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1682 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1679 getbundle2partsorder = []
1683 getbundle2partsorder = []
1680
1684
1681 # Mapping between step name and function
1685 # Mapping between step name and function
1682 #
1686 #
1683 # This exists to help extensions wrap steps if necessary
1687 # This exists to help extensions wrap steps if necessary
1684 getbundle2partsmapping = {}
1688 getbundle2partsmapping = {}
1685
1689
1686 def getbundle2partsgenerator(stepname, idx=None):
1690 def getbundle2partsgenerator(stepname, idx=None):
1687 """decorator for function generating bundle2 part for getbundle
1691 """decorator for function generating bundle2 part for getbundle
1688
1692
1689 The function is added to the step -> function mapping and appended to the
1693 The function is added to the step -> function mapping and appended to the
1690 list of steps. Beware that decorated functions will be added in order
1694 list of steps. Beware that decorated functions will be added in order
1691 (this may matter).
1695 (this may matter).
1692
1696
1693 You can only use this decorator for new steps, if you want to wrap a step
1697 You can only use this decorator for new steps, if you want to wrap a step
1694 from an extension, attack the getbundle2partsmapping dictionary directly."""
1698 from an extension, attack the getbundle2partsmapping dictionary directly."""
1695 def dec(func):
1699 def dec(func):
1696 assert stepname not in getbundle2partsmapping
1700 assert stepname not in getbundle2partsmapping
1697 getbundle2partsmapping[stepname] = func
1701 getbundle2partsmapping[stepname] = func
1698 if idx is None:
1702 if idx is None:
1699 getbundle2partsorder.append(stepname)
1703 getbundle2partsorder.append(stepname)
1700 else:
1704 else:
1701 getbundle2partsorder.insert(idx, stepname)
1705 getbundle2partsorder.insert(idx, stepname)
1702 return func
1706 return func
1703 return dec
1707 return dec
1704
1708
1705 def bundle2requested(bundlecaps):
1709 def bundle2requested(bundlecaps):
1706 if bundlecaps is not None:
1710 if bundlecaps is not None:
1707 return any(cap.startswith('HG2') for cap in bundlecaps)
1711 return any(cap.startswith('HG2') for cap in bundlecaps)
1708 return False
1712 return False
1709
1713
1710 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
1714 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
1711 **kwargs):
1715 **kwargs):
1712 """Return chunks constituting a bundle's raw data.
1716 """Return chunks constituting a bundle's raw data.
1713
1717
1714 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1718 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1715 passed.
1719 passed.
1716
1720
1717 Returns an iterator over raw chunks (of varying sizes).
1721 Returns an iterator over raw chunks (of varying sizes).
1718 """
1722 """
1719 kwargs = pycompat.byteskwargs(kwargs)
1723 kwargs = pycompat.byteskwargs(kwargs)
1720 usebundle2 = bundle2requested(bundlecaps)
1724 usebundle2 = bundle2requested(bundlecaps)
1721 # bundle10 case
1725 # bundle10 case
1722 if not usebundle2:
1726 if not usebundle2:
1723 if bundlecaps and not kwargs.get('cg', True):
1727 if bundlecaps and not kwargs.get('cg', True):
1724 raise ValueError(_('request for bundle10 must include changegroup'))
1728 raise ValueError(_('request for bundle10 must include changegroup'))
1725
1729
1726 if kwargs:
1730 if kwargs:
1727 raise ValueError(_('unsupported getbundle arguments: %s')
1731 raise ValueError(_('unsupported getbundle arguments: %s')
1728 % ', '.join(sorted(kwargs.keys())))
1732 % ', '.join(sorted(kwargs.keys())))
1729 outgoing = _computeoutgoing(repo, heads, common)
1733 outgoing = _computeoutgoing(repo, heads, common)
1730 return changegroup.makestream(repo, outgoing, '01', source,
1734 return changegroup.makestream(repo, outgoing, '01', source,
1731 bundlecaps=bundlecaps)
1735 bundlecaps=bundlecaps)
1732
1736
1733 # bundle20 case
1737 # bundle20 case
1734 b2caps = {}
1738 b2caps = {}
1735 for bcaps in bundlecaps:
1739 for bcaps in bundlecaps:
1736 if bcaps.startswith('bundle2='):
1740 if bcaps.startswith('bundle2='):
1737 blob = urlreq.unquote(bcaps[len('bundle2='):])
1741 blob = urlreq.unquote(bcaps[len('bundle2='):])
1738 b2caps.update(bundle2.decodecaps(blob))
1742 b2caps.update(bundle2.decodecaps(blob))
1739 bundler = bundle2.bundle20(repo.ui, b2caps)
1743 bundler = bundle2.bundle20(repo.ui, b2caps)
1740
1744
1741 kwargs['heads'] = heads
1745 kwargs['heads'] = heads
1742 kwargs['common'] = common
1746 kwargs['common'] = common
1743
1747
1744 for name in getbundle2partsorder:
1748 for name in getbundle2partsorder:
1745 func = getbundle2partsmapping[name]
1749 func = getbundle2partsmapping[name]
1746 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1750 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1747 **pycompat.strkwargs(kwargs))
1751 **pycompat.strkwargs(kwargs))
1748
1752
1749 return bundler.getchunks()
1753 return bundler.getchunks()
1750
1754
1751 @getbundle2partsgenerator('changegroup')
1755 @getbundle2partsgenerator('changegroup')
1752 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1756 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1753 b2caps=None, heads=None, common=None, **kwargs):
1757 b2caps=None, heads=None, common=None, **kwargs):
1754 """add a changegroup part to the requested bundle"""
1758 """add a changegroup part to the requested bundle"""
1755 cgstream = None
1759 cgstream = None
1756 if kwargs.get('cg', True):
1760 if kwargs.get('cg', True):
1757 # build changegroup bundle here.
1761 # build changegroup bundle here.
1758 version = '01'
1762 version = '01'
1759 cgversions = b2caps.get('changegroup')
1763 cgversions = b2caps.get('changegroup')
1760 if cgversions: # 3.1 and 3.2 ship with an empty value
1764 if cgversions: # 3.1 and 3.2 ship with an empty value
1761 cgversions = [v for v in cgversions
1765 cgversions = [v for v in cgversions
1762 if v in changegroup.supportedoutgoingversions(repo)]
1766 if v in changegroup.supportedoutgoingversions(repo)]
1763 if not cgversions:
1767 if not cgversions:
1764 raise ValueError(_('no common changegroup version'))
1768 raise ValueError(_('no common changegroup version'))
1765 version = max(cgversions)
1769 version = max(cgversions)
1766 outgoing = _computeoutgoing(repo, heads, common)
1770 outgoing = _computeoutgoing(repo, heads, common)
1767 if outgoing.missing:
1771 if outgoing.missing:
1768 cgstream = changegroup.makestream(repo, outgoing, version, source,
1772 cgstream = changegroup.makestream(repo, outgoing, version, source,
1769 bundlecaps=bundlecaps)
1773 bundlecaps=bundlecaps)
1770
1774
1771 if cgstream:
1775 if cgstream:
1772 part = bundler.newpart('changegroup', data=cgstream)
1776 part = bundler.newpart('changegroup', data=cgstream)
1773 if cgversions:
1777 if cgversions:
1774 part.addparam('version', version)
1778 part.addparam('version', version)
1775 part.addparam('nbchanges', '%d' % len(outgoing.missing),
1779 part.addparam('nbchanges', '%d' % len(outgoing.missing),
1776 mandatory=False)
1780 mandatory=False)
1777 if 'treemanifest' in repo.requirements:
1781 if 'treemanifest' in repo.requirements:
1778 part.addparam('treemanifest', '1')
1782 part.addparam('treemanifest', '1')
1779
1783
1780 @getbundle2partsgenerator('bookmarks')
1784 @getbundle2partsgenerator('bookmarks')
1781 def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
1785 def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
1782 b2caps=None, **kwargs):
1786 b2caps=None, **kwargs):
1783 """add a bookmark part to the requested bundle"""
1787 """add a bookmark part to the requested bundle"""
1784 if not kwargs.get('bookmarks', False):
1788 if not kwargs.get('bookmarks', False):
1785 return
1789 return
1786 if 'bookmarks' not in b2caps:
1790 if 'bookmarks' not in b2caps:
1787 raise ValueError(_('no common bookmarks exchange method'))
1791 raise ValueError(_('no common bookmarks exchange method'))
1788 books = bookmod.listbinbookmarks(repo)
1792 books = bookmod.listbinbookmarks(repo)
1789 data = bookmod.binaryencode(books)
1793 data = bookmod.binaryencode(books)
1790 if data:
1794 if data:
1791 bundler.newpart('bookmarks', data=data)
1795 bundler.newpart('bookmarks', data=data)
1792
1796
1793 @getbundle2partsgenerator('listkeys')
1797 @getbundle2partsgenerator('listkeys')
1794 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1798 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1795 b2caps=None, **kwargs):
1799 b2caps=None, **kwargs):
1796 """add parts containing listkeys namespaces to the requested bundle"""
1800 """add parts containing listkeys namespaces to the requested bundle"""
1797 listkeys = kwargs.get('listkeys', ())
1801 listkeys = kwargs.get('listkeys', ())
1798 for namespace in listkeys:
1802 for namespace in listkeys:
1799 part = bundler.newpart('listkeys')
1803 part = bundler.newpart('listkeys')
1800 part.addparam('namespace', namespace)
1804 part.addparam('namespace', namespace)
1801 keys = repo.listkeys(namespace).items()
1805 keys = repo.listkeys(namespace).items()
1802 part.data = pushkey.encodekeys(keys)
1806 part.data = pushkey.encodekeys(keys)
1803
1807
1804 @getbundle2partsgenerator('obsmarkers')
1808 @getbundle2partsgenerator('obsmarkers')
1805 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1809 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1806 b2caps=None, heads=None, **kwargs):
1810 b2caps=None, heads=None, **kwargs):
1807 """add an obsolescence markers part to the requested bundle"""
1811 """add an obsolescence markers part to the requested bundle"""
1808 if kwargs.get('obsmarkers', False):
1812 if kwargs.get('obsmarkers', False):
1809 if heads is None:
1813 if heads is None:
1810 heads = repo.heads()
1814 heads = repo.heads()
1811 subset = [c.node() for c in repo.set('::%ln', heads)]
1815 subset = [c.node() for c in repo.set('::%ln', heads)]
1812 markers = repo.obsstore.relevantmarkers(subset)
1816 markers = repo.obsstore.relevantmarkers(subset)
1813 markers = sorted(markers)
1817 markers = sorted(markers)
1814 bundle2.buildobsmarkerspart(bundler, markers)
1818 bundle2.buildobsmarkerspart(bundler, markers)
1815
1819
1816 @getbundle2partsgenerator('phases')
1820 @getbundle2partsgenerator('phases')
1817 def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
1821 def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
1818 b2caps=None, heads=None, **kwargs):
1822 b2caps=None, heads=None, **kwargs):
1819 """add phase heads part to the requested bundle"""
1823 """add phase heads part to the requested bundle"""
1820 if kwargs.get('phases', False):
1824 if kwargs.get('phases', False):
1821 if not 'heads' in b2caps.get('phases'):
1825 if not 'heads' in b2caps.get('phases'):
1822 raise ValueError(_('no common phases exchange method'))
1826 raise ValueError(_('no common phases exchange method'))
1823 if heads is None:
1827 if heads is None:
1824 heads = repo.heads()
1828 heads = repo.heads()
1825
1829
1826 headsbyphase = collections.defaultdict(set)
1830 headsbyphase = collections.defaultdict(set)
1827 if repo.publishing():
1831 if repo.publishing():
1828 headsbyphase[phases.public] = heads
1832 headsbyphase[phases.public] = heads
1829 else:
1833 else:
1830 # find the appropriate heads to move
1834 # find the appropriate heads to move
1831
1835
1832 phase = repo._phasecache.phase
1836 phase = repo._phasecache.phase
1833 node = repo.changelog.node
1837 node = repo.changelog.node
1834 rev = repo.changelog.rev
1838 rev = repo.changelog.rev
1835 for h in heads:
1839 for h in heads:
1836 headsbyphase[phase(repo, rev(h))].add(h)
1840 headsbyphase[phase(repo, rev(h))].add(h)
1837 seenphases = list(headsbyphase.keys())
1841 seenphases = list(headsbyphase.keys())
1838
1842
1839 # We do not handle anything but public and draft phase for now)
1843 # We do not handle anything but public and draft phase for now)
1840 if seenphases:
1844 if seenphases:
1841 assert max(seenphases) <= phases.draft
1845 assert max(seenphases) <= phases.draft
1842
1846
1843 # if client is pulling non-public changesets, we need to find
1847 # if client is pulling non-public changesets, we need to find
1844 # intermediate public heads.
1848 # intermediate public heads.
1845 draftheads = headsbyphase.get(phases.draft, set())
1849 draftheads = headsbyphase.get(phases.draft, set())
1846 if draftheads:
1850 if draftheads:
1847 publicheads = headsbyphase.get(phases.public, set())
1851 publicheads = headsbyphase.get(phases.public, set())
1848
1852
1849 revset = 'heads(only(%ln, %ln) and public())'
1853 revset = 'heads(only(%ln, %ln) and public())'
1850 extraheads = repo.revs(revset, draftheads, publicheads)
1854 extraheads = repo.revs(revset, draftheads, publicheads)
1851 for r in extraheads:
1855 for r in extraheads:
1852 headsbyphase[phases.public].add(node(r))
1856 headsbyphase[phases.public].add(node(r))
1853
1857
1854 # transform data in a format used by the encoding function
1858 # transform data in a format used by the encoding function
1855 phasemapping = []
1859 phasemapping = []
1856 for phase in phases.allphases:
1860 for phase in phases.allphases:
1857 phasemapping.append(sorted(headsbyphase[phase]))
1861 phasemapping.append(sorted(headsbyphase[phase]))
1858
1862
1859 # generate the actual part
1863 # generate the actual part
1860 phasedata = phases.binaryencode(phasemapping)
1864 phasedata = phases.binaryencode(phasemapping)
1861 bundler.newpart('phase-heads', data=phasedata)
1865 bundler.newpart('phase-heads', data=phasedata)
1862
1866
1863 @getbundle2partsgenerator('hgtagsfnodes')
1867 @getbundle2partsgenerator('hgtagsfnodes')
1864 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1868 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1865 b2caps=None, heads=None, common=None,
1869 b2caps=None, heads=None, common=None,
1866 **kwargs):
1870 **kwargs):
1867 """Transfer the .hgtags filenodes mapping.
1871 """Transfer the .hgtags filenodes mapping.
1868
1872
1869 Only values for heads in this bundle will be transferred.
1873 Only values for heads in this bundle will be transferred.
1870
1874
1871 The part data consists of pairs of 20 byte changeset node and .hgtags
1875 The part data consists of pairs of 20 byte changeset node and .hgtags
1872 filenodes raw values.
1876 filenodes raw values.
1873 """
1877 """
1874 # Don't send unless:
1878 # Don't send unless:
1875 # - changeset are being exchanged,
1879 # - changeset are being exchanged,
1876 # - the client supports it.
1880 # - the client supports it.
1877 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1881 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1878 return
1882 return
1879
1883
1880 outgoing = _computeoutgoing(repo, heads, common)
1884 outgoing = _computeoutgoing(repo, heads, common)
1881 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
1885 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
1882
1886
1883 def check_heads(repo, their_heads, context):
1887 def check_heads(repo, their_heads, context):
1884 """check if the heads of a repo have been modified
1888 """check if the heads of a repo have been modified
1885
1889
1886 Used by peer for unbundling.
1890 Used by peer for unbundling.
1887 """
1891 """
1888 heads = repo.heads()
1892 heads = repo.heads()
1889 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
1893 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
1890 if not (their_heads == ['force'] or their_heads == heads or
1894 if not (their_heads == ['force'] or their_heads == heads or
1891 their_heads == ['hashed', heads_hash]):
1895 their_heads == ['hashed', heads_hash]):
1892 # someone else committed/pushed/unbundled while we
1896 # someone else committed/pushed/unbundled while we
1893 # were transferring data
1897 # were transferring data
1894 raise error.PushRaced('repository changed while %s - '
1898 raise error.PushRaced('repository changed while %s - '
1895 'please try again' % context)
1899 'please try again' % context)
1896
1900
1897 def unbundle(repo, cg, heads, source, url):
1901 def unbundle(repo, cg, heads, source, url):
1898 """Apply a bundle to a repo.
1902 """Apply a bundle to a repo.
1899
1903
1900 this function makes sure the repo is locked during the application and have
1904 this function makes sure the repo is locked during the application and have
1901 mechanism to check that no push race occurred between the creation of the
1905 mechanism to check that no push race occurred between the creation of the
1902 bundle and its application.
1906 bundle and its application.
1903
1907
1904 If the push was raced as PushRaced exception is raised."""
1908 If the push was raced as PushRaced exception is raised."""
1905 r = 0
1909 r = 0
1906 # need a transaction when processing a bundle2 stream
1910 # need a transaction when processing a bundle2 stream
1907 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
1911 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
1908 lockandtr = [None, None, None]
1912 lockandtr = [None, None, None]
1909 recordout = None
1913 recordout = None
1910 # quick fix for output mismatch with bundle2 in 3.4
1914 # quick fix for output mismatch with bundle2 in 3.4
1911 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
1915 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
1912 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1916 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1913 captureoutput = True
1917 captureoutput = True
1914 try:
1918 try:
1915 # note: outside bundle1, 'heads' is expected to be empty and this
1919 # note: outside bundle1, 'heads' is expected to be empty and this
1916 # 'check_heads' call wil be a no-op
1920 # 'check_heads' call wil be a no-op
1917 check_heads(repo, heads, 'uploading changes')
1921 check_heads(repo, heads, 'uploading changes')
1918 # push can proceed
1922 # push can proceed
1919 if not isinstance(cg, bundle2.unbundle20):
1923 if not isinstance(cg, bundle2.unbundle20):
1920 # legacy case: bundle1 (changegroup 01)
1924 # legacy case: bundle1 (changegroup 01)
1921 txnname = "\n".join([source, util.hidepassword(url)])
1925 txnname = "\n".join([source, util.hidepassword(url)])
1922 with repo.lock(), repo.transaction(txnname) as tr:
1926 with repo.lock(), repo.transaction(txnname) as tr:
1923 op = bundle2.applybundle(repo, cg, tr, source, url)
1927 op = bundle2.applybundle(repo, cg, tr, source, url)
1924 r = bundle2.combinechangegroupresults(op)
1928 r = bundle2.combinechangegroupresults(op)
1925 else:
1929 else:
1926 r = None
1930 r = None
1927 try:
1931 try:
1928 def gettransaction():
1932 def gettransaction():
1929 if not lockandtr[2]:
1933 if not lockandtr[2]:
1930 lockandtr[0] = repo.wlock()
1934 lockandtr[0] = repo.wlock()
1931 lockandtr[1] = repo.lock()
1935 lockandtr[1] = repo.lock()
1932 lockandtr[2] = repo.transaction(source)
1936 lockandtr[2] = repo.transaction(source)
1933 lockandtr[2].hookargs['source'] = source
1937 lockandtr[2].hookargs['source'] = source
1934 lockandtr[2].hookargs['url'] = url
1938 lockandtr[2].hookargs['url'] = url
1935 lockandtr[2].hookargs['bundle2'] = '1'
1939 lockandtr[2].hookargs['bundle2'] = '1'
1936 return lockandtr[2]
1940 return lockandtr[2]
1937
1941
1938 # Do greedy locking by default until we're satisfied with lazy
1942 # Do greedy locking by default until we're satisfied with lazy
1939 # locking.
1943 # locking.
1940 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
1944 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
1941 gettransaction()
1945 gettransaction()
1942
1946
1943 op = bundle2.bundleoperation(repo, gettransaction,
1947 op = bundle2.bundleoperation(repo, gettransaction,
1944 captureoutput=captureoutput)
1948 captureoutput=captureoutput)
1945 try:
1949 try:
1946 op = bundle2.processbundle(repo, cg, op=op)
1950 op = bundle2.processbundle(repo, cg, op=op)
1947 finally:
1951 finally:
1948 r = op.reply
1952 r = op.reply
1949 if captureoutput and r is not None:
1953 if captureoutput and r is not None:
1950 repo.ui.pushbuffer(error=True, subproc=True)
1954 repo.ui.pushbuffer(error=True, subproc=True)
1951 def recordout(output):
1955 def recordout(output):
1952 r.newpart('output', data=output, mandatory=False)
1956 r.newpart('output', data=output, mandatory=False)
1953 if lockandtr[2] is not None:
1957 if lockandtr[2] is not None:
1954 lockandtr[2].close()
1958 lockandtr[2].close()
1955 except BaseException as exc:
1959 except BaseException as exc:
1956 exc.duringunbundle2 = True
1960 exc.duringunbundle2 = True
1957 if captureoutput and r is not None:
1961 if captureoutput and r is not None:
1958 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1962 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1959 def recordout(output):
1963 def recordout(output):
1960 part = bundle2.bundlepart('output', data=output,
1964 part = bundle2.bundlepart('output', data=output,
1961 mandatory=False)
1965 mandatory=False)
1962 parts.append(part)
1966 parts.append(part)
1963 raise
1967 raise
1964 finally:
1968 finally:
1965 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
1969 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
1966 if recordout is not None:
1970 if recordout is not None:
1967 recordout(repo.ui.popbuffer())
1971 recordout(repo.ui.popbuffer())
1968 return r
1972 return r
1969
1973
1970 def _maybeapplyclonebundle(pullop):
1974 def _maybeapplyclonebundle(pullop):
1971 """Apply a clone bundle from a remote, if possible."""
1975 """Apply a clone bundle from a remote, if possible."""
1972
1976
1973 repo = pullop.repo
1977 repo = pullop.repo
1974 remote = pullop.remote
1978 remote = pullop.remote
1975
1979
1976 if not repo.ui.configbool('ui', 'clonebundles'):
1980 if not repo.ui.configbool('ui', 'clonebundles'):
1977 return
1981 return
1978
1982
1979 # Only run if local repo is empty.
1983 # Only run if local repo is empty.
1980 if len(repo):
1984 if len(repo):
1981 return
1985 return
1982
1986
1983 if pullop.heads:
1987 if pullop.heads:
1984 return
1988 return
1985
1989
1986 if not remote.capable('clonebundles'):
1990 if not remote.capable('clonebundles'):
1987 return
1991 return
1988
1992
1989 res = remote._call('clonebundles')
1993 res = remote._call('clonebundles')
1990
1994
1991 # If we call the wire protocol command, that's good enough to record the
1995 # If we call the wire protocol command, that's good enough to record the
1992 # attempt.
1996 # attempt.
1993 pullop.clonebundleattempted = True
1997 pullop.clonebundleattempted = True
1994
1998
1995 entries = parseclonebundlesmanifest(repo, res)
1999 entries = parseclonebundlesmanifest(repo, res)
1996 if not entries:
2000 if not entries:
1997 repo.ui.note(_('no clone bundles available on remote; '
2001 repo.ui.note(_('no clone bundles available on remote; '
1998 'falling back to regular clone\n'))
2002 'falling back to regular clone\n'))
1999 return
2003 return
2000
2004
2001 entries = filterclonebundleentries(
2005 entries = filterclonebundleentries(
2002 repo, entries, streamclonerequested=pullop.streamclonerequested)
2006 repo, entries, streamclonerequested=pullop.streamclonerequested)
2003
2007
2004 if not entries:
2008 if not entries:
2005 # There is a thundering herd concern here. However, if a server
2009 # There is a thundering herd concern here. However, if a server
2006 # operator doesn't advertise bundles appropriate for its clients,
2010 # operator doesn't advertise bundles appropriate for its clients,
2007 # they deserve what's coming. Furthermore, from a client's
2011 # they deserve what's coming. Furthermore, from a client's
2008 # perspective, no automatic fallback would mean not being able to
2012 # perspective, no automatic fallback would mean not being able to
2009 # clone!
2013 # clone!
2010 repo.ui.warn(_('no compatible clone bundles available on server; '
2014 repo.ui.warn(_('no compatible clone bundles available on server; '
2011 'falling back to regular clone\n'))
2015 'falling back to regular clone\n'))
2012 repo.ui.warn(_('(you may want to report this to the server '
2016 repo.ui.warn(_('(you may want to report this to the server '
2013 'operator)\n'))
2017 'operator)\n'))
2014 return
2018 return
2015
2019
2016 entries = sortclonebundleentries(repo.ui, entries)
2020 entries = sortclonebundleentries(repo.ui, entries)
2017
2021
2018 url = entries[0]['URL']
2022 url = entries[0]['URL']
2019 repo.ui.status(_('applying clone bundle from %s\n') % url)
2023 repo.ui.status(_('applying clone bundle from %s\n') % url)
2020 if trypullbundlefromurl(repo.ui, repo, url):
2024 if trypullbundlefromurl(repo.ui, repo, url):
2021 repo.ui.status(_('finished applying clone bundle\n'))
2025 repo.ui.status(_('finished applying clone bundle\n'))
2022 # Bundle failed.
2026 # Bundle failed.
2023 #
2027 #
2024 # We abort by default to avoid the thundering herd of
2028 # We abort by default to avoid the thundering herd of
2025 # clients flooding a server that was expecting expensive
2029 # clients flooding a server that was expecting expensive
2026 # clone load to be offloaded.
2030 # clone load to be offloaded.
2027 elif repo.ui.configbool('ui', 'clonebundlefallback'):
2031 elif repo.ui.configbool('ui', 'clonebundlefallback'):
2028 repo.ui.warn(_('falling back to normal clone\n'))
2032 repo.ui.warn(_('falling back to normal clone\n'))
2029 else:
2033 else:
2030 raise error.Abort(_('error applying bundle'),
2034 raise error.Abort(_('error applying bundle'),
2031 hint=_('if this error persists, consider contacting '
2035 hint=_('if this error persists, consider contacting '
2032 'the server operator or disable clone '
2036 'the server operator or disable clone '
2033 'bundles via '
2037 'bundles via '
2034 '"--config ui.clonebundles=false"'))
2038 '"--config ui.clonebundles=false"'))
2035
2039
2036 def parseclonebundlesmanifest(repo, s):
2040 def parseclonebundlesmanifest(repo, s):
2037 """Parses the raw text of a clone bundles manifest.
2041 """Parses the raw text of a clone bundles manifest.
2038
2042
2039 Returns a list of dicts. The dicts have a ``URL`` key corresponding
2043 Returns a list of dicts. The dicts have a ``URL`` key corresponding
2040 to the URL and other keys are the attributes for the entry.
2044 to the URL and other keys are the attributes for the entry.
2041 """
2045 """
2042 m = []
2046 m = []
2043 for line in s.splitlines():
2047 for line in s.splitlines():
2044 fields = line.split()
2048 fields = line.split()
2045 if not fields:
2049 if not fields:
2046 continue
2050 continue
2047 attrs = {'URL': fields[0]}
2051 attrs = {'URL': fields[0]}
2048 for rawattr in fields[1:]:
2052 for rawattr in fields[1:]:
2049 key, value = rawattr.split('=', 1)
2053 key, value = rawattr.split('=', 1)
2050 key = urlreq.unquote(key)
2054 key = urlreq.unquote(key)
2051 value = urlreq.unquote(value)
2055 value = urlreq.unquote(value)
2052 attrs[key] = value
2056 attrs[key] = value
2053
2057
2054 # Parse BUNDLESPEC into components. This makes client-side
2058 # Parse BUNDLESPEC into components. This makes client-side
2055 # preferences easier to specify since you can prefer a single
2059 # preferences easier to specify since you can prefer a single
2056 # component of the BUNDLESPEC.
2060 # component of the BUNDLESPEC.
2057 if key == 'BUNDLESPEC':
2061 if key == 'BUNDLESPEC':
2058 try:
2062 try:
2059 comp, version, params = parsebundlespec(repo, value,
2063 comp, version, params = parsebundlespec(repo, value,
2060 externalnames=True)
2064 externalnames=True)
2061 attrs['COMPRESSION'] = comp
2065 attrs['COMPRESSION'] = comp
2062 attrs['VERSION'] = version
2066 attrs['VERSION'] = version
2063 except error.InvalidBundleSpecification:
2067 except error.InvalidBundleSpecification:
2064 pass
2068 pass
2065 except error.UnsupportedBundleSpecification:
2069 except error.UnsupportedBundleSpecification:
2066 pass
2070 pass
2067
2071
2068 m.append(attrs)
2072 m.append(attrs)
2069
2073
2070 return m
2074 return m
2071
2075
2072 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2076 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2073 """Remove incompatible clone bundle manifest entries.
2077 """Remove incompatible clone bundle manifest entries.
2074
2078
2075 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2079 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2076 and returns a new list consisting of only the entries that this client
2080 and returns a new list consisting of only the entries that this client
2077 should be able to apply.
2081 should be able to apply.
2078
2082
2079 There is no guarantee we'll be able to apply all returned entries because
2083 There is no guarantee we'll be able to apply all returned entries because
2080 the metadata we use to filter on may be missing or wrong.
2084 the metadata we use to filter on may be missing or wrong.
2081 """
2085 """
2082 newentries = []
2086 newentries = []
2083 for entry in entries:
2087 for entry in entries:
2084 spec = entry.get('BUNDLESPEC')
2088 spec = entry.get('BUNDLESPEC')
2085 if spec:
2089 if spec:
2086 try:
2090 try:
2087 comp, version, params = parsebundlespec(repo, spec, strict=True)
2091 comp, version, params = parsebundlespec(repo, spec, strict=True)
2088
2092
2089 # If a stream clone was requested, filter out non-streamclone
2093 # If a stream clone was requested, filter out non-streamclone
2090 # entries.
2094 # entries.
2091 if streamclonerequested and (comp != 'UN' or version != 's1'):
2095 if streamclonerequested and (comp != 'UN' or version != 's1'):
2092 repo.ui.debug('filtering %s because not a stream clone\n' %
2096 repo.ui.debug('filtering %s because not a stream clone\n' %
2093 entry['URL'])
2097 entry['URL'])
2094 continue
2098 continue
2095
2099
2096 except error.InvalidBundleSpecification as e:
2100 except error.InvalidBundleSpecification as e:
2097 repo.ui.debug(str(e) + '\n')
2101 repo.ui.debug(str(e) + '\n')
2098 continue
2102 continue
2099 except error.UnsupportedBundleSpecification as e:
2103 except error.UnsupportedBundleSpecification as e:
2100 repo.ui.debug('filtering %s because unsupported bundle '
2104 repo.ui.debug('filtering %s because unsupported bundle '
2101 'spec: %s\n' % (entry['URL'], str(e)))
2105 'spec: %s\n' % (entry['URL'], str(e)))
2102 continue
2106 continue
2103 # If we don't have a spec and requested a stream clone, we don't know
2107 # If we don't have a spec and requested a stream clone, we don't know
2104 # what the entry is so don't attempt to apply it.
2108 # what the entry is so don't attempt to apply it.
2105 elif streamclonerequested:
2109 elif streamclonerequested:
2106 repo.ui.debug('filtering %s because cannot determine if a stream '
2110 repo.ui.debug('filtering %s because cannot determine if a stream '
2107 'clone bundle\n' % entry['URL'])
2111 'clone bundle\n' % entry['URL'])
2108 continue
2112 continue
2109
2113
2110 if 'REQUIRESNI' in entry and not sslutil.hassni:
2114 if 'REQUIRESNI' in entry and not sslutil.hassni:
2111 repo.ui.debug('filtering %s because SNI not supported\n' %
2115 repo.ui.debug('filtering %s because SNI not supported\n' %
2112 entry['URL'])
2116 entry['URL'])
2113 continue
2117 continue
2114
2118
2115 newentries.append(entry)
2119 newentries.append(entry)
2116
2120
2117 return newentries
2121 return newentries
2118
2122
2119 class clonebundleentry(object):
2123 class clonebundleentry(object):
2120 """Represents an item in a clone bundles manifest.
2124 """Represents an item in a clone bundles manifest.
2121
2125
2122 This rich class is needed to support sorting since sorted() in Python 3
2126 This rich class is needed to support sorting since sorted() in Python 3
2123 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
2127 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
2124 won't work.
2128 won't work.
2125 """
2129 """
2126
2130
2127 def __init__(self, value, prefers):
2131 def __init__(self, value, prefers):
2128 self.value = value
2132 self.value = value
2129 self.prefers = prefers
2133 self.prefers = prefers
2130
2134
2131 def _cmp(self, other):
2135 def _cmp(self, other):
2132 for prefkey, prefvalue in self.prefers:
2136 for prefkey, prefvalue in self.prefers:
2133 avalue = self.value.get(prefkey)
2137 avalue = self.value.get(prefkey)
2134 bvalue = other.value.get(prefkey)
2138 bvalue = other.value.get(prefkey)
2135
2139
2136 # Special case for b missing attribute and a matches exactly.
2140 # Special case for b missing attribute and a matches exactly.
2137 if avalue is not None and bvalue is None and avalue == prefvalue:
2141 if avalue is not None and bvalue is None and avalue == prefvalue:
2138 return -1
2142 return -1
2139
2143
2140 # Special case for a missing attribute and b matches exactly.
2144 # Special case for a missing attribute and b matches exactly.
2141 if bvalue is not None and avalue is None and bvalue == prefvalue:
2145 if bvalue is not None and avalue is None and bvalue == prefvalue:
2142 return 1
2146 return 1
2143
2147
2144 # We can't compare unless attribute present on both.
2148 # We can't compare unless attribute present on both.
2145 if avalue is None or bvalue is None:
2149 if avalue is None or bvalue is None:
2146 continue
2150 continue
2147
2151
2148 # Same values should fall back to next attribute.
2152 # Same values should fall back to next attribute.
2149 if avalue == bvalue:
2153 if avalue == bvalue:
2150 continue
2154 continue
2151
2155
2152 # Exact matches come first.
2156 # Exact matches come first.
2153 if avalue == prefvalue:
2157 if avalue == prefvalue:
2154 return -1
2158 return -1
2155 if bvalue == prefvalue:
2159 if bvalue == prefvalue:
2156 return 1
2160 return 1
2157
2161
2158 # Fall back to next attribute.
2162 # Fall back to next attribute.
2159 continue
2163 continue
2160
2164
2161 # If we got here we couldn't sort by attributes and prefers. Fall
2165 # If we got here we couldn't sort by attributes and prefers. Fall
2162 # back to index order.
2166 # back to index order.
2163 return 0
2167 return 0
2164
2168
2165 def __lt__(self, other):
2169 def __lt__(self, other):
2166 return self._cmp(other) < 0
2170 return self._cmp(other) < 0
2167
2171
2168 def __gt__(self, other):
2172 def __gt__(self, other):
2169 return self._cmp(other) > 0
2173 return self._cmp(other) > 0
2170
2174
2171 def __eq__(self, other):
2175 def __eq__(self, other):
2172 return self._cmp(other) == 0
2176 return self._cmp(other) == 0
2173
2177
2174 def __le__(self, other):
2178 def __le__(self, other):
2175 return self._cmp(other) <= 0
2179 return self._cmp(other) <= 0
2176
2180
2177 def __ge__(self, other):
2181 def __ge__(self, other):
2178 return self._cmp(other) >= 0
2182 return self._cmp(other) >= 0
2179
2183
2180 def __ne__(self, other):
2184 def __ne__(self, other):
2181 return self._cmp(other) != 0
2185 return self._cmp(other) != 0
2182
2186
2183 def sortclonebundleentries(ui, entries):
2187 def sortclonebundleentries(ui, entries):
2184 prefers = ui.configlist('ui', 'clonebundleprefers')
2188 prefers = ui.configlist('ui', 'clonebundleprefers')
2185 if not prefers:
2189 if not prefers:
2186 return list(entries)
2190 return list(entries)
2187
2191
2188 prefers = [p.split('=', 1) for p in prefers]
2192 prefers = [p.split('=', 1) for p in prefers]
2189
2193
2190 items = sorted(clonebundleentry(v, prefers) for v in entries)
2194 items = sorted(clonebundleentry(v, prefers) for v in entries)
2191 return [i.value for i in items]
2195 return [i.value for i in items]
2192
2196
2193 def trypullbundlefromurl(ui, repo, url):
2197 def trypullbundlefromurl(ui, repo, url):
2194 """Attempt to apply a bundle from a URL."""
2198 """Attempt to apply a bundle from a URL."""
2195 with repo.lock(), repo.transaction('bundleurl') as tr:
2199 with repo.lock(), repo.transaction('bundleurl') as tr:
2196 try:
2200 try:
2197 fh = urlmod.open(ui, url)
2201 fh = urlmod.open(ui, url)
2198 cg = readbundle(ui, fh, 'stream')
2202 cg = readbundle(ui, fh, 'stream')
2199
2203
2200 if isinstance(cg, streamclone.streamcloneapplier):
2204 if isinstance(cg, streamclone.streamcloneapplier):
2201 cg.apply(repo)
2205 cg.apply(repo)
2202 else:
2206 else:
2203 bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
2207 bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
2204 return True
2208 return True
2205 except urlerr.httperror as e:
2209 except urlerr.httperror as e:
2206 ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
2210 ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
2207 except urlerr.urlerror as e:
2211 except urlerr.urlerror as e:
2208 ui.warn(_('error fetching bundle: %s\n') % e.reason)
2212 ui.warn(_('error fetching bundle: %s\n') % e.reason)
2209
2213
2210 return False
2214 return False
General Comments 0
You need to be logged in to leave comments. Login now