bookmark: fix remote bookmark deletion when the push is raced...
marmoute
r52529:553eb132 stable
@@ -0,0 +1,153 @@
=============================================
Testing various race conditions while pushing
=============================================

  $ cat << EOF >> $HGRCPATH
  > [command-templates]
  > log={rev}:{node|short} {desc|firstline} {bookmarks}
  > [ui]
  > timeout = 20
  > [phases]
  > publish=False
  > EOF

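The 20-second ui.timeout bounds how long a command waits for a lock, so a
stuck run cannot hang forever, and publish=False keeps pushed changesets
draft, so phase movement does not obscure the bookmark behaviour under test.
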
Initial Setup
=============

  $ hg init dst
  $ echo a > dst/a-file
  $ hg --cwd dst add a-file
  $ hg --cwd dst commit -m root
  $ hg --cwd dst bookmark my-book
  $ hg --cwd dst bookmarks
   * my-book 0:a64e49638499
  $ hg --cwd dst log -G
  @ 0:a64e49638499 root my-book


  $ hg clone ssh://user@dummy/dst src
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files
  new changesets a64e49638499 (1 drafts)
  updating to branch default
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg --cwd src update my-book
  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
  (activating bookmark my-book)
  $ hg --cwd src log -G
  @ 0:a64e49638499 root my-book


  $ echo b > src/a-file
  $ hg --cwd src commit -m cA0_
  $ hg --cwd src log -G
  @ 1:e89d3a6ed79b cA0_ my-book
  |
  o 0:a64e49638499 root


Race condition while pushing a forward-moving bookmark
=======================================================

This is currently slightly broken, as we eventually do not push the
bookmark. However, at least we do not delete the remote one.

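The hooks.prelock hook used below runs "hg commit -m cA1_" while the push
is in flight, after the push has decided what to send. The active bookmark
my-book therefore moves to cA1_, a revision the server never receives.
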
  $ echo c > src/a-file
  $ hg --cwd src push -B my-book --config hooks.prelock="hg commit -m cA1_"
  pushing to ssh://user@dummy/dst
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: added 1 changesets with 1 changes to 1 files
  $ hg --cwd src log -G
  @ 2:08d837bbfe8d cA1_ my-book
  |
  o 1:e89d3a6ed79b cA0_
  |
  o 0:a64e49638499 root

  $ hg --cwd dst log -G
  o 1:e89d3a6ed79b cA0_
  |
  @ 0:a64e49638499 root my-book


Race condition while pushing a side-moving bookmark
====================================================

resynchronize the repo and create a side-moving bookmark
---------------------------------------------------------

  $ hg --cwd src push -B my-book
  pushing to ssh://user@dummy/dst
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: added 1 changesets with 1 changes to 1 files
  updating bookmark my-book
  $ hg --cwd dst log -G
  o 2:08d837bbfe8d cA1_ my-book
  |
  o 1:e89d3a6ed79b cA0_
  |
  @ 0:a64e49638499 root


  $ hg --cwd src up 'desc("root")'
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  (leaving bookmark my-book)
  $ echo d > src/a-file
  $ hg --cwd src commit -m cB0_
  created new head
  $ hg --cwd src bookmark --force my-book
  $ echo e > src/a-file
  $ hg --cwd src log -G
  @ 3:726401661fe5 cB0_ my-book
  |
  | o 2:08d837bbfe8d cA1_
  | |
  | o 1:e89d3a6ed79b cA0_
  |/
  o 0:a64e49638499 root


Push the bookmark while a commit is being made
----------------------------------------------

This is currently slightly broken, as we eventually do not push the
bookmark. However, at least we do not delete the remote one.

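The remote my-book sits on cA1_, which is not an ancestor of the cB1_
created by the racing commit. The bug fixed by this changeset could make
such a raced push delete my-book on the server; the expected behaviour,
shown below, is to leave the remote bookmark untouched on cA1_.
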
  $ hg --cwd src push -f -r 'desc("cB0_")' -B my-book --config hooks.prelock="hg commit -m cB1_"
  pushing to ssh://user@dummy/dst
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: added 1 changesets with 1 changes to 1 files (+1 heads)
  $ hg --cwd src log -G
  @ 4:a7f9cbf631a0 cB1_ my-book
  |
  o 3:726401661fe5 cB0_
  |
  | o 2:08d837bbfe8d cA1_
  | |
  | o 1:e89d3a6ed79b cA0_
  |/
  o 0:a64e49638499 root


  $ hg --cwd dst log -G
  o 3:726401661fe5 cB0_
  |
  | o 2:08d837bbfe8d cA1_ my-book
  | |
  | o 1:e89d3a6ed79b cA0_
  |/
  @ 0:a64e49638499 root

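The exchange.py change below (added lines marked with +, the removed line
with -) implements the guard these tests exercise. As a rough sketch
(simplified names, not the actual Mercurial API; `pushed` stands for the
ancestor set of the revisions being pushed, empty when everything is
pushed):

    def may_send_bookmark(pushed, bookmark_rev):
        """Return True if a bookmark may be advertised to the server."""
        # A bookmark pointing at a revision the server will not receive
        # cannot be pushed; advertising it anyway is what used to delete
        # or clobber the remote bookmark when the push was raced.
        return not pushed or bookmark_rev in pushed
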
@@ -1,2895 +1,2901 @@
# exchange.py - utility to exchange data between repos.
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


import collections
import weakref

from .i18n import _
from .node import (
    hex,
    nullrev,
)
from . import (
    bookmarks as bookmod,
    bundle2,
    bundlecaches,
    changegroup,
    discovery,
    error,
    lock as lockmod,
    logexchange,
    narrowspec,
    obsolete,
    obsutil,
    phases,
    pushkey,
    pycompat,
    requirements,
    scmutil,
    streamclone,
    url as urlmod,
    util,
    wireprototypes,
)
from .utils import (
    hashutil,
    stringutil,
    urlutil,
)
from .interfaces import repository

urlerr = util.urlerr
urlreq = util.urlreq

_NARROWACL_SECTION = b'narrowacl'


def readbundle(ui, fh, fname, vfs=None):
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = b"stream"
        if not header.startswith(b'HG') and header.startswith(b'\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = b"HG10"
            alg = b'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != b'HG':
        raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
    if version == b'10':
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith(b'2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    elif version == b'S1':
        return streamclone.streamcloneapplier(fh)
    else:
        raise error.Abort(
            _(b'%s: unknown bundle version %s') % (fname, version)
        )


def _format_params(params):
    parts = []
    for key, value in sorted(params.items()):
        value = urlreq.quote(value)
        parts.append(b"%s=%s" % (key, value))
    return b';'.join(parts)


def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.
    """

    def speccompression(alg):
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    params = {}

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == b'_truncatedBZ':
            alg = b'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
        return b'%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if b'Compression' in b.params:
            comp = speccompression(b.params[b'Compression'])
            if not comp:
                raise error.Abort(
                    _(b'unknown compression algorithm: %s') % comp
                )
        else:
            comp = b'none'

        version = None
        for part in b.iterparts():
            if part.type == b'changegroup':
                cgversion = part.params[b'version']
                if cgversion in (b'01', b'02'):
                    version = b'v2'
                elif cgversion in (b'03',):
                    version = b'v2'
                    params[b'cg.version'] = cgversion
                else:
                    raise error.Abort(
                        _(
                            b'changegroup version %s does not have '
                            b'a known bundlespec'
                        )
                        % version,
                        hint=_(b'try upgrading your Mercurial client'),
                    )
            elif part.type == b'stream2' and version is None:
                # A stream2 part must be part of a v2 bundle
                requirements = urlreq.unquote(part.params[b'requirements'])
                splitted = requirements.split()
                params = bundle2._formatrequirementsparams(splitted)
                return b'none-v2;stream=v2;%s' % params
            elif part.type == b'stream3-exp' and version is None:
                # A stream3 part must be part of a v2 bundle
                requirements = urlreq.unquote(part.params[b'requirements'])
                splitted = requirements.split()
                params = bundle2._formatrequirementsparams(splitted)
                return b'none-v2;stream=v3-exp;%s' % params
            elif part.type == b'obsmarkers':
                params[b'obsolescence'] = b'yes'
                if not part.mandatory:
                    params[b'obsolescence-mandatory'] = b'no'

        if not version:
            params[b'changegroup'] = b'no'
            version = b'v2'
        spec = b'%s-%s' % (comp, version)
        if params:
            spec += b';'
            spec += _format_params(params)
        return spec

    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        formatted = bundle2._formatrequirementsparams(requirements)
        return b'none-packed1;%s' % formatted
    else:
        raise error.Abort(_(b'unknown bundle type: %s') % b)


def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    else:
        common = [repo.nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(repo, common, heads)


def _checkpublish(pushop):
    repo = pushop.repo
    ui = repo.ui
    behavior = ui.config(b'experimental', b'auto-publish')
    if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
        return
    remotephases = listkeys(pushop.remote, b'phases')
    if not remotephases.get(b'publishing', False):
        return

    if pushop.revs is None:
        published = repo.filtered(b'served').revs(b'not public()')
    else:
        published = repo.revs(b'::%ln - public()', pushop.revs)
        # we want to use pushop.revs in the revset even if they themselves are
        # secret, but we don't want to have anything that the server won't see
        # in the result of this expression
        published &= repo.filtered(b'served')
    if published:
        if behavior == b'warn':
            ui.warn(
                _(b'%i changesets about to be published\n') % len(published)
            )
        elif behavior == b'confirm':
            if ui.promptchoice(
                _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
                % len(published)
            ):
                raise error.CanceledError(_(b'user quit'))
        elif behavior == b'abort':
            msg = _(b'push would publish %i changesets') % len(published)
            hint = _(
                b"use --publish or adjust 'experimental.auto-publish'"
                b" config"
            )
            raise error.Abort(msg, hint=hint)


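# Illustrative note on _checkpublish above: `experimental.auto-publish`
# accepts, besides its default ('publish'), the values 'warn', 'confirm'
# and 'abort'; e.g.
#
#     hg push --config experimental.auto-publish=abort
#
# refuses a push that would turn local draft changesets public on a
# publishing server.

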
def _forcebundle1(op):
    """return true if a pull/push must use bundle1

    This function is used to allow testing of the older bundle version"""
    ui = op.repo.ui
    # The goal of this config is to allow developers to choose the bundle
    # version used during exchange. This is especially handy during tests.
    # Value is a list of bundle versions to pick from; the highest version
    # should be used.
    #
    # developer config: devel.legacy.exchange
    exchange = ui.configlist(b'devel', b'legacy.exchange')
    forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
    return forcebundle1 or not op.remote.capable(b'bundle2')


class pushoperation:
    """An object that represents a single push operation

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(
        self,
        repo,
        remote,
        force=False,
        revs=None,
        newbranch=False,
        bookmarks=(),
        publish=False,
        pushvars=None,
    ):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmarks explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # steps already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # summary of the remote phase situation
        self.remotephases = None
        # phases changes that must be pushed alongside the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars
        # publish pushed changesets
        self.publish = publish

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.ancestorsof

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common heads are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::ancestorsof and ::commonheads)
        # (ancestorsof is revs with secret changesets filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (ancestorsof and ::commonheads)
        #              + (commonheads and ::ancestorsof)
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::ancestorsof) - commonheads)
        #
        # We can pick:
        # * ancestorsof part of common (::commonheads)
        common = self.outgoing.common
        rev = self.repo.changelog.index.rev
        cheads = [node for node in self.revs if rev(node) in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set(
            b'%ln and parents(roots(%ln))',
            self.outgoing.commonheads,
            self.outgoing.missing,
        )
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads


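# Concrete example of the two head sets above (illustrative only): if the
# remote has head A and we push B, a child of A, while another child C
# stays local, then futureheads is {B} (the remote heads if the changesets
# land) and fallbackheads is {A} (the heads used for phase and bookmark
# bookkeeping if they do not).

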
# mapping of messages used when pushing bookmarks
bookmsgmap = {
    b'update': (
        _(b"updating bookmark %s\n"),
        _(b'updating bookmark %s failed\n'),
    ),
    b'export': (
        _(b"exporting bookmark %s\n"),
        _(b'exporting bookmark %s failed\n'),
    ),
    b'delete': (
        _(b"deleting remote bookmark %s\n"),
        _(b'deleting remote bookmark %s failed\n'),
    ),
}


def push(
    repo,
    remote,
    force=False,
    revs=None,
    newbranch=False,
    bookmarks=(),
    publish=False,
    opargs=None,
):
    """Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    """
    if opargs is None:
        opargs = {}
    pushop = pushoperation(
        repo,
        remote,
        force,
        revs,
        newbranch,
        bookmarks,
        publish,
        **pycompat.strkwargs(opargs)
    )
    if pushop.remote.local():
        missing = (
            set(pushop.repo.requirements) - pushop.remote.local().supported
        )
        if missing:
            msg = _(
                b"required features are not"
                b" supported in the destination:"
                b" %s"
            ) % (b', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_(b"destination does not support push"))

    if not pushop.remote.capable(b'unbundle'):
        raise error.Abort(
            _(
                b'cannot push: destination does not support the '
                b'unbundle wire protocol command'
            )
        )
    for category in sorted(bundle2.read_remote_wanted_sidedata(pushop.remote)):
        # Check that a computer is registered for that category for at least
        # one revlog kind.
        for kind, computers in repo._sidedata_computers.items():
            if computers.get(category):
                break
        else:
            raise error.Abort(
                _(
                    b'cannot push: required sidedata category not supported'
                    b" by this client: '%s'"
                )
                % pycompat.bytestr(category)
            )
    # get lock as we might write phase data
    wlock = lock = None
    try:
        try:
            # bundle2 push may receive a reply bundle touching bookmarks
            # requiring the wlock. Take it now to ensure proper ordering.
            maypushback = pushop.ui.configbool(
                b'experimental',
                b'bundle2.pushback',
            )
            if (
                (not _forcebundle1(pushop))
                and maypushback
                and not bookmod.bookmarksinstore(repo)
            ):
                wlock = pushop.repo.wlock()
            lock = pushop.repo.lock()
            pushop.trmanager = transactionmanager(
                pushop.repo, b'push-response', pushop.remote.url()
            )
        except error.LockUnavailable as err:
            # source repo cannot be locked.
            # We do not abort the push, but just disable the local phase
            # synchronisation.
            msg = b'cannot lock source repository: %s\n'
            msg %= stringutil.forcebytestr(err)
            pushop.ui.debug(msg)

        pushop.repo.checkpush(pushop)
        _checkpublish(pushop)
        _pushdiscovery(pushop)
        if not pushop.force:
            _checksubrepostate(pushop)
        if not _forcebundle1(pushop):
            _pushbundle2(pushop)
        _pushchangeset(pushop)
        _pushsyncphase(pushop)
        _pushobsolete(pushop)
        _pushbookmark(pushop)
        if pushop.trmanager is not None:
            pushop.trmanager.close()
    finally:
        lockmod.release(pushop.trmanager, lock, wlock)

    if repo.ui.configbool(b'experimental', b'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pushop


# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}


def pushdiscovery(stepname):
    """decorator for a function performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, change the pushdiscovery dictionary directly."""

    def dec(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func

    return dec


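# Hypothetical extension-side registration, for illustration only (the
# decorator registers new steps; wrapping an existing one goes through
# pushdiscoverymapping directly):
#
#     @pushdiscovery(b'my-extra-check')
#     def _pushdiscovery_my_extra_check(pushop):
#         pushop.ui.debug(b'running my extra discovery step\n')

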
def _pushdiscovery(pushop):
    """Run all discovery steps"""
    for stepname in pushdiscoveryorder:
        step = pushdiscoverymapping[stepname]
        step(pushop)


def _checksubrepostate(pushop):
    """Ensure all outgoing referenced subrepo revisions are present locally"""

    repo = pushop.repo

    # If the repository does not use subrepos, skip the expensive
    # manifest checks.
    if not len(repo.file(b'.hgsub')) or not len(repo.file(b'.hgsubstate')):
        return

    for n in pushop.outgoing.missing:
        ctx = repo[n]

        if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files():
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                sub.verify(onpush=True)


@pushdiscovery(b'changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    fci = discovery.findcommonincoming
    if pushop.revs:
        commoninc = fci(
            pushop.repo,
            pushop.remote,
            force=pushop.force,
            ancestorsof=pushop.revs,
        )
    else:
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(
        pushop.repo,
        pushop.remote,
        onlyheads=pushop.revs,
        commoninc=commoninc,
        force=pushop.force,
    )
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc


@pushdiscovery(b'phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both the success and failure case of the changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = listkeys(pushop.remote, b'phases')

    if (
        pushop.ui.configbool(b'ui', b'_usedassubrepo')
        and remotephases  # server supports phases
        and not pushop.outgoing.missing  # no changesets to be pushed
        and remotephases.get(b'publishing', False)
    ):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changesets are to be pushed
        # - and the remote is publishing
        # We may be in issue 3781 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        pushop.outdatedphases = []
        pushop.fallbackoutdatedphases = []
        return

    pushop.remotephases = phases.remotephasessummary(
        pushop.repo, pushop.fallbackheads, remotephases
    )
    droots = pushop.remotephases.draftroots

    extracond = b''
    if not pushop.remotephases.publishing:
        extracond = b' and public()'
    revset = b'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote but public here.
    # XXX Beware that the revset breaks if droots is not strictly
    # XXX roots; we may want to ensure it is, but that is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not pushop.remotephases.publishing and pushop.publish:
        future = list(
            unfi.set(
                b'%ln and (not public() or %ln::)', pushop.futureheads, droots
            )
        )
    elif not outgoing.missing:
        future = fallback
    else:
        # adds changesets we are going to push as draft
        #
        # should not be necessary for a publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(
            unfi.set(b'roots(%ln + %ln::)', outgoing.missing, droots)
        )
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback


@pushdiscovery(b'obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
        return

    if not pushop.repo.obsstore:
        return

    if b'obsolete' not in listkeys(pushop.remote, b'namespaces'):
        return

    repo = pushop.repo
    # very naive computation, that can be quite expensive on big repos.
    # However, evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads))
    pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)


@pushdiscovery(b'bookmarks')
def _pushdiscoverybookmarks(pushop):
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug(b"checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)

    remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks'))

    explicit = {
        repo._bookmarks.expandname(bookmark) for bookmark in pushop.bookmarks
    }

    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
    return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)


def _processcompared(pushop, pushed, explicit, remotebms, comp):
    """take decisions on the bookmarks to push to the remote repo

    Exists to help extensions alter this behavior.
    """
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    repo = pushop.repo

    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not pushed or repo[scid].rev() in pushed:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
        if bookmod.isdivergent(b):
            pushop.ui.warn(_(b'cannot push divergent bookmark %s!\n') % b)
            pushop.bkresult = 2
+        elif pushed and repo[scid].rev() not in pushed:
+            # in case of race or secret
+            msg = _(b'cannot push bookmark X without its revision: %s!\n')
+            pushop.ui.warn(msg % b)
+            pushop.bkresult = 2
        else:
            pushop.outbookmarks.append((b, b'', scid))
    # search for overwritten bookmark
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        if b in explicit:
            explicit.remove(b)
-        pushop.outbookmarks.append((b, dcid, scid))
+        if not pushed or repo[scid].rev() in pushed:
+            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, b''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        pushop.ui.warn(
            _(
                b'bookmark %s does not exist on the local '
                b'or remote repository!\n'
            )
            % explicit[0]
        )
        pushop.bkresult = 2

    pushop.outbookmarks.sort()


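# How the `pushed` guards in _processcompared above play out per bucket:
# - advsrc (bookmark advanced locally): sent only when its new target is
#   part of the push; otherwise the remote bookmark is left untouched.
# - addsrc (bookmark only exists locally): refused with a warning when its
#   target revision is not part of the push (race or secret changeset).
# - advdst/diverge/differ (bookmark moved or diverged remotely): only
#   overwritten when the local target is actually being pushed, so a raced
#   push no longer deletes or clobbers the remote bookmark.

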
def _pushcheckoutgoing(pushop):
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are here for the 80-char limit
            mso = _(b"push includes obsolete changeset: %s!")
            mspd = _(b"push includes phase-divergent changeset: %s!")
            mscd = _(b"push includes content-divergent changeset: %s!")
            mst = {
                b"orphan": _(b"push includes orphan changeset: %s!"),
                b"phase-divergent": mspd,
                b"content-divergent": mscd,
            }
            # If there is at least one obsolete or unstable
            # changeset in missing, at least one of the missing
            # heads will be obsolete or unstable. So checking
            # heads only is ok
            for node in outgoing.ancestorsof:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

    discovery.checkheads(pushop)
    return True


# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}


def b2partsgenerator(stepname, idx=None):
    """decorator for a function generating a bundle2 part

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, adjust the b2partsgenmapping dictionary directly."""

    def dec(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func

    return dec


def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # * 'force' does not check for push races,
    # * if we don't push anything, there is nothing to check.
    if not pushop.force and pushop.outgoing.ancestorsof:
        allowunrelated = b'related' in bundler.capabilities.get(
            b'checkheads', ()
        )
        emptyremote = pushop.pushbranchmap is None
        if not allowunrelated or emptyremote:
            bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
        else:
            affected = set()
            for branch, heads in pushop.pushbranchmap.items():
                remoteheads, newheads, unsyncedheads, discardedheads = heads
                if remoteheads is not None:
                    remote = set(remoteheads)
                    affected |= set(discardedheads) & remote
                    affected |= remote - set(newheads)
            if affected:
                data = iter(sorted(affected))
                bundler.newpart(b'check:updated-heads', data=data)


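# The two parts generated above let the server detect racing pushes:
# 'check:heads' aborts if the server's heads differ at all from the ones
# the client saw, while 'check:updated-heads' (available when the server
# advertises 'related' in its 'checkheads' capability) only checks the
# heads this push actually affects, tolerating concurrent pushes to
# unrelated heads.

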
def _pushing(pushop):
    """return True if we are pushing anything"""
    return bool(
        pushop.outgoing.missing
        or pushop.outdatedphases
        or pushop.outobsmarkers
        or pushop.outbookmarks
    )


859 @b2partsgenerator(b'check-bookmarks')
865 @b2partsgenerator(b'check-bookmarks')
860 def _pushb2checkbookmarks(pushop, bundler):
866 def _pushb2checkbookmarks(pushop, bundler):
861 """insert bookmark move checking"""
867 """insert bookmark move checking"""
862 if not _pushing(pushop) or pushop.force:
868 if not _pushing(pushop) or pushop.force:
863 return
869 return
864 b2caps = bundle2.bundle2caps(pushop.remote)
870 b2caps = bundle2.bundle2caps(pushop.remote)
865 hasbookmarkcheck = b'bookmarks' in b2caps
871 hasbookmarkcheck = b'bookmarks' in b2caps
866 if not (pushop.outbookmarks and hasbookmarkcheck):
872 if not (pushop.outbookmarks and hasbookmarkcheck):
867 return
873 return
868 data = []
874 data = []
869 for book, old, new in pushop.outbookmarks:
875 for book, old, new in pushop.outbookmarks:
870 data.append((book, old))
876 data.append((book, old))
871 checkdata = bookmod.binaryencode(pushop.repo, data)
877 checkdata = bookmod.binaryencode(pushop.repo, data)
872 bundler.newpart(b'check:bookmarks', data=checkdata)
878 bundler.newpart(b'check:bookmarks', data=checkdata)
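
# The ``check:bookmarks`` part above ships (bookmark, expected-node) pairs;
# the server aborts the whole unbundle if any of these bookmarks moved
# since our discovery, which protects a racing push from overwriting or
# deleting a bookmark it never saw.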


@b2partsgenerator(b'check-phases')
def _pushb2checkphases(pushop, bundler):
    """insert phase move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasphaseheads = b'heads' in b2caps.get(b'phases', ())
    if pushop.remotephases is not None and hasphaseheads:
        # check that the remote phase has not changed
        checks = {p: [] for p in phases.allphases}
        checks[phases.public].extend(pushop.remotephases.publicheads)
        checks[phases.draft].extend(pushop.remotephases.draftroots)
        if any(checks.values()):
            for phase in checks:
                checks[phase].sort()
            checkdata = phases.binaryencode(checks)
            bundler.newpart(b'check:phases', data=checkdata)
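
# Similarly, ``check:phases`` encodes the public heads and draft roots we
# observed on the remote; the server aborts if its phase information
# changed between our discovery and the unbundle.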


@b2partsgenerator(b'changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if b'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = b'01'
    cgversions = b2caps.get(b'changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [
            v
            for v in cgversions
            if v in changegroup.supportedoutgoingversions(pushop.repo)
        ]
        if not cgversions:
            raise error.Abort(_(b'no common changegroup version'))
        version = max(cgversions)

    remote_sidedata = bundle2.read_remote_wanted_sidedata(pushop.remote)
    cgstream = changegroup.makestream(
        pushop.repo,
        pushop.outgoing,
        version,
        b'push',
        bundlecaps=b2caps,
        remote_sidedata=remote_sidedata,
    )
    cgpart = bundler.newpart(b'changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam(b'version', version)
    if scmutil.istreemanifest(pushop.repo):
        cgpart.addparam(b'treemanifest', b'1')
    if repository.REPO_FEATURE_SIDE_DATA in pushop.repo.features:
        cgpart.addparam(b'exp-sidedata', b'1')

    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies[b'changegroup']) == 1
        pushop.cgresult = cgreplies[b'changegroup'][0][b'return']

    return handlereply
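
# Version negotiation above: start from changegroup version b'01', then
# restrict the server-advertised versions to those we can generate and
# pick the highest common one, aborting when the intersection is empty.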


@b2partsgenerator(b'phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if b'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui

    legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
    haspushkey = b'pushkey' in b2caps
    hasphaseheads = b'heads' in b2caps.get(b'phases', ())

    if hasphaseheads and not legacyphase:
        return _pushb2phaseheads(pushop, bundler)
    elif haspushkey:
        return _pushb2phasespushkey(pushop, bundler)


def _pushb2phaseheads(pushop, bundler):
    """push phase information through a bundle2 - binary part"""
    pushop.stepsdone.add(b'phases')
    if pushop.outdatedphases:
        updates = {p: [] for p in phases.allphases}
        updates[0].extend(h.node() for h in pushop.outdatedphases)
        phasedata = phases.binaryencode(updates)
        bundler.newpart(b'phase-heads', data=phasedata)


def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add(b'phases')
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_(b'updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart(b'pushkey')
        part.addparam(b'namespace', enc(b'phases'))
        part.addparam(b'key', enc(newremotehead.hex()))
        part.addparam(b'old', enc(b'%d' % phases.draft))
        part.addparam(b'new', enc(b'%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep[b'pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _(b'server ignored update of %s to public!\n') % node
            elif not int(results[0][b'return']):
                msg = _(b'updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)

    return handlereply


@b2partsgenerator(b'obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    if b'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add(b'obsmarkers')
    if pushop.outobsmarkers:
        markers = obsutil.sortedmarkers(pushop.outobsmarkers)
        bundle2.buildobsmarkerspart(bundler, markers)


@b2partsgenerator(b'bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if b'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)

    legacy = pushop.repo.ui.configlist(b'devel', b'legacy.exchange')
    legacybooks = b'bookmarks' in legacy

    if not legacybooks and b'bookmarks' in b2caps:
        return _pushb2bookmarkspart(pushop, bundler)
    elif b'pushkey' in b2caps:
        return _pushb2bookmarkspushkey(pushop, bundler)


def _bmaction(old, new):
    """small utility for bookmark pushing"""
    if not old:
        return b'export'
    elif not new:
        return b'delete'
    return b'update'
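
# For reference, _bmaction maps the (old, new) node pair to the reported
# action:
#
#   old falsy,  new set   -> b'export'  (bookmark is new on the remote)
#   old set,    new falsy -> b'delete'  (bookmark removed remotely)
#   old set,    new set   -> b'update'  (bookmark moved)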


def _abortonsecretctx(pushop, node, b):
    """abort if a given bookmark points to a secret changeset"""
    if node and pushop.repo[node].phase() == phases.secret:
        raise error.Abort(
            _(b'cannot push bookmark %s as it points to a secret changeset') % b
        )


def _pushb2bookmarkspart(pushop, bundler):
    pushop.stepsdone.add(b'bookmarks')
    if not pushop.outbookmarks:
        return

    allactions = []
    data = []
    for book, old, new in pushop.outbookmarks:
        _abortonsecretctx(pushop, new, book)
        data.append((book, new))
        allactions.append((book, _bmaction(old, new)))
    checkdata = bookmod.binaryencode(pushop.repo, data)
    bundler.newpart(b'bookmarks', data=checkdata)

    def handlereply(op):
        ui = pushop.ui
        # if we get here the unbundle succeeded, so every action applied
        for book, action in allactions:
            ui.status(bookmsgmap[action][0] % book)

    return handlereply


def _pushb2bookmarkspushkey(pushop, bundler):
    pushop.stepsdone.add(b'bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for parts we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        _abortonsecretctx(pushop, new, book)
        part = bundler.newpart(b'pushkey')
        part.addparam(b'namespace', enc(b'bookmarks'))
        part.addparam(b'key', enc(book))
        part.addparam(b'old', enc(hex(old)))
        part.addparam(b'new', enc(hex(new)))
        action = b'update'
        if not old:
            action = b'export'
        elif not new:
            action = b'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep[b'pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0][b'return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1

    return handlereply


@b2partsgenerator(b'pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    pushvars = pushop.pushvars
    if pushvars:
        shellvars = {}
        for raw in pushvars:
            if b'=' not in raw:
                msg = (
                    b"unable to parse variable '%s', should follow "
                    b"'KEY=VALUE' or 'KEY=' format"
                )
                raise error.Abort(msg % raw)
            k, v = raw.split(b'=', 1)
            shellvars[k] = v

        part = bundler.newpart(b'pushvars')

        for key, value in shellvars.items():
            part.addparam(key, value, mandatory=False)


def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = pushop.trmanager and pushop.ui.configbool(
        b'experimental', b'bundle2.pushback'
    )

    # create reply capability
    capsblob = bundle2.encodecaps(
        bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
    )
    bundler.newpart(b'replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push: the replycaps part is always added
    # first, so a single part means no part generator produced a payload
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            with pushop.remote.commandexecutor() as e:
                reply = e.callcommand(
                    b'unbundle',
                    {
                        b'bundle': stream,
                        b'heads': [b'force'],
                        b'url': pushop.remote.url(),
                    },
                ).result()
        except error.BundleValueError as exc:
            raise error.RemoteError(_(b'missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(
                pushop.repo,
                reply,
                trgetter,
                remote=pushop.remote,
            )
        except error.BundleValueError as exc:
            raise error.RemoteError(_(b'missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            pushop.ui.error(_(b'remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.error(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
            raise error.RemoteError(_(b'push failed on remote'))
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
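
# _pushbundle2 thus runs in three stages: every registered part generator
# gets a chance to add parts (and to return a reply handler), the
# assembled bundle is sent through the ``unbundle`` wire command, and the
# reply handlers are replayed over the server's reply bundle records.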


def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if b'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable(b'unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (
        outgoing.excluded or pushop.repo.changelog.filteredrevs
    ):
        # push everything,
        # use the fast path, no race possible on push
        cg = changegroup.makechangegroup(
            pushop.repo,
            outgoing,
            b'01',
            b'push',
            fastpath=True,
            bundlecaps=bundlecaps,
        )
    else:
        cg = changegroup.makechangegroup(
            pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps
        )

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    if pushop.force:
        remoteheads = [b'force']
    else:
        remoteheads = pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())


def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = listkeys(pushop.remote, b'phases')
    if (
        pushop.ui.configbool(b'ui', b'_usedassubrepo')
        and remotephases  # server supports phases
        and pushop.cgresult is None  # nothing was pushed
        and remotephases.get(b'publishing', False)
    ):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changeset was pushed
        # - and the remote is publishing
        # We may be in the issue 3871 case!
        # We drop the courtesy phase synchronisation that would otherwise
        # publish, on the remote, changesets that may still be draft
        # locally.
        remotephases = {b'publishing': b'True'}
    if not remotephases:  # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads, remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get(b'publishing', False):
            _localphasemove(pushop, cheads)
        else:  # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if b'phases' in pushop.stepsdone:
                # phases already pushed through bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add(b'phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            with pushop.remote.commandexecutor() as e:
                r = e.callcommand(
                    b'pushkey',
                    {
                        b'namespace': b'phases',
                        b'key': newremotehead.hex(),
                        b'old': b'%d' % phases.draft,
                        b'new': b'%d' % phases.public,
                    },
                ).result()

            if not r:
                pushop.ui.warn(
                    _(b'updating %s to public failed!\n') % newremotehead
                )
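
# Phase synchronisation is thus two-way: remote phase data fetched via
# listkeys is applied locally first, then any heads that should now be
# public on the remote are advanced there, using the standalone pushkey
# command when bundle2 did not already handle the b'phases' step.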


def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(
            pushop.repo, pushop.trmanager.transaction(), phase, nodes
        )
    else:
        # repo is not locked, do not change any phases!
        # Inform the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(
                _(
                    b'cannot lock source repo, skipping '
                    b'local %s phase update\n'
                )
                % phasestr
            )


def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if b'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add(b'obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug(b'try to push obsolete markers to remote\n')
        rslts = []
        markers = obsutil.sortedmarkers(pushop.outobsmarkers)
        remotedata = obsolete._pushkeyescape(markers)
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey(b'obsolete', key, b'', data))
        if [r for r in rslts if not r]:
            msg = _(b'failed to push some obsolete markers!\n')
            repo.ui.warn(msg)


def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = b'update'
        if not old:
            action = b'export'
        elif not new:
            action = b'delete'

        with remote.commandexecutor() as e:
            r = e.callcommand(
                b'pushkey',
                {
                    b'namespace': b'bookmarks',
                    b'key': b,
                    b'old': hex(old),
                    b'new': hex(new),
                },
            ).result()

        if r:
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value from an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1


class pulloperation:
    """An object that represents a single pull operation

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(
        self,
        repo,
        remote,
        heads=None,
        force=False,
        bookmarks=(),
        remotebookmarks=None,
        streamclonerequested=None,
        includepats=None,
        excludepats=None,
        depth=None,
        path=None,
    ):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # path object used to build this remote
        #
        # Ideally, the remote peer would carry that directly.
        self.remote_path = path
        # revisions we try to pull (None is "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = [
            repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
        ]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False
        # Set of file patterns to include.
        self.includepats = includepats
        # Set of file patterns to exclude.
        self.excludepats = excludepats
        # Number of ancestor changesets to pull from each pulled head.
        self.depth = depth

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled everything possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()


class transactionmanager(util.transactional):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = b'%s\n%s' % (self.source, urlutil.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs[b'source'] = self.source
            self._tr.hookargs[b'url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()


def listkeys(remote, namespace):
    with remote.commandexecutor() as e:
        return e.callcommand(b'listkeys', {b'namespace': namespace}).result()
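
# Example use, mirroring the calls elsewhere in this module:
#
#     remotephases = listkeys(pushop.remote, b'phases')
#     books = listkeys(pullop.remote, b'bookmarks')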


def _fullpullbundle2(repo, pullop):
    # The server may send a partial reply, i.e. when inlining
    # pre-computed bundles. In that case, update the common
    # set based on the results and pull another bundle.
    #
    # There are two indicators that the process is finished:
    # - no changeset has been added, or
    # - all remote heads are known locally.
    # The head check must use the unfiltered view as obsoletion
    # markers can hide heads.
    unfi = repo.unfiltered()
    unficl = unfi.changelog

    def headsofdiff(h1, h2):
        """Returns heads(h1 % h2)

        i.e. the heads of the changesets that are ancestors of h1 but not
        ancestors of h2."""
        res = unfi.set(b'heads(%ln %% %ln)', h1, h2)
        return {ctx.node() for ctx in res}

    def headsofunion(h1, h2):
        """Returns heads((h1 + h2) - null)"""
        res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)
        return {ctx.node() for ctx in res}

    while True:
        old_heads = unficl.heads()
        clstart = len(unficl)
        _pullbundle2(pullop)
        if requirements.NARROW_REQUIREMENT in repo.requirements:
            # XXX narrow clones filter the heads on the server side during
            # XXX getbundle and result in partial replies as well.
            # XXX Disable pull bundles in this case as band aid to avoid
            # XXX extra round trips.
            break
        if clstart == len(unficl):
            break
        if all(unficl.hasnode(n) for n in pullop.rheads):
            break
        new_heads = headsofdiff(unficl.heads(), old_heads)
        pullop.common = headsofunion(new_heads, pullop.common)
        pullop.rheads = set(pullop.rheads) - pullop.common


def add_confirm_callback(repo, pullop):
    """adds a finalize callback to the transaction which can be used to show
    stats to the user and confirm the pull before committing the transaction"""

    tr = pullop.trmanager.transaction()
    scmutil.registersummarycallback(
        repo, tr, txnname=b'pull', as_validator=True
    )
    reporef = weakref.ref(repo.unfiltered())

    def prompt(tr):
        repo = reporef()
        cm = _(b'accept incoming changes (yn)?$$ &Yes $$ &No')
        if repo.ui.promptchoice(cm):
            raise error.Abort(b"user aborted")

    tr.addvalidator(b'900-pull-prompt', prompt)


def pull(
    repo,
    remote,
    path=None,
    heads=None,
    force=False,
    bookmarks=(),
    opargs=None,
    streamclonerequested=None,
    includepats=None,
    excludepats=None,
    depth=None,
    confirm=None,
):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.
    ``includepats`` and ``excludepats`` define explicit file patterns to
    include and exclude in storage, respectively. If not defined, narrow
    patterns from the repo instance are used, if available.
    ``depth`` is an integer indicating the DAG depth of history we're
    interested in. If defined, for each revision specified in ``heads``, we
    will fetch up to this many of its ancestors and data associated with them.
    ``confirm`` is a boolean indicating whether the pull should be confirmed
    before committing the transaction. This overrides HGPLAIN.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}

    # We allow the narrow patterns to be passed in explicitly to provide more
    # flexibility for API consumers.
    if includepats is not None or excludepats is not None:
        includepats = includepats or set()
        excludepats = excludepats or set()
    else:
        includepats, excludepats = repo.narrowpats

    narrowspec.validatepatterns(includepats)
    narrowspec.validatepatterns(excludepats)

    pullop = pulloperation(
        repo,
        remote,
        path=path,
        heads=heads,
        force=force,
        bookmarks=bookmarks,
        streamclonerequested=streamclonerequested,
        includepats=includepats,
        excludepats=excludepats,
        depth=depth,
        **pycompat.strkwargs(opargs)
    )

    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _(
                b"required features are not"
                b" supported in the destination:"
                b" %s"
            ) % (b', '.join(sorted(missing)))
            raise error.Abort(msg)

    for category in repo._wanted_sidedata:
        # Check that a computer is registered for that category for at least
        # one revlog kind.
        for kind, computers in repo._sidedata_computers.items():
            if computers.get(category):
                break
        else:
            # This should never happen since repos are supposed to be able to
            # generate the sidedata they require.
            raise error.ProgrammingError(
                _(
                    b'sidedata category requested by local side without local'
                    b"support: '%s'"
                )
                % pycompat.bytestr(category)
            )

    pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
    wlock = util.nullcontextmanager
    if not bookmod.bookmarksinstore(repo):
        wlock = repo.wlock
    with wlock(), repo.lock(), pullop.trmanager:
        if confirm or (
            repo.ui.configbool(b"pull", b"confirm") and not repo.ui.plain()
        ):
            add_confirm_callback(repo, pullop)

        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        streamclone.maybeperformlegacystreamclone(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _fullpullbundle2(repo, pullop)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)

        # storing remotenames
        if repo.ui.configbool(b'experimental', b'remotenames'):
            logexchange.pullremotenames(repo, remote)

    return pullop
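
# Minimal usage sketch (hedged; assumes an already-obtained peer such as
# one returned by hg.peer()):
#
#     pullop = pull(repo, remote, bookmarks=[b'my-book'])
#     if pullop.cgresult:
#         ...  # changesets were added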
1727
1733
1728
1734
1729 # list of steps to perform discovery before pull
1735 # list of steps to perform discovery before pull
1730 pulldiscoveryorder = []
1736 pulldiscoveryorder = []
1731
1737
1732 # Mapping between step name and function
1738 # Mapping between step name and function
1733 #
1739 #
1734 # This exists to help extensions wrap steps if necessary
1740 # This exists to help extensions wrap steps if necessary
1735 pulldiscoverymapping = {}
1741 pulldiscoverymapping = {}
1736
1742
1737
1743
1738 def pulldiscovery(stepname):
1744 def pulldiscovery(stepname):
1739 """decorator for function performing discovery before pull
1745 """decorator for function performing discovery before pull
1740
1746
1741 The function is added to the step -> function mapping and appended to the
1747 The function is added to the step -> function mapping and appended to the
1742 list of steps. Beware that decorated function will be added in order (this
1748 list of steps. Beware that decorated function will be added in order (this
1743 may matter).
1749 may matter).
1744
1750
1745 You can only use this decorator for a new step, if you want to wrap a step
1751 You can only use this decorator for a new step, if you want to wrap a step
1746 from an extension, change the pulldiscovery dictionary directly."""
1752 from an extension, change the pulldiscovery dictionary directly."""
1747
1753
1748 def dec(func):
1754 def dec(func):
1749 assert stepname not in pulldiscoverymapping
1755 assert stepname not in pulldiscoverymapping
1750 pulldiscoverymapping[stepname] = func
1756 pulldiscoverymapping[stepname] = func
1751 pulldiscoveryorder.append(stepname)
1757 pulldiscoveryorder.append(stepname)
1752 return func
1758 return func
1753
1759
1754 return dec
1760 return dec
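
# Illustration only (hypothetical extension code, not part of this module):
# a new discovery step is registered by decorating a function that takes the
# pull operation, for example:
#
#     @pulldiscovery(b'my-extension-step')
#     def _pullmystep(pullop):
#         ...  # runs before the changegroup is requested
#
# Registered steps then run in registration order via _pulldiscovery() below.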


def _pulldiscovery(pullop):
    """Run all discovery steps"""
    for stepname in pulldiscoveryorder:
        step = pulldiscoverymapping[stepname]
        step(pullop)


@pulldiscovery(b'b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        return
    if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but let's be nice
        # with new implementations.
        return
    books = listkeys(pullop.remote, b'bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)


@pulldiscovery(b'changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; it will change to handle all
    discovery at some point."""
    tmp = discovery.findcommonincoming(
        pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
    )
    common, fetch, rheads = tmp
    has_node = pullop.repo.unfiltered().changelog.index.has_node
    if fetch and rheads:
        # If a remote head is filtered locally, put it back in common.
        #
        # This is a hackish solution to catch most of the "common but locally
        # hidden" situations. We do not perform discovery on the unfiltered
        # repository because it ends up doing a pathological amount of round
        # trips for a huge amount of changesets we do not care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we'll not be able to
        # detect it.
        scommon = set(common)
        for n in rheads:
            if has_node(n):
                if n not in scommon:
                    common.append(n)
        if set(rheads).issubset(set(common)):
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads


def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}

    # make ui easier to access
    ui = pullop.repo.ui

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]

    # declare pull perimeters
    kwargs[b'common'] = pullop.common
    kwargs[b'heads'] = pullop.heads or pullop.rheads

    # check that the server supports narrow, then add includepats and
    # excludepats
    servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
    if servernarrow and pullop.includepats:
        kwargs[b'includepats'] = pullop.includepats
    if servernarrow and pullop.excludepats:
        kwargs[b'excludepats'] = pullop.excludepats

    if streaming:
        kwargs[b'cg'] = False
        kwargs[b'stream'] = True
        pullop.stepsdone.add(b'changegroup')
        pullop.stepsdone.add(b'phases')

    else:
        # pulling changegroup
        pullop.stepsdone.add(b'changegroup')

        kwargs[b'cg'] = pullop.fetch

        legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
        hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
        if not legacyphase and hasbinaryphase:
            kwargs[b'phases'] = True
            pullop.stepsdone.add(b'phases')

        if b'listkeys' in pullop.remotebundle2caps:
            if b'phases' not in pullop.stepsdone:
                kwargs[b'listkeys'] = [b'phases']

    bookmarksrequested = False
    legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
    hasbinarybook = b'bookmarks' in pullop.remotebundle2caps

    if pullop.remotebookmarks is not None:
        pullop.stepsdone.add(b'request-bookmarks')

    if (
        b'request-bookmarks' not in pullop.stepsdone
        and pullop.remotebookmarks is None
        and not legacybookmark
        and hasbinarybook
    ):
        kwargs[b'bookmarks'] = True
        bookmarksrequested = True
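
    # when the binary bookmarks part could not be requested above, the legacy
    # listkeys namespace below is used as a fallback to fetch the same data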
    if b'listkeys' in pullop.remotebundle2caps:
        if b'request-bookmarks' not in pullop.stepsdone:
            # make sure to always include bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            pullop.stepsdone.add(b'request-bookmarks')
            kwargs.setdefault(b'listkeys', []).append(b'bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (
        pullop.remote.capable(b'clonebundles')
        and pullop.heads is None
        and list(pullop.common) == [pullop.repo.nullid]
    ):
        kwargs[b'cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_(b'streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_(b"no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]:
            pullop.repo.ui.status(_(b"requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs[b'obsmarkers'] = True
            pullop.stepsdone.add(b'obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)

    remote_sidedata = bundle2.read_remote_wanted_sidedata(pullop.remote)
    if remote_sidedata:
        kwargs[b'remote_sidedata'] = remote_sidedata

    with pullop.remote.commandexecutor() as e:
        args = dict(kwargs)
        args[b'source'] = b'pull'
        bundle = e.callcommand(b'getbundle', args).result()

        try:
            op = bundle2.bundleoperation(
                pullop.repo,
                pullop.gettransaction,
                source=b'pull',
                remote=pullop.remote,
            )
            op.modes[b'bookmarks'] = b'records'
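            # bookmark parts are processed in 'records' mode: updates are
            # stored on op.records and applied later by _pullbookmarks(),
            # instead of being written as the bundle streams in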
            bundle2.processbundle(
                pullop.repo,
                bundle,
                op=op,
                remote=pullop.remote,
            )
        except bundle2.AbortFromPart as exc:
            pullop.repo.ui.error(_(b'remote: abort: %s\n') % exc)
            raise error.RemoteError(_(b'pull failed on remote'), hint=exc.hint)
        except error.BundleValueError as exc:
            raise error.RemoteError(_(b'missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # processing phases change
    for namespace, value in op.records[b'listkeys']:
        if namespace == b'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    if bookmarksrequested:
        books = {}
        for record in op.records[b'bookmarks']:
            books[record[b'bookmark']] = record[b"node"]
        pullop.remotebookmarks = books
    else:
        for namespace, value in op.records[b'listkeys']:
            if namespace == b'bookmarks':
                pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)


def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""


def _pullchangeset(pullop):
    """pull changesets from unbundle into the local repo"""
    # We delay opening the transaction as late as possible so we don't open
    # a transaction for nothing, which would break future useful rollback
    # calls.
    if b'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_(b"no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]:
        pullop.repo.ui.status(_(b"requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

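    # capability-based dispatch: prefer the modern getbundle command, then
    # fall back to changegroup (full pulls) or changegroupsubset (partial
    # pulls) for very old servers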
    if pullop.remote.capable(b'getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle(
            b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
        )
    elif pullop.heads is None:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand(
                b'changegroup',
                {
                    b'nodes': pullop.fetch,
                    b'source': b'pull',
                },
            ).result()

    elif not pullop.remote.capable(b'changegroupsubset'):
        raise error.Abort(
            _(
                b"partial pull cannot be done because "
                b"other repository doesn't support "
                b"changegroupsubset."
            )
        )
    else:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand(
                b'changegroupsubset',
                {
                    b'bases': pullop.fetch,
                    b'heads': pullop.heads,
                    b'source': b'pull',
                },
            ).result()

    bundleop = bundle2.applybundle(
        pullop.repo,
        cg,
        tr,
        b'pull',
        pullop.remote.url(),
        remote=pullop.remote,
    )
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)


def _pullphase(pullop):
    # Get remote phases data from remote
    if b'phases' in pullop.stepsdone:
        return
    remotephases = listkeys(pullop.remote, b'phases')
    _pullapplyphases(pullop, remotephases)


def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if b'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'phases')
    publishing = bool(remotephases.get(b'publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(
            pullop.repo, pullop.pulledsubset, remotephases
        )
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing; all common changesets should be seen
        # as public.
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.index.get_rev
    public = phases.public
    draft = phases.draft
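
    # the transaction is opened lazily below: gettransaction() is only called
    # when there is an actual phase boundary to advance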

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)


def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local ones"""
    if b'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    bookmarks_mode = None
    if pullop.remote_path is not None:
        bookmarks_mode = pullop.remote_path.bookmarks_mode
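    # honor any per-path bookmark policy when one is configured (for instance
    # the `bookmarks.mode` path sub-option); None lets updatefromremote() use
    # its default behavior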
    bookmod.updatefromremote(
        repo.ui,
        repo,
        remotebookmarks,
        pullop.remote.url(),
        pullop.gettransaction,
        explicit=pullop.explicitbookmarks,
        mode=bookmarks_mode,
    )


def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` argument is a function that returns the pull
    transaction, creating one if necessary. We return the transaction to
    inform the calling code that a new transaction has been created (when
    applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if b'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
        remoteobs = listkeys(pullop.remote, b'obsolete')
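        # pushkey-based marker exchange ships base85-encoded markers under
        # keys named b'dump0', b'dump1', ...; the presence of b'dump0' is
        # used as the signal that the remote has markers to decode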
        if b'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith(b'dump'):
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr


def applynarrowacl(repo, kwargs):
    """Apply narrow fetch access control.

    This massages the named arguments for getbundle wire protocol commands
    so requested data is filtered through access control rules.
    """
    ui = repo.ui
    # TODO this assumes existence of HTTP and is a layering violation.
    username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())
    user_includes = ui.configlist(
        _NARROWACL_SECTION,
        username + b'.includes',
        ui.configlist(_NARROWACL_SECTION, b'default.includes'),
    )
    user_excludes = ui.configlist(
        _NARROWACL_SECTION,
        username + b'.excludes',
        ui.configlist(_NARROWACL_SECTION, b'default.excludes'),
    )
    if not user_includes:
        raise error.Abort(
            _(b"%s configuration for user %s is empty")
            % (_NARROWACL_SECTION, username)
        )

    user_includes = [
        b'path:.' if p == b'*' else b'path:' + p for p in user_includes
    ]
    user_excludes = [
        b'path:.' if p == b'*' else b'path:' + p for p in user_excludes
    ]
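    # above, a bare b'*' ACL entry means "the whole repository" and maps to
    # the b'path:.' pattern; every other entry is namespaced as a b'path:'
    # pattern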

    req_includes = set(kwargs.get('includepats', []))
    req_excludes = set(kwargs.get('excludepats', []))

    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
        req_includes, req_excludes, user_includes, user_excludes
    )

    if invalid_includes:
        raise error.Abort(
            _(b"The following includes are not accessible for %s: %s")
            % (username, stringutil.pprint(invalid_includes))
        )

    new_args = {}
    new_args.update(kwargs)
    new_args['narrow'] = True
    new_args['narrow_acl'] = True
    new_args['includepats'] = req_includes
    if req_excludes:
        new_args['excludepats'] = req_excludes

    return new_args


def _computeellipsis(repo, common, heads, known, match, depth=None):
    """Compute the shape of a narrowed DAG.

    Args:
      repo: The repository we're transferring.
      common: The roots of the DAG range we're transferring.
          May be just [nullid], which means all ancestors of heads.
      heads: The heads of the DAG range we're transferring.
      match: The narrowmatcher that allows us to identify relevant changes.
      depth: If not None, only consider nodes to be full nodes if they are at
          most depth changesets away from one of heads.

    Returns:
      A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:

        visitnodes: The list of nodes (either full or ellipsis) which
            need to be sent to the client.
        relevant_nodes: The set of changelog nodes which change a file inside
            the narrowspec. The client needs these as non-ellipsis nodes.
        ellipsisroots: A dict of {rev: parents} that is used in
            narrowchangegroup to produce ellipsis nodes with the
            correct parents.
    """
    cl = repo.changelog
    mfl = repo.manifestlog

    clrev = cl.rev

    commonrevs = {clrev(n) for n in common} | {nullrev}
    headsrevs = {clrev(n) for n in heads}

    if depth:
        revdepth = {h: 0 for h in headsrevs}

    ellipsisheads = collections.defaultdict(set)
    ellipsisroots = collections.defaultdict(set)

    def addroot(head, curchange):
        """Add a root to an ellipsis head, splitting heads with 3 roots."""
        ellipsisroots[head].add(curchange)
        # Recursively split ellipsis heads with 3 roots by finding the
        # roots' youngest common descendant which is an elided merge commit.
        # That descendant takes 2 of the 3 roots as its own, and becomes a
        # root of the head.
        while len(ellipsisroots[head]) > 2:
            child, roots = splithead(head)
            splitroots(head, child, roots)
            head = child  # Recurse in case we just added a 3rd root

    def splitroots(head, child, roots):
        ellipsisroots[head].difference_update(roots)
        ellipsisroots[head].add(child)
        ellipsisroots[child].update(roots)
        ellipsisroots[child].discard(child)

    def splithead(head):
        r1, r2, r3 = sorted(ellipsisroots[head])
        for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
            mid = repo.revs(
                b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
            )
            for j in mid:
                if j == nr2:
                    return nr2, (nr1, nr2)
                if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
                    return j, (nr1, nr2)
        raise error.Abort(
            _(
                b'Failed to split up ellipsis node! head: %d, '
                b'roots: %d %d %d'
            )
            % (head, r1, r2, r3)
        )

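    # Walk the missing revisions from newest to oldest. For each one, decide
    # whether it must be sent as a full node (it touches files in the
    # narrowspec, or an ellipsis edge needs it) or can be elided; ellipsis
    # head sets propagate to parents as the walk descends.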
    missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
    visit = reversed(missing)
    relevant_nodes = set()
    visitnodes = [cl.node(m) for m in missing]
    required = set(headsrevs) | known
    for rev in visit:
        clrev = cl.changelogrevision(rev)
        ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
        if depth is not None:
            curdepth = revdepth[rev]
            for p in ps:
                revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
        needed = False
        shallow_enough = depth is None or revdepth[rev] <= depth
        if shallow_enough:
            curmf = mfl[clrev.manifest].read()
            if ps:
                # We choose to not trust the changed files list in
                # changesets because it's not always correct. TODO: could
                # we trust it for the non-merge case?
                p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
                needed = bool(curmf.diff(p1mf, match))
                if not needed and len(ps) > 1:
                    # For merge changes, the list of changed files is not
                    # helpful, since we need to emit the merge if a file
                    # in the narrow spec has changed on either side of the
                    # merge. As a result, we do a manifest diff to check.
                    p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
                    needed = bool(curmf.diff(p2mf, match))
            else:
                # For a root node, we need to include the node if any
                # files in the node match the narrowspec.
                needed = any(curmf.walk(match))

        if needed:
            for head in ellipsisheads[rev]:
                addroot(head, rev)
            for p in ps:
                required.add(p)
            relevant_nodes.add(cl.node(rev))
        else:
            if not ps:
                ps = [nullrev]
            if rev in required:
                for head in ellipsisheads[rev]:
                    addroot(head, rev)
                for p in ps:
                    ellipsisheads[p].add(rev)
            else:
                for p in ps:
                    ellipsisheads[p] |= ellipsisheads[rev]

    # add common changesets as roots of their reachable ellipsis heads
    for c in commonrevs:
        for head in ellipsisheads[c]:
            addroot(head, c)
    return visitnodes, relevant_nodes, ellipsisroots


def caps20to10(repo, role):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = {b'HG20'}
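    # the resulting set is advertised via bundlecaps and typically looks like
    # {b'HG20', b'bundle2=<url-quoted capabilities blob>'}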
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
    caps.add(b'bundle2=' + urlreq.quote(capsblob))
    return caps


# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}


def getbundle2partsgenerator(stepname, idx=None):
    """decorator for a function generating a bundle2 part for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, change the getbundle2partsmapping dictionary directly."""

    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func

    return dec


def bundle2requested(bundlecaps):
    if bundlecaps is not None:
        return any(cap.startswith(b'HG2') for cap in bundlecaps)
    return False


def getbundlechunks(
    repo,
    source,
    heads=None,
    common=None,
    bundlecaps=None,
    remote_sidedata=None,
    **kwargs
):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns a 2-tuple of a dict with metadata about the generated bundle
    and an iterator over raw chunks (of varying sizes).
    """
    kwargs = pycompat.byteskwargs(kwargs)
    info = {}
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get(b'cg', True):
            raise ValueError(
                _(b'request for bundle10 must include changegroup')
            )

        if kwargs:
            raise ValueError(
                _(b'unsupported getbundle arguments: %s')
                % b', '.join(sorted(kwargs.keys()))
            )
        outgoing = _computeoutgoing(repo, heads, common)
        info[b'bundleversion'] = 1
        return (
            info,
            changegroup.makestream(
                repo,
                outgoing,
                b'01',
                source,
                bundlecaps=bundlecaps,
                remote_sidedata=remote_sidedata,
            ),
        )

    # bundle20 case
    info[b'bundleversion'] = 2
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith(b'bundle2='):
            blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
            b2caps.update(bundle2.decodecaps(blob))
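    # the client's bundle2 capabilities arrive url-quoted inside the
    # bundlecaps set (see caps20to10() above); decode them so the part
    # generators below can adapt their output to what the client supports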
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs[b'heads'] = heads
    kwargs[b'common'] = common

    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(
            bundler,
            repo,
            source,
            bundlecaps=bundlecaps,
            b2caps=b2caps,
            remote_sidedata=remote_sidedata,
            **pycompat.strkwargs(kwargs)
        )

    info[b'prefercompressed'] = bundler.prefercompressed

    return info, bundler.getchunks()


@getbundle2partsgenerator(b'stream')
def _getbundlestream2(bundler, repo, *args, **kwargs):
    return bundle2.addpartbundlestream2(bundler, repo, **kwargs)


@getbundle2partsgenerator(b'changegroup')
def _getbundlechangegrouppart(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    remote_sidedata=None,
    **kwargs
):
    """add a changegroup part to the requested bundle"""
    if not kwargs.get('cg', True) or not b2caps:
        return

    version = b'01'
    cgversions = b2caps.get(b'changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [
            v
            for v in cgversions
            if v in changegroup.supportedoutgoingversions(repo)
        ]
        if not cgversions:
            raise error.Abort(_(b'no common changegroup version'))
        version = max(cgversions)
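        # version now holds the highest changegroup version supported by both
        # sides (bytestring comparison works since versions are b'01', b'02',
        # b'03', ...)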

    outgoing = _computeoutgoing(repo, heads, common)
    if not outgoing.missing:
        return

    if kwargs.get('narrow', False):
        include = sorted(filter(bool, kwargs.get('includepats', [])))
        exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
        matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
    else:
        matcher = None

    cgstream = changegroup.makestream(
        repo,
        outgoing,
        version,
        source,
        bundlecaps=bundlecaps,
        matcher=matcher,
        remote_sidedata=remote_sidedata,
    )

    part = bundler.newpart(b'changegroup', data=cgstream)
    if cgversions:
        part.addparam(b'version', version)

    part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)

    if scmutil.istreemanifest(repo):
        part.addparam(b'treemanifest', b'1')

    if repository.REPO_FEATURE_SIDE_DATA in repo.features:
        part.addparam(b'exp-sidedata', b'1')
        sidedata = bundle2.format_remote_wanted_sidedata(repo)
        part.addparam(b'exp-wanted-sidedata', sidedata)

    if (
        kwargs.get('narrow', False)
        and kwargs.get('narrow_acl', False)
        and (include or exclude)
    ):
        # this is mandatory because otherwise ACL clients won't work
        narrowspecpart = bundler.newpart(b'Narrow:responsespec')
        narrowspecpart.data = b'%s\0%s' % (
            b'\n'.join(include),
            b'\n'.join(exclude),
        )


@getbundle2partsgenerator(b'bookmarks')
def _getbundlebookmarkpart(
    bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
):
    """add a bookmark part to the requested bundle"""
    if not kwargs.get('bookmarks', False):
        return
    if not b2caps or b'bookmarks' not in b2caps:
        raise error.Abort(_(b'no common bookmarks exchange method'))
    books = bookmod.listbinbookmarks(repo)
    data = bookmod.binaryencode(repo, books)
    if data:
        bundler.newpart(b'bookmarks', data=data)


@getbundle2partsgenerator(b'listkeys')
def _getbundlelistkeysparts(
    bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get('listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart(b'listkeys')
        part.addparam(b'namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)


@getbundle2partsgenerator(b'obsmarkers')
def _getbundleobsmarkerpart(
    bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get('obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        subset = [c.node() for c in repo.set(b'::%ln', heads)]
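        # ::heads == all ancestors of the requested heads; only markers
        # relevant to that subset of the repository are shipped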
        markers = repo.obsstore.relevantmarkers(subset)
        markers = obsutil.sortedmarkers(markers)
        bundle2.buildobsmarkerspart(bundler, markers)


@getbundle2partsgenerator(b'phases')
def _getbundlephasespart(
    bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
):
    """add phase heads part to the requested bundle"""
    if kwargs.get('phases', False):
        if not b2caps or b'heads' not in b2caps.get(b'phases'):
            raise error.Abort(_(b'no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move

            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phases for now.
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

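                # only(draft, public) selects ancestors of the draft heads
                # that are not ancestors of any public head; the heads of its
                # public subset form the intermediate public boundary the
                # client needs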
2590 revset = b'heads(only(%ln, %ln) and public())'
2596 revset = b'heads(only(%ln, %ln) and public())'
2591 extraheads = repo.revs(revset, draftheads, publicheads)
2597 extraheads = repo.revs(revset, draftheads, publicheads)
2592 for r in extraheads:
2598 for r in extraheads:
2593 headsbyphase[phases.public].add(node(r))
2599 headsbyphase[phases.public].add(node(r))
2594
2600
2595 # transform data in a format used by the encoding function
2601 # transform data in a format used by the encoding function
2596 phasemapping = {
2602 phasemapping = {
2597 phase: sorted(headsbyphase[phase]) for phase in phases.allphases
2603 phase: sorted(headsbyphase[phase]) for phase in phases.allphases
2598 }
2604 }
2599
2605
2600 # generate the actual part
2606 # generate the actual part
2601 phasedata = phases.binaryencode(phasemapping)
2607 phasedata = phases.binaryencode(phasemapping)
2602 bundler.newpart(b'phase-heads', data=phasedata)
2608 bundler.newpart(b'phase-heads', data=phasedata)
2603
2609
2604
2610
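# [editor's sketch, not part of exchange.py] A self-contained illustration of
# the grouping done above: heads are bucketed by phase number (in Mercurial,
# phases.public == 0 and phases.draft == 1), then each bucket is sorted before
# being handed to phases.binaryencode(). The node values are hypothetical.
def _demo_phase_grouping():
    import collections

    heads = [(b'\x0a' * 20, 0), (b'\x0c' * 20, 1), (b'\x0b' * 20, 1)]
    headsbyphase = collections.defaultdict(set)
    for node, phase in heads:
        headsbyphase[phase].add(node)
    # sorted list per phase: the shape expected by the binary encoder
    return {phase: sorted(nodes) for phase, nodes in headsbyphase.items()}

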
@getbundle2partsgenerator(b'hgtagsfnodes')
def _getbundletagsfnodes(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    **kwargs
):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of a 20-byte changeset node and the raw
    .hgtags filenode value.
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    if not b2caps or not (kwargs.get('cg', True) and b'hgtagsfnodes' in b2caps):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)


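# [editor's sketch, not part of exchange.py] Decoding the part data described
# in the docstring above: a flat byte stream of (changeset node, filenode)
# pairs. This assumes the .hgtags filenode is, like the changeset node, a
# 20-byte binary hash.
def _demo_decode_tagsfnodes(data):
    pairs = []
    for offset in range(0, len(data), 40):
        record = data[offset : offset + 40]
        pairs.append((record[:20], record[20:]))  # (cset node, .hgtags fnode)
    return pairs

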
@getbundle2partsgenerator(b'cache:rev-branch-cache')
def _getbundlerevbranchcache(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    **kwargs
):
    """Transfer the rev-branch-cache mapping.

    The payload is a series of data related to each branch:

    1) branch name length
    2) number of open heads
    3) number of closed heads
    4) open heads nodes
    5) closed heads nodes
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it,
    # - narrow bundle isn't in play (not currently compatible).
    if (
        not kwargs.get('cg', True)
        or not b2caps
        or b'rev-branch-cache' not in b2caps
        or kwargs.get('narrow', False)
        or repo.ui.has_section(_NARROWACL_SECTION)
    ):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addpartrevbranchcache(repo, bundler, outgoing)


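# [editor's sketch, not part of exchange.py] One plausible encoding of the
# payload fields listed in the docstring above. The three leading counters
# are assumed to be 32-bit big-endian integers and the head nodes 20-byte
# hashes; the authoritative wire format lives in mercurial/bundle2.py.
def _demo_encode_rev_branch_cache(branches):
    import struct

    chunks = []
    for name, open_heads, closed_heads in branches:
        chunks.append(
            struct.pack('>III', len(name), len(open_heads), len(closed_heads))
        )
        chunks.append(name)  # branch name bytes follow the counters
        chunks.extend(sorted(open_heads))  # 20-byte open head nodes
        chunks.extend(sorted(closed_heads))  # 20-byte closed head nodes
    return b''.join(chunks)

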
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = hashutil.sha1(b''.join(sorted(heads))).digest()
    if not (
        their_heads == [b'force']
        or their_heads == heads
        or their_heads == [b'hashed', heads_hash]
    ):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced(
            b'repository changed while %s - please try again' % context
        )


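# [editor's sketch, not part of exchange.py] How a client can reproduce the
# b'hashed' form accepted above: sha1 over the concatenation of the sorted
# binary head nodes. hashlib stands in for Mercurial's hashutil wrapper and
# the head values are hypothetical.
def _demo_heads_hash():
    import hashlib

    heads = [b'\x02' * 20, b'\x01' * 20]
    digest = hashlib.sha1(b''.join(sorted(heads))).digest()
    return [b'hashed', digest]  # the shape check_heads() compares against

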
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and has
    a mechanism to check that no push race occurred between the creation of
    the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool(
        b'experimental', b'bundle2-output-capture'
    )
    if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, b'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = b"\n".join([source, urlutil.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:

                def gettransaction():
                    if not lockandtr[2]:
                        if not bookmod.bookmarksinstore(repo):
                            lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs[b'source'] = source
                        lockandtr[2].hookargs[b'url'] = url
                        lockandtr[2].hookargs[b'bundle2'] = b'1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool(
                    b'experimental', b'bundle2lazylocking'
                ):
                    gettransaction()

                op = bundle2.bundleoperation(
                    repo,
                    gettransaction,
                    captureoutput=captureoutput,
                    source=b'push',
                )
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)

                        def recordout(output):
                            r.newpart(b'output', data=output, mandatory=False)

                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()

                    def recordout(output):
                        part = bundle2.bundlepart(
                            b'output', data=output, mandatory=False
                        )
                        parts.append(part)

                raise
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r


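# [editor's sketch, not part of exchange.py] The lockandtr list above acts as
# a mutable cell: the nested gettransaction() can assign into it even though
# a closure cannot rebind an outer local name without nonlocal. A stripped
# down illustration of the same lazy-initialization pattern:
def _demo_lazy_resource():
    cell = [None]  # plays the role of lockandtr

    def get_resource():
        if cell[0] is None:
            cell[0] = object()  # stands in for taking locks + transaction
        return cell[0]

    assert get_resource() is get_resource()  # created once, then reused
    return cell[0]

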
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool(b'ui', b'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable(b'clonebundles'):
        return

    with remote.commandexecutor() as e:
        res = e.callcommand(b'clonebundles', {}).result()

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = bundlecaches.parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(
            _(
                b'no clone bundles available on remote; '
                b'falling back to regular clone\n'
            )
        )
        return

    entries = bundlecaches.filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested
    )

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(
            _(
                b'no compatible clone bundles available on server; '
                b'falling back to regular clone\n'
            )
        )
        repo.ui.warn(
            _(b'(you may want to report this to the server operator)\n')
        )
        return

    entries = bundlecaches.sortclonebundleentries(repo.ui, entries)

    url = entries[0][b'URL']
    repo.ui.status(_(b'applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url, remote):
        repo.ui.status(_(b'finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool(b'ui', b'clonebundlefallback'):
        repo.ui.warn(_(b'falling back to normal clone\n'))
    else:
        raise error.Abort(
            _(b'error applying bundle'),
            hint=_(
                b'if this error persists, consider contacting '
                b'the server operator or disable clone '
                b'bundles via '
                b'"--config ui.clonebundles=false"'
            ),
        )


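# [editor's sketch, not part of exchange.py] The clonebundles manifest
# consumed above has one entry per line: a URL followed by optional
# space-separated KEY=VALUE attributes (e.g. BUNDLESPEC, REQUIRESNI). A
# simplified parser, ignoring the value decoding the real one performs:
def _demo_parse_clonebundles_manifest(raw):
    entries = []
    for line in raw.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {b'URL': fields[0]}
        for keyvalue in fields[1:]:
            key, _sep, value = keyvalue.partition(b'=')
            attrs[key] = value
        entries.append(attrs)
    return entries  # entries[0][b'URL'] is then the preferred bundle

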
def inline_clone_bundle_open(ui, url, peer):
    if not peer:
        raise error.Abort(_(b'no remote repository supplied for %s') % url)
    clonebundleid = url[len(bundlecaches.CLONEBUNDLESCHEME) :]
    peerclonebundle = peer.get_cached_bundle_inline(clonebundleid)
    return util.chunkbuffer(peerclonebundle)


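# [editor's sketch, not part of exchange.py] inline_clone_bundle_open() above
# derives the bundle id by stripping the scheme prefix from the URL, assuming
# CLONEBUNDLESCHEME is a URL prefix such as b'peer-bundle-cache://':
def _demo_clonebundleid(url, scheme=b'peer-bundle-cache://'):
    assert url.startswith(scheme)
    return url[len(scheme) :]  # e.g. b'full.hg'

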
def trypullbundlefromurl(ui, repo, url, peer):
    """Attempt to apply a bundle from a URL."""
    with repo.lock(), repo.transaction(b'bundleurl') as tr:
        try:
            if url.startswith(bundlecaches.CLONEBUNDLESCHEME):
                fh = inline_clone_bundle_open(ui, url, peer)
            else:
                fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, b'stream')

            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, b'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(
                _(b'HTTP error fetching bundle: %s\n')
                % stringutil.forcebytestr(e)
            )
        except urlerr.urlerror as e:
            ui.warn(
                _(b'error fetching bundle: %s\n')
                % stringutil.forcebytestr(e.reason)
            )

        return False