bundle2: print "error:abort" message to stderr instead of stdout...
Martin von Zweigbergk
r47592:db9e33be default
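
The change routes the abort message through the error channel rather than
standard output. As a rough illustration of the distinction (a minimal
sketch of Mercurial's ui API, not the actual hunk, which lies outside the
lines shown below):

    from mercurial import ui as uimod

    ui = uimod.ui.load()
    ui.status(b'normal output\n')            # status channel -> stdout
    ui.error(b'abort: something failed\n')   # error channel -> stderr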
@@ -1,2756 +1,2756 @@
# exchange.py - utility to exchange data between repos.
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    nullrev,
)
from . import (
    bookmarks as bookmod,
    bundle2,
    bundlecaches,
    changegroup,
    discovery,
    error,
    exchangev2,
    lock as lockmod,
    logexchange,
    narrowspec,
    obsolete,
    obsutil,
    phases,
    pushkey,
    pycompat,
    requirements,
    scmutil,
    streamclone,
    url as urlmod,
    util,
    wireprototypes,
)
from .utils import (
    hashutil,
    stringutil,
)

urlerr = util.urlerr
urlreq = util.urlreq

_NARROWACL_SECTION = b'narrowacl'


def readbundle(ui, fh, fname, vfs=None):
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = b"stream"
        if not header.startswith(b'HG') and header.startswith(b'\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = b"HG10"
            alg = b'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != b'HG':
        raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
    if version == b'10':
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith(b'2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    elif version == b'S1':
        return streamclone.streamcloneapplier(fh)
    else:
        raise error.Abort(
            _(b'%s: unknown bundle version %s') % (fname, version)
        )


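# Example: the magic string parsed by readbundle() above selects the unpacker
# type, e.g. a header of b'HG10' + b'UN' yields changegroup.cg1unpacker,
# b'HG20' a bundle2 unbundler, and b'HGS1' a streamclone.streamcloneapplier.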
def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.
    """

    def speccompression(alg):
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == b'_truncatedBZ':
            alg = b'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
        return b'%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if b'Compression' in b.params:
            comp = speccompression(b.params[b'Compression'])
            if not comp:
                raise error.Abort(
                    _(b'unknown compression algorithm: %s') % comp
                )
        else:
            comp = b'none'

        version = None
        for part in b.iterparts():
            if part.type == b'changegroup':
                version = part.params[b'version']
                if version in (b'01', b'02'):
                    version = b'v2'
                else:
                    raise error.Abort(
                        _(
                            b'changegroup version %s does not have '
                            b'a known bundlespec'
                        )
                        % version,
                        hint=_(b'try upgrading your Mercurial client'),
                    )
            elif part.type == b'stream2' and version is None:
                # A stream2 part is required to be part of a v2 bundle
                requirements = urlreq.unquote(part.params[b'requirements'])
                splitted = requirements.split()
                params = bundle2._formatrequirementsparams(splitted)
                return b'none-v2;stream=v2;%s' % params

        if not version:
            raise error.Abort(
                _(b'could not identify changegroup version in bundle')
            )

        return b'%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        formatted = bundle2._formatrequirementsparams(requirements)
        return b'none-packed1;%s' % formatted
    else:
        raise error.Abort(_(b'unknown bundle type: %s') % b)


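# Example: getbundlespec() above returns strings such as b'gzip-v2' (a gzip
# compressed bundle2), b'bzip2-v1' (a bzip2 bundle1), or b'none-packed1;...'
# for stream clone bundles.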
def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(repo, common, heads)


def _checkpublish(pushop):
    repo = pushop.repo
    ui = repo.ui
    behavior = ui.config(b'experimental', b'auto-publish')
    if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
        return
    remotephases = listkeys(pushop.remote, b'phases')
    if not remotephases.get(b'publishing', False):
        return

    if pushop.revs is None:
        published = repo.filtered(b'served').revs(b'not public()')
    else:
        published = repo.revs(b'::%ln - public()', pushop.revs)
    if published:
        if behavior == b'warn':
            ui.warn(
                _(b'%i changesets about to be published\n') % len(published)
            )
        elif behavior == b'confirm':
            if ui.promptchoice(
                _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
                % len(published)
            ):
                raise error.CanceledError(_(b'user quit'))
        elif behavior == b'abort':
            msg = _(b'push would publish %i changesets') % len(published)
            hint = _(
                b"use --publish or adjust 'experimental.auto-publish'"
                b" config"
            )
            raise error.Abort(msg, hint=hint)


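# Example: _checkpublish() above is driven by the auto-publish setting, e.g.
# in an hgrc:
#
#   [experimental]
#   auto-publish = abort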
def _forcebundle1(op):
    """return true if a pull/push must use bundle1

    This function is used to allow testing of the older bundle version"""
    ui = op.repo.ui
    # The goal of this config option is to allow developers to choose the
    # bundle version used during exchange. This is especially handy during
    # tests.
    # Value is a list of bundle versions to be picked from; the highest
    # version should be used.
    #
    # developer config: devel.legacy.exchange
    exchange = ui.configlist(b'devel', b'legacy.exchange')
    forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
    return forcebundle1 or not op.remote.capable(b'bundle2')


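# Example: tests can force the legacy protocol for _forcebundle1() above
# with:
#
#   [devel]
#   legacy.exchange = bundle1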
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(
        self,
        repo,
        remote,
        force=False,
        revs=None,
        newbranch=False,
        bookmarks=(),
        publish=False,
        pushvars=None,
    ):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmarks explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # steps already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discovery.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # summary of the remote phase situation
        self.remotephases = None
        # phases changes that must be pushed alongside the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars
        # publish pushed changesets
        self.publish = publish

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.ancestorsof

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common heads are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::ancestorsof and ::commonheads)
        # (ancestorsof is revs with secret changesets filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (ancestorsof and ::commonheads)
        #              + (commonheads and ::ancestorsof)
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::ancestorsof) - commonheads)
        #
        # We can pick:
        # * ancestorsof part of common (::commonheads)
        common = self.outgoing.common
        rev = self.repo.changelog.index.rev
        cheads = [node for node in self.revs if rev(node) in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set(
            b'%ln and parents(roots(%ln))',
            self.outgoing.commonheads,
            self.outgoing.missing,
        )
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads


# mapping of messages used when pushing bookmarks
bookmsgmap = {
    b'update': (
        _(b"updating bookmark %s\n"),
        _(b'updating bookmark %s failed\n'),
    ),
    b'export': (
        _(b"exporting bookmark %s\n"),
        _(b'exporting bookmark %s failed\n'),
    ),
    b'delete': (
        _(b"deleting remote bookmark %s\n"),
        _(b'deleting remote bookmark %s failed\n'),
    ),
}


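# Example: each tuple above pairs a success message with a failure message,
# e.g. bookmsgmap[b'update'][0] % b'@' yields b'updating bookmark @\n' and
# bookmsgmap[b'update'][1] % b'@' the corresponding failure line.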
def push(
    repo,
    remote,
    force=False,
    revs=None,
    newbranch=False,
    bookmarks=(),
    publish=False,
    opargs=None,
):
    """Push outgoing changesets (limited by revs) from a local
    repository to remote. Return the pushoperation; its ``cgresult``
    attribute holds an integer:
    - None means nothing to push
    - 0 means HTTP error
    - 1 means we pushed and remote head count is unchanged *or*
      we have outgoing changesets but refused to push
    - other values as described by addchangegroup()
    """
    if opargs is None:
        opargs = {}
    pushop = pushoperation(
        repo,
        remote,
        force,
        revs,
        newbranch,
        bookmarks,
        publish,
        **pycompat.strkwargs(opargs)
    )
    if pushop.remote.local():
        missing = (
            set(pushop.repo.requirements) - pushop.remote.local().supported
        )
        if missing:
            msg = _(
                b"required features are not"
                b" supported in the destination:"
                b" %s"
            ) % (b', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_(b"destination does not support push"))

    if not pushop.remote.capable(b'unbundle'):
        raise error.Abort(
            _(
                b'cannot push: destination does not support the '
                b'unbundle wire protocol command'
            )
        )

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks
        # requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool(b'experimental', b'bundle2.pushback')
        if (
            (not _forcebundle1(pushop))
            and maypushback
            and not bookmod.bookmarksinstore(repo)
        ):
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(
            pushop.repo, b'push-response', pushop.remote.url()
        )
    except error.LockUnavailable as err:
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = b'cannot lock source repository: %s\n' % stringutil.forcebytestr(
            err
        )
        pushop.ui.debug(msg)

    with wlock or util.nullcontextmanager():
        with lock or util.nullcontextmanager():
            with pushop.trmanager or util.nullcontextmanager():
                pushop.repo.checkpush(pushop)
                _checkpublish(pushop)
                _pushdiscovery(pushop)
                if not pushop.force:
                    _checksubrepostate(pushop)
                if not _forcebundle1(pushop):
                    _pushbundle2(pushop)
                _pushchangeset(pushop)
                _pushsyncphase(pushop)
                _pushobsolete(pushop)
                _pushbookmark(pushop)

    if repo.ui.configbool(b'experimental', b'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pushop


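# Example (a sketch; the hg.peer call is an assumption about the caller, not
# shown in this file): commands.push drives the function above roughly as:
#
#   other = hg.peer(repo, opts, dest)
#   pushop = exchange.push(repo, other, force=False, revs=nodes)
#   # pushop.cgresult / pushop.bkresult expose the outcomes documented on
#   # pushoperation.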
# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}


def pushdiscovery(stepname):
    """decorator for functions performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, change the pushdiscovery dictionary directly."""

    def dec(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func

    return dec


def _pushdiscovery(pushop):
    """Run all discovery steps"""
    for stepname in pushdiscoveryorder:
        step = pushdiscoverymapping[stepname]
        step(pushop)


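# Example (a minimal sketch): an extension can register an additional
# discovery step, which then runs from _pushdiscovery() above in
# registration order:
#
#   @exchange.pushdiscovery(b'my-step')
#   def _mydiscoverystep(pushop):
#       pushop.ui.debug(b'my-step discovery\n')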
def _checksubrepostate(pushop):
    """Ensure all outgoing referenced subrepo revisions are present locally"""
    for n in pushop.outgoing.missing:
        ctx = pushop.repo[n]

        if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files():
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                sub.verify(onpush=True)


@pushdiscovery(b'changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    fci = discovery.findcommonincoming
    if pushop.revs:
        commoninc = fci(
            pushop.repo,
            pushop.remote,
            force=pushop.force,
            ancestorsof=pushop.revs,
        )
    else:
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(
        pushop.repo,
        pushop.remote,
        onlyheads=pushop.revs,
        commoninc=commoninc,
        force=pushop.force,
    )
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc


@pushdiscovery(b'phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = listkeys(pushop.remote, b'phases')

    if (
        pushop.ui.configbool(b'ui', b'_usedassubrepo')
        and remotephases  # server supports phases
        and not pushop.outgoing.missing  # no changesets to be pushed
        and remotephases.get(b'publishing', False)
    ):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changesets are to be pushed
        # - and the remote is publishing
        # We may be in issue 3781 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        pushop.outdatedphases = []
        pushop.fallbackoutdatedphases = []
        return

    pushop.remotephases = phases.remotephasessummary(
        pushop.repo, pushop.fallbackheads, remotephases
    )
    droots = pushop.remotephases.draftroots

    extracond = b''
    if not pushop.remotephases.publishing:
        extracond = b' and public()'
    revset = b'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote but public here.
    # XXX Beware that the revset breaks if droots is not strictly
    # XXX roots; we may want to ensure it is, but that is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not pushop.remotephases.publishing and pushop.publish:
        future = list(
            unfi.set(
                b'%ln and (not public() or %ln::)', pushop.futureheads, droots
            )
        )
    elif not outgoing.missing:
        future = fallback
    else:
        # adds changesets we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(
            unfi.set(b'roots(%ln + %ln::)', outgoing.missing, droots)
        )
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback


@pushdiscovery(b'obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
        return

    if not pushop.repo.obsstore:
        return

    if b'obsolete' not in listkeys(pushop.remote, b'namespaces'):
        return

    repo = pushop.repo
    # very naive computation, that can be quite expensive on big repos.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads))
    pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)


@pushdiscovery(b'bookmarks')
def _pushdiscoverybookmarks(pushop):
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug(b"checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)

    remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks'))

    explicit = {
        repo._bookmarks.expandname(bookmark) for bookmark in pushop.bookmarks
    }

    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
    return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)


def _processcompared(pushop, pushed, explicit, remotebms, comp):
    """take decisions on the bookmarks to push to the remote repo

    Exists to help extensions alter this behavior.
    """
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
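    # For reference, the buckets from bookmod.comparebookmarks() are roughly:
    # addsrc/adddst exist only on the source/destination side, advsrc/advdst
    # advanced on the source/destination side, diverge and differ changed on
    # both sides, invalid are unknown on both sides, and same are identical.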

    repo = pushop.repo

    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not pushed or repo[scid].rev() in pushed:
            pushop.outbookmarks.append((b, dcid, scid))
    # search for added bookmarks
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            if bookmod.isdivergent(b):
                pushop.ui.warn(_(b'cannot push divergent bookmark %s!\n') % b)
                pushop.bkresult = 2
            else:
                pushop.outbookmarks.append((b, b'', scid))
    # search for overwritten bookmarks
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmarks to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, b''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        pushop.ui.warn(
            _(
                b'bookmark %s does not exist on the local '
                b'or remote repository!\n'
            )
            % explicit[0]
        )
        pushop.bkresult = 2

    pushop.outbookmarks.sort()


def _pushcheckoutgoing(pushop):
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are here for 80 char limit reasons
            mso = _(b"push includes obsolete changeset: %s!")
            mspd = _(b"push includes phase-divergent changeset: %s!")
            mscd = _(b"push includes content-divergent changeset: %s!")
            mst = {
                b"orphan": _(b"push includes orphan changeset: %s!"),
                b"phase-divergent": mspd,
                b"content-divergent": mscd,
            }
            # If we are to push and there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missing heads will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.ancestorsof:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

    discovery.checkheads(pushop)
    return True


# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}


def b2partsgenerator(stepname, idx=None):
    """decorator for functions generating bundle2 parts

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, change the b2partsgenmapping dictionary directly."""

    def dec(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func

    return dec


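# Example (a minimal sketch): an extension can register its own part
# generator; passing idx pins the step's position in b2partsgenorder:
#
#   @exchange.b2partsgenerator(b'my-part', idx=0)
#   def _mypartgen(pushop, bundler):
#       bundler.newpart(b'my-part', data=b'', mandatory=False)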
768 def _pushb2ctxcheckheads(pushop, bundler):
768 def _pushb2ctxcheckheads(pushop, bundler):
769 """Generate race condition checking parts
769 """Generate race condition checking parts
770
770
771 Exists as an independent function to aid extensions
771 Exists as an independent function to aid extensions
772 """
772 """
773 # * 'force' do not check for push race,
773 # * 'force' do not check for push race,
774 # * if we don't push anything, there are nothing to check.
774 # * if we don't push anything, there are nothing to check.
775 if not pushop.force and pushop.outgoing.ancestorsof:
775 if not pushop.force and pushop.outgoing.ancestorsof:
776 allowunrelated = b'related' in bundler.capabilities.get(
776 allowunrelated = b'related' in bundler.capabilities.get(
777 b'checkheads', ()
777 b'checkheads', ()
778 )
778 )
779 emptyremote = pushop.pushbranchmap is None
779 emptyremote = pushop.pushbranchmap is None
780 if not allowunrelated or emptyremote:
780 if not allowunrelated or emptyremote:
781 bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
781 bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
782 else:
782 else:
783 affected = set()
783 affected = set()
784 for branch, heads in pycompat.iteritems(pushop.pushbranchmap):
784 for branch, heads in pycompat.iteritems(pushop.pushbranchmap):
785 remoteheads, newheads, unsyncedheads, discardedheads = heads
785 remoteheads, newheads, unsyncedheads, discardedheads = heads
786 if remoteheads is not None:
786 if remoteheads is not None:
787 remote = set(remoteheads)
787 remote = set(remoteheads)
788 affected |= set(discardedheads) & remote
788 affected |= set(discardedheads) & remote
789 affected |= remote - set(newheads)
789 affected |= remote - set(newheads)
790 if affected:
790 if affected:
791 data = iter(sorted(affected))
791 data = iter(sorted(affected))
792 bundler.newpart(b'check:updated-heads', data=data)
792 bundler.newpart(b'check:updated-heads', data=data)
793
793
794
794
795 def _pushing(pushop):
795 def _pushing(pushop):
796 """return True if we are pushing anything"""
796 """return True if we are pushing anything"""
797 return bool(
797 return bool(
798 pushop.outgoing.missing
798 pushop.outgoing.missing
799 or pushop.outdatedphases
799 or pushop.outdatedphases
800 or pushop.outobsmarkers
800 or pushop.outobsmarkers
801 or pushop.outbookmarks
801 or pushop.outbookmarks
802 )
802 )
803
803
804
804
805 @b2partsgenerator(b'check-bookmarks')
805 @b2partsgenerator(b'check-bookmarks')
806 def _pushb2checkbookmarks(pushop, bundler):
806 def _pushb2checkbookmarks(pushop, bundler):
807 """insert bookmark move checking"""
807 """insert bookmark move checking"""
808 if not _pushing(pushop) or pushop.force:
808 if not _pushing(pushop) or pushop.force:
809 return
809 return
810 b2caps = bundle2.bundle2caps(pushop.remote)
810 b2caps = bundle2.bundle2caps(pushop.remote)
811 hasbookmarkcheck = b'bookmarks' in b2caps
811 hasbookmarkcheck = b'bookmarks' in b2caps
812 if not (pushop.outbookmarks and hasbookmarkcheck):
812 if not (pushop.outbookmarks and hasbookmarkcheck):
813 return
813 return
814 data = []
814 data = []
815 for book, old, new in pushop.outbookmarks:
815 for book, old, new in pushop.outbookmarks:
816 data.append((book, old))
816 data.append((book, old))
817 checkdata = bookmod.binaryencode(data)
817 checkdata = bookmod.binaryencode(data)
818 bundler.newpart(b'check:bookmarks', data=checkdata)
818 bundler.newpart(b'check:bookmarks', data=checkdata)
819
819
820
820
821 @b2partsgenerator(b'check-phases')
821 @b2partsgenerator(b'check-phases')
822 def _pushb2checkphases(pushop, bundler):
822 def _pushb2checkphases(pushop, bundler):
823 """insert phase move checking"""
823 """insert phase move checking"""
824 if not _pushing(pushop) or pushop.force:
824 if not _pushing(pushop) or pushop.force:
825 return
825 return
826 b2caps = bundle2.bundle2caps(pushop.remote)
826 b2caps = bundle2.bundle2caps(pushop.remote)
827 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
827 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
828 if pushop.remotephases is not None and hasphaseheads:
828 if pushop.remotephases is not None and hasphaseheads:
829 # check that the remote phase has not changed
829 # check that the remote phase has not changed
830 checks = {p: [] for p in phases.allphases}
830 checks = {p: [] for p in phases.allphases}
831 checks[phases.public].extend(pushop.remotephases.publicheads)
831 checks[phases.public].extend(pushop.remotephases.publicheads)
832 checks[phases.draft].extend(pushop.remotephases.draftroots)
832 checks[phases.draft].extend(pushop.remotephases.draftroots)
833 if any(pycompat.itervalues(checks)):
833 if any(pycompat.itervalues(checks)):
834 for phase in checks:
834 for phase in checks:
835 checks[phase].sort()
835 checks[phase].sort()
836 checkdata = phases.binaryencode(checks)
836 checkdata = phases.binaryencode(checks)
837 bundler.newpart(b'check:phases', data=checkdata)
837 bundler.newpart(b'check:phases', data=checkdata)
838
838
839
839
840 @b2partsgenerator(b'changeset')
840 @b2partsgenerator(b'changeset')
841 def _pushb2ctx(pushop, bundler):
841 def _pushb2ctx(pushop, bundler):
842 """handle changegroup push through bundle2
842 """handle changegroup push through bundle2
843
843
844 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
844 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
845 """
845 """
846 if b'changesets' in pushop.stepsdone:
846 if b'changesets' in pushop.stepsdone:
847 return
847 return
848 pushop.stepsdone.add(b'changesets')
848 pushop.stepsdone.add(b'changesets')
849 # Send known heads to the server for race detection.
849 # Send known heads to the server for race detection.
850 if not _pushcheckoutgoing(pushop):
850 if not _pushcheckoutgoing(pushop):
851 return
851 return
852 pushop.repo.prepushoutgoinghooks(pushop)
852 pushop.repo.prepushoutgoinghooks(pushop)
853
853
854 _pushb2ctxcheckheads(pushop, bundler)
854 _pushb2ctxcheckheads(pushop, bundler)
855
855
856 b2caps = bundle2.bundle2caps(pushop.remote)
856 b2caps = bundle2.bundle2caps(pushop.remote)
857 version = b'01'
857 version = b'01'
858 cgversions = b2caps.get(b'changegroup')
858 cgversions = b2caps.get(b'changegroup')
859 if cgversions: # 3.1 and 3.2 ship with an empty value
859 if cgversions: # 3.1 and 3.2 ship with an empty value
860 cgversions = [
860 cgversions = [
861 v
861 v
862 for v in cgversions
862 for v in cgversions
863 if v in changegroup.supportedoutgoingversions(pushop.repo)
863 if v in changegroup.supportedoutgoingversions(pushop.repo)
864 ]
864 ]
865 if not cgversions:
865 if not cgversions:
866 raise error.Abort(_(b'no common changegroup version'))
866 raise error.Abort(_(b'no common changegroup version'))
867 version = max(cgversions)
867 version = max(cgversions)
868 cgstream = changegroup.makestream(
868 cgstream = changegroup.makestream(
869 pushop.repo, pushop.outgoing, version, b'push'
869 pushop.repo, pushop.outgoing, version, b'push'
870 )
870 )
871 cgpart = bundler.newpart(b'changegroup', data=cgstream)
871 cgpart = bundler.newpart(b'changegroup', data=cgstream)
872 if cgversions:
872 if cgversions:
873 cgpart.addparam(b'version', version)
873 cgpart.addparam(b'version', version)
874 if scmutil.istreemanifest(pushop.repo):
874 if scmutil.istreemanifest(pushop.repo):
875 cgpart.addparam(b'treemanifest', b'1')
875 cgpart.addparam(b'treemanifest', b'1')
876 if b'exp-sidedata-flag' in pushop.repo.requirements:
876 if b'exp-sidedata-flag' in pushop.repo.requirements:
877 cgpart.addparam(b'exp-sidedata', b'1')
877 cgpart.addparam(b'exp-sidedata', b'1')
878
878
879 def handlereply(op):
879 def handlereply(op):
880 """extract addchangegroup returns from server reply"""
880 """extract addchangegroup returns from server reply"""
881 cgreplies = op.records.getreplies(cgpart.id)
881 cgreplies = op.records.getreplies(cgpart.id)
882 assert len(cgreplies[b'changegroup']) == 1
882 assert len(cgreplies[b'changegroup']) == 1
883 pushop.cgresult = cgreplies[b'changegroup'][0][b'return']
883 pushop.cgresult = cgreplies[b'changegroup'][0][b'return']
884
884
885 return handlereply
885 return handlereply
886
886
887
887
888 @b2partsgenerator(b'phase')
888 @b2partsgenerator(b'phase')
889 def _pushb2phases(pushop, bundler):
889 def _pushb2phases(pushop, bundler):
890 """handle phase push through bundle2"""
890 """handle phase push through bundle2"""
891 if b'phases' in pushop.stepsdone:
891 if b'phases' in pushop.stepsdone:
892 return
892 return
893 b2caps = bundle2.bundle2caps(pushop.remote)
893 b2caps = bundle2.bundle2caps(pushop.remote)
894 ui = pushop.repo.ui
894 ui = pushop.repo.ui
895
895
896 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
896 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
897 haspushkey = b'pushkey' in b2caps
897 haspushkey = b'pushkey' in b2caps
898 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
898 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
899
899
900 if hasphaseheads and not legacyphase:
900 if hasphaseheads and not legacyphase:
901 return _pushb2phaseheads(pushop, bundler)
901 return _pushb2phaseheads(pushop, bundler)
902 elif haspushkey:
902 elif haspushkey:
903 return _pushb2phasespushkey(pushop, bundler)
903 return _pushb2phasespushkey(pushop, bundler)
904
904
905
905
906 def _pushb2phaseheads(pushop, bundler):
906 def _pushb2phaseheads(pushop, bundler):
907 """push phase information through a bundle2 - binary part"""
907 """push phase information through a bundle2 - binary part"""
908 pushop.stepsdone.add(b'phases')
908 pushop.stepsdone.add(b'phases')
909 if pushop.outdatedphases:
909 if pushop.outdatedphases:
910 updates = {p: [] for p in phases.allphases}
910 updates = {p: [] for p in phases.allphases}
911 updates[0].extend(h.node() for h in pushop.outdatedphases)
911 updates[0].extend(h.node() for h in pushop.outdatedphases)
912 phasedata = phases.binaryencode(updates)
912 phasedata = phases.binaryencode(updates)
913 bundler.newpart(b'phase-heads', data=phasedata)
913 bundler.newpart(b'phase-heads', data=phasedata)
914
914
915
915
def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add(b'phases')
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_(b'updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart(b'pushkey')
        part.addparam(b'namespace', enc(b'phases'))
        part.addparam(b'key', enc(newremotehead.hex()))
        part.addparam(b'old', enc(b'%d' % phases.draft))
        part.addparam(b'new', enc(b'%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep[b'pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _(b'server ignored update of %s to public!\n') % node
            elif not int(results[0][b'return']):
                msg = _(b'updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)

    return handlereply


@b2partsgenerator(b'obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    if b'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add(b'obsmarkers')
    if pushop.outobsmarkers:
        markers = obsutil.sortedmarkers(pushop.outobsmarkers)
        bundle2.buildobsmarkerspart(bundler, markers)


@b2partsgenerator(b'bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if b'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)

    legacy = pushop.repo.ui.configlist(b'devel', b'legacy.exchange')
    legacybooks = b'bookmarks' in legacy

    if not legacybooks and b'bookmarks' in b2caps:
        return _pushb2bookmarkspart(pushop, bundler)
    elif b'pushkey' in b2caps:
        return _pushb2bookmarkspushkey(pushop, bundler)


def _bmaction(old, new):
    """small utility for bookmark pushing"""
    if not old:
        return b'export'
    elif not new:
        return b'delete'
    return b'update'


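# Illustrative truth table (derived from the code above, not in the
# original module):
#
#   _bmaction(None, node)   -> b'export'  (bookmark is new on the remote)
#   _bmaction(node, None)   -> b'delete'  (bookmark was removed locally)
#   _bmaction(node1, node2) -> b'update'  (bookmark moved to another node)

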
def _abortonsecretctx(pushop, node, b):
    """abort if a given bookmark points to a secret changeset"""
    if node and pushop.repo[node].phase() == phases.secret:
        raise error.Abort(
            _(b'cannot push bookmark %s as it points to a secret changeset') % b
        )


def _pushb2bookmarkspart(pushop, bundler):
    pushop.stepsdone.add(b'bookmarks')
    if not pushop.outbookmarks:
        return

    allactions = []
    data = []
    for book, old, new in pushop.outbookmarks:
        _abortonsecretctx(pushop, new, book)
        data.append((book, new))
        allactions.append((book, _bmaction(old, new)))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart(b'bookmarks', data=checkdata)

    def handlereply(op):
        ui = pushop.ui
        # if success
        for book, action in allactions:
            ui.status(bookmsgmap[action][0] % book)

    return handlereply


def _pushb2bookmarkspushkey(pushop, bundler):
    pushop.stepsdone.add(b'bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for parts we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        _abortonsecretctx(pushop, new, book)
        part = bundler.newpart(b'pushkey')
        part.addparam(b'namespace', enc(b'bookmarks'))
        part.addparam(b'key', enc(book))
        part.addparam(b'old', enc(hex(old)))
        part.addparam(b'new', enc(hex(new)))
        action = b'update'
        if not old:
            action = b'export'
        elif not new:
            action = b'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep[b'pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0][b'return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1

    return handlereply


@b2partsgenerator(b'pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    pushvars = pushop.pushvars
    if pushvars:
        shellvars = {}
        for raw in pushvars:
            if b'=' not in raw:
                msg = (
                    b"unable to parse variable '%s', should follow "
                    b"'KEY=VALUE' or 'KEY=' format"
                )
                raise error.Abort(msg % raw)
            k, v = raw.split(b'=', 1)
            shellvars[k] = v

        part = bundler.newpart(b'pushvars')

        for key, value in pycompat.iteritems(shellvars):
            part.addparam(key, value, mandatory=False)


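# Usage sketch (illustrative, not part of the original module): with a
# server that enables `push.pushvars.server=true`, a client command like
#
#   hg push --pushvars "DEBUG=1"
#
# ends up here, and DEBUG=1 is attached to the b'pushvars' part as an
# advisory (mandatory=False) parameter, which server-side hooks can then
# read from their environment.

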
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = pushop.trmanager and pushop.ui.configbool(
        b'experimental', b'bundle2.pushback'
    )

    # create reply capability
    capsblob = bundle2.encodecaps(
        bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
    )
    bundler.newpart(b'replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push: the replycaps part created above is
    # always present, so a single part means no generator added anything
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            with pushop.remote.commandexecutor() as e:
                reply = e.callcommand(
                    b'unbundle',
                    {
                        b'bundle': stream,
                        b'heads': [b'force'],
                        b'url': pushop.remote.url(),
                    },
                ).result()
        except error.BundleValueError as exc:
            raise error.Abort(_(b'missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_(b'missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            pushop.ui.error(_(b'remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.error(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
            raise error.Abort(_(b'push failed on remote'))
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)


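# Note (illustrative, not part of the original module): when
# `experimental.bundle2.pushback` is enabled, the replycaps part built
# above advertises pushback, allowing the server to embed a bundle in its
# reply; `trgetter` then routes that returned data into the push
# transaction via pushop.trmanager.transaction.

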
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if b'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable(b'unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (
        outgoing.excluded or pushop.repo.changelog.filteredrevs
    ):
        # push everything,
        # use the fast path, no race possible on push
        cg = changegroup.makechangegroup(
            pushop.repo,
            outgoing,
            b'01',
            b'push',
            fastpath=True,
            bundlecaps=bundlecaps,
        )
    else:
        cg = changegroup.makechangegroup(
            pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps
        )

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    if pushop.force:
        remoteheads = [b'force']
    else:
        remoteheads = pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())


def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = listkeys(pushop.remote, b'phases')
    if (
        pushop.ui.configbool(b'ui', b'_usedassubrepo')
        and remotephases  # server supports phases
        and pushop.cgresult is None  # nothing was pushed
        and remotephases.get(b'publishing', False)
    ):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changeset was pushed
        # - and the remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {b'publishing': b'True'}
    if not remotephases:  # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads, remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get(b'publishing', False):
            _localphasemove(pushop, cheads)
        else:  # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if b'phases' in pushop.stepsdone:
                # phases already pushed through bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add(b'phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            with pushop.remote.commandexecutor() as e:
                r = e.callcommand(
                    b'pushkey',
                    {
                        b'namespace': b'phases',
                        b'key': newremotehead.hex(),
                        b'old': b'%d' % phases.draft,
                        b'new': b'%d' % phases.public,
                    },
                ).result()

            if not r:
                pushop.ui.warn(
                    _(b'updating %s to public failed!\n') % newremotehead
                )


def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(
            pushop.repo, pushop.trmanager.transaction(), phase, nodes
        )
    else:
        # repo is not locked, do not change any phases!
        # Inform the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(
                _(
                    b'cannot lock source repo, skipping '
                    b'local %s phase update\n'
                )
                % phasestr
            )


def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if b'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add(b'obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug(b'try to push obsolete markers to remote\n')
        rslts = []
        markers = obsutil.sortedmarkers(pushop.outobsmarkers)
        remotedata = obsolete._pushkeyescape(markers)
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey(b'obsolete', key, b'', data))
        if [r for r in rslts if not r]:
            msg = _(b'failed to push some obsolete markers!\n')
            repo.ui.warn(msg)


def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = b'update'
        if not old:
            action = b'export'
        elif not new:
            action = b'delete'

        with remote.commandexecutor() as e:
            r = e.callcommand(
                b'pushkey',
                {
                    b'namespace': b'bookmarks',
                    b'key': b,
                    b'old': hex(old),
                    b'new': hex(new),
                },
            ).result()

        if r:
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value from an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1


class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(
        self,
        repo,
        remote,
        heads=None,
        force=False,
        bookmarks=(),
        remotebookmarks=None,
        streamclonerequested=None,
        includepats=None,
        excludepats=None,
        depth=None,
    ):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = [
            repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
        ]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False
        # Set of file patterns to include.
        self.includepats = includepats
        # Set of file patterns to exclude.
        self.excludepats = excludepats
        # Number of ancestor changesets to pull from each pulled head.
        self.depth = depth

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled everything possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()


class transactionmanager(util.transactional):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = b'%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs[b'source'] = self.source
            self._tr.hookargs[b'url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()


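# Usage sketch (illustrative, not part of the original module): because
# transactionmanager subclasses util.transactional, it can be driven as a
# context manager, which is how pull() uses it below:
#
#   trmanager = transactionmanager(repo, b'pull', remote.url())
#   with trmanager:
#       tr = trmanager.transaction()  # opened lazily on first request
#       ...apply incoming data under tr...
#   # on normal exit the transaction is closed; release() runs either way

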
def listkeys(remote, namespace):
    with remote.commandexecutor() as e:
        return e.callcommand(b'listkeys', {b'namespace': namespace}).result()


def _fullpullbundle2(repo, pullop):
    # The server may send a partial reply, i.e. when inlining
    # pre-computed bundles. In that case, update the common
    # set based on the results and pull another bundle.
    #
    # There are two indicators that the process is finished:
    # - no changeset has been added, or
    # - all remote heads are known locally.
    # The head check must use the unfiltered view as obsoletion
    # markers can hide heads.
    unfi = repo.unfiltered()
    unficl = unfi.changelog

    def headsofdiff(h1, h2):
        """Returns heads(h1 % h2)"""
        res = unfi.set(b'heads(%ln %% %ln)', h1, h2)
        return {ctx.node() for ctx in res}

    def headsofunion(h1, h2):
        """Returns heads((h1 + h2) - null)"""
        res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)
        return {ctx.node() for ctx in res}

    while True:
        old_heads = unficl.heads()
        clstart = len(unficl)
        _pullbundle2(pullop)
        if requirements.NARROW_REQUIREMENT in repo.requirements:
            # XXX narrow clones filter the heads on the server side during
            # XXX getbundle and result in partial replies as well.
            # XXX Disable pull bundles in this case as band aid to avoid
            # XXX extra round trips.
            break
        if clstart == len(unficl):
            break
        if all(unficl.hasnode(n) for n in pullop.rheads):
            break
        new_heads = headsofdiff(unficl.heads(), old_heads)
        pullop.common = headsofunion(new_heads, pullop.common)
        pullop.rheads = set(pullop.rheads) - pullop.common


def add_confirm_callback(repo, pullop):
    """adds a finalize callback to the transaction which can be used to show
    stats to the user and confirm the pull before committing the transaction"""

    tr = pullop.trmanager.transaction()
    scmutil.registersummarycallback(
        repo, tr, txnname=b'pull', as_validator=True
    )
    reporef = weakref.ref(repo.unfiltered())

    def prompt(tr):
        repo = reporef()
        cm = _(b'accept incoming changes (yn)?$$ &Yes $$ &No')
        if repo.ui.promptchoice(cm):
            raise error.Abort(b"user aborted")

    tr.addvalidator(b'900-pull-prompt', prompt)


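# Illustrative note (not part of the original module): this callback is
# armed by pull() below when either the `confirm` argument is passed or
# the user runs something like
#
#   hg pull --config pull.confirm=true
#
# outside of HGPLAIN mode; the transaction summary is then printed and the
# prompt must be answered before the pulled data is committed.

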
def pull(
    repo,
    remote,
    heads=None,
    force=False,
    bookmarks=(),
    opargs=None,
    streamclonerequested=None,
    includepats=None,
    excludepats=None,
    depth=None,
    confirm=None,
):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.
    ``includepats`` and ``excludepats`` define explicit file patterns to
    include and exclude in storage, respectively. If not defined, narrow
    patterns from the repo instance are used, if available.
    ``depth`` is an integer indicating the DAG depth of history we're
    interested in. If defined, for each revision specified in ``heads``, we
    will fetch up to this many of its ancestors and data associated with them.
    ``confirm`` is a boolean indicating whether the pull should be confirmed
    before committing the transaction. This overrides HGPLAIN.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}

    # We allow the narrow patterns to be passed in explicitly to provide more
    # flexibility for API consumers.
    if includepats or excludepats:
        includepats = includepats or set()
        excludepats = excludepats or set()
    else:
        includepats, excludepats = repo.narrowpats

    narrowspec.validatepatterns(includepats)
    narrowspec.validatepatterns(excludepats)

    pullop = pulloperation(
        repo,
        remote,
        heads,
        force,
        bookmarks=bookmarks,
        streamclonerequested=streamclonerequested,
        includepats=includepats,
        excludepats=excludepats,
        depth=depth,
        **pycompat.strkwargs(opargs)
    )

    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _(
                b"required features are not"
                b" supported in the destination:"
                b" %s"
            ) % (b', '.join(sorted(missing)))
            raise error.Abort(msg)

    pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
    wlock = util.nullcontextmanager()
    if not bookmod.bookmarksinstore(repo):
        wlock = repo.wlock()
    with wlock, repo.lock(), pullop.trmanager:
        if confirm or (
            repo.ui.configbool(b"pull", b"confirm") and not repo.ui.plain()
        ):
            add_confirm_callback(repo, pullop)

        # Use the modern wire protocol, if available.
        if remote.capable(b'command-changesetdata'):
            exchangev2.pull(pullop)
        else:
            # This should ideally be in _pullbundle2(). However, it needs to
            # run before discovery to avoid extra work.
            _maybeapplyclonebundle(pullop)
            streamclone.maybeperformlegacystreamclone(pullop)
            _pulldiscovery(pullop)
            if pullop.canusebundle2:
                _fullpullbundle2(repo, pullop)
            _pullchangeset(pullop)
            _pullphase(pullop)
            _pullbookmarks(pullop)
            _pullobsolete(pullop)

        # storing remotenames
        if repo.ui.configbool(b'experimental', b'remotenames'):
            logexchange.pullremotenames(repo, remote)

    return pullop


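# Call sketch (illustrative, not part of the original module): pulling two
# specific heads from a peer, assuming `node1` and `node2` are binary node
# ids known to the remote:
#
#   from mercurial import exchange, hg
#   peer = hg.peer(repo.ui, {}, b'https://example.com/repo')
#   pullop = exchange.pull(repo, peer, heads=[node1, node2])
#   # pullop.cgresult then carries the changegroup result code

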
# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}


def pulldiscovery(stepname):
    """decorator for functions performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, change the pulldiscovery dictionary directly."""

    def dec(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func

    return dec


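# Registration sketch (illustrative, not part of the original module): an
# extension defining a new discovery step named b'mystep' would write:
#
#   @pulldiscovery(b'mystep')
#   def _pulldiscoverymystep(pullop):
#       ...inspect pullop.remote and stash results on pullop...
#
# The step name and function are hypothetical; registration order decides
# execution order in _pulldiscovery() below.

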
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    for stepname in pulldiscoveryorder:
        step = pulldiscoverymapping[stepname]
        step(pullop)


@pulldiscovery(b'b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        return
    if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but let's be nice
        # with new implementations.
        return
    books = listkeys(pullop.remote, b'bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)


@pulldiscovery(b'changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; it will change to handle all
    discovery at some point."""
    tmp = discovery.findcommonincoming(
        pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
    )
    common, fetch, rheads = tmp
    has_node = pullop.repo.unfiltered().changelog.index.has_node
    if fetch and rheads:
        # If a remote head is filtered locally, put it back in common.
        #
        # This is a hackish solution to catch most of the "common but locally
        # hidden" situations. We do not perform discovery on the unfiltered
        # repository because it ends up doing a pathological amount of round
        # trips for a huge amount of changesets we do not care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we'll not be able to
        # detect it.
        scommon = set(common)
        for n in rheads:
            if has_node(n):
                if n not in scommon:
                    common.append(n)
        if set(rheads).issubset(set(common)):
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads


def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}

    # make ui easier to access
    ui = pullop.repo.ui

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]

    # declare pull perimeters
    kwargs[b'common'] = pullop.common
    kwargs[b'heads'] = pullop.heads or pullop.rheads

    # check whether the server supports narrow, then add includepats and
    # excludepats
    servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
    if servernarrow and pullop.includepats:
        kwargs[b'includepats'] = pullop.includepats
    if servernarrow and pullop.excludepats:
        kwargs[b'excludepats'] = pullop.excludepats

    if streaming:
        kwargs[b'cg'] = False
        kwargs[b'stream'] = True
        pullop.stepsdone.add(b'changegroup')
        pullop.stepsdone.add(b'phases')

    else:
        # pulling changegroup
        pullop.stepsdone.add(b'changegroup')

        kwargs[b'cg'] = pullop.fetch

        legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
        hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
        if not legacyphase and hasbinaryphase:
            kwargs[b'phases'] = True
            pullop.stepsdone.add(b'phases')

        if b'listkeys' in pullop.remotebundle2caps:
            if b'phases' not in pullop.stepsdone:
                kwargs[b'listkeys'] = [b'phases']

    bookmarksrequested = False
    legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
    hasbinarybook = b'bookmarks' in pullop.remotebundle2caps

    if pullop.remotebookmarks is not None:
        pullop.stepsdone.add(b'request-bookmarks')

    if (
        b'request-bookmarks' not in pullop.stepsdone
        and pullop.remotebookmarks is None
        and not legacybookmark
        and hasbinarybook
    ):
        kwargs[b'bookmarks'] = True
        bookmarksrequested = True

    if b'listkeys' in pullop.remotebundle2caps:
        if b'request-bookmarks' not in pullop.stepsdone:
            # make sure to always include bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            pullop.stepsdone.add(b'request-bookmarks')
            kwargs.setdefault(b'listkeys', []).append(b'bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (
        pullop.remote.capable(b'clonebundles')
        and pullop.heads is None
        and list(pullop.common) == [nullid]
    ):
        kwargs[b'cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_(b'streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_(b"no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_(b"requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs[b'obsmarkers'] = True
            pullop.stepsdone.add(b'obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)

    with pullop.remote.commandexecutor() as e:
        args = dict(kwargs)
        args[b'source'] = b'pull'
        bundle = e.callcommand(b'getbundle', args).result()

    try:
        op = bundle2.bundleoperation(
            pullop.repo, pullop.gettransaction, source=b'pull'
        )
        op.modes[b'bookmarks'] = b'records'
        bundle2.processbundle(pullop.repo, bundle, op=op)
    except bundle2.AbortFromPart as exc:
        pullop.repo.ui.error(_(b'remote: abort: %s\n') % exc)
        raise error.Abort(_(b'pull failed on remote'), hint=exc.hint)
    except error.BundleValueError as exc:
        raise error.Abort(_(b'missing support for %s') % exc)
1839
1839
1840 if pullop.fetch:
1840 if pullop.fetch:
1841 pullop.cgresult = bundle2.combinechangegroupresults(op)
1841 pullop.cgresult = bundle2.combinechangegroupresults(op)
1842
1842
1843 # processing phases change
1843 # processing phases change
1844 for namespace, value in op.records[b'listkeys']:
1844 for namespace, value in op.records[b'listkeys']:
1845 if namespace == b'phases':
1845 if namespace == b'phases':
1846 _pullapplyphases(pullop, value)
1846 _pullapplyphases(pullop, value)
1847
1847
1848 # processing bookmark update
1848 # processing bookmark update
1849 if bookmarksrequested:
1849 if bookmarksrequested:
1850 books = {}
1850 books = {}
1851 for record in op.records[b'bookmarks']:
1851 for record in op.records[b'bookmarks']:
1852 books[record[b'bookmark']] = record[b"node"]
1852 books[record[b'bookmark']] = record[b"node"]
1853 pullop.remotebookmarks = books
1853 pullop.remotebookmarks = books
1854 else:
1854 else:
1855 for namespace, value in op.records[b'listkeys']:
1855 for namespace, value in op.records[b'listkeys']:
1856 if namespace == b'bookmarks':
1856 if namespace == b'bookmarks':
1857 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1857 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1858
1858
1859 # bookmark data were either already there or pulled in the bundle
1859 # bookmark data were either already there or pulled in the bundle
1860 if pullop.remotebookmarks is not None:
1860 if pullop.remotebookmarks is not None:
1861 _pullbookmarks(pullop)
1861 _pullbookmarks(pullop)
1862
1862
1863
1863
def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""


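# A minimal sketch (not part of this module) of how an extension might use
# the hook above to add extra arguments to the getbundle call; the
# b'myext-data' argument name is hypothetical:
#
#     from mercurial import exchange, extensions
#
#     def _extraprepare(orig, pullop, kwargs):
#         orig(pullop, kwargs)
#         kwargs[b'myext-data'] = True  # hypothetical extra argument
#
#     def extsetup(ui):
#         extensions.wrapfunction(
#             exchange, '_pullbundle2extraprepare', _extraprepare
#         )

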
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay opening the transaction as late as possible so we don't
    # open a transaction for nothing, which would break a future useful
    # rollback call
    if b'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_(b"no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_(b"requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable(b'getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle(
            b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
        )
    elif pullop.heads is None:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand(
                b'changegroup',
                {
                    b'nodes': pullop.fetch,
                    b'source': b'pull',
                },
            ).result()

    elif not pullop.remote.capable(b'changegroupsubset'):
        raise error.Abort(
            _(
                b"partial pull cannot be done because "
                b"other repository doesn't support "
                b"changegroupsubset."
            )
        )
    else:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand(
                b'changegroupsubset',
                {
                    b'bases': pullop.fetch,
                    b'heads': pullop.heads,
                    b'source': b'pull',
                },
            ).result()

    bundleop = bundle2.applybundle(
        pullop.repo, cg, tr, b'pull', pullop.remote.url()
    )
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)


def _pullphase(pullop):
    # Get remote phases data from remote
    if b'phases' in pullop.stepsdone:
        return
    remotephases = listkeys(pullop.remote, b'phases')
    _pullapplyphases(pullop, remotephases)


def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if b'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'phases')
    publishing = bool(remotephases.get(b'publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(
            pullop.repo, pullop.pulledsubset, remotephases
        )
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing; all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.index.get_rev
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)


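# For reference, a sketch of the two shapes the ``remotephases`` mapping
# above can take; it comes from the ``phases`` listkeys namespace, and the
# abbreviated hex nodes are hypothetical:
#
#     # publishing server: everything common is public
#     {b'publishing': b'True'}
#
#     # non-publishing server: draft roots advertised as hex node -> b'1'
#     {b'15f51b1a5e0c...': b'1', b'a21ccf4412e5...': b'1'}

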
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if b'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    bookmod.updatefromremote(
        repo.ui,
        repo,
        remotebookmarks,
        pullop.remote.url(),
        pullop.gettransaction,
        explicit=pullop.explicitbookmarks,
    )


def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is a function that returns the pull transaction,
    creating one if necessary. We return the transaction to inform the calling
    code that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if b'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
        remoteobs = listkeys(pullop.remote, b'obsolete')
        if b'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith(b'dump'):
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr


def applynarrowacl(repo, kwargs):
    """Apply narrow fetch access control.

    This massages the named arguments for getbundle wire protocol commands
    so requested data is filtered through access control rules.
    """
    ui = repo.ui
    # TODO this assumes existence of HTTP and is a layering violation.
    username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())
    user_includes = ui.configlist(
        _NARROWACL_SECTION,
        username + b'.includes',
        ui.configlist(_NARROWACL_SECTION, b'default.includes'),
    )
    user_excludes = ui.configlist(
        _NARROWACL_SECTION,
        username + b'.excludes',
        ui.configlist(_NARROWACL_SECTION, b'default.excludes'),
    )
    if not user_includes:
        raise error.Abort(
            _(b"%s configuration for user %s is empty")
            % (_NARROWACL_SECTION, username)
        )

    user_includes = [
        b'path:.' if p == b'*' else b'path:' + p for p in user_includes
    ]
    user_excludes = [
        b'path:.' if p == b'*' else b'path:' + p for p in user_excludes
    ]

    req_includes = set(kwargs.get('includepats', []))
    req_excludes = set(kwargs.get('excludepats', []))

    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
        req_includes, req_excludes, user_includes, user_excludes
    )

    if invalid_includes:
        raise error.Abort(
            _(b"The following includes are not accessible for %s: %s")
            % (username, stringutil.pprint(invalid_includes))
        )

    new_args = {}
    new_args.update(kwargs)
    new_args['narrow'] = True
    new_args['narrow_acl'] = True
    new_args['includepats'] = req_includes
    if req_excludes:
        new_args['excludepats'] = req_excludes

    return new_args


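# A sketch of the hgrc configuration this function reads. The section and
# key names are taken from the code above; the user name and the paths are
# hypothetical. A bare ``*`` is mapped to ``path:.`` (everything):
#
#     [narrowacl]
#     default.includes = *
#     alice.includes = src/libA src/libB
#     alice.excludes = src/libA/secret

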
def _computeellipsis(repo, common, heads, known, match, depth=None):
    """Compute the shape of a narrowed DAG.

    Args:
      repo: The repository we're transferring.
      common: The roots of the DAG range we're transferring.
        May be just [nullid], which means all ancestors of heads.
      heads: The heads of the DAG range we're transferring.
      match: The narrowmatcher that allows us to identify relevant changes.
      depth: If not None, only consider nodes to be full nodes if they are at
        most depth changesets away from one of heads.

    Returns:
      A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:

        visitnodes: The list of nodes (either full or ellipsis) which
          need to be sent to the client.
        relevant_nodes: The set of changelog nodes which change a file inside
          the narrowspec. The client needs these as non-ellipsis nodes.
        ellipsisroots: A dict of {rev: parents} that is used in
          narrowchangegroup to produce ellipsis nodes with the
          correct parents.
    """
    cl = repo.changelog
    mfl = repo.manifestlog

    clrev = cl.rev

    commonrevs = {clrev(n) for n in common} | {nullrev}
    headsrevs = {clrev(n) for n in heads}

    if depth:
        revdepth = {h: 0 for h in headsrevs}

    ellipsisheads = collections.defaultdict(set)
    ellipsisroots = collections.defaultdict(set)

    def addroot(head, curchange):
        """Add a root to an ellipsis head, splitting heads with 3 roots."""
        ellipsisroots[head].add(curchange)
        # Recursively split ellipsis heads with 3 roots by finding the
        # roots' youngest common descendant which is an elided merge commit.
        # That descendant takes 2 of the 3 roots as its own, and becomes a
        # root of the head.
        while len(ellipsisroots[head]) > 2:
            child, roots = splithead(head)
            splitroots(head, child, roots)
            head = child  # Recurse in case we just added a 3rd root

    def splitroots(head, child, roots):
        ellipsisroots[head].difference_update(roots)
        ellipsisroots[head].add(child)
        ellipsisroots[child].update(roots)
        ellipsisroots[child].discard(child)

    def splithead(head):
        r1, r2, r3 = sorted(ellipsisroots[head])
        for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
            mid = repo.revs(
                b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
            )
            for j in mid:
                if j == nr2:
                    return nr2, (nr1, nr2)
                if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
                    return j, (nr1, nr2)
        raise error.Abort(
            _(
                b'Failed to split up ellipsis node! head: %d, '
                b'roots: %d %d %d'
            )
            % (head, r1, r2, r3)
        )

    missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
    visit = reversed(missing)
    relevant_nodes = set()
    visitnodes = [cl.node(m) for m in missing]
    required = set(headsrevs) | known
    for rev in visit:
        clrev = cl.changelogrevision(rev)
        ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
        if depth is not None:
            curdepth = revdepth[rev]
            for p in ps:
                revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
        needed = False
        shallow_enough = depth is None or revdepth[rev] <= depth
        if shallow_enough:
            curmf = mfl[clrev.manifest].read()
            if ps:
                # We choose to not trust the changed files list in
                # changesets because it's not always correct. TODO: could
                # we trust it for the non-merge case?
                p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
                needed = bool(curmf.diff(p1mf, match))
                if not needed and len(ps) > 1:
                    # For merge changes, the list of changed files is not
                    # helpful, since we need to emit the merge if a file
                    # in the narrow spec has changed on either side of the
                    # merge. As a result, we do a manifest diff to check.
                    p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
                    needed = bool(curmf.diff(p2mf, match))
            else:
                # For a root node, we need to include the node if any
                # files in the node match the narrowspec.
                needed = any(curmf.walk(match))

        if needed:
            for head in ellipsisheads[rev]:
                addroot(head, rev)
            for p in ps:
                required.add(p)
            relevant_nodes.add(cl.node(rev))
        else:
            if not ps:
                ps = [nullrev]
            if rev in required:
                for head in ellipsisheads[rev]:
                    addroot(head, rev)
                for p in ps:
                    ellipsisheads[p].add(rev)
            else:
                for p in ps:
                    ellipsisheads[p] |= ellipsisheads[rev]

    # add common changesets as roots of their reachable ellipsis heads
    for c in commonrevs:
        for head in ellipsisheads[c]:
            addroot(head, c)
    return visitnodes, relevant_nodes, ellipsisroots


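# A rough illustration of the return value (revision numbers hypothetical):
# for a linear DAG 0..4 with head 4 where only revisions 1 and 3 change
# files inside the narrowspec, every missing node ends up in visitnodes,
# the nodes of 1 and 3 land in relevant_nodes, and the elided revisions 0,
# 2 and 4 get their parents rewritten via ellipsisroots, roughly
# {4: {3}, 2: {1}, 0: {nullrev}}.

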
def caps20to10(repo, role):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = {b'HG20'}
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
    caps.add(b'bundle2=' + urlreq.quote(capsblob))
    return caps


# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}


def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, edit the getbundle2partsmapping dictionary directly."""

    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func

    return dec


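# A minimal sketch (not part of this module) of registering a new step with
# the decorator above, mirroring the built-in generators below; the
# b'myext-part' step name and payload are hypothetical:
#
#     @getbundle2partsgenerator(b'myext-part')
#     def _getbundlemyextpart(
#         bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
#     ):
#         """add a hypothetical part to the requested bundle"""
#         if not kwargs.get('myext', False):
#             return
#         bundler.newpart(b'myext-part', data=b'payload')

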
def bundle2requested(bundlecaps):
    if bundlecaps is not None:
        return any(cap.startswith(b'HG2') for cap in bundlecaps)
    return False


def getbundlechunks(
    repo, source, heads=None, common=None, bundlecaps=None, **kwargs
):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns a 2-tuple of a dict with metadata about the generated bundle
    and an iterator over raw chunks (of varying sizes).
    """
    kwargs = pycompat.byteskwargs(kwargs)
    info = {}
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get(b'cg', True):
            raise ValueError(
                _(b'request for bundle10 must include changegroup')
            )

        if kwargs:
            raise ValueError(
                _(b'unsupported getbundle arguments: %s')
                % b', '.join(sorted(kwargs.keys()))
            )
        outgoing = _computeoutgoing(repo, heads, common)
        info[b'bundleversion'] = 1
        return (
            info,
            changegroup.makestream(
                repo, outgoing, b'01', source, bundlecaps=bundlecaps
            ),
        )

    # bundle20 case
    info[b'bundleversion'] = 2
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith(b'bundle2='):
            blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs[b'heads'] = heads
    kwargs[b'common'] = common

    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(
            bundler,
            repo,
            source,
            bundlecaps=bundlecaps,
            b2caps=b2caps,
            **pycompat.strkwargs(kwargs)
        )

    info[b'prefercompressed'] = bundler.prefercompressed

    return info, bundler.getchunks()


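# A sketch of how a server-side caller might consume this function; the
# bundlecaps value is hypothetical but mirrors what caps20to10() above
# advertises:
#
#     bundlecaps = {b'HG20'}
#     info, chunks = getbundlechunks(
#         repo, b'serve', heads=None, common=None, bundlecaps=bundlecaps
#     )
#     with open('out.bundle', 'wb') as fh:
#         for chunk in chunks:
#             fh.write(chunk)

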
@getbundle2partsgenerator(b'stream2')
def _getbundlestream2(bundler, repo, *args, **kwargs):
    return bundle2.addpartbundlestream2(bundler, repo, **kwargs)


@getbundle2partsgenerator(b'changegroup')
def _getbundlechangegrouppart(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    **kwargs
):
    """add a changegroup part to the requested bundle"""
    if not kwargs.get('cg', True) or not b2caps:
        return

    version = b'01'
    cgversions = b2caps.get(b'changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [
            v
            for v in cgversions
            if v in changegroup.supportedoutgoingversions(repo)
        ]
        if not cgversions:
            raise error.Abort(_(b'no common changegroup version'))
        version = max(cgversions)

    outgoing = _computeoutgoing(repo, heads, common)
    if not outgoing.missing:
        return

    if kwargs.get('narrow', False):
        include = sorted(filter(bool, kwargs.get('includepats', [])))
        exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
        matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
    else:
        matcher = None

    cgstream = changegroup.makestream(
        repo, outgoing, version, source, bundlecaps=bundlecaps, matcher=matcher
    )

    part = bundler.newpart(b'changegroup', data=cgstream)
    if cgversions:
        part.addparam(b'version', version)

    part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)

    if scmutil.istreemanifest(repo):
        part.addparam(b'treemanifest', b'1')

    if b'exp-sidedata-flag' in repo.requirements:
        part.addparam(b'exp-sidedata', b'1')

    if (
        kwargs.get('narrow', False)
        and kwargs.get('narrow_acl', False)
        and (include or exclude)
    ):
        # this is mandatory because otherwise ACL clients won't work
        narrowspecpart = bundler.newpart(b'Narrow:responsespec')
        narrowspecpart.data = b'%s\0%s' % (
            b'\n'.join(include),
            b'\n'.join(exclude),
        )


@getbundle2partsgenerator(b'bookmarks')
def _getbundlebookmarkpart(
    bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
):
    """add a bookmark part to the requested bundle"""
    if not kwargs.get('bookmarks', False):
        return
    if not b2caps or b'bookmarks' not in b2caps:
        raise error.Abort(_(b'no common bookmarks exchange method'))
    books = bookmod.listbinbookmarks(repo)
    data = bookmod.binaryencode(books)
    if data:
        bundler.newpart(b'bookmarks', data=data)


@getbundle2partsgenerator(b'listkeys')
def _getbundlelistkeysparts(
    bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get('listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart(b'listkeys')
        part.addparam(b'namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)


@getbundle2partsgenerator(b'obsmarkers')
def _getbundleobsmarkerpart(
    bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get('obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        subset = [c.node() for c in repo.set(b'::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = obsutil.sortedmarkers(markers)
        bundle2.buildobsmarkerspart(bundler, markers)


@getbundle2partsgenerator(b'phases')
def _getbundlephasespart(
    bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
):
    """add phase heads part to the requested bundle"""
    if kwargs.get('phases', False):
        if not b2caps or b'heads' not in b2caps.get(b'phases'):
            raise error.Abort(_(b'no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move

            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phases for now
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

                revset = b'heads(only(%ln, %ln) and public())'
                extraheads = repo.revs(revset, draftheads, publicheads)
                for r in extraheads:
                    headsbyphase[phases.public].add(node(r))

        # transform data in a format used by the encoding function
        phasemapping = {
            phase: sorted(headsbyphase[phase]) for phase in phases.allphases
        }

        # generate the actual part
        phasedata = phases.binaryencode(phasemapping)
        bundler.newpart(b'phase-heads', data=phasedata)


@getbundle2partsgenerator(b'hgtagsfnodes')
def _getbundletagsfnodes(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    **kwargs
):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    if not b2caps or not (kwargs.get('cg', True) and b'hgtagsfnodes' in b2caps):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)


@getbundle2partsgenerator(b'cache:rev-branch-cache')
def _getbundlerevbranchcache(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    **kwargs
):
    """Transfer the rev-branch-cache mapping

    The payload is a series of data related to each branch

    1) branch name length
    2) number of open heads
    3) number of closed heads
    4) open heads nodes
    5) closed heads nodes
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it,
    # - narrow bundle isn't in play (not currently compatible).
    if (
        not kwargs.get('cg', True)
        or not b2caps
        or b'rev-branch-cache' not in b2caps
        or kwargs.get('narrow', False)
        or repo.ui.has_section(_NARROWACL_SECTION)
    ):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addpartrevbranchcache(repo, bundler, outgoing)


def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = hashutil.sha1(b''.join(sorted(heads))).digest()
    if not (
        their_heads == [b'force']
        or their_heads == heads
        or their_heads == [b'hashed', heads_hash]
    ):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced(
            b'repository changed while %s - please try again' % context
        )


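# For reference, a sketch of how a client computes the b'hashed' form that
# this function accepts, mirroring the digest computed above:
#
#     remoteheads = [
#         b'hashed',
#         hashutil.sha1(b''.join(sorted(heads))).digest(),
#     ]

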
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and has
    a mechanism to check that no push race occurred between the creation of
    the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool(
        b'experimental', b'bundle2-output-capture'
    )
    if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, b'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = b"\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:

                def gettransaction():
                    if not lockandtr[2]:
                        if not bookmod.bookmarksinstore(repo):
                            lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs[b'source'] = source
                        lockandtr[2].hookargs[b'url'] = url
                        lockandtr[2].hookargs[b'bundle2'] = b'1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool(
                    b'experimental', b'bundle2lazylocking'
                ):
                    gettransaction()

                op = bundle2.bundleoperation(
                    repo,
                    gettransaction,
                    captureoutput=captureoutput,
                    source=b'push',
                )
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)

                        def recordout(output):
                            r.newpart(b'output', data=output, mandatory=False)

                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()

                    def recordout(output):
                        part = bundle2.bundlepart(
                            b'output', data=output, mandatory=False
                        )
                        parts.append(part)

                raise
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r


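# A sketch of the experimental knob read above to switch from greedy to
# lazy locking while unbundling:
#
#     [experimental]
#     bundle2lazylocking = True

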
2651 def _maybeapplyclonebundle(pullop):
2651 def _maybeapplyclonebundle(pullop):
2652 """Apply a clone bundle from a remote, if possible."""
2652 """Apply a clone bundle from a remote, if possible."""
2653
2653
2654 repo = pullop.repo
2654 repo = pullop.repo
2655 remote = pullop.remote
2655 remote = pullop.remote
2656
2656
2657 if not repo.ui.configbool(b'ui', b'clonebundles'):
2657 if not repo.ui.configbool(b'ui', b'clonebundles'):
2658 return
2658 return
2659
2659
2660 # Only run if local repo is empty.
2660 # Only run if local repo is empty.
2661 if len(repo):
2661 if len(repo):
2662 return
2662 return
2663
2663
2664 if pullop.heads:
2664 if pullop.heads:
2665 return
2665 return
2666
2666
2667 if not remote.capable(b'clonebundles'):
2667 if not remote.capable(b'clonebundles'):
2668 return
2668 return
2669
2669
2670 with remote.commandexecutor() as e:
2670 with remote.commandexecutor() as e:
2671 res = e.callcommand(b'clonebundles', {}).result()
2671 res = e.callcommand(b'clonebundles', {}).result()
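    # `res` is the raw clone bundles manifest served by the remote: one
    # candidate bundle per line, as "URL key=value ..." attribute pairs
    # (for example a BUNDLESPEC attribute describing the bundle format).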

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = bundlecaches.parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(
            _(
                b'no clone bundles available on remote; '
                b'falling back to regular clone\n'
            )
        )
        return

    entries = bundlecaches.filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested
    )
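    # Filtering drops entries this client cannot use, e.g. bundles whose
    # BUNDLESPEC it cannot read, or non-stream entries when a stream
    # clone was explicitly requested.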

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(
            _(
                b'no compatible clone bundles available on server; '
                b'falling back to regular clone\n'
            )
        )
        repo.ui.warn(
            _(b'(you may want to report this to the server operator)\n')
        )
        return
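    # The surviving entries are reordered by configured client-side
    # preferences before we attempt the first one below.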

    entries = bundlecaches.sortclonebundleentries(repo.ui, entries)

    url = entries[0][b'URL']
    repo.ui.status(_(b'applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_(b'finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool(b'ui', b'clonebundlefallback'):
        repo.ui.warn(_(b'falling back to normal clone\n'))
    else:
        raise error.Abort(
            _(b'error applying bundle'),
            hint=_(
                b'if this error persists, consider contacting '
                b'the server operator or disable clone '
                b'bundles via '
                b'"--config ui.clonebundles=false"'
            ),
        )


def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL."""
    with repo.lock(), repo.transaction(b'bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, b'stream')

            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, b'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(
                _(b'HTTP error fetching bundle: %s\n')
                % stringutil.forcebytestr(e)
            )
        except urlerr.urlerror as e:
            ui.warn(
                _(b'error fetching bundle: %s\n')
                % stringutil.forcebytestr(e.reason)
            )

        return False
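Both `ui` options read by `_maybeapplyclonebundle` above can be set per
invocation. A minimal sketch of the two client-side escape hatches (the
URL is a placeholder, not part of this change):

  $ # skip clone bundles entirely (this is the hint printed by the abort)
  $ hg clone --config ui.clonebundles=false https://example.com/repo
  $ # or attempt the advertised bundle, but fall back to a regular clone
  $ # instead of aborting if applying it fails
  $ hg clone --config ui.clonebundlefallback=true https://example.com/repo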
@@ -1,153 +1,154 @@
 $ . "$TESTDIR/narrow-library.sh"

 $ hg init master
 $ cd master
 $ cat >> .hg/hgrc <<EOF
 > [narrow]
 > serveellipses=True
 > EOF
 $ for x in `$TESTDIR/seq.py 10`
 > do
 > echo $x > "f$x"
 > hg add "f$x"
 > hg commit -m "Commit f$x"
 > done
 $ cd ..

 narrow clone a couple files, f2 and f8

 $ hg clone --narrow ssh://user@dummy/master narrow --include "f2" --include "f8"
 requesting all changes
 adding changesets
 adding manifests
 adding file changes
 added 5 changesets with 2 changes to 2 files
 new changesets *:* (glob)
 updating to branch default
 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
 $ cd narrow
 $ ls -A
 .hg
 f2
 f8
 $ cat f2 f8
 2
 8

 $ cd ..

 change every upstream file twice

 $ cd master
 $ for x in `$TESTDIR/seq.py 10`
 > do
 > echo "update#1 $x" >> "f$x"
 > hg commit -m "Update#1 to f$x" "f$x"
 > done
 $ for x in `$TESTDIR/seq.py 10`
 > do
 > echo "update#2 $x" >> "f$x"
 > hg commit -m "Update#2 to f$x" "f$x"
 > done
 $ cd ..

 look for incoming changes

 $ cd narrow
 $ hg incoming --limit 3
 comparing with ssh://user@dummy/master
 searching for changes
 changeset: 5:ddc055582556
 user: test
 date: Thu Jan 01 00:00:00 1970 +0000
 summary: Update#1 to f1

 changeset: 6:f66eb5ad621d
 user: test
 date: Thu Jan 01 00:00:00 1970 +0000
 summary: Update#1 to f2

 changeset: 7:c42ecff04e99
 user: test
 date: Thu Jan 01 00:00:00 1970 +0000
 summary: Update#1 to f3


 Interrupting the pull is safe
 $ hg --config hooks.pretxnchangegroup.bad=false pull -q
 transaction abort!
 rollback completed
 abort: pretxnchangegroup.bad hook exited with status 1
 [40]
 $ hg id
 223311e70a6f tip

 pull new changes down to the narrow clone. Should get 8 new changesets: 4
 relevant to the narrow spec, and 4 ellipsis nodes gluing them all together.

 $ hg pull
 pulling from ssh://user@dummy/master
 searching for changes
 adding changesets
 adding manifests
 adding file changes
 added 9 changesets with 4 changes to 2 files
 new changesets *:* (glob)
 (run 'hg update' to get a working copy)
 $ hg log -T '{rev}: {desc}\n'
 13: Update#2 to f10
 12: Update#2 to f8
 11: Update#2 to f7
 10: Update#2 to f2
 9: Update#2 to f1
 8: Update#1 to f8
 7: Update#1 to f7
 6: Update#1 to f2
 5: Update#1 to f1
 4: Commit f10
 3: Commit f8
 2: Commit f7
 1: Commit f2
 0: Commit f1
 $ hg update tip
 2 files updated, 0 files merged, 0 files removed, 0 files unresolved

 add a change and push it

 $ echo "update#3 2" >> f2
 $ hg commit -m "Update#3 to f2" f2
 $ hg log f2 -T '{rev}: {desc}\n'
 14: Update#3 to f2
 10: Update#2 to f2
 6: Update#1 to f2
 1: Commit f2
 $ hg push
 pushing to ssh://user@dummy/master
 searching for changes
 remote: adding changesets
 remote: adding manifests
 remote: adding file changes
 remote: added 1 changesets with 1 changes to 1 files
 $ cd ..

 $ cd master
 $ hg log f2 -T '{rev}: {desc}\n'
 30: Update#3 to f2
 21: Update#2 to f2
 11: Update#1 to f2
 1: Commit f2
 $ hg log -l 3 -T '{rev}: {desc}\n'
 30: Update#3 to f2
 29: Update#2 to f10
 28: Update#2 to f9

 Can pull into repo with a single commit

 $ cd ..
 $ hg clone -q --narrow ssh://user@dummy/master narrow2 --include "f1" -r 0
 $ cd narrow2
 $ hg pull -q -r 1
+remote: abort: unexpected error: unable to resolve parent while packing b'00manifest.i' 1 for changeset 0
 transaction abort!
 rollback completed
 abort: pull failed on remote
 [255]
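The "Interrupting the pull is safe" step above relies on standard hook
semantics: a `pretxnchangegroup` hook that exits non-zero aborts the
transaction before any pulled changesets become visible. The same check
can be reproduced outside the test harness (the hook name `bad` is
arbitrary; `false` simply always exits 1):

  $ hg pull --config hooks.pretxnchangegroup.bad=false
  transaction abort!
  rollback completed
  abort: pretxnchangegroup.bad hook exited with status 1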
@@ -1,119 +1,118 @@
 #require no-windows

 $ . "$TESTDIR/remotefilelog-library.sh"

 $ hg init master
 $ cd master
 $ echo treemanifest >> .hg/requires
 $ cat >> .hg/hgrc <<EOF
 > [remotefilelog]
 > server=True
 > EOF
 # uppercase directory name to test encoding
 $ mkdir -p A/B
 $ echo x > A/B/x
 $ hg commit -qAm x

 $ cd ..

 # shallow clone from full

 $ hgcloneshallow ssh://user@dummy/master shallow --noupdate
 streaming all changes
 4 files to transfer, 449 bytes of data
 transferred 449 bytes in * seconds (*/sec) (glob)
 searching for changes
 no changes found
 $ cd shallow
 $ cat .hg/requires
 dotencode
 exp-remotefilelog-repo-req-1
 fncache
 generaldelta
 revlogv1
 sparserevlog
 store
 treemanifest
 $ find .hg/store/meta | sort
 .hg/store/meta
 .hg/store/meta/_a
 .hg/store/meta/_a/00manifest.i
 .hg/store/meta/_a/_b
 .hg/store/meta/_a/_b/00manifest.i

 $ hg update
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)

 $ cat A/B/x
 x

 $ ls .hg/store/data
 $ echo foo > A/B/F
 $ hg add A/B/F
 $ hg ci -m 'local content'
 $ ls .hg/store/data
 ca31988f085bfb945cb8115b78fabdee40f741aa

 $ cd ..

 # shallow clone from shallow

 $ hgcloneshallow ssh://user@dummy/shallow shallow2 --noupdate
 streaming all changes
 5 files to transfer, 1008 bytes of data
 transferred 1008 bytes in * seconds (*/sec) (glob)
 searching for changes
 no changes found
 $ cd shallow2
 $ cat .hg/requires
 dotencode
 exp-remotefilelog-repo-req-1
 fncache
 generaldelta
 revlogv1
 sparserevlog
 store
 treemanifest
 $ ls .hg/store/data
 ca31988f085bfb945cb8115b78fabdee40f741aa

 $ hg update
 2 files updated, 0 files merged, 0 files removed, 0 files unresolved

 $ cat A/B/x
 x

 $ cd ..

 # full clone from shallow
 # - send stderr to /dev/null because the order of stdout/err causes
 # flakiness here
 $ hg clone --noupdate ssh://user@dummy/shallow full 2>/dev/null
 streaming all changes
-remote: abort: Cannot clone from a shallow repo to a full repo.
 [255]

 # getbundle full clone

 $ printf '[server]\npreferuncompressed=False\n' >> master/.hg/hgrc
 $ hgcloneshallow ssh://user@dummy/master shallow3
 requesting all changes
 adding changesets
 adding manifests
 adding file changes
 added 1 changesets with 0 changes to 0 files
 new changesets 18d955ee7ba0
 updating to branch default
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved

 $ ls shallow3/.hg/store/data
 $ cat shallow3/.hg/requires
 dotencode
 exp-remotefilelog-repo-req-1
 fncache
 generaldelta
 revlogv1
 sparserevlog
 store
 treemanifest
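In the hunk above, the expected stdout loses the remote abort line: the
test discards stderr (`2>/dev/null`), and the remote's abort message now
travels on that stream, so only the `[255]` exit marker remains to match.
The next file shows the complementary technique, capturing stderr to a
file so the message can still be asserted.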
@@ -1,115 +1,115 @@
 #require no-windows

 $ . "$TESTDIR/remotefilelog-library.sh"

 $ hg init master
 $ cd master
 $ cat >> .hg/hgrc <<EOF
 > [remotefilelog]
 > server=True
 > EOF
 $ echo x > x
 $ hg commit -qAm x

 $ cd ..

 # shallow clone from full

 $ hgcloneshallow ssh://user@dummy/master shallow --noupdate
 streaming all changes
 2 files to transfer, 227 bytes of data
 transferred 227 bytes in * seconds (*/sec) (glob)
 searching for changes
 no changes found
 $ cd shallow
 $ cat .hg/requires
 dotencode
 exp-remotefilelog-repo-req-1
 fncache
 generaldelta
 revlogv1
 sparserevlog
 store

 $ hg update
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)

 $ cat x
 x

 $ ls .hg/store/data
 $ echo foo > f
 $ hg add f
 $ hg ci -m 'local content'
 $ ls .hg/store/data
 4a0a19218e082a343a1b17e5333409af9d98f0f5

 $ cd ..

 # shallow clone from shallow

 $ hgcloneshallow ssh://user@dummy/shallow shallow2 --noupdate
 streaming all changes
 3 files to transfer, 564 bytes of data
 transferred 564 bytes in * seconds (*/sec) (glob)
 searching for changes
 no changes found
 $ cd shallow2
 $ cat .hg/requires
 dotencode
 exp-remotefilelog-repo-req-1
 fncache
 generaldelta
 revlogv1
 sparserevlog
 store
 $ ls .hg/store/data
 4a0a19218e082a343a1b17e5333409af9d98f0f5

 $ hg update
 2 files updated, 0 files merged, 0 files removed, 0 files unresolved

 $ cat x
 x

 $ cd ..

 # full clone from shallow

 Note: the output to STDERR comes from a different process to the output on
 STDOUT and their relative ordering is not deterministic. As a result, the test
 was failing sporadically. To avoid this, we capture STDERR to a file and
 check its contents separately.

 $ TEMP_STDERR=full-clone-from-shallow.stderr.tmp
 $ hg clone --noupdate ssh://user@dummy/shallow full 2>$TEMP_STDERR
 streaming all changes
-remote: abort: Cannot clone from a shallow repo to a full repo.
 [255]
 $ cat $TEMP_STDERR
+remote: abort: Cannot clone from a shallow repo to a full repo.
 abort: pull failed on remote
 $ rm $TEMP_STDERR

 # getbundle full clone

 $ printf '[server]\npreferuncompressed=False\n' >> master/.hg/hgrc
 $ hgcloneshallow ssh://user@dummy/master shallow3
 requesting all changes
 adding changesets
 adding manifests
 adding file changes
 added 1 changesets with 0 changes to 0 files
 new changesets b292c1e3311f
 updating to branch default
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved

 $ ls shallow3/.hg/store/data
 $ cat shallow3/.hg/requires
 dotencode
 exp-remotefilelog-repo-req-1
 fncache
 generaldelta
 revlogv1
 sparserevlog
 store
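The capture technique above, reduced to its essentials (the temporary
file name here is illustrative): because the remote's stderr and the
local stdout come from different processes, their interleaving is
nondeterministic, so each stream is pinned down and checked separately:

  $ hg clone --noupdate ssh://user@dummy/shallow full 2>stderr.tmp
  streaming all changes
  [255]
  $ cat stderr.tmp
  remote: abort: Cannot clone from a shallow repo to a full repo.
  abort: pull failed on remote
  $ rm stderr.tmp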