bookmark: fix remote bookmark deletion when the push is raced...
marmoute
r52529:553eb132 stable
@@ -0,0 +1,153 b''
1 =============================================
2 Testing various race conditions while pushing
3 =============================================
4
5 $ cat << EOF >> $HGRCPATH
6 > [command-templates]
7 > log={rev}:{node|short} {desc|firstline} {bookmarks}
8 > [ui]
9 > timeout = 20
10 > [phases]
11 > publish=False
12 > EOF
13
14 Initial Setup
15 =============
16
17 $ hg init dst
18 $ echo a > dst/a-file
19 $ hg --cwd dst add a-file
20 $ hg --cwd dst commit -m root
21 $ hg --cwd dst bookmark my-book
22 $ hg --cwd dst bookmarks
23 * my-book 0:a64e49638499
24 $ hg --cwd dst log -G
25 @ 0:a64e49638499 root my-book
26
27
28 $ hg clone ssh://user@dummy/dst src
29 requesting all changes
30 adding changesets
31 adding manifests
32 adding file changes
33 added 1 changesets with 1 changes to 1 files
34 new changesets a64e49638499 (1 drafts)
35 updating to branch default
36 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
37 $ hg --cwd src update my-book
38 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
39 (activating bookmark my-book)
40 $ hg --cwd src log -G
41 @ 0:a64e49638499 root my-book
42
43
44 $ echo b > src/a-file
45 $ hg --cwd src commit -m cA0_
46 $ hg --cwd src log -G
47 @ 1:e89d3a6ed79b cA0_ my-book
48 |
49 o 0:a64e49638499 root
50
51
52 Race condition while pushing a forward-moving bookmark
53 =======================================================
54
55 This is currently slightly broken: the raced push ends up not pushing the bookmark.
56 However, at least we do not delete the remote one.
57
58 $ echo c > src/a-file
59 $ hg --cwd src push -B my-book --config hooks.prelock="hg commit -m cA1_"
60 pushing to ssh://user@dummy/dst
61 searching for changes
62 remote: adding changesets
63 remote: adding manifests
64 remote: adding file changes
65 remote: added 1 changesets with 1 changes to 1 files
66 $ hg --cwd src log -G
67 @ 2:08d837bbfe8d cA1_ my-book
68 |
69 o 1:e89d3a6ed79b cA0_
70 |
71 o 0:a64e49638499 root
72
73 $ hg --cwd dst log -G
74 o 1:e89d3a6ed79b cA0_
75 |
76 @ 0:a64e49638499 root my-book
77
78
79
80 Race condition while pushing a side-moving bookmark
81 =======================================================
82
83 resynchronize the repo and setup test
84 -------------------------------------
85
86 $ hg --cwd src push -B my-book
87 pushing to ssh://user@dummy/dst
88 searching for changes
89 remote: adding changesets
90 remote: adding manifests
91 remote: adding file changes
92 remote: added 1 changesets with 1 changes to 1 files
93 updating bookmark my-book
94 $ hg --cwd dst log -G
95 o 2:08d837bbfe8d cA1_ my-book
96 |
97 o 1:e89d3a6ed79b cA0_
98 |
99 @ 0:a64e49638499 root
100
101
102 $ hg --cwd src up 'desc("root")'
103 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
104 (leaving bookmark my-book)
105 $ echo d > src/a-file
106 $ hg --cwd src commit -m cB0_
107 created new head
108 $ hg --cwd src bookmark --force my-book
109 $ echo e > src/a-file
110 $ hg --cwd src log -G
111 @ 3:726401661fe5 cB0_ my-book
112 |
113 | o 2:08d837bbfe8d cA1_
114 | |
115 | o 1:e89d3a6ed79b cA0_
116 |/
117 o 0:a64e49638499 root
118
119
120 Push the bookmark while a commit is being made
121 ----------------------------------------------
122
123 This is currently slightly broken: the raced push ends up not pushing the bookmark.
124 However, at least we do not delete the remote one.
125
126 $ hg --cwd src push -f -r 'desc("cB0_")' -B my-book --config hooks.prelock="hg commit -m cB1_"
127 pushing to ssh://user@dummy/dst
128 searching for changes
129 remote: adding changesets
130 remote: adding manifests
131 remote: adding file changes
132 remote: added 1 changesets with 1 changes to 1 files (+1 heads)
133 $ hg --cwd src log -G
134 @ 4:a7f9cbf631a0 cB1_ my-book
135 |
136 o 3:726401661fe5 cB0_
137 |
138 | o 2:08d837bbfe8d cA1_
139 | |
140 | o 1:e89d3a6ed79b cA0_
141 |/
142 o 0:a64e49638499 root
143
144
145 $ hg --cwd dst log -G
146 o 3:726401661fe5 cB0_
147 |
148 | o 2:08d837bbfe8d cA1_ my-book
149 | |
150 | o 1:e89d3a6ed79b cA0_
151 |/
152 @ 0:a64e49638499 root
153
@@ -1,2895 +1,2901 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import collections
10 10 import weakref
11 11
12 12 from .i18n import _
13 13 from .node import (
14 14 hex,
15 15 nullrev,
16 16 )
17 17 from . import (
18 18 bookmarks as bookmod,
19 19 bundle2,
20 20 bundlecaches,
21 21 changegroup,
22 22 discovery,
23 23 error,
24 24 lock as lockmod,
25 25 logexchange,
26 26 narrowspec,
27 27 obsolete,
28 28 obsutil,
29 29 phases,
30 30 pushkey,
31 31 pycompat,
32 32 requirements,
33 33 scmutil,
34 34 streamclone,
35 35 url as urlmod,
36 36 util,
37 37 wireprototypes,
38 38 )
39 39 from .utils import (
40 40 hashutil,
41 41 stringutil,
42 42 urlutil,
43 43 )
44 44 from .interfaces import repository
45 45
46 46 urlerr = util.urlerr
47 47 urlreq = util.urlreq
48 48
49 49 _NARROWACL_SECTION = b'narrowacl'
50 50
51 51
52 52 def readbundle(ui, fh, fname, vfs=None):
53 53 header = changegroup.readexactly(fh, 4)
54 54
55 55 alg = None
56 56 if not fname:
57 57 fname = b"stream"
58 58 if not header.startswith(b'HG') and header.startswith(b'\0'):
59 59 fh = changegroup.headerlessfixup(fh, header)
60 60 header = b"HG10"
61 61 alg = b'UN'
62 62 elif vfs:
63 63 fname = vfs.join(fname)
64 64
65 65 magic, version = header[0:2], header[2:4]
66 66
67 67 if magic != b'HG':
68 68 raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
69 69 if version == b'10':
70 70 if alg is None:
71 71 alg = changegroup.readexactly(fh, 2)
72 72 return changegroup.cg1unpacker(fh, alg)
73 73 elif version.startswith(b'2'):
74 74 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
75 75 elif version == b'S1':
76 76 return streamclone.streamcloneapplier(fh)
77 77 else:
78 78 raise error.Abort(
79 79 _(b'%s: unknown bundle version %s') % (fname, version)
80 80 )
81 81
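# For reference, the four-byte header sniffed above dispatches as follows
# (a summary of the code, not an exhaustive spec):
#
#     b'HG10' + alg  -> changegroup.cg1unpacker   (bundle1)
#     b'HG2...'      -> bundle2.getunbundler      (e.g. b'HG20')
#     b'HGS1'        -> streamclone.streamcloneapplier
#     headerless data starting with b'\0' is fixed up as HG10 / 'UN'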
82 82
83 83 def _format_params(params):
84 84 parts = []
85 85 for key, value in sorted(params.items()):
86 86 value = urlreq.quote(value)
87 87 parts.append(b"%s=%s" % (key, value))
88 88 return b';'.join(parts)
89 89
90 90
91 91 def getbundlespec(ui, fh):
92 92 """Infer the bundlespec from a bundle file handle.
93 93
94 94 The input file handle is seeked and the original seek position is not
95 95 restored.
96 96 """
97 97
98 98 def speccompression(alg):
99 99 try:
100 100 return util.compengines.forbundletype(alg).bundletype()[0]
101 101 except KeyError:
102 102 return None
103 103
104 104 params = {}
105 105
106 106 b = readbundle(ui, fh, None)
107 107 if isinstance(b, changegroup.cg1unpacker):
108 108 alg = b._type
109 109 if alg == b'_truncatedBZ':
110 110 alg = b'BZ'
111 111 comp = speccompression(alg)
112 112 if not comp:
113 113 raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
114 114 return b'%s-v1' % comp
115 115 elif isinstance(b, bundle2.unbundle20):
116 116 if b'Compression' in b.params:
117 117 comp = speccompression(b.params[b'Compression'])
118 118 if not comp:
119 119 raise error.Abort(
120 120 _(b'unknown compression algorithm: %s') % comp
121 121 )
122 122 else:
123 123 comp = b'none'
124 124
125 125 version = None
126 126 for part in b.iterparts():
127 127 if part.type == b'changegroup':
128 128 cgversion = part.params[b'version']
129 129 if cgversion in (b'01', b'02'):
130 130 version = b'v2'
131 131 elif cgversion in (b'03',):
132 132 version = b'v2'
133 133 params[b'cg.version'] = cgversion
134 134 else:
135 135 raise error.Abort(
136 136 _(
137 137 b'changegroup version %s does not have '
138 138 b'a known bundlespec'
139 139 )
140 140 % cgversion,
141 141 hint=_(b'try upgrading your Mercurial client'),
142 142 )
143 143 elif part.type == b'stream2' and version is None:
144 144 # A stream2 part requires to be part of a v2 bundle
145 145 requirements = urlreq.unquote(part.params[b'requirements'])
146 146 splitted = requirements.split()
147 147 params = bundle2._formatrequirementsparams(splitted)
148 148 return b'none-v2;stream=v2;%s' % params
149 149 elif part.type == b'stream3-exp' and version is None:
150 150 # A stream3 part requires to be part of a v2 bundle
151 151 requirements = urlreq.unquote(part.params[b'requirements'])
152 152 splitted = requirements.split()
153 153 params = bundle2._formatrequirementsparams(splitted)
154 154 return b'none-v2;stream=v3-exp;%s' % params
155 155 elif part.type == b'obsmarkers':
156 156 params[b'obsolescence'] = b'yes'
157 157 if not part.mandatory:
158 158 params[b'obsolescence-mandatory'] = b'no'
159 159
160 160 if not version:
161 161 params[b'changegroup'] = b'no'
162 162 version = b'v2'
163 163 spec = b'%s-%s' % (comp, version)
164 164 if params:
165 165 spec += b';'
166 166 spec += _format_params(params)
167 167 return spec
168 168
169 169 elif isinstance(b, streamclone.streamcloneapplier):
170 170 requirements = streamclone.readbundle1header(fh)[2]
171 171 formatted = bundle2._formatrequirementsparams(requirements)
172 172 return b'none-packed1;%s' % formatted
173 173 else:
174 174 raise error.Abort(_(b'unknown bundle type: %s') % b)
175 175
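# A minimal usage sketch (hypothetical caller; the path and the resulting
# spec value are made up for illustration):
#
#     with open(bundle_path, 'rb') as fh:   # fh will be seeked
#         spec = getbundlespec(ui, fh)      # e.g. b'zstd-v2;obsolescence=yes'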
176 176
177 177 def _computeoutgoing(repo, heads, common):
178 178 """Computes which revs are outgoing given a set of common
179 179 and a set of heads.
180 180
181 181 This is a separate function so extensions can have access to
182 182 the logic.
183 183
184 184 Returns a discovery.outgoing object.
185 185 """
186 186 cl = repo.changelog
187 187 if common:
188 188 hasnode = cl.hasnode
189 189 common = [n for n in common if hasnode(n)]
190 190 else:
191 191 common = [repo.nullid]
192 192 if not heads:
193 193 heads = cl.heads()
194 194 return discovery.outgoing(repo, common, heads)
195 195
196 196
197 197 def _checkpublish(pushop):
198 198 repo = pushop.repo
199 199 ui = repo.ui
200 200 behavior = ui.config(b'experimental', b'auto-publish')
201 201 if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
202 202 return
203 203 remotephases = listkeys(pushop.remote, b'phases')
204 204 if not remotephases.get(b'publishing', False):
205 205 return
206 206
207 207 if pushop.revs is None:
208 208 published = repo.filtered(b'served').revs(b'not public()')
209 209 else:
210 210 published = repo.revs(b'::%ln - public()', pushop.revs)
211 211 # we want to use pushop.revs in the revset even if they themselves are
212 212 # secret, but we don't want to have anything that the server won't see
213 213 # in the result of this expression
214 214 published &= repo.filtered(b'served')
215 215 if published:
216 216 if behavior == b'warn':
217 217 ui.warn(
218 218 _(b'%i changesets about to be published\n') % len(published)
219 219 )
220 220 elif behavior == b'confirm':
221 221 if ui.promptchoice(
222 222 _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
223 223 % len(published)
224 224 ):
225 225 raise error.CanceledError(_(b'user quit'))
226 226 elif behavior == b'abort':
227 227 msg = _(b'push would publish %i changesets') % len(published)
228 228 hint = _(
229 229 b"use --publish or adjust 'experimental.auto-publish'"
230 230 b" config"
231 231 )
232 232 raise error.Abort(msg, hint=hint)
233 233
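# The behavior checked above comes from a config knob; the three
# non-default values could be set like this (hgrc syntax, matching the
# strings compared above):
#
#     [experimental]
#     auto-publish = warn    # or: confirm, abort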
234 234
235 235 def _forcebundle1(op):
236 236 """return true if a pull/push must use bundle1
237 237
238 238 This function is used to allow testing of the older bundle version"""
239 239 ui = op.repo.ui
240 240 # The goal of this config is to allow developers to choose the bundle
241 241 # version used during exchange. This is especially handy during tests.
242 242 # Value is a list of bundle versions to pick from; the highest version
243 243 # should be used.
244 244 #
245 245 # developer config: devel.legacy.exchange
246 246 exchange = ui.configlist(b'devel', b'legacy.exchange')
247 247 forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
248 248 return forcebundle1 or not op.remote.capable(b'bundle2')
249 249
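# A sketch of the developer config consumed above, e.g. for exercising the
# legacy exchange path in tests (hgrc syntax):
#
#     [devel]
#     legacy.exchange = bundle1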
250 250
251 251 class pushoperation:
252 252 """An object that represents a single push operation
253 253
254 254 Its purpose is to carry push-related state and very common operations.
255 255
256 256 A new pushoperation should be created at the beginning of each push and
257 257 discarded afterward.
258 258 """
259 259
260 260 def __init__(
261 261 self,
262 262 repo,
263 263 remote,
264 264 force=False,
265 265 revs=None,
266 266 newbranch=False,
267 267 bookmarks=(),
268 268 publish=False,
269 269 pushvars=None,
270 270 ):
271 271 # repo we push from
272 272 self.repo = repo
273 273 self.ui = repo.ui
274 274 # repo we push to
275 275 self.remote = remote
276 276 # force option provided
277 277 self.force = force
278 278 # revs to be pushed (None is "all")
279 279 self.revs = revs
280 280 # bookmarks explicitly pushed
281 281 self.bookmarks = bookmarks
282 282 # allow push of new branch
283 283 self.newbranch = newbranch
284 284 # steps already performed
285 285 # (used to check which steps have already been performed through bundle2)
286 286 self.stepsdone = set()
287 287 # Integer version of the changegroup push result
288 288 # - None means nothing to push
289 289 # - 0 means HTTP error
290 290 # - 1 means we pushed and remote head count is unchanged *or*
291 291 # we have outgoing changesets but refused to push
292 292 # - other values as described by addchangegroup()
293 293 self.cgresult = None
294 294 # Boolean value for the bookmark push
295 295 self.bkresult = None
296 296 # discover.outgoing object (contains common and outgoing data)
297 297 self.outgoing = None
298 298 # all remote topological heads before the push
299 299 self.remoteheads = None
300 300 # Details of the remote branch pre and post push
301 301 #
302 302 # mapping: {'branch': ([remoteheads],
303 303 # [newheads],
304 304 # [unsyncedheads],
305 305 # [discardedheads])}
306 306 # - branch: the branch name
307 307 # - remoteheads: the list of remote heads known locally
308 308 # None if the branch is new
309 309 # - newheads: the new remote heads (known locally) with outgoing pushed
310 310 # - unsyncedheads: the list of remote heads unknown locally.
311 311 # - discardedheads: the list of remote heads made obsolete by the push
312 312 self.pushbranchmap = None
313 313 # testable as a boolean indicating if any nodes are missing locally.
314 314 self.incoming = None
315 315 # summary of the remote phase situation
316 316 self.remotephases = None
317 317 # phase changes that must be pushed alongside the changesets
318 318 self.outdatedphases = None
319 319 # phase changes that must be pushed if the changeset push fails
320 320 self.fallbackoutdatedphases = None
321 321 # outgoing obsmarkers
322 322 self.outobsmarkers = set()
323 323 # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
324 324 self.outbookmarks = []
325 325 # transaction manager
326 326 self.trmanager = None
327 327 # map { pushkey partid -> callback handling failure}
328 328 # used to handle exception from mandatory pushkey part failure
329 329 self.pkfailcb = {}
330 330 # an iterable of pushvars or None
331 331 self.pushvars = pushvars
332 332 # publish pushed changesets
333 333 self.publish = publish
334 334
335 335 @util.propertycache
336 336 def futureheads(self):
337 337 """future remote heads if the changeset push succeeds"""
338 338 return self.outgoing.ancestorsof
339 339
340 340 @util.propertycache
341 341 def fallbackheads(self):
342 342 """future remote heads if the changeset push fails"""
343 343 if self.revs is None:
344 344 # no target to push, all common heads are relevant
345 345 return self.outgoing.commonheads
346 346 unfi = self.repo.unfiltered()
347 347 # I want cheads = heads(::ancestorsof and ::commonheads)
348 348 # (ancestorsof is revs with secret changeset filtered out)
349 349 #
350 350 # This can be expressed as:
351 351 # cheads = ( (ancestorsof and ::commonheads)
352 352 # + (commonheads and ::ancestorsof))"
353 353 # )
354 354 #
355 355 # while trying to push we already computed the following:
356 356 # common = (::commonheads)
357 357 # missing = ((commonheads::ancestorsof) - commonheads)
358 358 #
359 359 # We can pick:
360 360 # * ancestorsof part of common (::commonheads)
361 361 common = self.outgoing.common
362 362 rev = self.repo.changelog.index.rev
363 363 cheads = [node for node in self.revs if rev(node) in common]
364 364 # and
365 365 # * commonheads parents on missing
366 366 revset = unfi.set(
367 367 b'%ln and parents(roots(%ln))',
368 368 self.outgoing.commonheads,
369 369 self.outgoing.missing,
370 370 )
371 371 cheads.extend(c.node() for c in revset)
372 372 return cheads
373 373
374 374 @property
375 375 def commonheads(self):
376 376 """set of all common heads after changeset bundle push"""
377 377 if self.cgresult:
378 378 return self.futureheads
379 379 else:
380 380 return self.fallbackheads
381 381
382 382
383 383 # mapping of messages used when pushing bookmarks
384 384 bookmsgmap = {
385 385 b'update': (
386 386 _(b"updating bookmark %s\n"),
387 387 _(b'updating bookmark %s failed\n'),
388 388 ),
389 389 b'export': (
390 390 _(b"exporting bookmark %s\n"),
391 391 _(b'exporting bookmark %s failed\n'),
392 392 ),
393 393 b'delete': (
394 394 _(b"deleting remote bookmark %s\n"),
395 395 _(b'deleting remote bookmark %s failed\n'),
396 396 ),
397 397 }
398 398
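# Each entry above is a (success, failure) message pair; callers index it
# by action, as the pushkey reply handlers further down do:
#
#     ui.status(bookmsgmap[action][0] % book)   # success message
#     ui.warn(bookmsgmap[action][1] % book)     # failure message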
399 399
400 400 def push(
401 401 repo,
402 402 remote,
403 403 force=False,
404 404 revs=None,
405 405 newbranch=False,
406 406 bookmarks=(),
407 407 publish=False,
408 408 opargs=None,
409 409 ):
410 410 """Push outgoing changesets (limited by revs) from a local
411 411 repository to remote. Return an integer:
412 412 - None means nothing to push
413 413 - 0 means HTTP error
414 414 - 1 means we pushed and remote head count is unchanged *or*
415 415 we have outgoing changesets but refused to push
416 416 - other values as described by addchangegroup()
417 417 """
418 418 if opargs is None:
419 419 opargs = {}
420 420 pushop = pushoperation(
421 421 repo,
422 422 remote,
423 423 force,
424 424 revs,
425 425 newbranch,
426 426 bookmarks,
427 427 publish,
428 428 **pycompat.strkwargs(opargs)
429 429 )
430 430 if pushop.remote.local():
431 431 missing = (
432 432 set(pushop.repo.requirements) - pushop.remote.local().supported
433 433 )
434 434 if missing:
435 435 msg = _(
436 436 b"required features are not"
437 437 b" supported in the destination:"
438 438 b" %s"
439 439 ) % (b', '.join(sorted(missing)))
440 440 raise error.Abort(msg)
441 441
442 442 if not pushop.remote.canpush():
443 443 raise error.Abort(_(b"destination does not support push"))
444 444
445 445 if not pushop.remote.capable(b'unbundle'):
446 446 raise error.Abort(
447 447 _(
448 448 b'cannot push: destination does not support the '
449 449 b'unbundle wire protocol command'
450 450 )
451 451 )
452 452 for category in sorted(bundle2.read_remote_wanted_sidedata(pushop.remote)):
453 453 # Check that a computer is registered for that category for at least
454 454 # one revlog kind.
455 455 for kind, computers in repo._sidedata_computers.items():
456 456 if computers.get(category):
457 457 break
458 458 else:
459 459 raise error.Abort(
460 460 _(
461 461 b'cannot push: required sidedata category not supported'
462 462 b" by this client: '%s'"
463 463 )
464 464 % pycompat.bytestr(category)
465 465 )
466 466 # get lock as we might write phase data
467 467 wlock = lock = None
468 468 try:
469 469 try:
470 470 # bundle2 push may receive a reply bundle touching bookmarks
471 471 # requiring the wlock. Take it now to ensure proper ordering.
472 472 maypushback = pushop.ui.configbool(
473 473 b'experimental',
474 474 b'bundle2.pushback',
475 475 )
476 476 if (
477 477 (not _forcebundle1(pushop))
478 478 and maypushback
479 479 and not bookmod.bookmarksinstore(repo)
480 480 ):
481 481 wlock = pushop.repo.wlock()
482 482 lock = pushop.repo.lock()
483 483 pushop.trmanager = transactionmanager(
484 484 pushop.repo, b'push-response', pushop.remote.url()
485 485 )
486 486 except error.LockUnavailable as err:
487 487 # source repo cannot be locked.
488 488 # We do not abort the push, but just disable the local phase
489 489 # synchronisation.
490 490 msg = b'cannot lock source repository: %s\n'
491 491 msg %= stringutil.forcebytestr(err)
492 492 pushop.ui.debug(msg)
493 493
494 494 pushop.repo.checkpush(pushop)
495 495 _checkpublish(pushop)
496 496 _pushdiscovery(pushop)
497 497 if not pushop.force:
498 498 _checksubrepostate(pushop)
499 499 if not _forcebundle1(pushop):
500 500 _pushbundle2(pushop)
501 501 _pushchangeset(pushop)
502 502 _pushsyncphase(pushop)
503 503 _pushobsolete(pushop)
504 504 _pushbookmark(pushop)
505 505 if pushop.trmanager is not None:
506 506 pushop.trmanager.close()
507 507 finally:
508 508 lockmod.release(pushop.trmanager, lock, wlock)
509 509
510 510 if repo.ui.configbool(b'experimental', b'remotenames'):
511 511 logexchange.pullremotenames(repo, remote)
512 512
513 513 return pushop
514 514
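# A hypothetical caller, to make the return contract above concrete
# (names and values illustrative only):
#
#     pushop = exchange.push(repo, remote, bookmarks=(b'my-book',))
#     pushop.cgresult   # None / 0 / 1 / addchangegroup() value, as documented
#     pushop.bkresult   # bookmark outcome, e.g. 2 when a bookmark was refused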
515 515
516 516 # list of steps to perform discovery before push
517 517 pushdiscoveryorder = []
518 518
519 519 # Mapping between step name and function
520 520 #
521 521 # This exists to help extensions wrap steps if necessary
522 522 pushdiscoverymapping = {}
523 523
524 524
525 525 def pushdiscovery(stepname):
526 526 """decorator for function performing discovery before push
527 527
528 528 The function is added to the step -> function mapping and appended to the
529 529 list of steps. Beware that decorated functions will be added in order (this
530 530 may matter).
531 531
532 532 You can only use this decorator for a new step; if you want to wrap a step
533 533 from an extension, change the pushdiscoverymapping dictionary directly."""
534 534
535 535 def dec(func):
536 536 assert stepname not in pushdiscoverymapping
537 537 pushdiscoverymapping[stepname] = func
538 538 pushdiscoveryorder.append(stepname)
539 539 return func
540 540
541 541 return dec
542 542
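# A minimal sketch of registering a new discovery step with this decorator
# (hypothetical step name, not part of this patch):
#
#     @pushdiscovery(b'my-extra-check')
#     def _pushdiscovery_my_extra_check(pushop):
#         pushop.ui.debug(b'running my extra discovery step\n')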
543 543
544 544 def _pushdiscovery(pushop):
545 545 """Run all discovery steps"""
546 546 for stepname in pushdiscoveryorder:
547 547 step = pushdiscoverymapping[stepname]
548 548 step(pushop)
549 549
550 550
551 551 def _checksubrepostate(pushop):
552 552 """Ensure all outgoing referenced subrepo revisions are present locally"""
553 553
554 554 repo = pushop.repo
555 555
556 556 # If the repository does not use subrepos, skip the expensive
557 557 # manifest checks.
558 558 if not len(repo.file(b'.hgsub')) or not len(repo.file(b'.hgsubstate')):
559 559 return
560 560
561 561 for n in pushop.outgoing.missing:
562 562 ctx = repo[n]
563 563
564 564 if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files():
565 565 for subpath in sorted(ctx.substate):
566 566 sub = ctx.sub(subpath)
567 567 sub.verify(onpush=True)
568 568
569 569
570 570 @pushdiscovery(b'changeset')
571 571 def _pushdiscoverychangeset(pushop):
572 572 """discover the changesets that need to be pushed"""
573 573 fci = discovery.findcommonincoming
574 574 if pushop.revs:
575 575 commoninc = fci(
576 576 pushop.repo,
577 577 pushop.remote,
578 578 force=pushop.force,
579 579 ancestorsof=pushop.revs,
580 580 )
581 581 else:
582 582 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
583 583 common, inc, remoteheads = commoninc
584 584 fco = discovery.findcommonoutgoing
585 585 outgoing = fco(
586 586 pushop.repo,
587 587 pushop.remote,
588 588 onlyheads=pushop.revs,
589 589 commoninc=commoninc,
590 590 force=pushop.force,
591 591 )
592 592 pushop.outgoing = outgoing
593 593 pushop.remoteheads = remoteheads
594 594 pushop.incoming = inc
595 595
596 596
597 597 @pushdiscovery(b'phase')
598 598 def _pushdiscoveryphase(pushop):
599 599 """discover the phases that need to be pushed
600 600
601 601 (computed for both the success and failure cases of the changeset push)"""
602 602 outgoing = pushop.outgoing
603 603 unfi = pushop.repo.unfiltered()
604 604 remotephases = listkeys(pushop.remote, b'phases')
605 605
606 606 if (
607 607 pushop.ui.configbool(b'ui', b'_usedassubrepo')
608 608 and remotephases # server supports phases
609 609 and not pushop.outgoing.missing # no changesets to be pushed
610 610 and remotephases.get(b'publishing', False)
611 611 ):
612 612 # When:
613 613 # - this is a subrepo push
614 614 # - and the remote supports phases
615 615 # - and no changesets are to be pushed
616 616 # - and the remote is publishing
617 617 # We may be in the issue 3781 case!
618 618 # We drop the phase synchronisation usually done as a courtesy,
619 619 # which could publish changesets that are still draft locally
620 620 # on the remote.
621 621 pushop.outdatedphases = []
622 622 pushop.fallbackoutdatedphases = []
623 623 return
624 624
625 625 pushop.remotephases = phases.remotephasessummary(
626 626 pushop.repo, pushop.fallbackheads, remotephases
627 627 )
628 628 droots = pushop.remotephases.draftroots
629 629
630 630 extracond = b''
631 631 if not pushop.remotephases.publishing:
632 632 extracond = b' and public()'
633 633 revset = b'heads((%%ln::%%ln) %s)' % extracond
634 634 # Get the list of all revs that are draft on the remote but public here.
635 635 # XXX Beware that the revset breaks if droots are not strictly
636 636 # XXX roots; we may want to ensure they are, but it is costly
637 637 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
638 638 if not pushop.remotephases.publishing and pushop.publish:
639 639 future = list(
640 640 unfi.set(
641 641 b'%ln and (not public() or %ln::)', pushop.futureheads, droots
642 642 )
643 643 )
644 644 elif not outgoing.missing:
645 645 future = fallback
646 646 else:
647 647 # add the changesets we are going to push as draft
648 648 #
649 649 # should not be necessary for a publishing server, but because of an
650 650 # issue fixed in xxxxx we have to do it anyway.
651 651 fdroots = list(
652 652 unfi.set(b'roots(%ln + %ln::)', outgoing.missing, droots)
653 653 )
654 654 fdroots = [f.node() for f in fdroots]
655 655 future = list(unfi.set(revset, fdroots, pushop.futureheads))
656 656 pushop.outdatedphases = future
657 657 pushop.fallbackoutdatedphases = fallback
658 658
659 659
660 660 @pushdiscovery(b'obsmarker')
661 661 def _pushdiscoveryobsmarkers(pushop):
662 662 if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
663 663 return
664 664
665 665 if not pushop.repo.obsstore:
666 666 return
667 667
668 668 if b'obsolete' not in listkeys(pushop.remote, b'namespaces'):
669 669 return
670 670
671 671 repo = pushop.repo
672 672 # very naive computation that can be quite expensive on a big repo.
673 673 # However, evolution is currently slow on them anyway.
674 674 nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads))
675 675 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
676 676
677 677
678 678 @pushdiscovery(b'bookmarks')
679 679 def _pushdiscoverybookmarks(pushop):
680 680 ui = pushop.ui
681 681 repo = pushop.repo.unfiltered()
682 682 remote = pushop.remote
683 683 ui.debug(b"checking for updated bookmarks\n")
684 684 ancestors = ()
685 685 if pushop.revs:
686 686 revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
687 687 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
688 688
689 689 remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks'))
690 690
691 691 explicit = {
692 692 repo._bookmarks.expandname(bookmark) for bookmark in pushop.bookmarks
693 693 }
694 694
695 695 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
696 696 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
697 697
698 698
699 699 def _processcompared(pushop, pushed, explicit, remotebms, comp):
700 700 """decide which bookmarks to push to the remote repo
701 701
702 702 Exists to help extensions alter this behavior.
703 703 """
704 704 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
705 705
706 706 repo = pushop.repo
707 707
708 708 for b, scid, dcid in advsrc:
709 709 if b in explicit:
710 710 explicit.remove(b)
711 711 if not pushed or repo[scid].rev() in pushed:
712 712 pushop.outbookmarks.append((b, dcid, scid))
713 713 # search for added bookmarks
714 714 for b, scid, dcid in addsrc:
715 715 if b in explicit:
716 716 explicit.remove(b)
717 717 if bookmod.isdivergent(b):
718 718 pushop.ui.warn(_(b'cannot push divergent bookmark %s!\n') % b)
719 719 pushop.bkresult = 2
720 elif pushed and repo[scid].rev() not in pushed:
721 # in case of race or secret
722 msg = _(b'cannot push bookmark %s without its revision!\n')
723 pushop.ui.warn(msg % b)
724 pushop.bkresult = 2
720 725 else:
721 726 pushop.outbookmarks.append((b, b'', scid))
722 727 # search for overwritten bookmarks
723 728 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
724 729 if b in explicit:
725 730 explicit.remove(b)
726 pushop.outbookmarks.append((b, dcid, scid))
731 if not pushed or repo[scid].rev() in pushed:
732 pushop.outbookmarks.append((b, dcid, scid))
727 733 # search for bookmarks to delete
728 734 for b, scid, dcid in adddst:
729 735 if b in explicit:
730 736 explicit.remove(b)
731 737 # treat as "deleted locally"
732 738 pushop.outbookmarks.append((b, dcid, b''))
733 739 # identical bookmarks shouldn't get reported
734 740 for b, scid, dcid in same:
735 741 if b in explicit:
736 742 explicit.remove(b)
737 743
738 744 if explicit:
739 745 explicit = sorted(explicit)
740 746 # we should probably list all of them
741 747 pushop.ui.warn(
742 748 _(
743 749 b'bookmark %s does not exist on the local '
744 750 b'or remote repository!\n'
745 751 )
746 752 % explicit[0]
747 753 )
748 754 pushop.bkresult = 2
749 755
750 756 pushop.outbookmarks.sort()
751 757
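# To make the guards above concrete: a bookmark move is only queued when
# its target revision is part of the pushed set (or when everything is
# being pushed), so a raced push degrades to "bookmark not pushed" instead
# of wrongly deleting or rewinding the remote bookmark. A toy model of the
# condition (illustrative sketch, names invented):
#
#     def may_send_bookmark_move(pushed, scid_rev):
#         # 'pushed' is empty/falsy when no --rev limit applies
#         return not pushed or scid_rev in pushed
#
# This mirrors the `not pushed or repo[scid].rev() in pushed` test used in
# both the advanced-bookmark and overwritten-bookmark branches above.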
752 758
753 759 def _pushcheckoutgoing(pushop):
754 760 outgoing = pushop.outgoing
755 761 unfi = pushop.repo.unfiltered()
756 762 if not outgoing.missing:
757 763 # nothing to push
758 764 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
759 765 return False
760 766 # something to push
761 767 if not pushop.force:
762 768 # if repo.obsstore == False --> no obsolete
763 769 # then, save the iteration
764 770 if unfi.obsstore:
765 771 # these messages are here for 80-char-limit reasons
766 772 mso = _(b"push includes obsolete changeset: %s!")
767 773 mspd = _(b"push includes phase-divergent changeset: %s!")
768 774 mscd = _(b"push includes content-divergent changeset: %s!")
769 775 mst = {
770 776 b"orphan": _(b"push includes orphan changeset: %s!"),
771 777 b"phase-divergent": mspd,
772 778 b"content-divergent": mscd,
773 779 }
774 780 # If we are to push and there is at least one
775 781 # obsolete or unstable changeset in missing, then at
776 782 # least one of the missing heads will be obsolete or
777 783 # unstable. So checking only the heads is ok
778 784 for node in outgoing.ancestorsof:
779 785 ctx = unfi[node]
780 786 if ctx.obsolete():
781 787 raise error.Abort(mso % ctx)
782 788 elif ctx.isunstable():
783 789 # TODO print more than one instability in the abort
784 790 # message
785 791 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
786 792
787 793 discovery.checkheads(pushop)
788 794 return True
789 795
790 796
791 797 # List of names of steps to perform for an outgoing bundle2, order matters.
792 798 b2partsgenorder = []
793 799
794 800 # Mapping between step name and function
795 801 #
796 802 # This exists to help extensions wrap steps if necessary
797 803 b2partsgenmapping = {}
798 804
799 805
800 806 def b2partsgenerator(stepname, idx=None):
801 807 """decorator for function generating bundle2 part
802 808
803 809 The function is added to the step -> function mapping and appended to the
804 810 list of steps. Beware that decorated functions will be added in order
805 811 (this may matter).
806 812
807 813 You can only use this decorator for new steps; if you want to wrap a step
808 814 from an extension, alter the b2partsgenmapping dictionary directly."""
809 815
810 816 def dec(func):
811 817 assert stepname not in b2partsgenmapping
812 818 b2partsgenmapping[stepname] = func
813 819 if idx is None:
814 820 b2partsgenorder.append(stepname)
815 821 else:
816 822 b2partsgenorder.insert(idx, stepname)
817 823 return func
818 824
819 825 return dec
820 826
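# A sketch of the ordering control: passing idx inserts the step at a given
# position in b2partsgenorder instead of appending, as the in-tree
# 'pushvars' generator further down does with idx=0. Hypothetical example:
#
#     @b2partsgenerator(b'my-part', idx=1)
#     def _pushb2_my_part(pushop, bundler):
#         pass  # would run as the second part generator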
821 827
822 828 def _pushb2ctxcheckheads(pushop, bundler):
823 829 """Generate race condition checking parts
824 830
825 831 Exists as an independent function to aid extensions
826 832 """
827 833 # * 'force' does not check for push races,
828 834 # * if we don't push anything, there is nothing to check.
829 835 if not pushop.force and pushop.outgoing.ancestorsof:
830 836 allowunrelated = b'related' in bundler.capabilities.get(
831 837 b'checkheads', ()
832 838 )
833 839 emptyremote = pushop.pushbranchmap is None
834 840 if not allowunrelated or emptyremote:
835 841 bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
836 842 else:
837 843 affected = set()
838 844 for branch, heads in pushop.pushbranchmap.items():
839 845 remoteheads, newheads, unsyncedheads, discardedheads = heads
840 846 if remoteheads is not None:
841 847 remote = set(remoteheads)
842 848 affected |= set(discardedheads) & remote
843 849 affected |= remote - set(newheads)
844 850 if affected:
845 851 data = iter(sorted(affected))
846 852 bundler.newpart(b'check:updated-heads', data=data)
847 853
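# Conceptually, these parts ship the client's view of the remote heads so
# the server can detect a concurrent push. A toy rendering of the
# server-side idea (illustrative only, not the actual wire format):
#
#     def heads_still_match(expected_heads, current_heads):
#         # abort the unbundle when someone pushed between our discovery
#         # and our upload (the commit/push race handled further down)
#         return set(expected_heads) == set(current_heads)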
848 854
849 855 def _pushing(pushop):
850 856 """return True if we are pushing anything"""
851 857 return bool(
852 858 pushop.outgoing.missing
853 859 or pushop.outdatedphases
854 860 or pushop.outobsmarkers
855 861 or pushop.outbookmarks
856 862 )
857 863
858 864
859 865 @b2partsgenerator(b'check-bookmarks')
860 866 def _pushb2checkbookmarks(pushop, bundler):
861 867 """insert bookmark move checking"""
862 868 if not _pushing(pushop) or pushop.force:
863 869 return
864 870 b2caps = bundle2.bundle2caps(pushop.remote)
865 871 hasbookmarkcheck = b'bookmarks' in b2caps
866 872 if not (pushop.outbookmarks and hasbookmarkcheck):
867 873 return
868 874 data = []
869 875 for book, old, new in pushop.outbookmarks:
870 876 data.append((book, old))
871 877 checkdata = bookmod.binaryencode(pushop.repo, data)
872 878 bundler.newpart(b'check:bookmarks', data=checkdata)
873 879
874 880
875 881 @b2partsgenerator(b'check-phases')
876 882 def _pushb2checkphases(pushop, bundler):
877 883 """insert phase move checking"""
878 884 if not _pushing(pushop) or pushop.force:
879 885 return
880 886 b2caps = bundle2.bundle2caps(pushop.remote)
881 887 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
882 888 if pushop.remotephases is not None and hasphaseheads:
883 889 # check that the remote phase has not changed
884 890 checks = {p: [] for p in phases.allphases}
885 891 checks[phases.public].extend(pushop.remotephases.publicheads)
886 892 checks[phases.draft].extend(pushop.remotephases.draftroots)
887 893 if any(checks.values()):
888 894 for phase in checks:
889 895 checks[phase].sort()
890 896 checkdata = phases.binaryencode(checks)
891 897 bundler.newpart(b'check:phases', data=checkdata)
892 898
893 899
894 900 @b2partsgenerator(b'changeset')
895 901 def _pushb2ctx(pushop, bundler):
896 902 """handle changegroup push through bundle2
897 903
898 904 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
899 905 """
900 906 if b'changesets' in pushop.stepsdone:
901 907 return
902 908 pushop.stepsdone.add(b'changesets')
903 909 # Send known heads to the server for race detection.
904 910 if not _pushcheckoutgoing(pushop):
905 911 return
906 912 pushop.repo.prepushoutgoinghooks(pushop)
907 913
908 914 _pushb2ctxcheckheads(pushop, bundler)
909 915
910 916 b2caps = bundle2.bundle2caps(pushop.remote)
911 917 version = b'01'
912 918 cgversions = b2caps.get(b'changegroup')
913 919 if cgversions: # 3.1 and 3.2 ship with an empty value
914 920 cgversions = [
915 921 v
916 922 for v in cgversions
917 923 if v in changegroup.supportedoutgoingversions(pushop.repo)
918 924 ]
919 925 if not cgversions:
920 926 raise error.Abort(_(b'no common changegroup version'))
921 927 version = max(cgversions)
922 928
923 929 remote_sidedata = bundle2.read_remote_wanted_sidedata(pushop.remote)
924 930 cgstream = changegroup.makestream(
925 931 pushop.repo,
926 932 pushop.outgoing,
927 933 version,
928 934 b'push',
929 935 bundlecaps=b2caps,
930 936 remote_sidedata=remote_sidedata,
931 937 )
932 938 cgpart = bundler.newpart(b'changegroup', data=cgstream)
933 939 if cgversions:
934 940 cgpart.addparam(b'version', version)
935 941 if scmutil.istreemanifest(pushop.repo):
936 942 cgpart.addparam(b'treemanifest', b'1')
937 943 if repository.REPO_FEATURE_SIDE_DATA in pushop.repo.features:
938 944 cgpart.addparam(b'exp-sidedata', b'1')
939 945
940 946 def handlereply(op):
941 947 """extract addchangegroup returns from server reply"""
942 948 cgreplies = op.records.getreplies(cgpart.id)
943 949 assert len(cgreplies[b'changegroup']) == 1
944 950 pushop.cgresult = cgreplies[b'changegroup'][0][b'return']
945 951
946 952 return handlereply
947 953
948 954
949 955 @b2partsgenerator(b'phase')
950 956 def _pushb2phases(pushop, bundler):
951 957 """handle phase push through bundle2"""
952 958 if b'phases' in pushop.stepsdone:
953 959 return
954 960 b2caps = bundle2.bundle2caps(pushop.remote)
955 961 ui = pushop.repo.ui
956 962
957 963 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
958 964 haspushkey = b'pushkey' in b2caps
959 965 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
960 966
961 967 if hasphaseheads and not legacyphase:
962 968 return _pushb2phaseheads(pushop, bundler)
963 969 elif haspushkey:
964 970 return _pushb2phasespushkey(pushop, bundler)
965 971
966 972
967 973 def _pushb2phaseheads(pushop, bundler):
968 974 """push phase information through a bundle2 - binary part"""
969 975 pushop.stepsdone.add(b'phases')
970 976 if pushop.outdatedphases:
971 977 updates = {p: [] for p in phases.allphases}
972 978 updates[0].extend(h.node() for h in pushop.outdatedphases)
973 979 phasedata = phases.binaryencode(updates)
974 980 bundler.newpart(b'phase-heads', data=phasedata)
975 981
976 982
977 983 def _pushb2phasespushkey(pushop, bundler):
978 984 """push phase information through a bundle2 - pushkey part"""
979 985 pushop.stepsdone.add(b'phases')
980 986 part2node = []
981 987
982 988 def handlefailure(pushop, exc):
983 989 targetid = int(exc.partid)
984 990 for partid, node in part2node:
985 991 if partid == targetid:
986 992 raise error.Abort(_(b'updating %s to public failed') % node)
987 993
988 994 enc = pushkey.encode
989 995 for newremotehead in pushop.outdatedphases:
990 996 part = bundler.newpart(b'pushkey')
991 997 part.addparam(b'namespace', enc(b'phases'))
992 998 part.addparam(b'key', enc(newremotehead.hex()))
993 999 part.addparam(b'old', enc(b'%d' % phases.draft))
994 1000 part.addparam(b'new', enc(b'%d' % phases.public))
995 1001 part2node.append((part.id, newremotehead))
996 1002 pushop.pkfailcb[part.id] = handlefailure
997 1003
998 1004 def handlereply(op):
999 1005 for partid, node in part2node:
1000 1006 partrep = op.records.getreplies(partid)
1001 1007 results = partrep[b'pushkey']
1002 1008 assert len(results) <= 1
1003 1009 msg = None
1004 1010 if not results:
1005 1011 msg = _(b'server ignored update of %s to public!\n') % node
1006 1012 elif not int(results[0][b'return']):
1007 1013 msg = _(b'updating %s to public failed!\n') % node
1008 1014 if msg is not None:
1009 1015 pushop.ui.warn(msg)
1010 1016
1011 1017 return handlereply
1012 1018
1013 1019
1014 1020 @b2partsgenerator(b'obsmarkers')
1015 1021 def _pushb2obsmarkers(pushop, bundler):
1016 1022 if b'obsmarkers' in pushop.stepsdone:
1017 1023 return
1018 1024 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
1019 1025 if obsolete.commonversion(remoteversions) is None:
1020 1026 return
1021 1027 pushop.stepsdone.add(b'obsmarkers')
1022 1028 if pushop.outobsmarkers:
1023 1029 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1024 1030 bundle2.buildobsmarkerspart(bundler, markers)
1025 1031
1026 1032
1027 1033 @b2partsgenerator(b'bookmarks')
1028 1034 def _pushb2bookmarks(pushop, bundler):
1029 1035 """handle bookmark push through bundle2"""
1030 1036 if b'bookmarks' in pushop.stepsdone:
1031 1037 return
1032 1038 b2caps = bundle2.bundle2caps(pushop.remote)
1033 1039
1034 1040 legacy = pushop.repo.ui.configlist(b'devel', b'legacy.exchange')
1035 1041 legacybooks = b'bookmarks' in legacy
1036 1042
1037 1043 if not legacybooks and b'bookmarks' in b2caps:
1038 1044 return _pushb2bookmarkspart(pushop, bundler)
1039 1045 elif b'pushkey' in b2caps:
1040 1046 return _pushb2bookmarkspushkey(pushop, bundler)
1041 1047
1042 1048
1043 1049 def _bmaction(old, new):
1044 1050 """small utility for bookmark pushing"""
1045 1051 if not old:
1046 1052 return b'export'
1047 1053 elif not new:
1048 1054 return b'delete'
1049 1055 return b'update'
1050 1056
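# The helper above reduces to a small truth table over empty/non-empty
# nodes:
#
#     _bmaction(b'', new)  -> b'export'   (bookmark is new on the remote)
#     _bmaction(old, b'')  -> b'delete'   (bookmark was deleted locally)
#     _bmaction(old, new)  -> b'update'   (both sides set)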
1051 1057
1052 1058 def _abortonsecretctx(pushop, node, b):
1053 1059 """abort if a given bookmark points to a secret changeset"""
1054 1060 if node and pushop.repo[node].phase() == phases.secret:
1055 1061 raise error.Abort(
1056 1062 _(b'cannot push bookmark %s as it points to a secret changeset') % b
1057 1063 )
1058 1064
1059 1065
1060 1066 def _pushb2bookmarkspart(pushop, bundler):
1061 1067 pushop.stepsdone.add(b'bookmarks')
1062 1068 if not pushop.outbookmarks:
1063 1069 return
1064 1070
1065 1071 allactions = []
1066 1072 data = []
1067 1073 for book, old, new in pushop.outbookmarks:
1068 1074 _abortonsecretctx(pushop, new, book)
1069 1075 data.append((book, new))
1070 1076 allactions.append((book, _bmaction(old, new)))
1071 1077 checkdata = bookmod.binaryencode(pushop.repo, data)
1072 1078 bundler.newpart(b'bookmarks', data=checkdata)
1073 1079
1074 1080 def handlereply(op):
1075 1081 ui = pushop.ui
1076 1082 # if success
1077 1083 for book, action in allactions:
1078 1084 ui.status(bookmsgmap[action][0] % book)
1079 1085
1080 1086 return handlereply
1081 1087
1082 1088
1083 1089 def _pushb2bookmarkspushkey(pushop, bundler):
1084 1090 pushop.stepsdone.add(b'bookmarks')
1085 1091 part2book = []
1086 1092 enc = pushkey.encode
1087 1093
1088 1094 def handlefailure(pushop, exc):
1089 1095 targetid = int(exc.partid)
1090 1096 for partid, book, action in part2book:
1091 1097 if partid == targetid:
1092 1098 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
1093 1099 # we should not be called for parts we did not generate
1094 1100 assert False
1095 1101
1096 1102 for book, old, new in pushop.outbookmarks:
1097 1103 _abortonsecretctx(pushop, new, book)
1098 1104 part = bundler.newpart(b'pushkey')
1099 1105 part.addparam(b'namespace', enc(b'bookmarks'))
1100 1106 part.addparam(b'key', enc(book))
1101 1107 part.addparam(b'old', enc(hex(old)))
1102 1108 part.addparam(b'new', enc(hex(new)))
1103 1109 action = b'update'
1104 1110 if not old:
1105 1111 action = b'export'
1106 1112 elif not new:
1107 1113 action = b'delete'
1108 1114 part2book.append((part.id, book, action))
1109 1115 pushop.pkfailcb[part.id] = handlefailure
1110 1116
1111 1117 def handlereply(op):
1112 1118 ui = pushop.ui
1113 1119 for partid, book, action in part2book:
1114 1120 partrep = op.records.getreplies(partid)
1115 1121 results = partrep[b'pushkey']
1116 1122 assert len(results) <= 1
1117 1123 if not results:
1118 1124 pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
1119 1125 else:
1120 1126 ret = int(results[0][b'return'])
1121 1127 if ret:
1122 1128 ui.status(bookmsgmap[action][0] % book)
1123 1129 else:
1124 1130 ui.warn(bookmsgmap[action][1] % book)
1125 1131 if pushop.bkresult is not None:
1126 1132 pushop.bkresult = 1
1127 1133
1128 1134 return handlereply
1129 1135
1130 1136
1131 1137 @b2partsgenerator(b'pushvars', idx=0)
1132 1138 def _getbundlesendvars(pushop, bundler):
1133 1139 '''send shellvars via bundle2'''
1134 1140 pushvars = pushop.pushvars
1135 1141 if pushvars:
1136 1142 shellvars = {}
1137 1143 for raw in pushvars:
1138 1144 if b'=' not in raw:
1139 1145 msg = (
1140 1146 b"unable to parse variable '%s', should follow "
1141 1147 b"'KEY=VALUE' or 'KEY=' format"
1142 1148 )
1143 1149 raise error.Abort(msg % raw)
1144 1150 k, v = raw.split(b'=', 1)
1145 1151 shellvars[k] = v
1146 1152
1147 1153 part = bundler.newpart(b'pushvars')
1148 1154
1149 1155 for key, value in shellvars.items():
1150 1156 part.addparam(key, value, mandatory=False)
1151 1157
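# For context, these shell variables typically arrive from the command
# line; a usage sketch (the server exposes them to hooks as HG_USERVAR_*
# environment variables):
#
#     hg push --pushvars 'DEBUG=1' --pushvars 'BYPASS_REVIEW='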
1152 1158
1153 1159 def _pushbundle2(pushop):
1154 1160 """push data to the remote using bundle2
1155 1161
1156 1162 The only currently supported type of data is changegroup but this will
1157 1163 evolve in the future."""
1158 1164 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1159 1165 pushback = pushop.trmanager and pushop.ui.configbool(
1160 1166 b'experimental', b'bundle2.pushback'
1161 1167 )
1162 1168
1163 1169 # create reply capability
1164 1170 capsblob = bundle2.encodecaps(
1165 1171 bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
1166 1172 )
1167 1173 bundler.newpart(b'replycaps', data=capsblob)
1168 1174 replyhandlers = []
1169 1175 for partgenname in b2partsgenorder:
1170 1176 partgen = b2partsgenmapping[partgenname]
1171 1177 ret = partgen(pushop, bundler)
1172 1178 if callable(ret):
1173 1179 replyhandlers.append(ret)
1174 1180 # do not push if nothing to push
1175 1181 if bundler.nbparts <= 1:
1176 1182 return
1177 1183 stream = util.chunkbuffer(bundler.getchunks())
1178 1184 try:
1179 1185 try:
1180 1186 with pushop.remote.commandexecutor() as e:
1181 1187 reply = e.callcommand(
1182 1188 b'unbundle',
1183 1189 {
1184 1190 b'bundle': stream,
1185 1191 b'heads': [b'force'],
1186 1192 b'url': pushop.remote.url(),
1187 1193 },
1188 1194 ).result()
1189 1195 except error.BundleValueError as exc:
1190 1196 raise error.RemoteError(_(b'missing support for %s') % exc)
1191 1197 try:
1192 1198 trgetter = None
1193 1199 if pushback:
1194 1200 trgetter = pushop.trmanager.transaction
1195 1201 op = bundle2.processbundle(
1196 1202 pushop.repo,
1197 1203 reply,
1198 1204 trgetter,
1199 1205 remote=pushop.remote,
1200 1206 )
1201 1207 except error.BundleValueError as exc:
1202 1208 raise error.RemoteError(_(b'missing support for %s') % exc)
1203 1209 except bundle2.AbortFromPart as exc:
1204 1210 pushop.ui.error(_(b'remote: %s\n') % exc)
1205 1211 if exc.hint is not None:
1206 1212 pushop.ui.error(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
1207 1213 raise error.RemoteError(_(b'push failed on remote'))
1208 1214 except error.PushkeyFailed as exc:
1209 1215 partid = int(exc.partid)
1210 1216 if partid not in pushop.pkfailcb:
1211 1217 raise
1212 1218 pushop.pkfailcb[partid](pushop, exc)
1213 1219 for rephand in replyhandlers:
1214 1220 rephand(op)
1215 1221
1216 1222
1217 1223 def _pushchangeset(pushop):
1218 1224 """Make the actual push of changeset bundle to remote repo"""
1219 1225 if b'changesets' in pushop.stepsdone:
1220 1226 return
1221 1227 pushop.stepsdone.add(b'changesets')
1222 1228 if not _pushcheckoutgoing(pushop):
1223 1229 return
1224 1230
1225 1231 # Should have verified this in push().
1226 1232 assert pushop.remote.capable(b'unbundle')
1227 1233
1228 1234 pushop.repo.prepushoutgoinghooks(pushop)
1229 1235 outgoing = pushop.outgoing
1230 1236 # TODO: get bundlecaps from remote
1231 1237 bundlecaps = None
1232 1238 # create a changegroup from local
1233 1239 if pushop.revs is None and not (
1234 1240 outgoing.excluded or pushop.repo.changelog.filteredrevs
1235 1241 ):
1236 1242 # push everything,
1237 1243 # use the fast path, no race possible on push
1238 1244 cg = changegroup.makechangegroup(
1239 1245 pushop.repo,
1240 1246 outgoing,
1241 1247 b'01',
1242 1248 b'push',
1243 1249 fastpath=True,
1244 1250 bundlecaps=bundlecaps,
1245 1251 )
1246 1252 else:
1247 1253 cg = changegroup.makechangegroup(
1248 1254 pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps
1249 1255 )
1250 1256
1251 1257 # apply changegroup to remote
1252 1258 # local repo finds heads on server, finds out what
1253 1259 # revs it must push. once revs transferred, if server
1254 1260 # finds it has different heads (someone else won
1255 1261 # commit/push race), server aborts.
1256 1262 if pushop.force:
1257 1263 remoteheads = [b'force']
1258 1264 else:
1259 1265 remoteheads = pushop.remoteheads
1260 1266 # ssh: return remote's addchangegroup()
1261 1267 # http: return remote's addchangegroup() or 0 for error
1262 1268 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())
1263 1269
1264 1270
1265 1271 def _pushsyncphase(pushop):
1266 1272 """synchronise phase information locally and remotely"""
1267 1273 cheads = pushop.commonheads
1268 1274 # even when we don't push, exchanging phase data is useful
1269 1275 remotephases = listkeys(pushop.remote, b'phases')
1270 1276 if (
1271 1277 pushop.ui.configbool(b'ui', b'_usedassubrepo')
1272 1278 and remotephases # server supports phases
1273 1279 and pushop.cgresult is None # nothing was pushed
1274 1280 and remotephases.get(b'publishing', False)
1275 1281 ):
1276 1282 # When:
1277 1283 # - this is a subrepo push
1278 1284 # - and the remote supports phases
1279 1285 # - and no changeset was pushed
1280 1286 # - and the remote is publishing
1281 1287 # We may be in the issue 3871 case!
1282 1288 # We drop the phase synchronisation usually done as a courtesy,
1283 1289 # which could publish changesets that are still draft locally
1284 1290 # on the remote.
1285 1291 remotephases = {b'publishing': b'True'}
1286 1292 if not remotephases: # old server or public only reply from non-publishing
1287 1293 _localphasemove(pushop, cheads)
1288 1294 # don't push any phase data as there is nothing to push
1289 1295 else:
1290 1296 ana = phases.analyzeremotephases(pushop.repo, cheads, remotephases)
1291 1297 pheads, droots = ana
1292 1298 ### Apply remote phase on local
1293 1299 if remotephases.get(b'publishing', False):
1294 1300 _localphasemove(pushop, cheads)
1295 1301 else: # publish = False
1296 1302 _localphasemove(pushop, pheads)
1297 1303 _localphasemove(pushop, cheads, phases.draft)
1298 1304 ### Apply local phase on remote
1299 1305
1300 1306 if pushop.cgresult:
1301 1307 if b'phases' in pushop.stepsdone:
1302 1308 # phases already pushed though bundle2
1303 1309 return
1304 1310 outdated = pushop.outdatedphases
1305 1311 else:
1306 1312 outdated = pushop.fallbackoutdatedphases
1307 1313
1308 1314 pushop.stepsdone.add(b'phases')
1309 1315
1310 1316 # filter heads already turned public by the push
1311 1317 outdated = [c for c in outdated if c.node() not in pheads]
1312 1318 # fallback to independent pushkey command
1313 1319 for newremotehead in outdated:
1314 1320 with pushop.remote.commandexecutor() as e:
1315 1321 r = e.callcommand(
1316 1322 b'pushkey',
1317 1323 {
1318 1324 b'namespace': b'phases',
1319 1325 b'key': newremotehead.hex(),
1320 1326 b'old': b'%d' % phases.draft,
1321 1327 b'new': b'%d' % phases.public,
1322 1328 },
1323 1329 ).result()
1324 1330
1325 1331 if not r:
1326 1332 pushop.ui.warn(
1327 1333 _(b'updating %s to public failed!\n') % newremotehead
1328 1334 )
1329 1335
1330 1336
1331 1337 def _localphasemove(pushop, nodes, phase=phases.public):
1332 1338 """move <nodes> to <phase> in the local source repo"""
1333 1339 if pushop.trmanager:
1334 1340 phases.advanceboundary(
1335 1341 pushop.repo, pushop.trmanager.transaction(), phase, nodes
1336 1342 )
1337 1343 else:
1338 1344 # repo is not locked, do not change any phases!
1340 1346 # Inform the user that phases should have been moved when
1341 1347 # applicable.
1341 1347 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1342 1348 phasestr = phases.phasenames[phase]
1343 1349 if actualmoves:
1344 1350 pushop.ui.status(
1345 1351 _(
1346 1352 b'cannot lock source repo, skipping '
1347 1353 b'local %s phase update\n'
1348 1354 )
1349 1355 % phasestr
1350 1356 )
1351 1357
1352 1358
1353 1359 def _pushobsolete(pushop):
1354 1360 """utility function to push obsolete markers to a remote"""
1355 1361 if b'obsmarkers' in pushop.stepsdone:
1356 1362 return
1357 1363 repo = pushop.repo
1358 1364 remote = pushop.remote
1359 1365 pushop.stepsdone.add(b'obsmarkers')
1360 1366 if pushop.outobsmarkers:
1361 1367 pushop.ui.debug(b'try to push obsolete markers to remote\n')
1362 1368 rslts = []
1363 1369 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1364 1370 remotedata = obsolete._pushkeyescape(markers)
1365 1371 for key in sorted(remotedata, reverse=True):
1366 1372 # reverse sort to ensure we end with dump0
1367 1373 data = remotedata[key]
1368 1374 rslts.append(remote.pushkey(b'obsolete', key, b'', data))
1369 1375 if [r for r in rslts if not r]:
1370 1376 msg = _(b'failed to push some obsolete markers!\n')
1371 1377 repo.ui.warn(msg)
1372 1378
1373 1379
1374 1380 def _pushbookmark(pushop):
1375 1381 """Update bookmark position on remote"""
1376 1382 if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
1377 1383 return
1378 1384 pushop.stepsdone.add(b'bookmarks')
1379 1385 ui = pushop.ui
1380 1386 remote = pushop.remote
1381 1387
1382 1388 for b, old, new in pushop.outbookmarks:
1383 1389 action = b'update'
1384 1390 if not old:
1385 1391 action = b'export'
1386 1392 elif not new:
1387 1393 action = b'delete'
1388 1394
1389 1395 with remote.commandexecutor() as e:
1390 1396 r = e.callcommand(
1391 1397 b'pushkey',
1392 1398 {
1393 1399 b'namespace': b'bookmarks',
1394 1400 b'key': b,
1395 1401 b'old': hex(old),
1396 1402 b'new': hex(new),
1397 1403 },
1398 1404 ).result()
1399 1405
1400 1406 if r:
1401 1407 ui.status(bookmsgmap[action][0] % b)
1402 1408 else:
1403 1409 ui.warn(bookmsgmap[action][1] % b)
1404 1410 # discovery can have set the value from an invalid entry
1405 1411 if pushop.bkresult is not None:
1406 1412 pushop.bkresult = 1
1407 1413
1408 1414
1409 1415 class pulloperation:
1410 1416 """An object that represents a single pull operation
1411 1417
1412 1418 Its purpose is to carry pull-related state and very common operations.
1413 1419
1414 1420 A new one should be created at the beginning of each pull and discarded
1415 1421 afterward.
1416 1422 """
1417 1423
1418 1424 def __init__(
1419 1425 self,
1420 1426 repo,
1421 1427 remote,
1422 1428 heads=None,
1423 1429 force=False,
1424 1430 bookmarks=(),
1425 1431 remotebookmarks=None,
1426 1432 streamclonerequested=None,
1427 1433 includepats=None,
1428 1434 excludepats=None,
1429 1435 depth=None,
1430 1436 path=None,
1431 1437 ):
1432 1438 # repo we pull into
1433 1439 self.repo = repo
1434 1440 # repo we pull from
1435 1441 self.remote = remote
1436 1442 # path object used to build this remote
1437 1443 #
1438 1444 # Ideally, the remote peer would carry that directly.
1439 1445 self.remote_path = path
1440 1446 # revision we try to pull (None is "all")
1441 1447 self.heads = heads
1442 1448 # bookmark pulled explicitly
1443 1449 self.explicitbookmarks = [
1444 1450 repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
1445 1451 ]
1446 1452 # do we force pull?
1447 1453 self.force = force
1448 1454 # whether a streaming clone was requested
1449 1455 self.streamclonerequested = streamclonerequested
1450 1456 # transaction manager
1451 1457 self.trmanager = None
1452 1458 # set of common changesets between local and remote before pull
1453 1459 self.common = None
1454 1460 # set of pulled heads
1455 1461 self.rheads = None
1456 1462 # list of missing changesets to fetch remotely
1457 1463 self.fetch = None
1458 1464 # remote bookmarks data
1459 1465 self.remotebookmarks = remotebookmarks
1460 1466 # result of changegroup pulling (used as return code by pull)
1461 1467 self.cgresult = None
1462 1468 # list of steps already done
1463 1469 self.stepsdone = set()
1464 1470 # Whether we attempted a clone from pre-generated bundles.
1465 1471 self.clonebundleattempted = False
1466 1472 # Set of file patterns to include.
1467 1473 self.includepats = includepats
1468 1474 # Set of file patterns to exclude.
1469 1475 self.excludepats = excludepats
1470 1476 # Number of ancestor changesets to pull from each pulled head.
1471 1477 self.depth = depth
1472 1478
1473 1479 @util.propertycache
1474 1480 def pulledsubset(self):
1475 1481 """heads of the set of changesets targeted by the pull"""
1476 1482 # compute target subset
1477 1483 if self.heads is None:
1478 1484 # We pulled everything possible
1479 1485 # sync on everything common
1480 1486 c = set(self.common)
1481 1487 ret = list(self.common)
1482 1488 for n in self.rheads:
1483 1489 if n not in c:
1484 1490 ret.append(n)
1485 1491 return ret
1486 1492 else:
1487 1493 # We pulled a specific subset
1488 1494 # sync on this subset
1489 1495 return self.heads
1490 1496
1491 1497 @util.propertycache
1492 1498 def canusebundle2(self):
1493 1499 return not _forcebundle1(self)
1494 1500
1495 1501 @util.propertycache
1496 1502 def remotebundle2caps(self):
1497 1503 return bundle2.bundle2caps(self.remote)
1498 1504
1499 1505 def gettransaction(self):
1500 1506 # deprecated; talk to trmanager directly
1501 1507 return self.trmanager.transaction()
1502 1508
1503 1509
1504 1510 class transactionmanager(util.transactional):
1505 1511 """An object to manage the life cycle of a transaction
1506 1512
1507 1513 It creates the transaction on demand and calls the appropriate hooks when
1508 1514 closing the transaction."""
1509 1515
1510 1516 def __init__(self, repo, source, url):
1511 1517 self.repo = repo
1512 1518 self.source = source
1513 1519 self.url = url
1514 1520 self._tr = None
1515 1521
1516 1522 def transaction(self):
1517 1523 """Return an open transaction object, constructing if necessary"""
1518 1524 if not self._tr:
1519 1525 trname = b'%s\n%s' % (self.source, urlutil.hidepassword(self.url))
1520 1526 self._tr = self.repo.transaction(trname)
1521 1527 self._tr.hookargs[b'source'] = self.source
1522 1528 self._tr.hookargs[b'url'] = self.url
1523 1529 return self._tr
1524 1530
1525 1531 def close(self):
1526 1532 """close transaction if created"""
1527 1533 if self._tr is not None:
1528 1534 self._tr.close()
1529 1535
1530 1536 def release(self):
1531 1537 """release transaction if created"""
1532 1538 if self._tr is not None:
1533 1539 self._tr.release()
1534 1540
1535 1541
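# Example (editor's illustrative sketch, not part of the original module):
# how callers are expected to drive a transactionmanager. The helper name is
# hypothetical; pull() below uses the same pattern.
def _example_trmanager_usage(repo, remote):
    trmanager = transactionmanager(repo, b'pull', remote.url())
    with trmanager:
        # the transaction is only created on first use, so an operation that
        # transfers nothing never opens one
        tr = trmanager.transaction()
        tr.hookargs[b'example'] = b'1'
    # util.transactional's context manager calls close() on success and
    # release() on error, so a failed operation leaves no partial transaction

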
def listkeys(remote, namespace):
    with remote.commandexecutor() as e:
        return e.callcommand(b'listkeys', {b'namespace': namespace}).result()


def _fullpullbundle2(repo, pullop):
    # The server may send a partial reply, i.e. when inlining
    # pre-computed bundles. In that case, update the common
    # set based on the results and pull another bundle.
    #
    # There are two indicators that the process is finished:
    # - no changeset has been added, or
    # - all remote heads are known locally.
    # The head check must use the unfiltered view as obsoletion
    # markers can hide heads.
    unfi = repo.unfiltered()
    unficl = unfi.changelog

    def headsofdiff(h1, h2):
        """Returns heads(h1 % h2)"""
        res = unfi.set(b'heads(%ln %% %ln)', h1, h2)
        return {ctx.node() for ctx in res}

    def headsofunion(h1, h2):
        """Returns heads((h1 + h2) - null)"""
        res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)
        return {ctx.node() for ctx in res}

    while True:
        old_heads = unficl.heads()
        clstart = len(unficl)
        _pullbundle2(pullop)
        if requirements.NARROW_REQUIREMENT in repo.requirements:
            # XXX narrow clones filter the heads on the server side during
            # XXX getbundle and result in partial replies as well.
            # XXX Disable pull bundles in this case as a band-aid to avoid
            # XXX extra round trips.
            break
        if clstart == len(unficl):
            break
        if all(unficl.hasnode(n) for n in pullop.rheads):
            break
        new_heads = headsofdiff(unficl.heads(), old_heads)
        pullop.common = headsofunion(new_heads, pullop.common)
        pullop.rheads = set(pullop.rheads) - pullop.common


def add_confirm_callback(repo, pullop):
    """adds a finalize callback to the transaction, which can be used to show
    stats to the user and confirm the pull before committing the transaction"""

    tr = pullop.trmanager.transaction()
    scmutil.registersummarycallback(
        repo, tr, txnname=b'pull', as_validator=True
    )
    reporef = weakref.ref(repo.unfiltered())

    def prompt(tr):
        repo = reporef()
        cm = _(b'accept incoming changes (yn)?$$ &Yes $$ &No')
        if repo.ui.promptchoice(cm):
            raise error.Abort(b"user aborted")

    tr.addvalidator(b'900-pull-prompt', prompt)


def pull(
    repo,
    remote,
    path=None,
    heads=None,
    force=False,
    bookmarks=(),
    opargs=None,
    streamclonerequested=None,
    includepats=None,
    excludepats=None,
    depth=None,
    confirm=None,
):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.
    ``includepats`` and ``excludepats`` define explicit file patterns to
    include and exclude in storage, respectively. If not defined, narrow
    patterns from the repo instance are used, if available.
    ``depth`` is an integer indicating the DAG depth of history we're
    interested in. If defined, for each revision specified in ``heads``, we
    will fetch up to this many of its ancestors and data associated with them.
    ``confirm`` is a boolean indicating whether the pull should be confirmed
    before committing the transaction. This overrides HGPLAIN.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}

    # We allow the narrow patterns to be passed in explicitly to provide more
    # flexibility for API consumers.
    if includepats is not None or excludepats is not None:
        includepats = includepats or set()
        excludepats = excludepats or set()
    else:
        includepats, excludepats = repo.narrowpats

    narrowspec.validatepatterns(includepats)
    narrowspec.validatepatterns(excludepats)

    pullop = pulloperation(
        repo,
        remote,
        path=path,
        heads=heads,
        force=force,
        bookmarks=bookmarks,
        streamclonerequested=streamclonerequested,
        includepats=includepats,
        excludepats=excludepats,
        depth=depth,
        **pycompat.strkwargs(opargs)
    )

    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _(
                b"required features are not"
                b" supported in the destination:"
                b" %s"
            ) % (b', '.join(sorted(missing)))
            raise error.Abort(msg)

    for category in repo._wanted_sidedata:
        # Check that a computer is registered for that category for at least
        # one revlog kind.
        for kind, computers in repo._sidedata_computers.items():
            if computers.get(category):
                break
        else:
            # This should never happen since repos are supposed to be able to
            # generate the sidedata they require.
            raise error.ProgrammingError(
                _(
                    b'sidedata category requested by local side without local'
                    b"support: '%s'"
                )
                % pycompat.bytestr(category)
            )

    pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
    wlock = util.nullcontextmanager
    if not bookmod.bookmarksinstore(repo):
        wlock = repo.wlock
    with wlock(), repo.lock(), pullop.trmanager:
        if confirm or (
            repo.ui.configbool(b"pull", b"confirm") and not repo.ui.plain()
        ):
            add_confirm_callback(repo, pullop)

        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        streamclone.maybeperformlegacystreamclone(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _fullpullbundle2(repo, pullop)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)

        # storing remotenames
        if repo.ui.configbool(b'experimental', b'remotenames'):
            logexchange.pullremotenames(repo, remote)

    return pullop


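# Example (editor's sketch, not part of the original module): a minimal
# programmatic pull of a specific head plus one bookmark, roughly what
# `hg pull -r ... -B my-book` boils down to. `repo`, `remote` and `node`
# are assumed to be supplied by the caller; the bookmark name is made up.
def _example_pull(repo, remote, node):
    pullop = pull(
        repo,
        remote,
        heads=[node],  # None would mean "pull everything"
        bookmarks=[b'my-book'],  # pulled in addition to the changesets
    )
    # cgresult is the changegroup result, usable as a command return code
    return pullop.cgresult

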
# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}


def pulldiscovery(stepname):
    """decorator for a function performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, change the pulldiscoverymapping dictionary directly."""

    def dec(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func

    return dec


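# Example (editor's sketch, not part of the original module): how an
# extension could register an additional discovery step. The step name and
# body are hypothetical; steps run in registration order.
@pulldiscovery(b'example-step')
def _example_discovery_step(pullop):
    pullop.repo.ui.debug(b'running example discovery step\n')

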
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    for stepname in pulldiscoveryorder:
        step = pulldiscoverymapping[stepname]
        step(pullop)


@pulldiscovery(b'b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in the bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        return
    if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but let's be nice
        # with new implementations.
        return
    books = listkeys(pullop.remote, b'bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)


@pulldiscovery(b'changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will change to handle all
    discovery at some point."""
    tmp = discovery.findcommonincoming(
        pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
    )
    common, fetch, rheads = tmp
    has_node = pullop.repo.unfiltered().changelog.index.has_node
    if fetch and rheads:
        # If a remote head is filtered locally, put it back in common.
        #
        # This is a hackish solution to catch most of the "common but locally
        # hidden" situations. We do not perform discovery on the unfiltered
        # repository because it ends up doing a pathological number of round
        # trips for a huge number of changesets we do not care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we will not be able to
        # detect it.
        scommon = set(common)
        for n in rheads:
            if has_node(n):
                if n not in scommon:
                    common.append(n)
        if set(rheads).issubset(set(common)):
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads


def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data is the changegroup."""
    kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}

    # make ui easier to access
    ui = pullop.repo.ui

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]

    # declare pull perimeters
    kwargs[b'common'] = pullop.common
    kwargs[b'heads'] = pullop.heads or pullop.rheads

    # check that the server supports narrow, then add includepats and
    # excludepats
    servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
    if servernarrow and pullop.includepats:
        kwargs[b'includepats'] = pullop.includepats
    if servernarrow and pullop.excludepats:
        kwargs[b'excludepats'] = pullop.excludepats

    if streaming:
        kwargs[b'cg'] = False
        kwargs[b'stream'] = True
        pullop.stepsdone.add(b'changegroup')
        pullop.stepsdone.add(b'phases')

    else:
        # pulling changegroup
        pullop.stepsdone.add(b'changegroup')

        kwargs[b'cg'] = pullop.fetch

        legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
        hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
        if not legacyphase and hasbinaryphase:
            kwargs[b'phases'] = True
            pullop.stepsdone.add(b'phases')

        if b'listkeys' in pullop.remotebundle2caps:
            if b'phases' not in pullop.stepsdone:
                kwargs[b'listkeys'] = [b'phases']

    bookmarksrequested = False
    legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
    hasbinarybook = b'bookmarks' in pullop.remotebundle2caps

    if pullop.remotebookmarks is not None:
        pullop.stepsdone.add(b'request-bookmarks')

    if (
        b'request-bookmarks' not in pullop.stepsdone
        and pullop.remotebookmarks is None
        and not legacybookmark
        and hasbinarybook
    ):
        kwargs[b'bookmarks'] = True
        bookmarksrequested = True

    if b'listkeys' in pullop.remotebundle2caps:
        if b'request-bookmarks' not in pullop.stepsdone:
            # make sure to always include bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            pullop.stepsdone.add(b'request-bookmarks')
            kwargs.setdefault(b'listkeys', []).append(b'bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (
        pullop.remote.capable(b'clonebundles')
        and pullop.heads is None
        and list(pullop.common) == [pullop.repo.nullid]
    ):
        kwargs[b'cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_(b'streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_(b"no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]:
            pullop.repo.ui.status(_(b"requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs[b'obsmarkers'] = True
            pullop.stepsdone.add(b'obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)

    remote_sidedata = bundle2.read_remote_wanted_sidedata(pullop.remote)
    if remote_sidedata:
        kwargs[b'remote_sidedata'] = remote_sidedata

    with pullop.remote.commandexecutor() as e:
        args = dict(kwargs)
        args[b'source'] = b'pull'
        bundle = e.callcommand(b'getbundle', args).result()

        try:
            op = bundle2.bundleoperation(
                pullop.repo,
                pullop.gettransaction,
                source=b'pull',
                remote=pullop.remote,
            )
            op.modes[b'bookmarks'] = b'records'
            bundle2.processbundle(
                pullop.repo,
                bundle,
                op=op,
                remote=pullop.remote,
            )
        except bundle2.AbortFromPart as exc:
            pullop.repo.ui.error(_(b'remote: abort: %s\n') % exc)
            raise error.RemoteError(_(b'pull failed on remote'), hint=exc.hint)
        except error.BundleValueError as exc:
            raise error.RemoteError(_(b'missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # processing phases change
    for namespace, value in op.records[b'listkeys']:
        if namespace == b'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    if bookmarksrequested:
        books = {}
        for record in op.records[b'bookmarks']:
            books[record[b'bookmark']] = record[b"node"]
        pullop.remotebookmarks = books
    else:
        for namespace, value in op.records[b'listkeys']:
            if namespace == b'bookmarks':
                pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)

    # bookmark data was either already there or was pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)


def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""


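# Example (editor's sketch, not part of the original module): an extension
# would typically wrap the hook above to smuggle extra arguments into the
# getbundle call, e.g. (the argument name is made up, and this assumes the
# module is imported as `exchange`):
#
#     def _extraprepare(orig, pullop, kwargs):
#         kwargs[b'myext-flag'] = True
#         return orig(pullop, kwargs)
#
#     extensions.wrapfunction(exchange, '_pullbundle2extraprepare', _extraprepare)

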
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay opening the transaction as long as possible so we don't open
    # a transaction for nothing, and don't break future useful rollback calls
    if b'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_(b"no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]:
        pullop.repo.ui.status(_(b"requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable(b'getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle(
            b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
        )
    elif pullop.heads is None:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand(
                b'changegroup',
                {
                    b'nodes': pullop.fetch,
                    b'source': b'pull',
                },
            ).result()

    elif not pullop.remote.capable(b'changegroupsubset'):
        raise error.Abort(
            _(
                b"partial pull cannot be done because "
                b"other repository doesn't support "
                b"changegroupsubset."
            )
        )
    else:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand(
                b'changegroupsubset',
                {
                    b'bases': pullop.fetch,
                    b'heads': pullop.heads,
                    b'source': b'pull',
                },
            ).result()

    bundleop = bundle2.applybundle(
        pullop.repo,
        cg,
        tr,
        b'pull',
        pullop.remote.url(),
        remote=pullop.remote,
    )
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)


def _pullphase(pullop):
    # Get remote phases data from remote
    if b'phases' in pullop.stepsdone:
        return
    remotephases = listkeys(pullop.remote, b'phases')
    _pullapplyphases(pullop, remotephases)


def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if b'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'phases')
    publishing = bool(remotephases.get(b'publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(
            pullop.repo, pullop.pulledsubset, remotephases
        )
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing; all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.index.get_rev
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)


def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if b'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    bookmarks_mode = None
    if pullop.remote_path is not None:
        bookmarks_mode = pullop.remote_path.bookmarks_mode
    bookmod.updatefromremote(
        repo.ui,
        repo,
        remotebookmarks,
        pullop.remote.url(),
        pullop.gettransaction,
        explicit=pullop.explicitbookmarks,
        mode=bookmarks_mode,
    )


def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    `gettransaction` is a function that returns the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code
    that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if b'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
        remoteobs = listkeys(pullop.remote, b'obsolete')
        if b'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith(b'dump'):
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
        pullop.repo.invalidatevolatilesets()
    return tr


def applynarrowacl(repo, kwargs):
    """Apply narrow fetch access control.

    This massages the named arguments for getbundle wire protocol commands
    so requested data is filtered through access control rules.
    """
    ui = repo.ui
    # TODO this assumes existence of HTTP and is a layering violation.
    username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())
    user_includes = ui.configlist(
        _NARROWACL_SECTION,
        username + b'.includes',
        ui.configlist(_NARROWACL_SECTION, b'default.includes'),
    )
    user_excludes = ui.configlist(
        _NARROWACL_SECTION,
        username + b'.excludes',
        ui.configlist(_NARROWACL_SECTION, b'default.excludes'),
    )
    if not user_includes:
        raise error.Abort(
            _(b"%s configuration for user %s is empty")
            % (_NARROWACL_SECTION, username)
        )

    user_includes = [
        b'path:.' if p == b'*' else b'path:' + p for p in user_includes
    ]
    user_excludes = [
        b'path:.' if p == b'*' else b'path:' + p for p in user_excludes
    ]

    req_includes = set(kwargs.get('includepats', []))
    req_excludes = set(kwargs.get('excludepats', []))

    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
        req_includes, req_excludes, user_includes, user_excludes
    )

    if invalid_includes:
        raise error.Abort(
            _(b"The following includes are not accessible for %s: %s")
            % (username, stringutil.pprint(invalid_includes))
        )

    new_args = {}
    new_args.update(kwargs)
    new_args['narrow'] = True
    new_args['narrow_acl'] = True
    new_args['includepats'] = req_includes
    if req_excludes:
        new_args['excludepats'] = req_excludes

    return new_args


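# Example (editor's sketch): the kind of server-side configuration that
# applynarrowacl() above reads. The section name is whatever
# _NARROWACL_SECTION holds, and the user names are made up:
#
#     [narrowacl]              # assuming _NARROWACL_SECTION == b'narrowacl'
#     default.includes = *
#     alice.includes = src/ docs/
#     alice.excludes = src/secret/
#
# A `*` include is rewritten to `path:.` (the whole repository); every other
# entry gets a `path:` prefix before being intersected with the patterns the
# client requested.

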
def _computeellipsis(repo, common, heads, known, match, depth=None):
    """Compute the shape of a narrowed DAG.

    Args:
      repo: The repository we're transferring.
      common: The roots of the DAG range we're transferring.
          May be just [nullid], which means all ancestors of heads.
      heads: The heads of the DAG range we're transferring.
      known: A set of revisions the client already knows; these are always
          treated as required full nodes.
      match: The narrowmatcher that allows us to identify relevant changes.
      depth: If not None, only consider nodes to be full nodes if they are at
          most depth changesets away from one of heads.

    Returns:
      A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:

        visitnodes: The list of nodes (either full or ellipsis) which
            need to be sent to the client.
        relevant_nodes: The set of changelog nodes which change a file inside
            the narrowspec. The client needs these as non-ellipsis nodes.
        ellipsisroots: A dict of {rev: parents} that is used in
            narrowchangegroup to produce ellipsis nodes with the
            correct parents.
    """
    cl = repo.changelog
    mfl = repo.manifestlog

    clrev = cl.rev

    commonrevs = {clrev(n) for n in common} | {nullrev}
    headsrevs = {clrev(n) for n in heads}

    if depth:
        revdepth = {h: 0 for h in headsrevs}

    ellipsisheads = collections.defaultdict(set)
    ellipsisroots = collections.defaultdict(set)

    def addroot(head, curchange):
        """Add a root to an ellipsis head, splitting heads with 3 roots."""
        ellipsisroots[head].add(curchange)
        # Recursively split ellipsis heads with 3 roots by finding the
        # roots' youngest common descendant which is an elided merge commit.
        # That descendant takes 2 of the 3 roots as its own, and becomes a
        # root of the head.
        while len(ellipsisroots[head]) > 2:
            child, roots = splithead(head)
            splitroots(head, child, roots)
            head = child  # Recurse in case we just added a 3rd root

    def splitroots(head, child, roots):
        ellipsisroots[head].difference_update(roots)
        ellipsisroots[head].add(child)
        ellipsisroots[child].update(roots)
        ellipsisroots[child].discard(child)

    def splithead(head):
        r1, r2, r3 = sorted(ellipsisroots[head])
        for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
            mid = repo.revs(
                b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
            )
            for j in mid:
                if j == nr2:
                    return nr2, (nr1, nr2)
                if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
                    return j, (nr1, nr2)
        raise error.Abort(
            _(
                b'Failed to split up ellipsis node! head: %d, '
                b'roots: %d %d %d'
            )
            % (head, r1, r2, r3)
        )

    missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
    visit = reversed(missing)
    relevant_nodes = set()
    visitnodes = [cl.node(m) for m in missing]
    required = set(headsrevs) | known
    for rev in visit:
        clrev = cl.changelogrevision(rev)
        ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
        if depth is not None:
            curdepth = revdepth[rev]
            for p in ps:
                revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
        needed = False
        shallow_enough = depth is None or revdepth[rev] <= depth
        if shallow_enough:
            curmf = mfl[clrev.manifest].read()
            if ps:
                # We choose to not trust the changed files list in
                # changesets because it's not always correct. TODO: could
                # we trust it for the non-merge case?
                p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
                needed = bool(curmf.diff(p1mf, match))
                if not needed and len(ps) > 1:
                    # For merge changes, the list of changed files is not
                    # helpful, since we need to emit the merge if a file
                    # in the narrow spec has changed on either side of the
                    # merge. As a result, we do a manifest diff to check.
                    p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
                    needed = bool(curmf.diff(p2mf, match))
            else:
                # For a root node, we need to include the node if any
                # files in the node match the narrowspec.
                needed = any(curmf.walk(match))

        if needed:
            for head in ellipsisheads[rev]:
                addroot(head, rev)
            for p in ps:
                required.add(p)
            relevant_nodes.add(cl.node(rev))
        else:
            if not ps:
                ps = [nullrev]
            if rev in required:
                for head in ellipsisheads[rev]:
                    addroot(head, rev)
                for p in ps:
                    ellipsisheads[p].add(rev)
            else:
                for p in ps:
                    ellipsisheads[p] |= ellipsisheads[rev]

    # add common changesets as roots of their reachable ellipsis heads
    for c in commonrevs:
        for head in ellipsisheads[c]:
            addroot(head, c)
    return visitnodes, relevant_nodes, ellipsisroots


def caps20to10(repo, role):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = {b'HG20'}
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
    caps.add(b'bundle2=' + urlreq.quote(capsblob))
    return caps


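# Example (editor's sketch): the set returned above might look like
#
#     {b'HG20', b'bundle2=HG20%0Achangegroup%3D01%2C02'}
#
# i.e. the bundle2 capability blob, URL-quoted and smuggled through the
# bundle10-era `bundlecaps` argument. getbundlechunks() below reverses the
# encoding with urlreq.unquote() and bundle2.decodecaps(). The specific
# capabilities shown here are illustrative only.

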
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}


def getbundle2partsgenerator(stepname, idx=None):
    """decorator for functions generating bundle2 parts for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, modify the getbundle2partsmapping dictionary directly."""

    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func

    return dec


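# Example (editor's sketch, not part of the original module): registering an
# extra part generator. The part name and payload are made up; an advisory
# part (mandatory=False) is skipped by clients that do not understand it.
@getbundle2partsgenerator(b'example-part')
def _example_part(bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs):
    bundler.newpart(b'example-part', data=b'payload', mandatory=False)

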
def bundle2requested(bundlecaps):
    if bundlecaps is not None:
        return any(cap.startswith(b'HG2') for cap in bundlecaps)
    return False


def getbundlechunks(
    repo,
    source,
    heads=None,
    common=None,
    bundlecaps=None,
    remote_sidedata=None,
    **kwargs
):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on the bundlecaps
    passed.

    Returns a 2-tuple of a dict with metadata about the generated bundle
    and an iterator over raw chunks (of varying sizes).
    """
    kwargs = pycompat.byteskwargs(kwargs)
    info = {}
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get(b'cg', True):
            raise ValueError(
                _(b'request for bundle10 must include changegroup')
            )

        if kwargs:
            raise ValueError(
                _(b'unsupported getbundle arguments: %s')
                % b', '.join(sorted(kwargs.keys()))
            )
        outgoing = _computeoutgoing(repo, heads, common)
        info[b'bundleversion'] = 1
        return (
            info,
            changegroup.makestream(
                repo,
                outgoing,
                b'01',
                source,
                bundlecaps=bundlecaps,
                remote_sidedata=remote_sidedata,
            ),
        )

    # bundle20 case
    info[b'bundleversion'] = 2
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith(b'bundle2='):
            blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs[b'heads'] = heads
    kwargs[b'common'] = common

    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(
            bundler,
            repo,
            source,
            bundlecaps=bundlecaps,
            b2caps=b2caps,
            remote_sidedata=remote_sidedata,
            **pycompat.strkwargs(kwargs)
        )

    info[b'prefercompressed'] = bundler.prefercompressed

    return info, bundler.getchunks()


@getbundle2partsgenerator(b'stream')
def _getbundlestream2(bundler, repo, *args, **kwargs):
    return bundle2.addpartbundlestream2(bundler, repo, **kwargs)


@getbundle2partsgenerator(b'changegroup')
def _getbundlechangegrouppart(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    remote_sidedata=None,
    **kwargs
):
    """add a changegroup part to the requested bundle"""
    if not kwargs.get('cg', True) or not b2caps:
        return

    version = b'01'
    cgversions = b2caps.get(b'changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [
            v
            for v in cgversions
            if v in changegroup.supportedoutgoingversions(repo)
        ]
        if not cgversions:
            raise error.Abort(_(b'no common changegroup version'))
        version = max(cgversions)

    outgoing = _computeoutgoing(repo, heads, common)
    if not outgoing.missing:
        return

    if kwargs.get('narrow', False):
        include = sorted(filter(bool, kwargs.get('includepats', [])))
        exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
        matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
    else:
        matcher = None

    cgstream = changegroup.makestream(
        repo,
        outgoing,
        version,
        source,
        bundlecaps=bundlecaps,
        matcher=matcher,
        remote_sidedata=remote_sidedata,
    )

    part = bundler.newpart(b'changegroup', data=cgstream)
    if cgversions:
        part.addparam(b'version', version)

    part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)

    if scmutil.istreemanifest(repo):
        part.addparam(b'treemanifest', b'1')

    if repository.REPO_FEATURE_SIDE_DATA in repo.features:
        part.addparam(b'exp-sidedata', b'1')
        sidedata = bundle2.format_remote_wanted_sidedata(repo)
        part.addparam(b'exp-wanted-sidedata', sidedata)

    if (
        kwargs.get('narrow', False)
        and kwargs.get('narrow_acl', False)
        and (include or exclude)
    ):
        # this is mandatory because otherwise ACL clients won't work
        narrowspecpart = bundler.newpart(b'Narrow:responsespec')
        narrowspecpart.data = b'%s\0%s' % (
            b'\n'.join(include),
            b'\n'.join(exclude),
        )


@getbundle2partsgenerator(b'bookmarks')
def _getbundlebookmarkpart(
    bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
):
    """add a bookmark part to the requested bundle"""
    if not kwargs.get('bookmarks', False):
        return
    if not b2caps or b'bookmarks' not in b2caps:
        raise error.Abort(_(b'no common bookmarks exchange method'))
    books = bookmod.listbinbookmarks(repo)
    data = bookmod.binaryencode(repo, books)
    if data:
        bundler.newpart(b'bookmarks', data=data)


@getbundle2partsgenerator(b'listkeys')
def _getbundlelistkeysparts(
    bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get('listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart(b'listkeys')
        part.addparam(b'namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)


@getbundle2partsgenerator(b'obsmarkers')
def _getbundleobsmarkerpart(
    bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get('obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        subset = [c.node() for c in repo.set(b'::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = obsutil.sortedmarkers(markers)
        bundle2.buildobsmarkerspart(bundler, markers)


@getbundle2partsgenerator(b'phases')
def _getbundlephasespart(
    bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
):
    """add phase heads part to the requested bundle"""
    if kwargs.get('phases', False):
        if not b2caps or b'heads' not in b2caps.get(b'phases', ()):
            raise error.Abort(_(b'no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move

            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phases for now
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

                revset = b'heads(only(%ln, %ln) and public())'
                extraheads = repo.revs(revset, draftheads, publicheads)
                for r in extraheads:
                    headsbyphase[phases.public].add(node(r))

        # transform data in a format used by the encoding function
        phasemapping = {
            phase: sorted(headsbyphase[phase]) for phase in phases.allphases
        }

        # generate the actual part
        phasedata = phases.binaryencode(phasemapping)
        bundler.newpart(b'phase-heads', data=phasedata)


@getbundle2partsgenerator(b'hgtagsfnodes')
def _getbundletagsfnodes(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    **kwargs
):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    if not b2caps or not (kwargs.get('cg', True) and b'hgtagsfnodes' in b2caps):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)


@getbundle2partsgenerator(b'cache:rev-branch-cache')
def _getbundlerevbranchcache(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    **kwargs
):
    """Transfer the rev-branch-cache mapping

    The payload is a series of data related to each branch

    1) branch name length
    2) number of open heads
    3) number of closed heads
    4) open heads nodes
    5) closed heads nodes
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it,
    # - narrow bundle isn't in play (not currently compatible).
    if (
        not kwargs.get('cg', True)
        or not b2caps
        or b'rev-branch-cache' not in b2caps
        or kwargs.get('narrow', False)
        or repo.ui.has_section(_NARROWACL_SECTION)
    ):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addpartrevbranchcache(repo, bundler, outgoing)


def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = hashutil.sha1(b''.join(sorted(heads))).digest()
    if not (
        their_heads == [b'force']
        or their_heads == heads
        or their_heads == [b'hashed', heads_hash]
    ):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced(
            b'repository changed while %s - please try again' % context
        )


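# Example (editor's sketch, not part of the original module): how a client
# can build the `heads` argument that check_heads() above will accept only
# if the server's heads are unchanged since discovery. This mirrors the
# hashing performed in check_heads(); the helper name is hypothetical.
def _example_expected_heads(observed_heads):
    digest = hashutil.sha1(b''.join(sorted(observed_heads))).digest()
    return [b'hashed', digest]

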
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and has
    a mechanism to check that no push race occurred between the creation of
    the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool(
        b'experimental', b'bundle2-output-capture'
    )
    if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, b'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = b"\n".join([source, urlutil.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:

                def gettransaction():
                    if not lockandtr[2]:
                        if not bookmod.bookmarksinstore(repo):
                            lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs[b'source'] = source
                        lockandtr[2].hookargs[b'url'] = url
                        lockandtr[2].hookargs[b'bundle2'] = b'1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool(
                    b'experimental', b'bundle2lazylocking'
                ):
                    gettransaction()

                op = bundle2.bundleoperation(
                    repo,
                    gettransaction,
                    captureoutput=captureoutput,
                    source=b'push',
                )
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)

                        def recordout(output):
                            r.newpart(b'output', data=output, mandatory=False)

                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()

                    def recordout(output):
                        part = bundle2.bundlepart(
                            b'output', data=output, mandatory=False
                        )
                        parts.append(part)

                raise
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r


def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool(b'ui', b'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable(b'clonebundles'):
        return

    with remote.commandexecutor() as e:
        res = e.callcommand(b'clonebundles', {}).result()

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = bundlecaches.parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(
            _(
                b'no clone bundles available on remote; '
                b'falling back to regular clone\n'
            )
        )
        return

    entries = bundlecaches.filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested
    )

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(
            _(
                b'no compatible clone bundles available on server; '
                b'falling back to regular clone\n'
            )
        )
        repo.ui.warn(
            _(b'(you may want to report this to the server operator)\n')
        )
        return

    entries = bundlecaches.sortclonebundleentries(repo.ui, entries)

    url = entries[0][b'URL']
    repo.ui.status(_(b'applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url, remote):
        repo.ui.status(_(b'finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool(b'ui', b'clonebundlefallback'):
        repo.ui.warn(_(b'falling back to normal clone\n'))
    else:
        raise error.Abort(
            _(b'error applying bundle'),
            hint=_(
                b'if this error persists, consider contacting '
                b'the server operator or disable clone '
                b'bundles via '
                b'"--config ui.clonebundles=false"'
            ),
        )


def inline_clone_bundle_open(ui, url, peer):
    if not peer:
        raise error.Abort(_(b'no remote repository supplied for %s') % url)
    clonebundleid = url[len(bundlecaches.CLONEBUNDLESCHEME) :]
    peerclonebundle = peer.get_cached_bundle_inline(clonebundleid)
    return util.chunkbuffer(peerclonebundle)


def trypullbundlefromurl(ui, repo, url, peer):
    """Attempt to apply a bundle from a URL."""
    with repo.lock(), repo.transaction(b'bundleurl') as tr:
        try:
            if url.startswith(bundlecaches.CLONEBUNDLESCHEME):
                fh = inline_clone_bundle_open(ui, url, peer)
            else:
                fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, b'stream')

            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, b'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(
                _(b'HTTP error fetching bundle: %s\n')
                % stringutil.forcebytestr(e)
            )
        except urlerr.urlerror as e:
            ui.warn(
                _(b'error fetching bundle: %s\n')
                % stringutil.forcebytestr(e.reason)
            )

        return False