exchange: move disabling of rev-branch-cache bundle part out of narrow...
Gregory Szorc
r38825:9b64b73d default
narrowbundle2.py
@@ -1,510 +1,497 @@
1 1 # narrowbundle2.py - bundle2 extensions for narrow repository support
2 2 #
3 3 # Copyright 2017 Google, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import errno
12 12 import struct
13 13
14 14 from mercurial.i18n import _
15 15 from mercurial.node import (
16 16 bin,
17 17 nullid,
18 18 nullrev,
19 19 )
20 20 from mercurial import (
21 21 bundle2,
22 22 changegroup,
23 23 dagutil,
24 24 error,
25 25 exchange,
26 26 extensions,
27 27 narrowspec,
28 28 repair,
29 29 util,
30 30 wireprototypes,
31 31 )
32 32 from mercurial.utils import (
33 33 stringutil,
34 34 )
35 35
36 36 NARROWCAP = 'narrow'
37 37 _NARROWACL_SECTION = 'narrowhgacl'
38 38 _CHANGESPECPART = NARROWCAP + ':changespec'
39 39 _SPECPART = NARROWCAP + ':spec'
40 40 _SPECPART_INCLUDE = 'include'
41 41 _SPECPART_EXCLUDE = 'exclude'
42 42 _KILLNODESIGNAL = 'KILL'
43 43 _DONESIGNAL = 'DONE'
44 44 _ELIDEDCSHEADER = '>20s20s20sl' # cset id, p1, p2, len(text)
45 45 _ELIDEDMFHEADER = '>20s20s20s20sl' # manifest id, p1, p2, link id, len(text)
46 46 _CSHEADERSIZE = struct.calcsize(_ELIDEDCSHEADER)
47 47 _MFHEADERSIZE = struct.calcsize(_ELIDEDMFHEADER)
48 48
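The two header formats above are plain struct layouts. As a quick illustration (a standalone sketch, not part of the patch), packing and unpacking an elided changeset header looks like this:

```python
import struct

# '>20s20s20sl': cset id, p1, p2, len(text) -- big-endian, 64 bytes total
fmt = '>20s20s20sl'
node = b'\x11' * 20
p1 = b'\x22' * 20
p2 = b'\x00' * 20  # nullid stands in for a missing parent
header = struct.pack(fmt, node, p1, p2, 123)
assert len(header) == struct.calcsize(fmt) == 64
assert struct.unpack(fmt, header) == (node, p1, p2, 123)
```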
49 49 # When advertising capabilities, always include narrow clone support.
50 50 def getrepocaps_narrow(orig, repo, **kwargs):
51 51 caps = orig(repo, **kwargs)
52 52 caps[NARROWCAP] = ['v0']
53 53 return caps
54 54
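`getrepocaps_narrow` follows Mercurial's wrapper convention: the original function arrives as the wrapper's first argument. A simplified sketch of what `extensions.wrapfunction` does (the real helper also handles bound methods and unwrapping; this is illustrative only):

```python
def wrapfunction(container, funcname, wrapper):
    # Replace container.funcname with a closure that passes the
    # original function to the wrapper as its first argument.
    origfn = getattr(container, funcname)
    def wrap(*args, **kwargs):
        return wrapper(origfn, *args, **kwargs)
    setattr(container, funcname, wrap)
```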
55 55 def _computeellipsis(repo, common, heads, known, match, depth=None):
56 56 """Compute the shape of a narrowed DAG.
57 57
58 58 Args:
59 59 repo: The repository we're transferring.
60 60 common: The roots of the DAG range we're transferring.
61 61 May be just [nullid], which means all ancestors of heads.
62 62 heads: The heads of the DAG range we're transferring.
63 63 match: The narrowmatcher that allows us to identify relevant changes.
64 64 depth: If not None, only consider nodes to be full nodes if they are at
65 65 most depth changesets away from one of heads.
66 66
67 67 Returns:
68 68 A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
69 69
70 70 visitnodes: The list of nodes (either full or ellipsis) which
71 71 need to be sent to the client.
72 72 relevant_nodes: The set of changelog nodes which change a file inside
73 73 the narrowspec. The client needs these as non-ellipsis nodes.
74 74 ellipsisroots: A dict of {rev: parents} that is used in
75 75 narrowchangegroup to produce ellipsis nodes with the
76 76 correct parents.
77 77 """
78 78 cl = repo.changelog
79 79 mfl = repo.manifestlog
80 80
81 81 cldag = dagutil.revlogdag(cl)
82 82 # dagutil does not like nullid/nullrev
83 83 commonrevs = cldag.internalizeall(common - set([nullid])) | set([nullrev])
84 84 headsrevs = cldag.internalizeall(heads)
85 85 if depth:
86 86 revdepth = {h: 0 for h in headsrevs}
87 87
88 88 ellipsisheads = collections.defaultdict(set)
89 89 ellipsisroots = collections.defaultdict(set)
90 90
91 91 def addroot(head, curchange):
92 92 """Add a root to an ellipsis head, splitting heads with 3 roots."""
93 93 ellipsisroots[head].add(curchange)
94 94 # Recursively split ellipsis heads with 3 roots by finding the
95 95 # roots' youngest common descendant which is an elided merge commit.
96 96 # That descendant takes 2 of the 3 roots as its own, and becomes a
97 97 # root of the head.
98 98 while len(ellipsisroots[head]) > 2:
99 99 child, roots = splithead(head)
100 100 splitroots(head, child, roots)
101 101 head = child # Recurse in case we just added a 3rd root
102 102
103 103 def splitroots(head, child, roots):
104 104 ellipsisroots[head].difference_update(roots)
105 105 ellipsisroots[head].add(child)
106 106 ellipsisroots[child].update(roots)
107 107 ellipsisroots[child].discard(child)
108 108
109 109 def splithead(head):
110 110 r1, r2, r3 = sorted(ellipsisroots[head])
111 111 for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
112 112 mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
113 113 nr1, head, nr2, head)
114 114 for j in mid:
115 115 if j == nr2:
116 116 return nr2, (nr1, nr2)
117 117 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
118 118 return j, (nr1, nr2)
119 119 raise error.Abort('Failed to split up ellipsis node! head: %d, '
120 120 'roots: %d %d %d' % (head, r1, r2, r3))
121 121
122 122 missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
123 123 visit = reversed(missing)
124 124 relevant_nodes = set()
125 125 visitnodes = [cl.node(m) for m in missing]
126 126 required = set(headsrevs) | known
127 127 for rev in visit:
128 128 clrev = cl.changelogrevision(rev)
129 129 ps = cldag.parents(rev)
130 130 if depth is not None:
131 131 curdepth = revdepth[rev]
132 132 for p in ps:
133 133 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
134 134 needed = False
135 135 shallow_enough = depth is None or revdepth[rev] <= depth
136 136 if shallow_enough:
137 137 curmf = mfl[clrev.manifest].read()
138 138 if ps:
139 139 # We choose to not trust the changed files list in
140 140 # changesets because it's not always correct. TODO: could
141 141 # we trust it for the non-merge case?
142 142 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
143 143 needed = bool(curmf.diff(p1mf, match))
144 144 if not needed and len(ps) > 1:
145 145 # For merge changes, the list of changed files is not
146 146 # helpful, since we need to emit the merge if a file
147 147 # in the narrow spec has changed on either side of the
148 148 # merge. As a result, we do a manifest diff to check.
149 149 p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
150 150 needed = bool(curmf.diff(p2mf, match))
151 151 else:
152 152 # For a root node, we need to include the node if any
153 153 # files in the node match the narrowspec.
154 154 needed = any(curmf.walk(match))
155 155
156 156 if needed:
157 157 for head in ellipsisheads[rev]:
158 158 addroot(head, rev)
159 159 for p in ps:
160 160 required.add(p)
161 161 relevant_nodes.add(cl.node(rev))
162 162 else:
163 163 if not ps:
164 164 ps = [nullrev]
165 165 if rev in required:
166 166 for head in ellipsisheads[rev]:
167 167 addroot(head, rev)
168 168 for p in ps:
169 169 ellipsisheads[p].add(rev)
170 170 else:
171 171 for p in ps:
172 172 ellipsisheads[p] |= ellipsisheads[rev]
173 173
174 174 # add common changesets as roots of their reachable ellipsis heads
175 175 for c in commonrevs:
176 176 for head in ellipsisheads[c]:
177 177 addroot(head, c)
178 178 return visitnodes, relevant_nodes, ellipsisroots
179 179
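The `depth` handling above walks revisions from newest to oldest and propagates each revision's minimum distance from a requested head down to its parents, using `depth + 1` as the "too deep" sentinel. A hypothetical toy (plain dicts rather than Mercurial objects) showing the same rule:

```python
def revdepths(parents, heads, depth):
    """Minimum distance of each reachable rev from any head (toy version)."""
    revdepth = {h: 0 for h in heads}
    for rev in sorted(parents, reverse=True):  # newest to oldest
        if rev not in revdepth:
            continue  # not an ancestor of any requested head
        for p in parents[rev]:
            revdepth[p] = min(revdepth[rev] + 1, revdepth.get(p, depth + 1))
    return revdepth

# Linear history 0 <- 1 <- 2 <- 3, head 3, depth 2:
print(revdepths({3: [2], 2: [1], 1: [0], 0: []}, heads=[3], depth=2))
# {3: 0, 2: 1, 1: 2, 0: 3} -> rev 0 exceeds depth and is not "shallow enough"
```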
180 180 def _packellipsischangegroup(repo, common, match, relevant_nodes,
181 181 ellipsisroots, visitnodes, depth, source, version):
182 182 if version in ('01', '02'):
183 183 raise error.Abort(
184 184 'ellipsis nodes require at least cg3 on client and server, '
185 185 'but negotiated version %s' % version)
186 186 # We wrap cg1packer.revchunk, using a side channel to pass
187 187 # relevant_nodes into that area. Then if linknode isn't in the
188 188 # set, we know we have an ellipsis node and we should defer
189 189 # sending that node's data. We override close() to detect
190 190 # pending ellipsis nodes and flush them.
191 191 packer = changegroup.getbundler(version, repo)
192 192 # Let the packer have access to the narrow matcher so it can
193 193 # omit filelogs and dirlogs as needed
194 194 packer._narrow_matcher = lambda : match
195 195 # Give the packer the list of nodes which should not be
196 196 # ellipsis nodes. We store this rather than the set of nodes
197 197 # that should be an ellipsis because for very large histories
198 198 # we expect this to be significantly smaller.
199 199 packer.full_nodes = relevant_nodes
200 200 # Maps ellipsis revs to their roots at the changelog level.
201 201 packer.precomputed_ellipsis = ellipsisroots
202 202 # Maps CL revs to per-revlog revisions. Cleared in close() at
203 203 # the end of each group.
204 204 packer.clrev_to_localrev = {}
205 205 packer.next_clrev_to_localrev = {}
206 206 # Maps changelog nodes to changelog revs. Filled in once
207 207 # during changelog stage and then left unmodified.
208 208 packer.clnode_to_rev = {}
209 209 packer.changelog_done = False
210 210 # If true, informs the packer that it is serving shallow content and might
211 211 # need to pack file contents not introduced by the changes being packed.
212 212 packer.is_shallow = depth is not None
213 213
214 214 return packer.generate(common, visitnodes, False, source)
215 215
216 216 # Serve a changegroup for a client with a narrow clone.
217 217 def getbundlechangegrouppart_narrow(bundler, repo, source,
218 218 bundlecaps=None, b2caps=None, heads=None,
219 219 common=None, **kwargs):
220 220 cgversions = b2caps.get('changegroup')
221 221 if cgversions: # 3.1 and 3.2 ship with an empty value
222 222 cgversions = [v for v in cgversions
223 223 if v in changegroup.supportedoutgoingversions(repo)]
224 224 if not cgversions:
225 225 raise ValueError(_('no common changegroup version'))
226 226 version = max(cgversions)
227 227 else:
228 228 raise ValueError(_("server does not advertise changegroup version,"
229 229 " can't negotiate support for ellipsis nodes"))
230 230
231 231 include = sorted(filter(bool, kwargs.get(r'includepats', [])))
232 232 exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
233 233 newmatch = narrowspec.match(repo.root, include=include, exclude=exclude)
234 234 if not repo.ui.configbool("experimental", "narrowservebrokenellipses"):
235 235 outgoing = exchange._computeoutgoing(repo, heads, common)
236 236 if not outgoing.missing:
237 237 return
238 238 def wrappedgetbundler(orig, *args, **kwargs):
239 239 bundler = orig(*args, **kwargs)
240 240 bundler._narrow_matcher = lambda : newmatch
241 241 return bundler
242 242 with extensions.wrappedfunction(changegroup, 'getbundler',
243 243 wrappedgetbundler):
244 244 cg = changegroup.makestream(repo, outgoing, version, source)
245 245 part = bundler.newpart('changegroup', data=cg)
246 246 part.addparam('version', version)
247 247 if 'treemanifest' in repo.requirements:
248 248 part.addparam('treemanifest', '1')
249 249
250 250 if include or exclude:
251 251 narrowspecpart = bundler.newpart(_SPECPART)
252 252 if include:
253 253 narrowspecpart.addparam(
254 254 _SPECPART_INCLUDE, '\n'.join(include), mandatory=True)
255 255 if exclude:
256 256 narrowspecpart.addparam(
257 257 _SPECPART_EXCLUDE, '\n'.join(exclude), mandatory=True)
258 258
259 259 return
260 260
261 261 depth = kwargs.get(r'depth', None)
262 262 if depth is not None:
263 263 depth = int(depth)
264 264 if depth < 1:
265 265 raise error.Abort(_('depth must be positive, got %d') % depth)
266 266
267 267 heads = set(heads or repo.heads())
268 268 common = set(common or [nullid])
269 269 oldinclude = sorted(filter(bool, kwargs.get(r'oldincludepats', [])))
270 270 oldexclude = sorted(filter(bool, kwargs.get(r'oldexcludepats', [])))
271 271 known = {bin(n) for n in kwargs.get(r'known', [])}
272 272 if known and (oldinclude != include or oldexclude != exclude):
273 273 # Steps:
274 274 # 1. Send kill for "$known & ::common"
275 275 #
276 276 # 2. Send changegroup for ::common
277 277 #
278 278 # 3. Proceed.
279 279 #
280 280 # In the future, we can send kills for only the specific
281 281 # nodes we know should go away or change shape, and then
282 282 # send a data stream that tells the client something like this:
283 283 #
284 284 # a) apply this changegroup
285 285 # b) apply nodes XXX, YYY, ZZZ that you already have
286 286 # c) goto a
287 287 #
288 288 # until they've built up the full new state.
289 289 # Convert to revnums and intersect with "common". The client should
290 290 # have made it a subset of "common" already, but let's be safe.
291 291 known = set(repo.revs("%ln & ::%ln", known, common))
292 292 # TODO: we could send only roots() of this set, and the
293 293 # list of nodes in common, and the client could work out
294 294 # what to strip, instead of us explicitly sending every
295 295 # single node.
296 296 deadrevs = known
297 297 def genkills():
298 298 for r in deadrevs:
299 299 yield _KILLNODESIGNAL
300 300 yield repo.changelog.node(r)
301 301 yield _DONESIGNAL
302 302 bundler.newpart(_CHANGESPECPART, data=genkills())
303 303 newvisit, newfull, newellipsis = _computeellipsis(
304 304 repo, set(), common, known, newmatch)
305 305 if newvisit:
306 306 cg = _packellipsischangegroup(
307 307 repo, common, newmatch, newfull, newellipsis,
308 308 newvisit, depth, source, version)
309 309 part = bundler.newpart('changegroup', data=cg)
310 310 part.addparam('version', version)
311 311 if 'treemanifest' in repo.requirements:
312 312 part.addparam('treemanifest', '1')
313 313
314 314 visitnodes, relevant_nodes, ellipsisroots = _computeellipsis(
315 315 repo, common, heads, set(), newmatch, depth=depth)
316 316
317 317 repo.ui.debug('Found %d relevant revs\n' % len(relevant_nodes))
318 318 if visitnodes:
319 319 cg = _packellipsischangegroup(
320 320 repo, common, newmatch, relevant_nodes, ellipsisroots,
321 321 visitnodes, depth, source, version)
322 322 part = bundler.newpart('changegroup', data=cg)
323 323 part.addparam('version', version)
324 324 if 'treemanifest' in repo.requirements:
325 325 part.addparam('treemanifest', '1')
326 326
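The changespec part built by `genkills` above is a flat byte stream: a 4-byte signal, a 20-byte node after each KILL, and a terminating DONE, which is exactly what the `_handlechangespec` handler below reads back with `readexactly`. A self-contained round trip under those assumptions (plain `read` stands in for `readexactly`):

```python
import io

KILL, DONE = b'KILL', b'DONE'

def genkills(nodes):
    for n in nodes:
        yield KILL
        yield n
    yield DONE

nodes = [b'\xaa' * 20, b'\xbb' * 20]
stream = io.BytesIO(b''.join(genkills(nodes)))

killed = []
signal = stream.read(4)
while signal != DONE:
    assert signal == KILL, 'unexpected chunk type'
    killed.append(stream.read(20))
    signal = stream.read(4)
assert killed == nodes
```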
327 327 def applyacl_narrow(repo, kwargs):
328 328 ui = repo.ui
329 329 username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
330 330 user_includes = ui.configlist(
331 331 _NARROWACL_SECTION, username + '.includes',
332 332 ui.configlist(_NARROWACL_SECTION, 'default.includes'))
333 333 user_excludes = ui.configlist(
334 334 _NARROWACL_SECTION, username + '.excludes',
335 335 ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
336 336 if not user_includes:
337 337 raise error.Abort(_("{} configuration for user {} is empty")
338 338 .format(_NARROWACL_SECTION, username))
339 339
340 340 user_includes = [
341 341 'path:.' if p == '*' else 'path:' + p for p in user_includes]
342 342 user_excludes = [
343 343 'path:.' if p == '*' else 'path:' + p for p in user_excludes]
344 344
345 345 req_includes = set(kwargs.get(r'includepats', []))
346 346 req_excludes = set(kwargs.get(r'excludepats', []))
347 347
348 348 req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
349 349 req_includes, req_excludes, user_includes, user_excludes)
350 350
351 351 if invalid_includes:
352 352 raise error.Abort(
353 353 _("The following includes are not accessible for {}: {}")
354 354 .format(username, invalid_includes))
355 355
356 356 new_args = {}
357 357 new_args.update(kwargs)
358 358 new_args['includepats'] = req_includes
359 359 if req_excludes:
360 360 new_args['excludepats'] = req_excludes
361 361 return new_args
362 362
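For illustration, a hypothetical server-side hgrc exercising the lookups above: per-user entries fall back to the `default.*` keys, and a bare `*` is rewritten to `path:.`:

```ini
[narrowhgacl]
default.includes = *
alice.includes = proj1, proj2
alice.excludes = proj2/secret
```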
363 363 @bundle2.parthandler(_SPECPART, (_SPECPART_INCLUDE, _SPECPART_EXCLUDE))
364 364 def _handlechangespec_2(op, inpart):
365 365 includepats = set(inpart.params.get(_SPECPART_INCLUDE, '').splitlines())
366 366 excludepats = set(inpart.params.get(_SPECPART_EXCLUDE, '').splitlines())
367 367 if not changegroup.NARROW_REQUIREMENT in op.repo.requirements:
368 368 op.repo.requirements.add(changegroup.NARROW_REQUIREMENT)
369 369 op.repo._writerequirements()
370 370 op.repo.setnarrowpats(includepats, excludepats)
371 371
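Note the symmetry with the sending side: `getbundlechangegrouppart_narrow` joins patterns with newlines, and this handler recovers them with `splitlines()`:

```python
include = ['path:src', 'path:tests']
payload = '\n'.join(include)   # what the server puts in the part parameter
assert set(payload.splitlines()) == set(include)
```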
372 372 @bundle2.parthandler(_CHANGESPECPART)
373 373 def _handlechangespec(op, inpart):
374 374 repo = op.repo
375 375 cl = repo.changelog
376 376
377 377 # changesets which need to be stripped entirely. Either they're no longer
378 378 # needed in the new narrow spec, or the server is sending a replacement
379 379 # in the changegroup part.
380 380 clkills = set()
381 381
382 382 # A changespec part contains all the updates to ellipsis nodes
383 383 # that will happen as a result of widening or narrowing a
384 384 # repo. All the changes that this block encounters are ellipsis
385 385 # nodes or flags to kill an existing ellipsis.
386 386 chunksignal = changegroup.readexactly(inpart, 4)
387 387 while chunksignal != _DONESIGNAL:
388 388 if chunksignal == _KILLNODESIGNAL:
389 389 # a node used to be an ellipsis but isn't anymore
390 390 ck = changegroup.readexactly(inpart, 20)
391 391 if cl.hasnode(ck):
392 392 clkills.add(ck)
393 393 else:
394 394 raise error.Abort(
395 395 _('unexpected changespec node chunk type: %s') % chunksignal)
396 396 chunksignal = changegroup.readexactly(inpart, 4)
397 397
398 398 if clkills:
399 399 # preserve bookmarks that repair.strip() would otherwise strip
400 400 bmstore = repo._bookmarks
401 401 class dummybmstore(dict):
402 402 def applychanges(self, repo, tr, changes):
403 403 pass
404 404 def recordchange(self, tr): # legacy version
405 405 pass
406 406 repo._bookmarks = dummybmstore()
407 407 chgrpfile = repair.strip(op.ui, repo, list(clkills), backup=True,
408 408 topic='widen')
409 409 repo._bookmarks = bmstore
410 410 if chgrpfile:
411 411 op._widen_uninterr = repo.ui.uninterruptable()
412 412 op._widen_uninterr.__enter__()
413 413 # presence of _widen_bundle attribute activates widen handler later
414 414 op._widen_bundle = chgrpfile
415 415 # Set the new narrowspec if we're widening. The setnewnarrowpats() method
416 416 # will currently always be there when using the core+narrowhg server, but
417 417 # other servers may include a changespec part even when not widening (e.g.
418 418 # because we're deepening a shallow repo).
419 419 if util.safehasattr(repo, 'setnewnarrowpats'):
420 420 repo.setnewnarrowpats()
421 421
422 422 def handlechangegroup_widen(op, inpart):
423 423 """Changegroup exchange handler which restores temporarily-stripped nodes"""
424 424 # We saved a bundle with stripped node data we must now restore.
425 425 # This approach is based on mercurial/repair.py@6ee26a53c111.
426 426 repo = op.repo
427 427 ui = op.ui
428 428
429 429 chgrpfile = op._widen_bundle
430 430 del op._widen_bundle
431 431 vfs = repo.vfs
432 432
433 433 ui.note(_("adding branch\n"))
434 434 f = vfs.open(chgrpfile, "rb")
435 435 try:
436 436 gen = exchange.readbundle(ui, f, chgrpfile, vfs)
437 437 if not ui.verbose:
438 438 # silence internal shuffling chatter
439 439 ui.pushbuffer()
440 440 if isinstance(gen, bundle2.unbundle20):
441 441 with repo.transaction('strip') as tr:
442 442 bundle2.processbundle(repo, gen, lambda: tr)
443 443 else:
444 444 gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True)
445 445 if not ui.verbose:
446 446 ui.popbuffer()
447 447 finally:
448 448 f.close()
449 449
450 450 # remove undo files
451 451 for undovfs, undofile in repo.undofiles():
452 452 try:
453 453 undovfs.unlink(undofile)
454 454 except OSError as e:
455 455 if e.errno != errno.ENOENT:
456 456 ui.warn(_('error removing %s: %s\n') %
457 457 (undovfs.join(undofile), stringutil.forcebytestr(e)))
458 458
459 459 # Remove partial backup only if there were no exceptions
460 460 op._widen_uninterr.__exit__(None, None, None)
461 461 vfs.unlink(chgrpfile)
462 462
463 463 def setup():
464 464 """Enable narrow repo support in bundle2-related extension points."""
465 465 extensions.wrapfunction(bundle2, 'getrepocaps', getrepocaps_narrow)
466 466
467 467 getbundleargs = wireprototypes.GETBUNDLE_ARGUMENTS
468 468
469 469 getbundleargs['narrow'] = 'boolean'
470 470 getbundleargs['depth'] = 'plain'
471 471 getbundleargs['oldincludepats'] = 'csv'
472 472 getbundleargs['oldexcludepats'] = 'csv'
473 473 getbundleargs['includepats'] = 'csv'
474 474 getbundleargs['excludepats'] = 'csv'
475 475 getbundleargs['known'] = 'csv'
476 476
477 477 # Extend changegroup serving to handle requests from narrow clients.
478 478 origcgfn = exchange.getbundle2partsmapping['changegroup']
479 479 def wrappedcgfn(*args, **kwargs):
480 480 repo = args[1]
481 481 if repo.ui.has_section(_NARROWACL_SECTION):
482 482 getbundlechangegrouppart_narrow(
483 483 *args, **applyacl_narrow(repo, kwargs))
484 484 elif kwargs.get(r'narrow', False):
485 485 getbundlechangegrouppart_narrow(*args, **kwargs)
486 486 else:
487 487 origcgfn(*args, **kwargs)
488 488 exchange.getbundle2partsmapping['changegroup'] = wrappedcgfn
489 489
490 # disable rev branch cache exchange when serving a narrow bundle
491 # (currently incompatible with that part)
492 origrbcfn = exchange.getbundle2partsmapping['cache:rev-branch-cache']
493 def wrappedcgfn(*args, **kwargs):
494 repo = args[1]
495 if repo.ui.has_section(_NARROWACL_SECTION):
496 return
497 elif kwargs.get(r'narrow', False):
498 return
499 else:
500 origrbcfn(*args, **kwargs)
501 exchange.getbundle2partsmapping['cache:rev-branch-cache'] = wrappedcgfn
502
503 490 # Extend changegroup receiver so client can fixup after widen requests.
504 491 origcghandler = bundle2.parthandlermapping['changegroup']
505 492 def wrappedcghandler(op, inpart):
506 493 origcghandler(op, inpart)
507 494 if util.safehasattr(op, '_widen_bundle'):
508 495 handlechangegroup_widen(op, inpart)
509 496 wrappedcghandler.params = origcghandler.params
510 497 bundle2.parthandlermapping['changegroup'] = wrappedcghandler
exchange.py
@@ -1,2421 +1,2428 @@
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import hashlib
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 bin,
16 16 hex,
17 17 nullid,
18 18 )
19 19 from .thirdparty import (
20 20 attr,
21 21 )
22 22 from . import (
23 23 bookmarks as bookmod,
24 24 bundle2,
25 25 changegroup,
26 26 discovery,
27 27 error,
28 28 lock as lockmod,
29 29 logexchange,
30 30 obsolete,
31 31 phases,
32 32 pushkey,
33 33 pycompat,
34 34 scmutil,
35 35 sslutil,
36 36 streamclone,
37 37 url as urlmod,
38 38 util,
39 39 )
40 40 from .utils import (
41 41 stringutil,
42 42 )
43 43
44 44 urlerr = util.urlerr
45 45 urlreq = util.urlreq
46 46
47 _NARROWACL_SECTION = 'narrowhgacl'
48
47 49 # Maps bundle version human names to changegroup versions.
48 50 _bundlespeccgversions = {'v1': '01',
49 51 'v2': '02',
50 52 'packed1': 's1',
51 53 'bundle2': '02', #legacy
52 54 }
53 55
54 56 # Maps bundle version with content opts to choose which part to bundle
55 57 _bundlespeccontentopts = {
56 58 'v1': {
57 59 'changegroup': True,
58 60 'cg.version': '01',
59 61 'obsolescence': False,
60 62 'phases': False,
61 63 'tagsfnodescache': False,
62 64 'revbranchcache': False
63 65 },
64 66 'v2': {
65 67 'changegroup': True,
66 68 'cg.version': '02',
67 69 'obsolescence': False,
68 70 'phases': False,
69 71 'tagsfnodescache': True,
70 72 'revbranchcache': True
71 73 },
72 74 'packed1' : {
73 75 'cg.version': 's1'
74 76 }
75 77 }
76 78 _bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']
77 79
78 80 _bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True,
79 81 "tagsfnodescache": False,
80 82 "revbranchcache": False}}
81 83
82 84 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
83 85 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
84 86
85 87 @attr.s
86 88 class bundlespec(object):
87 89 compression = attr.ib()
88 90 wirecompression = attr.ib()
89 91 version = attr.ib()
90 92 wireversion = attr.ib()
91 93 params = attr.ib()
92 94 contentopts = attr.ib()
93 95
94 96 def parsebundlespec(repo, spec, strict=True):
95 97 """Parse a bundle string specification into parts.
96 98
97 99 Bundle specifications denote a well-defined bundle/exchange format.
98 100 The content of a given specification should not change over time in
99 101 order to ensure that bundles produced by a newer version of Mercurial are
100 102 readable from an older version.
101 103
102 104 The string currently has the form:
103 105
104 106 <compression>-<type>[;<parameter0>[;<parameter1>]]
105 107
106 108 Where <compression> is one of the supported compression formats
107 109 and <type> is (currently) a version string. A ";" can follow the type and
108 110 all text afterwards is interpreted as URI encoded, ";" delimited key=value
109 111 pairs.
110 112
111 113 If ``strict`` is True (the default) <compression> is required. Otherwise,
112 114 it is optional.
113 115
114 116 Returns a bundlespec object of (compression, version, parameters).
115 117 Compression will be ``None`` if not in strict mode and a compression isn't
116 118 defined.
117 119
118 120 An ``InvalidBundleSpecification`` is raised when the specification is
119 121 not syntactically well formed.
120 122
121 123 An ``UnsupportedBundleSpecification`` is raised when the compression or
122 124 bundle type/version is not recognized.
123 125
124 126 Note: this function will likely eventually return a more complex data
125 127 structure, including bundle2 part information.
126 128 """
127 129 def parseparams(s):
128 130 if ';' not in s:
129 131 return s, {}
130 132
131 133 params = {}
132 134 version, paramstr = s.split(';', 1)
133 135
134 136 for p in paramstr.split(';'):
135 137 if '=' not in p:
136 138 raise error.InvalidBundleSpecification(
137 139 _('invalid bundle specification: '
138 140 'missing "=" in parameter: %s') % p)
139 141
140 142 key, value = p.split('=', 1)
141 143 key = urlreq.unquote(key)
142 144 value = urlreq.unquote(value)
143 145 params[key] = value
144 146
145 147 return version, params
146 148
147 149
148 150 if strict and '-' not in spec:
149 151 raise error.InvalidBundleSpecification(
150 152 _('invalid bundle specification; '
151 153 'must be prefixed with compression: %s') % spec)
152 154
153 155 if '-' in spec:
154 156 compression, version = spec.split('-', 1)
155 157
156 158 if compression not in util.compengines.supportedbundlenames:
157 159 raise error.UnsupportedBundleSpecification(
158 160 _('%s compression is not supported') % compression)
159 161
160 162 version, params = parseparams(version)
161 163
162 164 if version not in _bundlespeccgversions:
163 165 raise error.UnsupportedBundleSpecification(
164 166 _('%s is not a recognized bundle version') % version)
165 167 else:
166 168 # Value could be just the compression or just the version, in which
167 169 # case some defaults are assumed (but only when not in strict mode).
168 170 assert not strict
169 171
170 172 spec, params = parseparams(spec)
171 173
172 174 if spec in util.compengines.supportedbundlenames:
173 175 compression = spec
174 176 version = 'v1'
175 177 # Generaldelta repos require v2.
176 178 if 'generaldelta' in repo.requirements:
177 179 version = 'v2'
178 180 # Modern compression engines require v2.
179 181 if compression not in _bundlespecv1compengines:
180 182 version = 'v2'
181 183 elif spec in _bundlespeccgversions:
182 184 if spec == 'packed1':
183 185 compression = 'none'
184 186 else:
185 187 compression = 'bzip2'
186 188 version = spec
187 189 else:
188 190 raise error.UnsupportedBundleSpecification(
189 191 _('%s is not a recognized bundle specification') % spec)
190 192
191 193 # Bundle version 1 only supports a known set of compression engines.
192 194 if version == 'v1' and compression not in _bundlespecv1compengines:
193 195 raise error.UnsupportedBundleSpecification(
194 196 _('compression engine %s is not supported on v1 bundles') %
195 197 compression)
196 198
197 199 # The specification for packed1 can optionally declare the data formats
198 200 # required to apply it. If we see this metadata, compare against what the
199 201 # repo supports and error if the bundle isn't compatible.
200 202 if version == 'packed1' and 'requirements' in params:
201 203 requirements = set(params['requirements'].split(','))
202 204 missingreqs = requirements - repo.supportedformats
203 205 if missingreqs:
204 206 raise error.UnsupportedBundleSpecification(
205 207 _('missing support for repository features: %s') %
206 208 ', '.join(sorted(missingreqs)))
207 209
208 210 # Compute contentopts based on the version
209 211 contentopts = _bundlespeccontentopts.get(version, {}).copy()
210 212
211 213 # Process the variants
212 214 if "stream" in params and params["stream"] == "v2":
213 215 variant = _bundlespecvariants["streamv2"]
214 216 contentopts.update(variant)
215 217
216 218 engine = util.compengines.forbundlename(compression)
217 219 compression, wirecompression = engine.bundletype()
218 220 wireversion = _bundlespeccgversions[version]
219 221
220 222 return bundlespec(compression, wirecompression, version, wireversion,
221 223 params, contentopts)
222 224
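Some illustrative spec strings and how this parser treats them (informal examples inferred from the tables above, not exhaustive):

```python
# 'gzip-v2'             -> compression 'gzip', version 'v2', wire cg '02'
# 'none-packed1;requirements=revlogv1'
#                       -> stream bundle; requirements checked against repo
# 'none-v2;stream=v2'   -> v2 with the streamv2 content variant applied
# 'v2'   (strict=False) -> compression defaults to 'bzip2'
# 'zstd' (strict=False) -> version forced to 'v2' (modern engine)
```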
223 225 def readbundle(ui, fh, fname, vfs=None):
224 226 header = changegroup.readexactly(fh, 4)
225 227
226 228 alg = None
227 229 if not fname:
228 230 fname = "stream"
229 231 if not header.startswith('HG') and header.startswith('\0'):
230 232 fh = changegroup.headerlessfixup(fh, header)
231 233 header = "HG10"
232 234 alg = 'UN'
233 235 elif vfs:
234 236 fname = vfs.join(fname)
235 237
236 238 magic, version = header[0:2], header[2:4]
237 239
238 240 if magic != 'HG':
239 241 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
240 242 if version == '10':
241 243 if alg is None:
242 244 alg = changegroup.readexactly(fh, 2)
243 245 return changegroup.cg1unpacker(fh, alg)
244 246 elif version.startswith('2'):
245 247 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
246 248 elif version == 'S1':
247 249 return streamclone.streamcloneapplier(fh)
248 250 else:
249 251 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
250 252
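Summarizing the 4-byte header dispatch above (values taken from the code):

```python
# 'HG10' + 2-byte compression ('UN', 'GZ', 'BZ') -> changegroup.cg1unpacker
# 'HG2?' (version starting with '2', e.g. 'HG20') -> bundle2.getunbundler
# 'HGS1'                                          -> streamclone applier
# leading '\0' (headerless)                       -> fixed up as 'HG10'/'UN'
```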
251 253 def getbundlespec(ui, fh):
252 254 """Infer the bundlespec from a bundle file handle.
253 255
254 256 The input file handle is seeked and the original seek position is not
255 257 restored.
256 258 """
257 259 def speccompression(alg):
258 260 try:
259 261 return util.compengines.forbundletype(alg).bundletype()[0]
260 262 except KeyError:
261 263 return None
262 264
263 265 b = readbundle(ui, fh, None)
264 266 if isinstance(b, changegroup.cg1unpacker):
265 267 alg = b._type
266 268 if alg == '_truncatedBZ':
267 269 alg = 'BZ'
268 270 comp = speccompression(alg)
269 271 if not comp:
270 272 raise error.Abort(_('unknown compression algorithm: %s') % alg)
271 273 return '%s-v1' % comp
272 274 elif isinstance(b, bundle2.unbundle20):
273 275 if 'Compression' in b.params:
274 276 comp = speccompression(b.params['Compression'])
275 277 if not comp:
276 278 raise error.Abort(_('unknown compression algorithm: %s') % comp)
277 279 else:
278 280 comp = 'none'
279 281
280 282 version = None
281 283 for part in b.iterparts():
282 284 if part.type == 'changegroup':
283 285 version = part.params['version']
284 286 if version in ('01', '02'):
285 287 version = 'v2'
286 288 else:
287 289 raise error.Abort(_('changegroup version %s does not have '
288 290 'a known bundlespec') % version,
289 291 hint=_('try upgrading your Mercurial '
290 292 'client'))
291 293 elif part.type == 'stream2' and version is None:
292 294 # A stream2 part requires to be part of a v2 bundle
293 295 version = "v2"
294 296 requirements = urlreq.unquote(part.params['requirements'])
295 297 splitted = requirements.split()
296 298 params = bundle2._formatrequirementsparams(splitted)
297 299 return 'none-v2;stream=v2;%s' % params
298 300
299 301 if not version:
300 302 raise error.Abort(_('could not identify changegroup version in '
301 303 'bundle'))
302 304
303 305 return '%s-%s' % (comp, version)
304 306 elif isinstance(b, streamclone.streamcloneapplier):
305 307 requirements = streamclone.readbundle1header(fh)[2]
306 308 formatted = bundle2._formatrequirementsparams(requirements)
307 309 return 'none-packed1;%s' % formatted
308 310 else:
309 311 raise error.Abort(_('unknown bundle type: %s') % b)
310 312
311 313 def _computeoutgoing(repo, heads, common):
312 314 """Computes which revs are outgoing given a set of common
313 315 and a set of heads.
314 316
315 317 This is a separate function so extensions can have access to
316 318 the logic.
317 319
318 320 Returns a discovery.outgoing object.
319 321 """
320 322 cl = repo.changelog
321 323 if common:
322 324 hasnode = cl.hasnode
323 325 common = [n for n in common if hasnode(n)]
324 326 else:
325 327 common = [nullid]
326 328 if not heads:
327 329 heads = cl.heads()
328 330 return discovery.outgoing(repo, common, heads)
329 331
330 332 def _forcebundle1(op):
331 333 """return true if a pull/push must use bundle1
332 334
333 335 This function is used to allow testing of the older bundle version"""
334 336 ui = op.repo.ui
335 337 # The goal of this config is to allow developers to choose the bundle
336 338 # version used during exchange. This is especially handy during tests.
337 339 # Value is a list of bundle versions to pick from; the highest version
338 340 # should be used.
339 341 #
340 342 # developer config: devel.legacy.exchange
341 343 exchange = ui.configlist('devel', 'legacy.exchange')
342 344 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
343 345 return forcebundle1 or not op.remote.capable('bundle2')
344 346
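The developer knob mentioned in the comment can be set like this (test-suite style; the value is a list of acceptable versions):

```ini
[devel]
legacy.exchange = bundle1
```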
345 347 class pushoperation(object):
346 348 """A object that represent a single push operation
347 349
348 350 Its purpose is to carry push related state and very common operations.
349 351
350 352 A new pushoperation should be created at the beginning of each push and
351 353 discarded afterward.
352 354 """
353 355
354 356 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
355 357 bookmarks=(), pushvars=None):
356 358 # repo we push from
357 359 self.repo = repo
358 360 self.ui = repo.ui
359 361 # repo we push to
360 362 self.remote = remote
361 363 # force option provided
362 364 self.force = force
363 365 # revs to be pushed (None is "all")
364 366 self.revs = revs
365 367 # bookmark explicitly pushed
366 368 self.bookmarks = bookmarks
367 369 # allow push of new branch
368 370 self.newbranch = newbranch
369 371 # step already performed
370 372 # (used to check what steps have been already performed through bundle2)
371 373 self.stepsdone = set()
372 374 # Integer version of the changegroup push result
373 375 # - None means nothing to push
374 376 # - 0 means HTTP error
375 377 # - 1 means we pushed and remote head count is unchanged *or*
376 378 # we have outgoing changesets but refused to push
377 379 # - other values as described by addchangegroup()
378 380 self.cgresult = None
379 381 # Boolean value for the bookmark push
380 382 self.bkresult = None
381 383 # discover.outgoing object (contains common and outgoing data)
382 384 self.outgoing = None
383 385 # all remote topological heads before the push
384 386 self.remoteheads = None
385 387 # Details of the remote branch pre and post push
386 388 #
387 389 # mapping: {'branch': ([remoteheads],
388 390 # [newheads],
389 391 # [unsyncedheads],
390 392 # [discardedheads])}
391 393 # - branch: the branch name
392 394 # - remoteheads: the list of remote heads known locally
393 395 # None if the branch is new
394 396 # - newheads: the new remote heads (known locally) with outgoing pushed
395 397 # - unsyncedheads: the list of remote heads unknown locally.
396 398 # - discardedheads: the list of remote heads made obsolete by the push
397 399 self.pushbranchmap = None
398 400 # testable as a boolean indicating if any nodes are missing locally.
399 401 self.incoming = None
400 402 # summary of the remote phase situation
401 403 self.remotephases = None
402 404 # phases changes that must be pushed along side the changesets
403 405 self.outdatedphases = None
404 406 # phases changes that must be pushed if changeset push fails
405 407 self.fallbackoutdatedphases = None
406 408 # outgoing obsmarkers
407 409 self.outobsmarkers = set()
408 410 # outgoing bookmarks
409 411 self.outbookmarks = []
410 412 # transaction manager
411 413 self.trmanager = None
412 414 # map { pushkey partid -> callback handling failure}
413 415 # used to handle exception from mandatory pushkey part failure
414 416 self.pkfailcb = {}
415 417 # an iterable of pushvars or None
416 418 self.pushvars = pushvars
417 419
418 420 @util.propertycache
419 421 def futureheads(self):
420 422 """future remote heads if the changeset push succeeds"""
421 423 return self.outgoing.missingheads
422 424
423 425 @util.propertycache
424 426 def fallbackheads(self):
425 427 """future remote heads if the changeset push fails"""
426 428 if self.revs is None:
427 429 # no targets to push, all common heads are relevant
428 430 return self.outgoing.commonheads
429 431 unfi = self.repo.unfiltered()
430 432 # I want cheads = heads(::missingheads and ::commonheads)
431 433 # (missingheads is revs with secret changeset filtered out)
432 434 #
433 435 # This can be expressed as:
434 436 # cheads = ( (missingheads and ::commonheads)
435 437 # + (commonheads and ::missingheads))"
436 438 # )
437 439 #
438 440 # while trying to push we already computed the following:
439 441 # common = (::commonheads)
440 442 # missing = ((commonheads::missingheads) - commonheads)
441 443 #
442 444 # We can pick:
443 445 # * missingheads part of common (::commonheads)
444 446 common = self.outgoing.common
445 447 nm = self.repo.changelog.nodemap
446 448 cheads = [node for node in self.revs if nm[node] in common]
447 449 # and
448 450 # * commonheads parents on missing
449 451 revset = unfi.set('%ln and parents(roots(%ln))',
450 452 self.outgoing.commonheads,
451 453 self.outgoing.missing)
452 454 cheads.extend(c.node() for c in revset)
453 455 return cheads
454 456
455 457 @property
456 458 def commonheads(self):
457 459 """set of all common heads after changeset bundle push"""
458 460 if self.cgresult:
459 461 return self.futureheads
460 462 else:
461 463 return self.fallbackheads
462 464
463 465 # mapping of message used when pushing bookmark
464 466 bookmsgmap = {'update': (_("updating bookmark %s\n"),
465 467 _('updating bookmark %s failed!\n')),
466 468 'export': (_("exporting bookmark %s\n"),
467 469 _('exporting bookmark %s failed!\n')),
468 470 'delete': (_("deleting remote bookmark %s\n"),
469 471 _('deleting remote bookmark %s failed!\n')),
470 472 }
471 473
472 474
473 475 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
474 476 opargs=None):
475 477 '''Push outgoing changesets (limited by revs) from a local
476 478 repository to remote. Return an integer:
477 479 - None means nothing to push
478 480 - 0 means HTTP error
479 481 - 1 means we pushed and remote head count is unchanged *or*
480 482 we have outgoing changesets but refused to push
481 483 - other values as described by addchangegroup()
482 484 '''
483 485 if opargs is None:
484 486 opargs = {}
485 487 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
486 488 **pycompat.strkwargs(opargs))
487 489 if pushop.remote.local():
488 490 missing = (set(pushop.repo.requirements)
489 491 - pushop.remote.local().supported)
490 492 if missing:
491 493 msg = _("required features are not"
492 494 " supported in the destination:"
493 495 " %s") % (', '.join(sorted(missing)))
494 496 raise error.Abort(msg)
495 497
496 498 if not pushop.remote.canpush():
497 499 raise error.Abort(_("destination does not support push"))
498 500
499 501 if not pushop.remote.capable('unbundle'):
500 502 raise error.Abort(_('cannot push: destination does not support the '
501 503 'unbundle wire protocol command'))
502 504
503 505 # get lock as we might write phase data
504 506 wlock = lock = None
505 507 try:
506 508 # bundle2 push may receive a reply bundle touching bookmarks or other
507 509 # things requiring the wlock. Take it now to ensure proper ordering.
508 510 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
509 511 if (not _forcebundle1(pushop)) and maypushback:
510 512 wlock = pushop.repo.wlock()
511 513 lock = pushop.repo.lock()
512 514 pushop.trmanager = transactionmanager(pushop.repo,
513 515 'push-response',
514 516 pushop.remote.url())
515 517 except error.LockUnavailable as err:
516 518 # source repo cannot be locked.
517 519 # We do not abort the push, but just disable the local phase
518 520 # synchronisation.
519 521 msg = 'cannot lock source repository: %s\n' % err
520 522 pushop.ui.debug(msg)
521 523
522 524 with wlock or util.nullcontextmanager(), \
523 525 lock or util.nullcontextmanager(), \
524 526 pushop.trmanager or util.nullcontextmanager():
525 527 pushop.repo.checkpush(pushop)
526 528 _pushdiscovery(pushop)
527 529 if not _forcebundle1(pushop):
528 530 _pushbundle2(pushop)
529 531 _pushchangeset(pushop)
530 532 _pushsyncphase(pushop)
531 533 _pushobsolete(pushop)
532 534 _pushbookmark(pushop)
533 535
534 536 if repo.ui.configbool('experimental', 'remotenames'):
535 537 logexchange.pullremotenames(repo, remote)
536 538
537 539 return pushop
538 540
539 541 # list of steps to perform discovery before push
540 542 pushdiscoveryorder = []
541 543
542 544 # Mapping between step name and function
543 545 #
544 546 # This exists to help extensions wrap steps if necessary
545 547 pushdiscoverymapping = {}
546 548
547 549 def pushdiscovery(stepname):
548 550 """decorator for function performing discovery before push
549 551
550 552 The function is added to the step -> function mapping and appended to the
551 553 list of steps. Beware that decorated function will be added in order (this
552 554 may matter).
553 555
554 556 You can only use this decorator for a new step; if you want to wrap a step
555 557 from an extension, change the pushdiscovery dictionary directly."""
556 558 def dec(func):
557 559 assert stepname not in pushdiscoverymapping
558 560 pushdiscoverymapping[stepname] = func
559 561 pushdiscoveryorder.append(stepname)
560 562 return func
561 563 return dec
562 564
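A hypothetical extension registering a brand-new discovery step via this decorator (the step name and body are illustrative; wrapping an existing step should instead edit `pushdiscoverymapping`, per the docstring):

```python
@pushdiscovery('mycustomstep')
def _pushdiscoverymycustomstep(pushop):
    # runs after previously registered steps, in registration order
    pushop.ui.debug('running custom discovery\n')
```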
563 565 def _pushdiscovery(pushop):
564 566 """Run all discovery steps"""
565 567 for stepname in pushdiscoveryorder:
566 568 step = pushdiscoverymapping[stepname]
567 569 step(pushop)
568 570
569 571 @pushdiscovery('changeset')
570 572 def _pushdiscoverychangeset(pushop):
571 573 """discover the changeset that need to be pushed"""
572 574 fci = discovery.findcommonincoming
573 575 if pushop.revs:
574 576 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
575 577 ancestorsof=pushop.revs)
576 578 else:
577 579 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
578 580 common, inc, remoteheads = commoninc
579 581 fco = discovery.findcommonoutgoing
580 582 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
581 583 commoninc=commoninc, force=pushop.force)
582 584 pushop.outgoing = outgoing
583 585 pushop.remoteheads = remoteheads
584 586 pushop.incoming = inc
585 587
586 588 @pushdiscovery('phase')
587 589 def _pushdiscoveryphase(pushop):
588 590 """discover the phase that needs to be pushed
589 591
590 592 (computed for both success and failure case for changesets push)"""
591 593 outgoing = pushop.outgoing
592 594 unfi = pushop.repo.unfiltered()
593 595 remotephases = listkeys(pushop.remote, 'phases')
594 596
595 597 if (pushop.ui.configbool('ui', '_usedassubrepo')
596 598 and remotephases # server supports phases
597 599 and not pushop.outgoing.missing # no changesets to be pushed
598 600 and remotephases.get('publishing', False)):
599 601 # When:
600 602 # - this is a subrepo push
601 603 # - and remote support phase
602 604 # - and no changeset are to be pushed
603 605 # - and remote is publishing
604 606 # We may be in issue 3781 case!
605 607 # We drop the possible phase synchronisation done by
606 608 # courtesy to publish changesets possibly locally draft
607 609 # on the remote.
608 610 pushop.outdatedphases = []
609 611 pushop.fallbackoutdatedphases = []
610 612 return
611 613
612 614 pushop.remotephases = phases.remotephasessummary(pushop.repo,
613 615 pushop.fallbackheads,
614 616 remotephases)
615 617 droots = pushop.remotephases.draftroots
616 618
617 619 extracond = ''
618 620 if not pushop.remotephases.publishing:
619 621 extracond = ' and public()'
620 622 revset = 'heads((%%ln::%%ln) %s)' % extracond
621 623 # Get the list of all revs that are draft on the remote but public here.
622 624 # XXX Beware that the revset breaks if droots is not strictly
623 625 # XXX roots; we may want to ensure it is, but that is costly.
624 626 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
625 627 if not outgoing.missing:
626 628 future = fallback
627 629 else:
628 630 # adds changeset we are going to push as draft
629 631 #
630 632 # should not be necessary for publishing server, but because of an
631 633 # issue fixed in xxxxx we have to do it anyway.
632 634 fdroots = list(unfi.set('roots(%ln + %ln::)',
633 635 outgoing.missing, droots))
634 636 fdroots = [f.node() for f in fdroots]
635 637 future = list(unfi.set(revset, fdroots, pushop.futureheads))
636 638 pushop.outdatedphases = future
637 639 pushop.fallbackoutdatedphases = fallback
638 640
639 641 @pushdiscovery('obsmarker')
640 642 def _pushdiscoveryobsmarkers(pushop):
641 643 if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
642 644 return
643 645
644 646 if not pushop.repo.obsstore:
645 647 return
646 648
647 649 if 'obsolete' not in listkeys(pushop.remote, 'namespaces'):
648 650 return
649 651
650 652 repo = pushop.repo
651 653 # very naive computation, that can be quite expensive on big repo.
652 654 # However: evolution is currently slow on them anyway.
653 655 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
654 656 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
655 657
656 658 @pushdiscovery('bookmarks')
657 659 def _pushdiscoverybookmarks(pushop):
658 660 ui = pushop.ui
659 661 repo = pushop.repo.unfiltered()
660 662 remote = pushop.remote
661 663 ui.debug("checking for updated bookmarks\n")
662 664 ancestors = ()
663 665 if pushop.revs:
664 666 revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
665 667 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
666 668
667 669 remotebookmark = listkeys(remote, 'bookmarks')
668 670
669 671 explicit = set([repo._bookmarks.expandname(bookmark)
670 672 for bookmark in pushop.bookmarks])
671 673
672 674 remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
673 675 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
674 676
675 677 def safehex(x):
676 678 if x is None:
677 679 return x
678 680 return hex(x)
679 681
680 682 def hexifycompbookmarks(bookmarks):
681 683 return [(b, safehex(scid), safehex(dcid))
682 684 for (b, scid, dcid) in bookmarks]
683 685
684 686 comp = [hexifycompbookmarks(marks) for marks in comp]
685 687 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
686 688
687 689 def _processcompared(pushop, pushed, explicit, remotebms, comp):
688 690 """take decision on bookmark to pull from the remote bookmark
689 691
690 692 Exist to help extensions who want to alter this behavior.
691 693 """
692 694 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
693 695
694 696 repo = pushop.repo
695 697
696 698 for b, scid, dcid in advsrc:
697 699 if b in explicit:
698 700 explicit.remove(b)
699 701 if not pushed or repo[scid].rev() in pushed:
700 702 pushop.outbookmarks.append((b, dcid, scid))
701 703 # search added bookmark
702 704 for b, scid, dcid in addsrc:
703 705 if b in explicit:
704 706 explicit.remove(b)
705 707 pushop.outbookmarks.append((b, '', scid))
706 708 # search for overwritten bookmark
707 709 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
708 710 if b in explicit:
709 711 explicit.remove(b)
710 712 pushop.outbookmarks.append((b, dcid, scid))
711 713 # search for bookmark to delete
712 714 for b, scid, dcid in adddst:
713 715 if b in explicit:
714 716 explicit.remove(b)
715 717 # treat as "deleted locally"
716 718 pushop.outbookmarks.append((b, dcid, ''))
717 719 # identical bookmarks shouldn't get reported
718 720 for b, scid, dcid in same:
719 721 if b in explicit:
720 722 explicit.remove(b)
721 723
722 724 if explicit:
723 725 explicit = sorted(explicit)
724 726 # we should probably list all of them
725 727 pushop.ui.warn(_('bookmark %s does not exist on the local '
726 728 'or remote repository!\n') % explicit[0])
727 729 pushop.bkresult = 2
728 730
729 731 pushop.outbookmarks.sort()
730 732
731 733 def _pushcheckoutgoing(pushop):
732 734 outgoing = pushop.outgoing
733 735 unfi = pushop.repo.unfiltered()
734 736 if not outgoing.missing:
735 737 # nothing to push
736 738 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
737 739 return False
738 740 # something to push
739 741 if not pushop.force:
740 742 # if repo.obsstore == False --> no obsolete
741 743 # then, save the iteration
742 744 if unfi.obsstore:
743 745 # these messages are here because of the 80-char line limit
744 746 mso = _("push includes obsolete changeset: %s!")
745 747 mspd = _("push includes phase-divergent changeset: %s!")
746 748 mscd = _("push includes content-divergent changeset: %s!")
747 749 mst = {"orphan": _("push includes orphan changeset: %s!"),
748 750 "phase-divergent": mspd,
749 751 "content-divergent": mscd}
750 752 # If we are pushing and there is at least one
751 753 # obsolete or unstable changeset in missing, at
752 754 # least one of the missing heads will be obsolete or
753 755 # unstable. So checking heads only is ok.
754 756 for node in outgoing.missingheads:
755 757 ctx = unfi[node]
756 758 if ctx.obsolete():
757 759 raise error.Abort(mso % ctx)
758 760 elif ctx.isunstable():
759 761 # TODO print more than one instability in the abort
760 762 # message
761 763 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
762 764
763 765 discovery.checkheads(pushop)
764 766 return True
765 767
766 768 # List of names of steps to perform for an outgoing bundle2, order matters.
767 769 b2partsgenorder = []
768 770
769 771 # Mapping between step name and function
770 772 #
771 773 # This exists to help extensions wrap steps if necessary
772 774 b2partsgenmapping = {}
773 775
774 776 def b2partsgenerator(stepname, idx=None):
775 777 """decorator for function generating bundle2 part
776 778
777 779 The function is added to the step -> function mapping and appended to the
778 780 list of steps. Beware that decorated functions will be added in order
779 781 (this may matter).
780 782
781 783 You can only use this decorator for new steps; if you want to wrap a step
782 784 from an extension, change the b2partsgenmapping dictionary directly."""
783 785 def dec(func):
784 786 assert stepname not in b2partsgenmapping
785 787 b2partsgenmapping[stepname] = func
786 788 if idx is None:
787 789 b2partsgenorder.append(stepname)
788 790 else:
789 791 b2partsgenorder.insert(idx, stepname)
790 792 return func
791 793 return dec
792 794
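Conversely, wrapping an existing part generator means editing `b2partsgenmapping` directly, as the docstring advises. A hypothetical sketch:

```python
origchangeset = b2partsgenmapping['changeset']

def wrappedchangeset(pushop, bundler):
    # custom pre-processing could go here
    return origchangeset(pushop, bundler)  # preserve the reply handler

b2partsgenmapping['changeset'] = wrappedchangeset
```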
793 795 def _pushb2ctxcheckheads(pushop, bundler):
794 796 """Generate race condition checking parts
795 797
796 798 Exists as an independent function to aid extensions
797 799 """
798 800 # * 'force' do not check for push race,
799 801 # * if we don't push anything, there is nothing to check.
800 802 if not pushop.force and pushop.outgoing.missingheads:
801 803 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
802 804 emptyremote = pushop.pushbranchmap is None
803 805 if not allowunrelated or emptyremote:
804 806 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
805 807 else:
806 808 affected = set()
807 809 for branch, heads in pushop.pushbranchmap.iteritems():
808 810 remoteheads, newheads, unsyncedheads, discardedheads = heads
809 811 if remoteheads is not None:
810 812 remote = set(remoteheads)
811 813 affected |= set(discardedheads) & remote
812 814 affected |= remote - set(newheads)
813 815 if affected:
814 816 data = iter(sorted(affected))
815 817 bundler.newpart('check:updated-heads', data=data)
816 818
817 819 def _pushing(pushop):
818 820 """return True if we are pushing anything"""
819 821 return bool(pushop.outgoing.missing
820 822 or pushop.outdatedphases
821 823 or pushop.outobsmarkers
822 824 or pushop.outbookmarks)
823 825
824 826 @b2partsgenerator('check-bookmarks')
825 827 def _pushb2checkbookmarks(pushop, bundler):
826 828 """insert bookmark move checking"""
827 829 if not _pushing(pushop) or pushop.force:
828 830 return
829 831 b2caps = bundle2.bundle2caps(pushop.remote)
830 832 hasbookmarkcheck = 'bookmarks' in b2caps
831 833 if not (pushop.outbookmarks and hasbookmarkcheck):
832 834 return
833 835 data = []
834 836 for book, old, new in pushop.outbookmarks:
835 837 old = bin(old)
836 838 data.append((book, old))
837 839 checkdata = bookmod.binaryencode(data)
838 840 bundler.newpart('check:bookmarks', data=checkdata)
839 841
840 842 @b2partsgenerator('check-phases')
841 843 def _pushb2checkphases(pushop, bundler):
842 844 """insert phase move checking"""
843 845 if not _pushing(pushop) or pushop.force:
844 846 return
845 847 b2caps = bundle2.bundle2caps(pushop.remote)
846 848 hasphaseheads = 'heads' in b2caps.get('phases', ())
847 849 if pushop.remotephases is not None and hasphaseheads:
848 850 # check that the remote phase has not changed
849 851 checks = [[] for p in phases.allphases]
850 852 checks[phases.public].extend(pushop.remotephases.publicheads)
851 853 checks[phases.draft].extend(pushop.remotephases.draftroots)
852 854 if any(checks):
853 855 for nodes in checks:
854 856 nodes.sort()
855 857 checkdata = phases.binaryencode(checks)
856 858 bundler.newpart('check:phases', data=checkdata)
857 859
858 860 @b2partsgenerator('changeset')
859 861 def _pushb2ctx(pushop, bundler):
860 862 """handle changegroup push through bundle2
861 863
862 864 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
863 865 """
864 866 if 'changesets' in pushop.stepsdone:
865 867 return
866 868 pushop.stepsdone.add('changesets')
867 869 # Send known heads to the server for race detection.
868 870 if not _pushcheckoutgoing(pushop):
869 871 return
870 872 pushop.repo.prepushoutgoinghooks(pushop)
871 873
872 874 _pushb2ctxcheckheads(pushop, bundler)
873 875
874 876 b2caps = bundle2.bundle2caps(pushop.remote)
875 877 version = '01'
876 878 cgversions = b2caps.get('changegroup')
877 879 if cgversions: # 3.1 and 3.2 ship with an empty value
878 880 cgversions = [v for v in cgversions
879 881 if v in changegroup.supportedoutgoingversions(
880 882 pushop.repo)]
881 883 if not cgversions:
882 884 raise ValueError(_('no common changegroup version'))
883 885 version = max(cgversions)
884 886 cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
885 887 'push')
886 888 cgpart = bundler.newpart('changegroup', data=cgstream)
887 889 if cgversions:
888 890 cgpart.addparam('version', version)
889 891 if 'treemanifest' in pushop.repo.requirements:
890 892 cgpart.addparam('treemanifest', '1')
891 893 def handlereply(op):
892 894 """extract addchangegroup returns from server reply"""
893 895 cgreplies = op.records.getreplies(cgpart.id)
894 896 assert len(cgreplies['changegroup']) == 1
895 897 pushop.cgresult = cgreplies['changegroup'][0]['return']
896 898 return handlereply
897 899
898 900 @b2partsgenerator('phase')
899 901 def _pushb2phases(pushop, bundler):
900 902 """handle phase push through bundle2"""
901 903 if 'phases' in pushop.stepsdone:
902 904 return
903 905 b2caps = bundle2.bundle2caps(pushop.remote)
904 906 ui = pushop.repo.ui
905 907
906 908 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
907 909 haspushkey = 'pushkey' in b2caps
908 910 hasphaseheads = 'heads' in b2caps.get('phases', ())
909 911
910 912 if hasphaseheads and not legacyphase:
911 913 return _pushb2phaseheads(pushop, bundler)
912 914 elif haspushkey:
913 915 return _pushb2phasespushkey(pushop, bundler)
914 916
915 917 def _pushb2phaseheads(pushop, bundler):
916 918 """push phase information through a bundle2 - binary part"""
917 919 pushop.stepsdone.add('phases')
918 920 if pushop.outdatedphases:
919 921 updates = [[] for p in phases.allphases]
920 922 updates[0].extend(h.node() for h in pushop.outdatedphases)
921 923 phasedata = phases.binaryencode(updates)
922 924 bundler.newpart('phase-heads', data=phasedata)
923 925
924 926 def _pushb2phasespushkey(pushop, bundler):
925 927 """push phase information through a bundle2 - pushkey part"""
926 928 pushop.stepsdone.add('phases')
927 929 part2node = []
928 930
929 931 def handlefailure(pushop, exc):
930 932 targetid = int(exc.partid)
931 933 for partid, node in part2node:
932 934 if partid == targetid:
933 935 raise error.Abort(_('updating %s to public failed') % node)
934 936
935 937 enc = pushkey.encode
936 938 for newremotehead in pushop.outdatedphases:
937 939 part = bundler.newpart('pushkey')
938 940 part.addparam('namespace', enc('phases'))
939 941 part.addparam('key', enc(newremotehead.hex()))
940 942 part.addparam('old', enc('%d' % phases.draft))
941 943 part.addparam('new', enc('%d' % phases.public))
942 944 part2node.append((part.id, newremotehead))
943 945 pushop.pkfailcb[part.id] = handlefailure
944 946
945 947 def handlereply(op):
946 948 for partid, node in part2node:
947 949 partrep = op.records.getreplies(partid)
948 950 results = partrep['pushkey']
949 951 assert len(results) <= 1
950 952 msg = None
951 953 if not results:
952 954 msg = _('server ignored update of %s to public!\n') % node
953 955 elif not int(results[0]['return']):
954 956 msg = _('updating %s to public failed!\n') % node
955 957 if msg is not None:
956 958 pushop.ui.warn(msg)
957 959 return handlereply
958 960
959 961 @b2partsgenerator('obsmarkers')
960 962 def _pushb2obsmarkers(pushop, bundler):
961 963 if 'obsmarkers' in pushop.stepsdone:
962 964 return
963 965 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
964 966 if obsolete.commonversion(remoteversions) is None:
965 967 return
966 968 pushop.stepsdone.add('obsmarkers')
967 969 if pushop.outobsmarkers:
968 970 markers = sorted(pushop.outobsmarkers)
969 971 bundle2.buildobsmarkerspart(bundler, markers)
970 972
971 973 @b2partsgenerator('bookmarks')
972 974 def _pushb2bookmarks(pushop, bundler):
973 975 """handle bookmark push through bundle2"""
974 976 if 'bookmarks' in pushop.stepsdone:
975 977 return
976 978 b2caps = bundle2.bundle2caps(pushop.remote)
977 979
978 980 legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
979 981 legacybooks = 'bookmarks' in legacy
980 982
981 983 if not legacybooks and 'bookmarks' in b2caps:
982 984 return _pushb2bookmarkspart(pushop, bundler)
983 985 elif 'pushkey' in b2caps:
984 986 return _pushb2bookmarkspushkey(pushop, bundler)
985 987
986 988 def _bmaction(old, new):
987 989 """small utility for bookmark pushing"""
988 990 if not old:
989 991 return 'export'
990 992 elif not new:
991 993 return 'delete'
992 994 return 'update'
993 995
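# For reference, _bmaction below maps (old, new) node-hex pairs to the
# action names used by bookmsgmap (hex values here are illustrative):
#   _bmaction('', 'deadbeef')         -> 'export'  (new on the remote)
#   _bmaction('deadbeef', '')         -> 'delete'
#   _bmaction('deadbeef', 'cafebabe') -> 'update'
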
994 996 def _pushb2bookmarkspart(pushop, bundler):
995 997 pushop.stepsdone.add('bookmarks')
996 998 if not pushop.outbookmarks:
997 999 return
998 1000
999 1001 allactions = []
1000 1002 data = []
1001 1003 for book, old, new in pushop.outbookmarks:
1002 1004 new = bin(new)
1003 1005 data.append((book, new))
1004 1006 allactions.append((book, _bmaction(old, new)))
1005 1007 checkdata = bookmod.binaryencode(data)
1006 1008 bundler.newpart('bookmarks', data=checkdata)
1007 1009
1008 1010 def handlereply(op):
1009 1011 ui = pushop.ui
1010 1012 # if we reach this point, the push of the bookmark parts succeeded
1011 1013 for book, action in allactions:
1012 1014 ui.status(bookmsgmap[action][0] % book)
1013 1015
1014 1016 return handlereply
1015 1017
1016 1018 def _pushb2bookmarkspushkey(pushop, bundler):
1017 1019 pushop.stepsdone.add('bookmarks')
1018 1020 part2book = []
1019 1021 enc = pushkey.encode
1020 1022
1021 1023 def handlefailure(pushop, exc):
1022 1024 targetid = int(exc.partid)
1023 1025 for partid, book, action in part2book:
1024 1026 if partid == targetid:
1025 1027 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
1026 1028 # we should not be called for parts we did not generate
1027 1029 assert False
1028 1030
1029 1031 for book, old, new in pushop.outbookmarks:
1030 1032 part = bundler.newpart('pushkey')
1031 1033 part.addparam('namespace', enc('bookmarks'))
1032 1034 part.addparam('key', enc(book))
1033 1035 part.addparam('old', enc(old))
1034 1036 part.addparam('new', enc(new))
1035 1037 action = 'update'
1036 1038 if not old:
1037 1039 action = 'export'
1038 1040 elif not new:
1039 1041 action = 'delete'
1040 1042 part2book.append((part.id, book, action))
1041 1043 pushop.pkfailcb[part.id] = handlefailure
1042 1044
1043 1045 def handlereply(op):
1044 1046 ui = pushop.ui
1045 1047 for partid, book, action in part2book:
1046 1048 partrep = op.records.getreplies(partid)
1047 1049 results = partrep['pushkey']
1048 1050 assert len(results) <= 1
1049 1051 if not results:
1050 1052 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
1051 1053 else:
1052 1054 ret = int(results[0]['return'])
1053 1055 if ret:
1054 1056 ui.status(bookmsgmap[action][0] % book)
1055 1057 else:
1056 1058 ui.warn(bookmsgmap[action][1] % book)
1057 1059 if pushop.bkresult is not None:
1058 1060 pushop.bkresult = 1
1059 1061 return handlereply
1060 1062
1061 1063 @b2partsgenerator('pushvars', idx=0)
1062 1064 def _getbundlesendvars(pushop, bundler):
1063 1065 '''send shellvars via bundle2'''
1064 1066 pushvars = pushop.pushvars
1065 1067 if pushvars:
1066 1068 shellvars = {}
1067 1069 for raw in pushvars:
1068 1070 if '=' not in raw:
1069 1071 msg = ("unable to parse variable '%s', should follow "
1070 1072 "'KEY=VALUE' or 'KEY=' format")
1071 1073 raise error.Abort(msg % raw)
1072 1074 k, v = raw.split('=', 1)
1073 1075 shellvars[k] = v
1074 1076
1075 1077 part = bundler.newpart('pushvars')
1076 1078
1077 1079 for key, value in shellvars.iteritems():
1078 1080 part.addparam(key, value, mandatory=False)
1079 1081
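# Hedged sketch (standalone, illustrative name): the KEY=VALUE split used
# by the pushvars part above, without the ui/bundler plumbing.
def parsepushvars(rawvars):
    """Parse an iterable of 'KEY=VALUE' strings into a dict.

    Raises ValueError for malformed entries, mirroring the Abort above.
    """
    shellvars = {}
    for raw in rawvars:
        if '=' not in raw:
            raise ValueError("unable to parse variable %r, should follow "
                             "'KEY=VALUE' or 'KEY=' format" % raw)
        k, v = raw.split('=', 1)
        shellvars[k] = v
    return shellvars

# parsepushvars(['DEBUG=1', 'REASON=hotfix'])
# -> {'DEBUG': '1', 'REASON': 'hotfix'}
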
1080 1082 def _pushbundle2(pushop):
1081 1083 """push data to the remote using bundle2
1082 1084
1083 1085 The only currently supported type of data is changegroup but this will
1084 1086 evolve in the future."""
1085 1087 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1086 1088 pushback = (pushop.trmanager
1087 1089 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
1088 1090
1089 1091 # create reply capability
1090 1092 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
1091 1093 allowpushback=pushback,
1092 1094 role='client'))
1093 1095 bundler.newpart('replycaps', data=capsblob)
1094 1096 replyhandlers = []
1095 1097 for partgenname in b2partsgenorder:
1096 1098 partgen = b2partsgenmapping[partgenname]
1097 1099 ret = partgen(pushop, bundler)
1098 1100 if callable(ret):
1099 1101 replyhandlers.append(ret)
1100 1102 # do not push if nothing to push
1101 1103 if bundler.nbparts <= 1:
1102 1104 return
1103 1105 stream = util.chunkbuffer(bundler.getchunks())
1104 1106 try:
1105 1107 try:
1106 1108 with pushop.remote.commandexecutor() as e:
1107 1109 reply = e.callcommand('unbundle', {
1108 1110 'bundle': stream,
1109 1111 'heads': ['force'],
1110 1112 'url': pushop.remote.url(),
1111 1113 }).result()
1112 1114 except error.BundleValueError as exc:
1113 1115 raise error.Abort(_('missing support for %s') % exc)
1114 1116 try:
1115 1117 trgetter = None
1116 1118 if pushback:
1117 1119 trgetter = pushop.trmanager.transaction
1118 1120 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1119 1121 except error.BundleValueError as exc:
1120 1122 raise error.Abort(_('missing support for %s') % exc)
1121 1123 except bundle2.AbortFromPart as exc:
1122 1124 pushop.ui.status(_('remote: %s\n') % exc)
1123 1125 if exc.hint is not None:
1124 1126 pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
1125 1127 raise error.Abort(_('push failed on remote'))
1126 1128 except error.PushkeyFailed as exc:
1127 1129 partid = int(exc.partid)
1128 1130 if partid not in pushop.pkfailcb:
1129 1131 raise
1130 1132 pushop.pkfailcb[partid](pushop, exc)
1131 1133 for rephand in replyhandlers:
1132 1134 rephand(op)
1133 1135
1134 1136 def _pushchangeset(pushop):
1135 1137 """Make the actual push of changeset bundle to remote repo"""
1136 1138 if 'changesets' in pushop.stepsdone:
1137 1139 return
1138 1140 pushop.stepsdone.add('changesets')
1139 1141 if not _pushcheckoutgoing(pushop):
1140 1142 return
1141 1143
1142 1144 # Should have verified this in push().
1143 1145 assert pushop.remote.capable('unbundle')
1144 1146
1145 1147 pushop.repo.prepushoutgoinghooks(pushop)
1146 1148 outgoing = pushop.outgoing
1147 1149 # TODO: get bundlecaps from remote
1148 1150 bundlecaps = None
1149 1151 # create a changegroup from local
1150 1152 if pushop.revs is None and not (outgoing.excluded
1151 1153 or pushop.repo.changelog.filteredrevs):
1152 1154 # push everything,
1153 1155 # use the fast path, no race possible on push
1154 1156 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
1155 1157 fastpath=True, bundlecaps=bundlecaps)
1156 1158 else:
1157 1159 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
1158 1160 'push', bundlecaps=bundlecaps)
1159 1161
1160 1162 # apply changegroup to remote
1161 1163 # local repo finds heads on server, finds out what
1162 1164 # revs it must push. once revs transferred, if server
1163 1165 # finds it has different heads (someone else won
1164 1166 # commit/push race), server aborts.
1165 1167 if pushop.force:
1166 1168 remoteheads = ['force']
1167 1169 else:
1168 1170 remoteheads = pushop.remoteheads
1169 1171 # ssh: return remote's addchangegroup()
1170 1172 # http: return remote's addchangegroup() or 0 for error
1171 1173 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
1172 1174 pushop.repo.url())
1173 1175
1174 1176 def _pushsyncphase(pushop):
1175 1177 """synchronise phase information locally and remotely"""
1176 1178 cheads = pushop.commonheads
1177 1179 # even when we don't push, exchanging phase data is useful
1178 1180 remotephases = listkeys(pushop.remote, 'phases')
1179 1181 if (pushop.ui.configbool('ui', '_usedassubrepo')
1180 1182 and remotephases # server supports phases
1181 1183 and pushop.cgresult is None # nothing was pushed
1182 1184 and remotephases.get('publishing', False)):
1183 1185 # When:
1184 1186 # - this is a subrepo push
1185 1187 # - and the remote supports phases
1186 1188 # - and no changeset was pushed
1187 1189 # - and the remote is publishing
1188 1190 # We may be in the issue 3871 case!
1189 1191 # We drop the phase synchronisation otherwise done as a
1190 1192 # courtesy, to avoid publishing changesets that are possibly
1191 1193 # still draft locally on the remote.
1192 1194 remotephases = {'publishing': 'True'}
1193 1195 if not remotephases: # old server, or public-only reply from non-publishing
1194 1196 _localphasemove(pushop, cheads)
1195 1197 # don't push any phase data as there is nothing to push
1196 1198 else:
1197 1199 ana = phases.analyzeremotephases(pushop.repo, cheads,
1198 1200 remotephases)
1199 1201 pheads, droots = ana
1200 1202 ### Apply remote phase on local
1201 1203 if remotephases.get('publishing', False):
1202 1204 _localphasemove(pushop, cheads)
1203 1205 else: # publish = False
1204 1206 _localphasemove(pushop, pheads)
1205 1207 _localphasemove(pushop, cheads, phases.draft)
1206 1208 ### Apply local phase on remote
1207 1209
1208 1210 if pushop.cgresult:
1209 1211 if 'phases' in pushop.stepsdone:
1210 1212 # phases already pushed through bundle2
1211 1213 return
1212 1214 outdated = pushop.outdatedphases
1213 1215 else:
1214 1216 outdated = pushop.fallbackoutdatedphases
1215 1217
1216 1218 pushop.stepsdone.add('phases')
1217 1219
1218 1220 # filter heads already turned public by the push
1219 1221 outdated = [c for c in outdated if c.node() not in pheads]
1220 1222 # fallback to independent pushkey command
1221 1223 for newremotehead in outdated:
1222 1224 with pushop.remote.commandexecutor() as e:
1223 1225 r = e.callcommand('pushkey', {
1224 1226 'namespace': 'phases',
1225 1227 'key': newremotehead.hex(),
1226 1228 'old': '%d' % phases.draft,
1227 1229 'new': '%d' % phases.public
1228 1230 }).result()
1229 1231
1230 1232 if not r:
1231 1233 pushop.ui.warn(_('updating %s to public failed!\n')
1232 1234 % newremotehead)
1233 1235
1234 1236 def _localphasemove(pushop, nodes, phase=phases.public):
1235 1237 """move <nodes> to <phase> in the local source repo"""
1236 1238 if pushop.trmanager:
1237 1239 phases.advanceboundary(pushop.repo,
1238 1240 pushop.trmanager.transaction(),
1239 1241 phase,
1240 1242 nodes)
1241 1243 else:
1242 1244 # repo is not locked, do not change any phases!
1243 1245 # Informs the user that phases should have been moved when
1244 1246 # applicable.
1245 1247 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1246 1248 phasestr = phases.phasenames[phase]
1247 1249 if actualmoves:
1248 1250 pushop.ui.status(_('cannot lock source repo, skipping '
1249 1251 'local %s phase update\n') % phasestr)
1250 1252
1251 1253 def _pushobsolete(pushop):
1252 1254 """utility function to push obsolete markers to a remote"""
1253 1255 if 'obsmarkers' in pushop.stepsdone:
1254 1256 return
1255 1257 repo = pushop.repo
1256 1258 remote = pushop.remote
1257 1259 pushop.stepsdone.add('obsmarkers')
1258 1260 if pushop.outobsmarkers:
1259 1261 pushop.ui.debug('try to push obsolete markers to remote\n')
1260 1262 rslts = []
1261 1263 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1262 1264 for key in sorted(remotedata, reverse=True):
1263 1265 # reverse sort to ensure we end with dump0
1264 1266 data = remotedata[key]
1265 1267 rslts.append(remote.pushkey('obsolete', key, '', data))
1266 1268 if [r for r in rslts if not r]:
1267 1269 msg = _('failed to push some obsolete markers!\n')
1268 1270 repo.ui.warn(msg)
1269 1271
1270 1272 def _pushbookmark(pushop):
1271 1273 """Update bookmark position on remote"""
1272 1274 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1273 1275 return
1274 1276 pushop.stepsdone.add('bookmarks')
1275 1277 ui = pushop.ui
1276 1278 remote = pushop.remote
1277 1279
1278 1280 for b, old, new in pushop.outbookmarks:
1279 1281 action = 'update'
1280 1282 if not old:
1281 1283 action = 'export'
1282 1284 elif not new:
1283 1285 action = 'delete'
1284 1286
1285 1287 with remote.commandexecutor() as e:
1286 1288 r = e.callcommand('pushkey', {
1287 1289 'namespace': 'bookmarks',
1288 1290 'key': b,
1289 1291 'old': old,
1290 1292 'new': new,
1291 1293 }).result()
1292 1294
1293 1295 if r:
1294 1296 ui.status(bookmsgmap[action][0] % b)
1295 1297 else:
1296 1298 ui.warn(bookmsgmap[action][1] % b)
1297 1299 # discovery can have set the value from an invalid entry
1298 1300 if pushop.bkresult is not None:
1299 1301 pushop.bkresult = 1
1300 1302
1301 1303 class pulloperation(object):
1302 1304 """A object that represent a single pull operation
1303 1305
1304 1306 It purpose is to carry pull related state and very common operation.
1305 1307
1306 1308 A new should be created at the beginning of each pull and discarded
1307 1309 afterward.
1308 1310 """
1309 1311
1310 1312 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1311 1313 remotebookmarks=None, streamclonerequested=None):
1312 1314 # repo we pull into
1313 1315 self.repo = repo
1314 1316 # repo we pull from
1315 1317 self.remote = remote
1316 1318 # revisions we try to pull (None means "all")
1317 1319 self.heads = heads
1318 1320 # bookmarks pulled explicitly
1319 1321 self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
1320 1322 for bookmark in bookmarks]
1321 1323 # do we force pull?
1322 1324 self.force = force
1323 1325 # whether a streaming clone was requested
1324 1326 self.streamclonerequested = streamclonerequested
1325 1327 # transaction manager
1326 1328 self.trmanager = None
1327 1329 # set of common changesets between local and remote before pull
1328 1330 self.common = None
1329 1331 # set of pulled heads
1330 1332 self.rheads = None
1331 1333 # list of missing changesets to fetch remotely
1332 1334 self.fetch = None
1333 1335 # remote bookmarks data
1334 1336 self.remotebookmarks = remotebookmarks
1335 1337 # result of changegroup pulling (used as return code by pull)
1336 1338 self.cgresult = None
1337 1339 # list of steps already done
1338 1340 self.stepsdone = set()
1339 1341 # Whether we attempted a clone from pre-generated bundles.
1340 1342 self.clonebundleattempted = False
1341 1343
1342 1344 @util.propertycache
1343 1345 def pulledsubset(self):
1344 1346 """heads of the set of changeset target by the pull"""
1345 1347 # compute target subset
1346 1348 if self.heads is None:
1347 1349 # We pulled everything possible
1348 1350 # sync on everything common
1349 1351 c = set(self.common)
1350 1352 ret = list(self.common)
1351 1353 for n in self.rheads:
1352 1354 if n not in c:
1353 1355 ret.append(n)
1354 1356 return ret
1355 1357 else:
1356 1358 # We pulled a specific subset
1357 1359 # sync on this subset
1358 1360 return self.heads
1359 1361
1360 1362 @util.propertycache
1361 1363 def canusebundle2(self):
1362 1364 return not _forcebundle1(self)
1363 1365
1364 1366 @util.propertycache
1365 1367 def remotebundle2caps(self):
1366 1368 return bundle2.bundle2caps(self.remote)
1367 1369
1368 1370 def gettransaction(self):
1369 1371 # deprecated; talk to trmanager directly
1370 1372 return self.trmanager.transaction()
1371 1373
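# Hedged sketch (standalone, illustrative name): pulloperation.pulledsubset
# above as a pure function over plain node collections; the real property
# also caches its result via util.propertycache.
def pulledsubset(common, rheads, heads=None):
    """Heads targeted by the pull: the explicit heads when given, otherwise
    everything common plus any remote head not already in common."""
    if heads is not None:
        return heads
    c = set(common)
    return list(common) + [n for n in rheads if n not in c]
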
1372 1374 class transactionmanager(util.transactional):
1373 1375 """An object to manage the life cycle of a transaction
1374 1376
1375 1377 It creates the transaction on demand and calls the appropriate hooks when
1376 1378 closing the transaction."""
1377 1379 def __init__(self, repo, source, url):
1378 1380 self.repo = repo
1379 1381 self.source = source
1380 1382 self.url = url
1381 1383 self._tr = None
1382 1384
1383 1385 def transaction(self):
1384 1386 """Return an open transaction object, constructing if necessary"""
1385 1387 if not self._tr:
1386 1388 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1387 1389 self._tr = self.repo.transaction(trname)
1388 1390 self._tr.hookargs['source'] = self.source
1389 1391 self._tr.hookargs['url'] = self.url
1390 1392 return self._tr
1391 1393
1392 1394 def close(self):
1393 1395 """close transaction if created"""
1394 1396 if self._tr is not None:
1395 1397 self._tr.close()
1396 1398
1397 1399 def release(self):
1398 1400 """release transaction if created"""
1399 1401 if self._tr is not None:
1400 1402 self._tr.release()
1401 1403
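# Hedged sketch (standalone, illustrative name): the lazy-transaction
# pattern of transactionmanager above, with a generic factory instead of
# repo.transaction(). For the real class, util.transactional supplies the
# with-block behaviour (close() on success, release() on error).
class lazytransaction(object):
    def __init__(self, factory):
        self._factory = factory
        self._tr = None
    def transaction(self):
        if self._tr is None:
            self._tr = self._factory()  # constructed on first demand
        return self._tr
    def close(self):
        if self._tr is not None:
            self._tr.close()            # commit only if ever created
    def release(self):
        if self._tr is not None:
            self._tr.release()          # abort only if ever created
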
1402 1404 def listkeys(remote, namespace):
1403 1405 with remote.commandexecutor() as e:
1404 1406 return e.callcommand('listkeys', {'namespace': namespace}).result()
1405 1407
1406 1408 def _fullpullbundle2(repo, pullop):
1407 1409 # The server may send a partial reply, i.e. when inlining
1408 1410 # pre-computed bundles. In that case, update the common
1409 1411 # set based on the results and pull another bundle.
1410 1412 #
1411 1413 # There are two indicators that the process is finished:
1412 1414 # - no changeset has been added, or
1413 1415 # - all remote heads are known locally.
1414 1416 # The head check must use the unfiltered view as obsoletion
1415 1417 # markers can hide heads.
1416 1418 unfi = repo.unfiltered()
1417 1419 unficl = unfi.changelog
1418 1420 def headsofdiff(h1, h2):
1419 1421 """Returns heads(h1 % h2)"""
1420 1422 res = unfi.set('heads(%ln %% %ln)', h1, h2)
1421 1423 return set(ctx.node() for ctx in res)
1422 1424 def headsofunion(h1, h2):
1423 1425 """Returns heads((h1 + h2) - null)"""
1424 1426 res = unfi.set('heads((%ln + %ln - null))', h1, h2)
1425 1427 return set(ctx.node() for ctx in res)
1426 1428 while True:
1427 1429 old_heads = unficl.heads()
1428 1430 clstart = len(unficl)
1429 1431 _pullbundle2(pullop)
1430 1432 if changegroup.NARROW_REQUIREMENT in repo.requirements:
1431 1433 # XXX narrow clones filter the heads on the server side during
1432 1434 # XXX getbundle and result in partial replies as well.
1433 1435 # XXX Disable pull bundles in this case as band aid to avoid
1434 1436 # XXX extra round trips.
1435 1437 break
1436 1438 if clstart == len(unficl):
1437 1439 break
1438 1440 if all(unficl.hasnode(n) for n in pullop.rheads):
1439 1441 break
1440 1442 new_heads = headsofdiff(unficl.heads(), old_heads)
1441 1443 pullop.common = headsofunion(new_heads, pullop.common)
1442 1444 pullop.rheads = set(pullop.rheads) - pullop.common
1443 1445
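# Hedged sketch (standalone, illustrative name): folding one partial reply
# into the discovery frontier, using plain set operations where the loop
# above uses the headsofdiff/headsofunion revset helpers.
def advancefrontier(common, rheads, newheads):
    """Grow common with freshly pulled heads and drop satisfied rheads."""
    common = set(common) | set(newheads)
    rheads = set(rheads) - common
    return common, rheads
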
1444 1446 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1445 1447 streamclonerequested=None):
1446 1448 """Fetch repository data from a remote.
1447 1449
1448 1450 This is the main function used to retrieve data from a remote repository.
1449 1451
1450 1452 ``repo`` is the local repository to clone into.
1451 1453 ``remote`` is a peer instance.
1452 1454 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1453 1455 default) means to pull everything from the remote.
1454 1456 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1455 1457 default, all remote bookmarks are pulled.
1456 1458 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1457 1459 initialization.
1458 1460 ``streamclonerequested`` is a boolean indicating whether a "streaming
1459 1461 clone" is requested. A "streaming clone" is essentially a raw file copy
1460 1462 of revlogs from the server. This only works when the local repository is
1461 1463 empty. The default value of ``None`` means to respect the server
1462 1464 configuration for preferring stream clones.
1463 1465
1464 1466 Returns the ``pulloperation`` created for this pull.
1465 1467 """
1466 1468 if opargs is None:
1467 1469 opargs = {}
1468 1470 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1469 1471 streamclonerequested=streamclonerequested,
1470 1472 **pycompat.strkwargs(opargs))
1471 1473
1472 1474 peerlocal = pullop.remote.local()
1473 1475 if peerlocal:
1474 1476 missing = set(peerlocal.requirements) - pullop.repo.supported
1475 1477 if missing:
1476 1478 msg = _("required features are not"
1477 1479 " supported in the destination:"
1478 1480 " %s") % (', '.join(sorted(missing)))
1479 1481 raise error.Abort(msg)
1480 1482
1481 1483 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1482 1484 with repo.wlock(), repo.lock(), pullop.trmanager:
1483 1485 # This should ideally be in _pullbundle2(). However, it needs to run
1484 1486 # before discovery to avoid extra work.
1485 1487 _maybeapplyclonebundle(pullop)
1486 1488 streamclone.maybeperformlegacystreamclone(pullop)
1487 1489 _pulldiscovery(pullop)
1488 1490 if pullop.canusebundle2:
1489 1491 _fullpullbundle2(repo, pullop)
1490 1492 _pullchangeset(pullop)
1491 1493 _pullphase(pullop)
1492 1494 _pullbookmarks(pullop)
1493 1495 _pullobsolete(pullop)
1494 1496
1495 1497 # storing remotenames
1496 1498 if repo.ui.configbool('experimental', 'remotenames'):
1497 1499 logexchange.pullremotenames(repo, remote)
1498 1500
1499 1501 return pullop
1500 1502
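# Hedged usage sketch (not a doctest; assumes 'repo' is a local repository
# and 'peer' came from something like hg.peer(ui, {}, url)):
#
#     pullop = exchange.pull(repo, peer)       # pull everything
#     if pullop.cgresult == 0:
#         repo.ui.status('no changesets were added\n')
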
1501 1503 # list of steps to perform discovery before pull
1502 1504 pulldiscoveryorder = []
1503 1505
1504 1506 # Mapping between step name and function
1505 1507 #
1506 1508 # This exists to help extensions wrap steps if necessary
1507 1509 pulldiscoverymapping = {}
1508 1510
1509 1511 def pulldiscovery(stepname):
1510 1512 """decorator for function performing discovery before pull
1511 1513
1512 1514 The function is added to the step -> function mapping and appended to the
1513 1515 list of steps. Beware that decorated functions will be added in order (this
1514 1516 may matter).
1515 1517
1516 1518 You can only use this decorator for a new step; if you want to wrap a step
1517 1519 from an extension, change the pulldiscoverymapping dictionary directly."""
1518 1520 def dec(func):
1519 1521 assert stepname not in pulldiscoverymapping
1520 1522 pulldiscoverymapping[stepname] = func
1521 1523 pulldiscoveryorder.append(stepname)
1522 1524 return func
1523 1525 return dec
1524 1526
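# Hedged example (hypothetical step name and body): how an extension would
# register a brand-new discovery step with the decorator above; steps run
# in registration order, so where the definition lives matters.
@pulldiscovery('example:noop')
def _pulldiscoverynoop(pullop):
    # A real step would inspect pullop and record what it discovered.
    pullop.repo.ui.debug('example discovery step ran\n')
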
1525 1527 def _pulldiscovery(pullop):
1526 1528 """Run all discovery steps"""
1527 1529 for stepname in pulldiscoveryorder:
1528 1530 step = pulldiscoverymapping[stepname]
1529 1531 step(pullop)
1530 1532
1531 1533 @pulldiscovery('b1:bookmarks')
1532 1534 def _pullbookmarkbundle1(pullop):
1533 1535 """fetch bookmark data in bundle1 case
1534 1536
1535 1537 If not using bundle2, we have to fetch bookmarks before changeset
1536 1538 discovery to reduce the chance and impact of race conditions."""
1537 1539 if pullop.remotebookmarks is not None:
1538 1540 return
1539 1541 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1540 1542 # all known bundle2 servers now support listkeys, but let's be nice with
1541 1543 # newer implementations.
1542 1544 return
1543 1545 books = listkeys(pullop.remote, 'bookmarks')
1544 1546 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1545 1547
1546 1548
1547 1549 @pulldiscovery('changegroup')
1548 1550 def _pulldiscoverychangegroup(pullop):
1549 1551 """discovery phase for the pull
1550 1552
1551 1553 Currently handles changeset discovery only; it will change to handle all
1552 1554 discovery at some point."""
1553 1555 tmp = discovery.findcommonincoming(pullop.repo,
1554 1556 pullop.remote,
1555 1557 heads=pullop.heads,
1556 1558 force=pullop.force)
1557 1559 common, fetch, rheads = tmp
1558 1560 nm = pullop.repo.unfiltered().changelog.nodemap
1559 1561 if fetch and rheads:
1560 1562 # If a remote head is filtered locally, put it back in common.
1561 1563 #
1562 1564 # This is a hackish solution to catch most "common but locally
1563 1565 # hidden" situations. We do not perform discovery on the unfiltered
1564 1566 # repository because it ends up doing a pathological amount of round
1565 1567 # trips for a huge amount of changesets we do not care about.
1566 1568 #
1567 1569 # If a set of such "common but filtered" changesets exists on the server
1568 1570 # but does not include a remote head, we'll not be able to detect it,
1569 1571 scommon = set(common)
1570 1572 for n in rheads:
1571 1573 if n in nm:
1572 1574 if n not in scommon:
1573 1575 common.append(n)
1574 1576 if set(rheads).issubset(set(common)):
1575 1577 fetch = []
1576 1578 pullop.common = common
1577 1579 pullop.fetch = fetch
1578 1580 pullop.rheads = rheads
1579 1581
1580 1582 def _pullbundle2(pullop):
1581 1583 """pull data using bundle2
1582 1584
1583 1585 For now, the only supported data is the changegroup."""
1584 1586 kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}
1585 1587
1586 1588 # make ui easier to access
1587 1589 ui = pullop.repo.ui
1588 1590
1589 1591 # At the moment we don't do stream clones over bundle2. If that is
1590 1592 # implemented then here's where the check for that will go.
1591 1593 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1592 1594
1593 1595 # declare pull perimeters
1594 1596 kwargs['common'] = pullop.common
1595 1597 kwargs['heads'] = pullop.heads or pullop.rheads
1596 1598
1597 1599 if streaming:
1598 1600 kwargs['cg'] = False
1599 1601 kwargs['stream'] = True
1600 1602 pullop.stepsdone.add('changegroup')
1601 1603 pullop.stepsdone.add('phases')
1602 1604
1603 1605 else:
1604 1606 # pulling changegroup
1605 1607 pullop.stepsdone.add('changegroup')
1606 1608
1607 1609 kwargs['cg'] = pullop.fetch
1608 1610
1609 1611 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
1610 1612 hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
1611 1613 if (not legacyphase and hasbinaryphase):
1612 1614 kwargs['phases'] = True
1613 1615 pullop.stepsdone.add('phases')
1614 1616
1615 1617 if 'listkeys' in pullop.remotebundle2caps:
1616 1618 if 'phases' not in pullop.stepsdone:
1617 1619 kwargs['listkeys'] = ['phases']
1618 1620
1619 1621 bookmarksrequested = False
1620 1622 legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
1621 1623 hasbinarybook = 'bookmarks' in pullop.remotebundle2caps
1622 1624
1623 1625 if pullop.remotebookmarks is not None:
1624 1626 pullop.stepsdone.add('request-bookmarks')
1625 1627
1626 1628 if ('request-bookmarks' not in pullop.stepsdone
1627 1629 and pullop.remotebookmarks is None
1628 1630 and not legacybookmark and hasbinarybook):
1629 1631 kwargs['bookmarks'] = True
1630 1632 bookmarksrequested = True
1631 1633
1632 1634 if 'listkeys' in pullop.remotebundle2caps:
1633 1635 if 'request-bookmarks' not in pullop.stepsdone:
1634 1636 # make sure to always include bookmark data when migrating
1635 1637 # `hg incoming --bundle` to using this function.
1636 1638 pullop.stepsdone.add('request-bookmarks')
1637 1639 kwargs.setdefault('listkeys', []).append('bookmarks')
1638 1640
1639 1641 # If this is a full pull / clone and the server supports the clone bundles
1640 1642 # feature, tell the server whether we attempted a clone bundle. The
1641 1643 # presence of this flag indicates the client supports clone bundles. This
1642 1644 # will enable the server to treat clients that support clone bundles
1643 1645 # differently from those that don't.
1644 1646 if (pullop.remote.capable('clonebundles')
1645 1647 and pullop.heads is None and list(pullop.common) == [nullid]):
1646 1648 kwargs['cbattempted'] = pullop.clonebundleattempted
1647 1649
1648 1650 if streaming:
1649 1651 pullop.repo.ui.status(_('streaming all changes\n'))
1650 1652 elif not pullop.fetch:
1651 1653 pullop.repo.ui.status(_("no changes found\n"))
1652 1654 pullop.cgresult = 0
1653 1655 else:
1654 1656 if pullop.heads is None and list(pullop.common) == [nullid]:
1655 1657 pullop.repo.ui.status(_("requesting all changes\n"))
1656 1658 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1657 1659 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1658 1660 if obsolete.commonversion(remoteversions) is not None:
1659 1661 kwargs['obsmarkers'] = True
1660 1662 pullop.stepsdone.add('obsmarkers')
1661 1663 _pullbundle2extraprepare(pullop, kwargs)
1662 1664
1663 1665 with pullop.remote.commandexecutor() as e:
1664 1666 args = dict(kwargs)
1665 1667 args['source'] = 'pull'
1666 1668 bundle = e.callcommand('getbundle', args).result()
1667 1669
1668 1670 try:
1669 1671 op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction,
1670 1672 source='pull')
1671 1673 op.modes['bookmarks'] = 'records'
1672 1674 bundle2.processbundle(pullop.repo, bundle, op=op)
1673 1675 except bundle2.AbortFromPart as exc:
1674 1676 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1675 1677 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1676 1678 except error.BundleValueError as exc:
1677 1679 raise error.Abort(_('missing support for %s') % exc)
1678 1680
1679 1681 if pullop.fetch:
1680 1682 pullop.cgresult = bundle2.combinechangegroupresults(op)
1681 1683
1682 1684 # processing phases change
1683 1685 for namespace, value in op.records['listkeys']:
1684 1686 if namespace == 'phases':
1685 1687 _pullapplyphases(pullop, value)
1686 1688
1687 1689 # processing bookmark update
1688 1690 if bookmarksrequested:
1689 1691 books = {}
1690 1692 for record in op.records['bookmarks']:
1691 1693 books[record['bookmark']] = record["node"]
1692 1694 pullop.remotebookmarks = books
1693 1695 else:
1694 1696 for namespace, value in op.records['listkeys']:
1695 1697 if namespace == 'bookmarks':
1696 1698 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1697 1699
1698 1700 # bookmark data were either already there or pulled in the bundle
1699 1701 if pullop.remotebookmarks is not None:
1700 1702 _pullbookmarks(pullop)
1701 1703
1702 1704 def _pullbundle2extraprepare(pullop, kwargs):
1703 1705 """hook function so that extensions can extend the getbundle call"""
1704 1706
1705 1707 def _pullchangeset(pullop):
1706 1708 """pull changeset from unbundle into the local repo"""
1707 1709 # We delay opening the transaction as late as possible so we don't
1708 1710 # open a transaction for nothing, which would break future useful
1709 1711 # rollback calls
1710 1712 if 'changegroup' in pullop.stepsdone:
1711 1713 return
1712 1714 pullop.stepsdone.add('changegroup')
1713 1715 if not pullop.fetch:
1714 1716 pullop.repo.ui.status(_("no changes found\n"))
1715 1717 pullop.cgresult = 0
1716 1718 return
1717 1719 tr = pullop.gettransaction()
1718 1720 if pullop.heads is None and list(pullop.common) == [nullid]:
1719 1721 pullop.repo.ui.status(_("requesting all changes\n"))
1720 1722 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1721 1723 # issue1320, avoid a race if remote changed after discovery
1722 1724 pullop.heads = pullop.rheads
1723 1725
1724 1726 if pullop.remote.capable('getbundle'):
1725 1727 # TODO: get bundlecaps from remote
1726 1728 cg = pullop.remote.getbundle('pull', common=pullop.common,
1727 1729 heads=pullop.heads or pullop.rheads)
1728 1730 elif pullop.heads is None:
1729 1731 with pullop.remote.commandexecutor() as e:
1730 1732 cg = e.callcommand('changegroup', {
1731 1733 'nodes': pullop.fetch,
1732 1734 'source': 'pull',
1733 1735 }).result()
1734 1736
1735 1737 elif not pullop.remote.capable('changegroupsubset'):
1736 1738 raise error.Abort(_("partial pull cannot be done because "
1737 1739 "other repository doesn't support "
1738 1740 "changegroupsubset."))
1739 1741 else:
1740 1742 with pullop.remote.commandexecutor() as e:
1741 1743 cg = e.callcommand('changegroupsubset', {
1742 1744 'bases': pullop.fetch,
1743 1745 'heads': pullop.heads,
1744 1746 'source': 'pull',
1745 1747 }).result()
1746 1748
1747 1749 bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
1748 1750 pullop.remote.url())
1749 1751 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1750 1752
1751 1753 def _pullphase(pullop):
1752 1754 # Get remote phases data from remote
1753 1755 if 'phases' in pullop.stepsdone:
1754 1756 return
1755 1757 remotephases = listkeys(pullop.remote, 'phases')
1756 1758 _pullapplyphases(pullop, remotephases)
1757 1759
1758 1760 def _pullapplyphases(pullop, remotephases):
1759 1761 """apply phase movement from observed remote state"""
1760 1762 if 'phases' in pullop.stepsdone:
1761 1763 return
1762 1764 pullop.stepsdone.add('phases')
1763 1765 publishing = bool(remotephases.get('publishing', False))
1764 1766 if remotephases and not publishing:
1765 1767 # remote is new and non-publishing
1766 1768 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1767 1769 pullop.pulledsubset,
1768 1770 remotephases)
1769 1771 dheads = pullop.pulledsubset
1770 1772 else:
1771 1773 # Remote is old or publishing all common changesets
1772 1774 # should be seen as public
1773 1775 pheads = pullop.pulledsubset
1774 1776 dheads = []
1775 1777 unfi = pullop.repo.unfiltered()
1776 1778 phase = unfi._phasecache.phase
1777 1779 rev = unfi.changelog.nodemap.get
1778 1780 public = phases.public
1779 1781 draft = phases.draft
1780 1782
1781 1783 # exclude changesets already public locally and update the others
1782 1784 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1783 1785 if pheads:
1784 1786 tr = pullop.gettransaction()
1785 1787 phases.advanceboundary(pullop.repo, tr, public, pheads)
1786 1788
1787 1789 # exclude changesets already draft locally and update the others
1788 1790 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1789 1791 if dheads:
1790 1792 tr = pullop.gettransaction()
1791 1793 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1792 1794
1793 1795 def _pullbookmarks(pullop):
1794 1796 """process the remote bookmark information to update the local one"""
1795 1797 if 'bookmarks' in pullop.stepsdone:
1796 1798 return
1797 1799 pullop.stepsdone.add('bookmarks')
1798 1800 repo = pullop.repo
1799 1801 remotebookmarks = pullop.remotebookmarks
1800 1802 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1801 1803 pullop.remote.url(),
1802 1804 pullop.gettransaction,
1803 1805 explicit=pullop.explicitbookmarks)
1804 1806
1805 1807 def _pullobsolete(pullop):
1806 1808 """utility function to pull obsolete markers from a remote
1807 1809
1808 1810 `gettransaction` is a function that returns the pull transaction, creating
1809 1811 one if necessary. We return the transaction to inform the calling code that
1810 1812 a new transaction has been created (when applicable).
1811 1813
1812 1814 Exists mostly to allow overriding for experimentation purposes"""
1813 1815 if 'obsmarkers' in pullop.stepsdone:
1814 1816 return
1815 1817 pullop.stepsdone.add('obsmarkers')
1816 1818 tr = None
1817 1819 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1818 1820 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1819 1821 remoteobs = listkeys(pullop.remote, 'obsolete')
1820 1822 if 'dump0' in remoteobs:
1821 1823 tr = pullop.gettransaction()
1822 1824 markers = []
1823 1825 for key in sorted(remoteobs, reverse=True):
1824 1826 if key.startswith('dump'):
1825 1827 data = util.b85decode(remoteobs[key])
1826 1828 version, newmarks = obsolete._readmarkers(data)
1827 1829 markers += newmarks
1828 1830 if markers:
1829 1831 pullop.repo.obsstore.add(tr, markers)
1830 1832 pullop.repo.invalidatevolatilesets()
1831 1833 return tr
1832 1834
1833 1835 def caps20to10(repo, role):
1834 1836 """return a set with appropriate options to use bundle20 during getbundle"""
1835 1837 caps = {'HG20'}
1836 1838 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
1837 1839 caps.add('bundle2=' + urlreq.quote(capsblob))
1838 1840 return caps
1839 1841
1840 1842 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1841 1843 getbundle2partsorder = []
1842 1844
1843 1845 # Mapping between step name and function
1844 1846 #
1845 1847 # This exists to help extensions wrap steps if necessary
1846 1848 getbundle2partsmapping = {}
1847 1849
1848 1850 def getbundle2partsgenerator(stepname, idx=None):
1849 1851 """decorator for function generating bundle2 part for getbundle
1850 1852
1851 1853 The function is added to the step -> function mapping and appended to the
1852 1854 list of steps. Beware that decorated functions will be added in order
1853 1855 (this may matter).
1854 1856
1855 1857 You can only use this decorator for new steps; if you want to wrap a step
1856 1858 from an extension, alter the getbundle2partsmapping dictionary directly."""
1857 1859 def dec(func):
1858 1860 assert stepname not in getbundle2partsmapping
1859 1861 getbundle2partsmapping[stepname] = func
1860 1862 if idx is None:
1861 1863 getbundle2partsorder.append(stepname)
1862 1864 else:
1863 1865 getbundle2partsorder.insert(idx, stepname)
1864 1866 return func
1865 1867 return dec
1866 1868
1867 1869 def bundle2requested(bundlecaps):
1868 1870 if bundlecaps is not None:
1869 1871 return any(cap.startswith('HG2') for cap in bundlecaps)
1870 1872 return False
1871 1873
1872 1874 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
1873 1875 **kwargs):
1874 1876 """Return chunks constituting a bundle's raw data.
1875 1877
1876 1878 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1877 1879 passed.
1878 1880
1879 1881 Returns a 2-tuple of a dict with metadata about the generated bundle
1880 1882 and an iterator over raw chunks (of varying sizes).
1881 1883 """
1882 1884 kwargs = pycompat.byteskwargs(kwargs)
1883 1885 info = {}
1884 1886 usebundle2 = bundle2requested(bundlecaps)
1885 1887 # bundle10 case
1886 1888 if not usebundle2:
1887 1889 if bundlecaps and not kwargs.get('cg', True):
1888 1890 raise ValueError(_('request for bundle10 must include changegroup'))
1889 1891
1890 1892 if kwargs:
1891 1893 raise ValueError(_('unsupported getbundle arguments: %s')
1892 1894 % ', '.join(sorted(kwargs.keys())))
1893 1895 outgoing = _computeoutgoing(repo, heads, common)
1894 1896 info['bundleversion'] = 1
1895 1897 return info, changegroup.makestream(repo, outgoing, '01', source,
1896 1898 bundlecaps=bundlecaps)
1897 1899
1898 1900 # bundle20 case
1899 1901 info['bundleversion'] = 2
1900 1902 b2caps = {}
1901 1903 for bcaps in bundlecaps:
1902 1904 if bcaps.startswith('bundle2='):
1903 1905 blob = urlreq.unquote(bcaps[len('bundle2='):])
1904 1906 b2caps.update(bundle2.decodecaps(blob))
1905 1907 bundler = bundle2.bundle20(repo.ui, b2caps)
1906 1908
1907 1909 kwargs['heads'] = heads
1908 1910 kwargs['common'] = common
1909 1911
1910 1912 for name in getbundle2partsorder:
1911 1913 func = getbundle2partsmapping[name]
1912 1914 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1913 1915 **pycompat.strkwargs(kwargs))
1914 1916
1915 1917 info['prefercompressed'] = bundler.prefercompressed
1916 1918
1917 1919 return info, bundler.getchunks()
1918 1920
1919 1921 @getbundle2partsgenerator('stream2')
1920 1922 def _getbundlestream2(bundler, repo, *args, **kwargs):
1921 1923 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
1922 1924
1923 1925 @getbundle2partsgenerator('changegroup')
1924 1926 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1925 1927 b2caps=None, heads=None, common=None, **kwargs):
1926 1928 """add a changegroup part to the requested bundle"""
1927 1929 cgstream = None
1928 1930 if kwargs.get(r'cg', True):
1929 1931 # build changegroup bundle here.
1930 1932 version = '01'
1931 1933 cgversions = b2caps.get('changegroup')
1932 1934 if cgversions: # 3.1 and 3.2 ship with an empty value
1933 1935 cgversions = [v for v in cgversions
1934 1936 if v in changegroup.supportedoutgoingversions(repo)]
1935 1937 if not cgversions:
1936 1938 raise ValueError(_('no common changegroup version'))
1937 1939 version = max(cgversions)
1938 1940 outgoing = _computeoutgoing(repo, heads, common)
1939 1941 if outgoing.missing:
1940 1942 cgstream = changegroup.makestream(repo, outgoing, version, source,
1941 1943 bundlecaps=bundlecaps)
1942 1944
1943 1945 if cgstream:
1944 1946 part = bundler.newpart('changegroup', data=cgstream)
1945 1947 if cgversions:
1946 1948 part.addparam('version', version)
1947 1949 part.addparam('nbchanges', '%d' % len(outgoing.missing),
1948 1950 mandatory=False)
1949 1951 if 'treemanifest' in repo.requirements:
1950 1952 part.addparam('treemanifest', '1')
1951 1953
1952 1954 @getbundle2partsgenerator('bookmarks')
1953 1955 def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
1954 1956 b2caps=None, **kwargs):
1955 1957 """add a bookmark part to the requested bundle"""
1956 1958 if not kwargs.get(r'bookmarks', False):
1957 1959 return
1958 1960 if 'bookmarks' not in b2caps:
1959 1961 raise ValueError(_('no common bookmarks exchange method'))
1960 1962 books = bookmod.listbinbookmarks(repo)
1961 1963 data = bookmod.binaryencode(books)
1962 1964 if data:
1963 1965 bundler.newpart('bookmarks', data=data)
1964 1966
1965 1967 @getbundle2partsgenerator('listkeys')
1966 1968 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1967 1969 b2caps=None, **kwargs):
1968 1970 """add parts containing listkeys namespaces to the requested bundle"""
1969 1971 listkeys = kwargs.get(r'listkeys', ())
1970 1972 for namespace in listkeys:
1971 1973 part = bundler.newpart('listkeys')
1972 1974 part.addparam('namespace', namespace)
1973 1975 keys = repo.listkeys(namespace).items()
1974 1976 part.data = pushkey.encodekeys(keys)
1975 1977
1976 1978 @getbundle2partsgenerator('obsmarkers')
1977 1979 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1978 1980 b2caps=None, heads=None, **kwargs):
1979 1981 """add an obsolescence markers part to the requested bundle"""
1980 1982 if kwargs.get(r'obsmarkers', False):
1981 1983 if heads is None:
1982 1984 heads = repo.heads()
1983 1985 subset = [c.node() for c in repo.set('::%ln', heads)]
1984 1986 markers = repo.obsstore.relevantmarkers(subset)
1985 1987 markers = sorted(markers)
1986 1988 bundle2.buildobsmarkerspart(bundler, markers)
1987 1989
1988 1990 @getbundle2partsgenerator('phases')
1989 1991 def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
1990 1992 b2caps=None, heads=None, **kwargs):
1991 1993 """add phase heads part to the requested bundle"""
1992 1994 if kwargs.get(r'phases', False):
1993 1995 if 'heads' not in b2caps.get('phases', ()):
1994 1996 raise ValueError(_('no common phases exchange method'))
1995 1997 if heads is None:
1996 1998 heads = repo.heads()
1997 1999
1998 2000 headsbyphase = collections.defaultdict(set)
1999 2001 if repo.publishing():
2000 2002 headsbyphase[phases.public] = heads
2001 2003 else:
2002 2004 # find the appropriate heads to move
2003 2005
2004 2006 phase = repo._phasecache.phase
2005 2007 node = repo.changelog.node
2006 2008 rev = repo.changelog.rev
2007 2009 for h in heads:
2008 2010 headsbyphase[phase(repo, rev(h))].add(h)
2009 2011 seenphases = list(headsbyphase.keys())
2010 2012
2011 2013 # We do not handle anything but public and draft phases for now
2012 2014 if seenphases:
2013 2015 assert max(seenphases) <= phases.draft
2014 2016
2015 2017 # if client is pulling non-public changesets, we need to find
2016 2018 # intermediate public heads.
2017 2019 draftheads = headsbyphase.get(phases.draft, set())
2018 2020 if draftheads:
2019 2021 publicheads = headsbyphase.get(phases.public, set())
2020 2022
2021 2023 revset = 'heads(only(%ln, %ln) and public())'
2022 2024 extraheads = repo.revs(revset, draftheads, publicheads)
2023 2025 for r in extraheads:
2024 2026 headsbyphase[phases.public].add(node(r))
2025 2027
2026 2028 # transform data in a format used by the encoding function
2027 2029 phasemapping = []
2028 2030 for phase in phases.allphases:
2029 2031 phasemapping.append(sorted(headsbyphase[phase]))
2030 2032
2031 2033 # generate the actual part
2032 2034 phasedata = phases.binaryencode(phasemapping)
2033 2035 bundler.newpart('phase-heads', data=phasedata)
2034 2036
2035 2037 @getbundle2partsgenerator('hgtagsfnodes')
2036 2038 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
2037 2039 b2caps=None, heads=None, common=None,
2038 2040 **kwargs):
2039 2041 """Transfer the .hgtags filenodes mapping.
2040 2042
2041 2043 Only values for heads in this bundle will be transferred.
2042 2044
2043 2045 The part data consists of pairs of 20 byte changeset node and .hgtags
2044 2046 filenodes raw values.
2045 2047 """
2046 2048 # Don't send unless:
2047 2049 # - changesets are being exchanged,
2048 2050 # - the client supports it.
2049 2051 if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
2050 2052 return
2051 2053
2052 2054 outgoing = _computeoutgoing(repo, heads, common)
2053 2055 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2054 2056
2055 2057 @getbundle2partsgenerator('cache:rev-branch-cache')
2056 2058 def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
2057 2059 b2caps=None, heads=None, common=None,
2058 2060 **kwargs):
2059 2061 """Transfer the rev-branch-cache mapping
2060 2062
2061 2063 The payload is a series of data related to each branch
2062 2064
2063 2065 1) branch name length
2064 2066 2) number of open heads
2065 2067 3) number of closed heads
2066 2068 4) open heads nodes
2067 2069 5) closed heads nodes
2068 2070 """
2069 2071 # Don't send unless:
2070 2072 # - changesets are being exchanged,
2071 2073 # - the client supports it.
2072 if not (kwargs.get(r'cg', True)) or 'rev-branch-cache' not in b2caps:
2074 # - narrow bundle isn't in play (not currently compatible).
2075 if (not kwargs.get(r'cg', True)
2076 or 'rev-branch-cache' not in b2caps
2077 or kwargs.get(r'narrow', False)
2078 or repo.ui.has_section(_NARROWACL_SECTION)):
2073 2079 return
2080
2074 2081 outgoing = _computeoutgoing(repo, heads, common)
2075 2082 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2076 2083
2077 2084 def check_heads(repo, their_heads, context):
2078 2085 """check if the heads of a repo have been modified
2079 2086
2080 2087 Used by peer for unbundling.
2081 2088 """
2082 2089 heads = repo.heads()
2083 2090 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
2084 2091 if not (their_heads == ['force'] or their_heads == heads or
2085 2092 their_heads == ['hashed', heads_hash]):
2086 2093 # someone else committed/pushed/unbundled while we
2087 2094 # were transferring data
2088 2095 raise error.PushRaced('repository changed while %s - '
2089 2096 'please try again' % context)
2090 2097
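# Hedged sketch (standalone, illustrative name): computing the 'hashed'
# heads token that check_heads() above compares against.
import hashlib

def hashheads(heads):
    """sha1 digest of the concatenation of the sorted binary head nodes"""
    return hashlib.sha1(''.join(sorted(heads))).digest()

# A client may send ['hashed', hashheads(observedheads)] instead of the
# raw head list; any head change on the server then breaks the match.
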
2091 2098 def unbundle(repo, cg, heads, source, url):
2092 2099 """Apply a bundle to a repo.
2093 2100
2094 2101 This function makes sure the repo is locked during the application and has
2095 2102 a mechanism to check that no push race occurred between the creation of the
2096 2103 bundle and its application.
2097 2104
2098 2105 If the push was raced, a PushRaced exception is raised."""
2099 2106 r = 0
2100 2107 # need a transaction when processing a bundle2 stream
2101 2108 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2102 2109 lockandtr = [None, None, None]
2103 2110 recordout = None
2104 2111 # quick fix for output mismatch with bundle2 in 3.4
2105 2112 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
2106 2113 if url.startswith('remote:http:') or url.startswith('remote:https:'):
2107 2114 captureoutput = True
2108 2115 try:
2109 2116 # note: outside bundle1, 'heads' is expected to be empty and this
2110 2117 # 'check_heads' call will be a no-op
2111 2118 check_heads(repo, heads, 'uploading changes')
2112 2119 # push can proceed
2113 2120 if not isinstance(cg, bundle2.unbundle20):
2114 2121 # legacy case: bundle1 (changegroup 01)
2115 2122 txnname = "\n".join([source, util.hidepassword(url)])
2116 2123 with repo.lock(), repo.transaction(txnname) as tr:
2117 2124 op = bundle2.applybundle(repo, cg, tr, source, url)
2118 2125 r = bundle2.combinechangegroupresults(op)
2119 2126 else:
2120 2127 r = None
2121 2128 try:
2122 2129 def gettransaction():
2123 2130 if not lockandtr[2]:
2124 2131 lockandtr[0] = repo.wlock()
2125 2132 lockandtr[1] = repo.lock()
2126 2133 lockandtr[2] = repo.transaction(source)
2127 2134 lockandtr[2].hookargs['source'] = source
2128 2135 lockandtr[2].hookargs['url'] = url
2129 2136 lockandtr[2].hookargs['bundle2'] = '1'
2130 2137 return lockandtr[2]
2131 2138
2132 2139 # Do greedy locking by default until we're satisfied with lazy
2133 2140 # locking.
2134 2141 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
2135 2142 gettransaction()
2136 2143
2137 2144 op = bundle2.bundleoperation(repo, gettransaction,
2138 2145 captureoutput=captureoutput,
2139 2146 source='push')
2140 2147 try:
2141 2148 op = bundle2.processbundle(repo, cg, op=op)
2142 2149 finally:
2143 2150 r = op.reply
2144 2151 if captureoutput and r is not None:
2145 2152 repo.ui.pushbuffer(error=True, subproc=True)
2146 2153 def recordout(output):
2147 2154 r.newpart('output', data=output, mandatory=False)
2148 2155 if lockandtr[2] is not None:
2149 2156 lockandtr[2].close()
2150 2157 except BaseException as exc:
2151 2158 exc.duringunbundle2 = True
2152 2159 if captureoutput and r is not None:
2153 2160 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2154 2161 def recordout(output):
2155 2162 part = bundle2.bundlepart('output', data=output,
2156 2163 mandatory=False)
2157 2164 parts.append(part)
2158 2165 raise
2159 2166 finally:
2160 2167 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2161 2168 if recordout is not None:
2162 2169 recordout(repo.ui.popbuffer())
2163 2170 return r
2164 2171
2165 2172 def _maybeapplyclonebundle(pullop):
2166 2173 """Apply a clone bundle from a remote, if possible."""
2167 2174
2168 2175 repo = pullop.repo
2169 2176 remote = pullop.remote
2170 2177
2171 2178 if not repo.ui.configbool('ui', 'clonebundles'):
2172 2179 return
2173 2180
2174 2181 # Only run if local repo is empty.
2175 2182 if len(repo):
2176 2183 return
2177 2184
2178 2185 if pullop.heads:
2179 2186 return
2180 2187
2181 2188 if not remote.capable('clonebundles'):
2182 2189 return
2183 2190
2184 2191 with remote.commandexecutor() as e:
2185 2192 res = e.callcommand('clonebundles', {}).result()
2186 2193
2187 2194 # If we call the wire protocol command, that's good enough to record the
2188 2195 # attempt.
2189 2196 pullop.clonebundleattempted = True
2190 2197
2191 2198 entries = parseclonebundlesmanifest(repo, res)
2192 2199 if not entries:
2193 2200 repo.ui.note(_('no clone bundles available on remote; '
2194 2201 'falling back to regular clone\n'))
2195 2202 return
2196 2203
2197 2204 entries = filterclonebundleentries(
2198 2205 repo, entries, streamclonerequested=pullop.streamclonerequested)
2199 2206
2200 2207 if not entries:
2201 2208 # There is a thundering herd concern here. However, if a server
2202 2209 # operator doesn't advertise bundles appropriate for its clients,
2203 2210 # they deserve what's coming. Furthermore, from a client's
2204 2211 # perspective, no automatic fallback would mean not being able to
2205 2212 # clone!
2206 2213 repo.ui.warn(_('no compatible clone bundles available on server; '
2207 2214 'falling back to regular clone\n'))
2208 2215 repo.ui.warn(_('(you may want to report this to the server '
2209 2216 'operator)\n'))
2210 2217 return
2211 2218
2212 2219 entries = sortclonebundleentries(repo.ui, entries)
2213 2220
2214 2221 url = entries[0]['URL']
2215 2222 repo.ui.status(_('applying clone bundle from %s\n') % url)
2216 2223 if trypullbundlefromurl(repo.ui, repo, url):
2217 2224 repo.ui.status(_('finished applying clone bundle\n'))
2218 2225 # Bundle failed.
2219 2226 #
2220 2227 # We abort by default to avoid the thundering herd of
2221 2228 # clients flooding a server that was expecting expensive
2222 2229 # clone load to be offloaded.
2223 2230 elif repo.ui.configbool('ui', 'clonebundlefallback'):
2224 2231 repo.ui.warn(_('falling back to normal clone\n'))
2225 2232 else:
2226 2233 raise error.Abort(_('error applying bundle'),
2227 2234 hint=_('if this error persists, consider contacting '
2228 2235 'the server operator or disable clone '
2229 2236 'bundles via '
2230 2237 '"--config ui.clonebundles=false"'))
2231 2238
2232 2239 def parseclonebundlesmanifest(repo, s):
2233 2240 """Parses the raw text of a clone bundles manifest.
2234 2241
2235 2242 Returns a list of dicts. The dicts have a ``URL`` key corresponding
2236 2243 to the URL and other keys are the attributes for the entry.
2237 2244 """
2238 2245 m = []
2239 2246 for line in s.splitlines():
2240 2247 fields = line.split()
2241 2248 if not fields:
2242 2249 continue
2243 2250 attrs = {'URL': fields[0]}
2244 2251 for rawattr in fields[1:]:
2245 2252 key, value = rawattr.split('=', 1)
2246 2253 key = urlreq.unquote(key)
2247 2254 value = urlreq.unquote(value)
2248 2255 attrs[key] = value
2249 2256
2250 2257 # Parse BUNDLESPEC into components. This makes client-side
2251 2258 # preferences easier to specify since you can prefer a single
2252 2259 # component of the BUNDLESPEC.
2253 2260 if key == 'BUNDLESPEC':
2254 2261 try:
2255 2262 bundlespec = parsebundlespec(repo, value)
2256 2263 attrs['COMPRESSION'] = bundlespec.compression
2257 2264 attrs['VERSION'] = bundlespec.version
2258 2265 except error.InvalidBundleSpecification:
2259 2266 pass
2260 2267 except error.UnsupportedBundleSpecification:
2261 2268 pass
2262 2269
2263 2270 m.append(attrs)
2264 2271
2265 2272 return m
2266 2273
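# Hedged sketch (standalone; illustrative name and made-up URL): the
# per-line attribute split done above, without the urlreq unquoting and
# BUNDLESPEC expansion.
def parsemanifestline(line):
    """Split 'URL KEY=VALUE ...' into the dict shape used above."""
    fields = line.split()
    attrs = {'URL': fields[0]}
    for rawattr in fields[1:]:
        key, value = rawattr.split('=', 1)
        attrs[key] = value
    return attrs

# parsemanifestline('https://example.com/full.hg BUNDLESPEC=gzip-v2')
# -> {'URL': 'https://example.com/full.hg', 'BUNDLESPEC': 'gzip-v2'}
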
2267 2274 def isstreamclonespec(bundlespec):
2268 2275 # Stream clone v1
2269 2276 if (bundlespec.wirecompression == 'UN' and bundlespec.wireversion == 's1'):
2270 2277 return True
2271 2278
2272 2279 # Stream clone v2
2273 2280 if (bundlespec.wirecompression == 'UN' and \
2274 2281 bundlespec.wireversion == '02' and \
2275 2282 bundlespec.contentopts.get('streamv2')):
2276 2283 return True
2277 2284
2278 2285 return False
2279 2286
2280 2287 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2281 2288 """Remove incompatible clone bundle manifest entries.
2282 2289
2283 2290 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2284 2291 and returns a new list consisting of only the entries that this client
2285 2292 should be able to apply.
2286 2293
2287 2294 There is no guarantee we'll be able to apply all returned entries because
2288 2295 the metadata we use to filter on may be missing or wrong.
2289 2296 """
2290 2297 newentries = []
2291 2298 for entry in entries:
2292 2299 spec = entry.get('BUNDLESPEC')
2293 2300 if spec:
2294 2301 try:
2295 2302 bundlespec = parsebundlespec(repo, spec, strict=True)
2296 2303
2297 2304 # If a stream clone was requested, filter out non-streamclone
2298 2305 # entries.
2299 2306 if streamclonerequested and not isstreamclonespec(bundlespec):
2300 2307 repo.ui.debug('filtering %s because not a stream clone\n' %
2301 2308 entry['URL'])
2302 2309 continue
2303 2310
2304 2311 except error.InvalidBundleSpecification as e:
2305 2312 repo.ui.debug(stringutil.forcebytestr(e) + '\n')
2306 2313 continue
2307 2314 except error.UnsupportedBundleSpecification as e:
2308 2315 repo.ui.debug('filtering %s because unsupported bundle '
2309 2316 'spec: %s\n' % (
2310 2317 entry['URL'], stringutil.forcebytestr(e)))
2311 2318 continue
2312 2319 # If we don't have a spec and requested a stream clone, we don't know
2313 2320 # what the entry is so don't attempt to apply it.
2314 2321 elif streamclonerequested:
2315 2322 repo.ui.debug('filtering %s because cannot determine if a stream '
2316 2323 'clone bundle\n' % entry['URL'])
2317 2324 continue
2318 2325
2319 2326 if 'REQUIRESNI' in entry and not sslutil.hassni:
2320 2327 repo.ui.debug('filtering %s because SNI not supported\n' %
2321 2328 entry['URL'])
2322 2329 continue
2323 2330
2324 2331 newentries.append(entry)
2325 2332
2326 2333 return newentries
2327 2334
2328 2335 class clonebundleentry(object):
2329 2336 """Represents an item in a clone bundles manifest.
2330 2337
2331 2338 This rich class is needed to support sorting since sorted() in Python 3
2332 2339 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
2333 2340 won't work.
2334 2341 """
2335 2342
2336 2343 def __init__(self, value, prefers):
2337 2344 self.value = value
2338 2345 self.prefers = prefers
2339 2346
2340 2347 def _cmp(self, other):
2341 2348 for prefkey, prefvalue in self.prefers:
2342 2349 avalue = self.value.get(prefkey)
2343 2350 bvalue = other.value.get(prefkey)
2344 2351
2345 2352 # Special case: b is missing the attribute and a matches exactly.
2346 2353 if avalue is not None and bvalue is None and avalue == prefvalue:
2347 2354 return -1
2348 2355
2349 2356 # Special case: a is missing the attribute and b matches exactly.
2350 2357 if bvalue is not None and avalue is None and bvalue == prefvalue:
2351 2358 return 1
2352 2359
2353 2360 # We can't compare unless the attribute is present on both.
2354 2361 if avalue is None or bvalue is None:
2355 2362 continue
2356 2363
2357 2364 # Same values should fall back to next attribute.
2358 2365 if avalue == bvalue:
2359 2366 continue
2360 2367
2361 2368 # Exact matches come first.
2362 2369 if avalue == prefvalue:
2363 2370 return -1
2364 2371 if bvalue == prefvalue:
2365 2372 return 1
2366 2373
2367 2374 # Fall back to next attribute.
2368 2375 continue
2369 2376
2370 2377 # If we got here we couldn't sort by attributes and prefers. Fall
2371 2378 # back to index order.
2372 2379 return 0
2373 2380
2374 2381 def __lt__(self, other):
2375 2382 return self._cmp(other) < 0
2376 2383
2377 2384 def __gt__(self, other):
2378 2385 return self._cmp(other) > 0
2379 2386
2380 2387 def __eq__(self, other):
2381 2388 return self._cmp(other) == 0
2382 2389
2383 2390 def __le__(self, other):
2384 2391 return self._cmp(other) <= 0
2385 2392
2386 2393 def __ge__(self, other):
2387 2394 return self._cmp(other) >= 0
2388 2395
2389 2396 def __ne__(self, other):
2390 2397 return self._cmp(other) != 0
2391 2398
2392 2399 def sortclonebundleentries(ui, entries):
2393 2400 prefers = ui.configlist('ui', 'clonebundleprefers')
2394 2401 if not prefers:
2395 2402 return list(entries)
2396 2403
2397 2404 prefers = [p.split('=', 1) for p in prefers]
2398 2405
2399 2406 items = sorted(clonebundleentry(v, prefers) for v in entries)
2400 2407 return [i.value for i in items]
2401 2408
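# Hedged demo (made-up entries): how ui.clonebundleprefers drives the
# ordering above. Preferences are tested in order, so the COMPRESSION
# match decides before VERSION is even consulted here.
def _demoprefersort():
    prefers = [('COMPRESSION', 'zstd'), ('VERSION', 'v2')]
    entries = [{'URL': 'a', 'COMPRESSION': 'gzip', 'VERSION': 'v2'},
               {'URL': 'b', 'COMPRESSION': 'zstd', 'VERSION': 'v1'}]
    items = sorted(clonebundleentry(v, prefers) for v in entries)
    return [i.value['URL'] for i in items]  # -> ['b', 'a']
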
2402 2409 def trypullbundlefromurl(ui, repo, url):
2403 2410 """Attempt to apply a bundle from a URL."""
2404 2411 with repo.lock(), repo.transaction('bundleurl') as tr:
2405 2412 try:
2406 2413 fh = urlmod.open(ui, url)
2407 2414 cg = readbundle(ui, fh, 'stream')
2408 2415
2409 2416 if isinstance(cg, streamclone.streamcloneapplier):
2410 2417 cg.apply(repo)
2411 2418 else:
2412 2419 bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
2413 2420 return True
2414 2421 except urlerr.httperror as e:
2415 2422 ui.warn(_('HTTP error fetching bundle: %s\n') %
2416 2423 stringutil.forcebytestr(e))
2417 2424 except urlerr.urlerror as e:
2418 2425 ui.warn(_('error fetching bundle: %s\n') %
2419 2426 stringutil.forcebytestr(e.reason))
2420 2427
2421 2428 return False