exchange: move narrow acl functionality into core...
Gregory Szorc
r38826:3e738733 default
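This commit removes the narrow extension's private ACL helper (applyacl_narrow, deleted below) in favor of exchange.applynarrowacl in core; the wire behavior is unchanged. The ACL is driven by a server-side config section named by _NARROWACL_SECTION ('narrowhgacl'): per-user include/exclude path lists, with default.* keys as the fallback and '*' standing for the whole repository (it is rewritten to 'path:.'). A hypothetical server-side hgrc, with illustrative user and path names:

    [narrowhgacl]
    # fallback for users with no entry of their own; '*' means the whole repo
    default.includes = *
    # user "alice" (resolved from REMOTE_USER) may only see these trees
    alice.includes = src/lib docs
    alice.excludes = src/lib/secret

The client's requested includepats/excludepats are then narrowed to the user's allowed patterns via narrowspec.restrictpatterns, and the request is aborted if any requested include is not accessible.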
@@ -1,497 +1,461 @@
1 1 # narrowbundle2.py - bundle2 extensions for narrow repository support
2 2 #
3 3 # Copyright 2017 Google, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import errno
12 12 import struct
13 13
14 14 from mercurial.i18n import _
15 15 from mercurial.node import (
16 16 bin,
17 17 nullid,
18 18 nullrev,
19 19 )
20 20 from mercurial import (
21 21 bundle2,
22 22 changegroup,
23 23 dagutil,
24 24 error,
25 25 exchange,
26 26 extensions,
27 27 narrowspec,
28 28 repair,
29 29 util,
30 30 wireprototypes,
31 31 )
32 32 from mercurial.utils import (
33 33 stringutil,
34 34 )
35 35
36 36 NARROWCAP = 'narrow'
37 37 _NARROWACL_SECTION = 'narrowhgacl'
38 38 _CHANGESPECPART = NARROWCAP + ':changespec'
39 39 _SPECPART = NARROWCAP + ':spec'
40 40 _SPECPART_INCLUDE = 'include'
41 41 _SPECPART_EXCLUDE = 'exclude'
42 42 _KILLNODESIGNAL = 'KILL'
43 43 _DONESIGNAL = 'DONE'
44 44 _ELIDEDCSHEADER = '>20s20s20sl' # cset id, p1, p2, len(text)
45 45 _ELIDEDMFHEADER = '>20s20s20s20sl' # manifest id, p1, p2, link id, len(text)
46 46 _CSHEADERSIZE = struct.calcsize(_ELIDEDCSHEADER)
47 47 _MFHEADERSIZE = struct.calcsize(_ELIDEDMFHEADER)
48 48
49 49 # When advertising capabilities, always include narrow clone support.
50 50 def getrepocaps_narrow(orig, repo, **kwargs):
51 51 caps = orig(repo, **kwargs)
52 52 caps[NARROWCAP] = ['v0']
53 53 return caps
54 54
55 55 def _computeellipsis(repo, common, heads, known, match, depth=None):
56 56 """Compute the shape of a narrowed DAG.
57 57
58 58 Args:
59 59 repo: The repository we're transferring.
60 60 common: The roots of the DAG range we're transferring.
61 61 May be just [nullid], which means all ancestors of heads.
62 62 heads: The heads of the DAG range we're transferring.
63 63 match: The narrowmatcher that allows us to identify relevant changes.
64 64 depth: If not None, only consider nodes to be full nodes if they are at
65 65 most depth changesets away from one of heads.
66 66
67 67 Returns:
68 68 A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
69 69
70 70 visitnodes: The list of nodes (either full or ellipsis) which
71 71 need to be sent to the client.
72 72 relevant_nodes: The set of changelog nodes which change a file inside
73 73 the narrowspec. The client needs these as non-ellipsis nodes.
74 74 ellipsisroots: A dict of {rev: parents} that is used in
75 75 narrowchangegroup to produce ellipsis nodes with the
76 76 correct parents.
77 77 """
78 78 cl = repo.changelog
79 79 mfl = repo.manifestlog
80 80
81 81 cldag = dagutil.revlogdag(cl)
82 82 # dagutil does not like nullid/nullrev
83 83 commonrevs = cldag.internalizeall(common - set([nullid])) | set([nullrev])
84 84 headsrevs = cldag.internalizeall(heads)
85 85 if depth:
86 86 revdepth = {h: 0 for h in headsrevs}
87 87
88 88 ellipsisheads = collections.defaultdict(set)
89 89 ellipsisroots = collections.defaultdict(set)
90 90
91 91 def addroot(head, curchange):
92 92 """Add a root to an ellipsis head, splitting heads with 3 roots."""
93 93 ellipsisroots[head].add(curchange)
94 94 # Recursively split ellipsis heads with 3 roots by finding the
95 95 # roots' youngest common descendant which is an elided merge commit.
96 96 # That descendant takes 2 of the 3 roots as its own, and becomes a
97 97 # root of the head.
98 98 while len(ellipsisroots[head]) > 2:
99 99 child, roots = splithead(head)
100 100 splitroots(head, child, roots)
101 101 head = child # Recurse in case we just added a 3rd root
102 102
103 103 def splitroots(head, child, roots):
104 104 ellipsisroots[head].difference_update(roots)
105 105 ellipsisroots[head].add(child)
106 106 ellipsisroots[child].update(roots)
107 107 ellipsisroots[child].discard(child)
108 108
109 109 def splithead(head):
110 110 r1, r2, r3 = sorted(ellipsisroots[head])
111 111 for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
112 112 mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
113 113 nr1, head, nr2, head)
114 114 for j in mid:
115 115 if j == nr2:
116 116 return nr2, (nr1, nr2)
117 117 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
118 118 return j, (nr1, nr2)
119 119 raise error.Abort('Failed to split up ellipsis node! head: %d, '
120 120 'roots: %d %d %d' % (head, r1, r2, r3))
121 121
122 122 missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
123 123 visit = reversed(missing)
124 124 relevant_nodes = set()
125 125 visitnodes = [cl.node(m) for m in missing]
126 126 required = set(headsrevs) | known
127 127 for rev in visit:
128 128 clrev = cl.changelogrevision(rev)
129 129 ps = cldag.parents(rev)
130 130 if depth is not None:
131 131 curdepth = revdepth[rev]
132 132 for p in ps:
133 133 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
134 134 needed = False
135 135 shallow_enough = depth is None or revdepth[rev] <= depth
136 136 if shallow_enough:
137 137 curmf = mfl[clrev.manifest].read()
138 138 if ps:
139 139 # We choose to not trust the changed files list in
140 140 # changesets because it's not always correct. TODO: could
141 141 # we trust it for the non-merge case?
142 142 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
143 143 needed = bool(curmf.diff(p1mf, match))
144 144 if not needed and len(ps) > 1:
145 145 # For merge changes, the list of changed files is not
146 146 # helpful, since we need to emit the merge if a file
147 147 # in the narrow spec has changed on either side of the
148 148 # merge. As a result, we do a manifest diff to check.
149 149 p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
150 150 needed = bool(curmf.diff(p2mf, match))
151 151 else:
152 152 # For a root node, we need to include the node if any
153 153 # files in the node match the narrowspec.
154 154 needed = any(curmf.walk(match))
155 155
156 156 if needed:
157 157 for head in ellipsisheads[rev]:
158 158 addroot(head, rev)
159 159 for p in ps:
160 160 required.add(p)
161 161 relevant_nodes.add(cl.node(rev))
162 162 else:
163 163 if not ps:
164 164 ps = [nullrev]
165 165 if rev in required:
166 166 for head in ellipsisheads[rev]:
167 167 addroot(head, rev)
168 168 for p in ps:
169 169 ellipsisheads[p].add(rev)
170 170 else:
171 171 for p in ps:
172 172 ellipsisheads[p] |= ellipsisheads[rev]
173 173
174 174 # add common changesets as roots of their reachable ellipsis heads
175 175 for c in commonrevs:
176 176 for head in ellipsisheads[c]:
177 177 addroot(head, c)
178 178 return visitnodes, relevant_nodes, ellipsisroots
179 179
180 180 def _packellipsischangegroup(repo, common, match, relevant_nodes,
181 181 ellipsisroots, visitnodes, depth, source, version):
182 182 if version in ('01', '02'):
183 183 raise error.Abort(
184 184 'ellipsis nodes require at least cg3 on client and server, '
185 185 'but negotiated version %s' % version)
186 186 # We wrap cg1packer.revchunk, using a side channel to pass
187 187 # relevant_nodes into that area. Then if linknode isn't in the
188 188 # set, we know we have an ellipsis node and we should defer
189 189 # sending that node's data. We override close() to detect
190 190 # pending ellipsis nodes and flush them.
191 191 packer = changegroup.getbundler(version, repo)
192 192 # Let the packer have access to the narrow matcher so it can
193 193 # omit filelogs and dirlogs as needed
194 194 packer._narrow_matcher = lambda : match
195 195 # Give the packer the list of nodes which should not be
196 196 # ellipsis nodes. We store this rather than the set of nodes
197 197 # that should be an ellipsis because for very large histories
198 198 # we expect this to be significantly smaller.
199 199 packer.full_nodes = relevant_nodes
200 200 # Maps ellipsis revs to their roots at the changelog level.
201 201 packer.precomputed_ellipsis = ellipsisroots
202 202 # Maps CL revs to per-revlog revisions. Cleared in close() at
203 203 # the end of each group.
204 204 packer.clrev_to_localrev = {}
205 205 packer.next_clrev_to_localrev = {}
206 206 # Maps changelog nodes to changelog revs. Filled in once
207 207 # during changelog stage and then left unmodified.
208 208 packer.clnode_to_rev = {}
209 209 packer.changelog_done = False
210 210 # If true, informs the packer that it is serving shallow content and might
211 211 # need to pack file contents not introduced by the changes being packed.
212 212 packer.is_shallow = depth is not None
213 213
214 214 return packer.generate(common, visitnodes, False, source)
215 215
216 216 # Serve a changegroup for a client with a narrow clone.
217 217 def getbundlechangegrouppart_narrow(bundler, repo, source,
218 218 bundlecaps=None, b2caps=None, heads=None,
219 219 common=None, **kwargs):
220 220 cgversions = b2caps.get('changegroup')
221 221 if cgversions: # 3.1 and 3.2 ship with an empty value
222 222 cgversions = [v for v in cgversions
223 223 if v in changegroup.supportedoutgoingversions(repo)]
224 224 if not cgversions:
225 225 raise ValueError(_('no common changegroup version'))
226 226 version = max(cgversions)
227 227 else:
228 228 raise ValueError(_("server does not advertise changegroup version,"
229 229 " can't negotiate support for ellipsis nodes"))
230 230
231 231 include = sorted(filter(bool, kwargs.get(r'includepats', [])))
232 232 exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
233 233 newmatch = narrowspec.match(repo.root, include=include, exclude=exclude)
234 234 if not repo.ui.configbool("experimental", "narrowservebrokenellipses"):
235 235 outgoing = exchange._computeoutgoing(repo, heads, common)
236 236 if not outgoing.missing:
237 237 return
238 238 def wrappedgetbundler(orig, *args, **kwargs):
239 239 bundler = orig(*args, **kwargs)
240 240 bundler._narrow_matcher = lambda : newmatch
241 241 return bundler
242 242 with extensions.wrappedfunction(changegroup, 'getbundler',
243 243 wrappedgetbundler):
244 244 cg = changegroup.makestream(repo, outgoing, version, source)
245 245 part = bundler.newpart('changegroup', data=cg)
246 246 part.addparam('version', version)
247 247 if 'treemanifest' in repo.requirements:
248 248 part.addparam('treemanifest', '1')
249 249
250 250 if include or exclude:
251 251 narrowspecpart = bundler.newpart(_SPECPART)
252 252 if include:
253 253 narrowspecpart.addparam(
254 254 _SPECPART_INCLUDE, '\n'.join(include), mandatory=True)
255 255 if exclude:
256 256 narrowspecpart.addparam(
257 257 _SPECPART_EXCLUDE, '\n'.join(exclude), mandatory=True)
258 258
259 259 return
260 260
261 261 depth = kwargs.get(r'depth', None)
262 262 if depth is not None:
263 263 depth = int(depth)
264 264 if depth < 1:
265 265 raise error.Abort(_('depth must be positive, got %d') % depth)
266 266
267 267 heads = set(heads or repo.heads())
268 268 common = set(common or [nullid])
269 269 oldinclude = sorted(filter(bool, kwargs.get(r'oldincludepats', [])))
270 270 oldexclude = sorted(filter(bool, kwargs.get(r'oldexcludepats', [])))
271 271 known = {bin(n) for n in kwargs.get(r'known', [])}
272 272 if known and (oldinclude != include or oldexclude != exclude):
273 273 # Steps:
274 274 # 1. Send kill for "$known & ::common"
275 275 #
276 276 # 2. Send changegroup for ::common
277 277 #
278 278 # 3. Proceed.
279 279 #
280 280 # In the future, we can send kills for only the specific
281 281 # nodes we know should go away or change shape, and then
282 282 # send a data stream that tells the client something like this:
283 283 #
284 284 # a) apply this changegroup
285 285 # b) apply nodes XXX, YYY, ZZZ that you already have
286 286 # c) goto a
287 287 #
288 288 # until they've built up the full new state.
289 289 # Convert to revnums and intersect with "common". The client should
290 290 # have made it a subset of "common" already, but let's be safe.
291 291 known = set(repo.revs("%ln & ::%ln", known, common))
292 292 # TODO: we could send only roots() of this set, and the
293 293 # list of nodes in common, and the client could work out
294 294 # what to strip, instead of us explicitly sending every
295 295 # single node.
296 296 deadrevs = known
297 297 def genkills():
298 298 for r in deadrevs:
299 299 yield _KILLNODESIGNAL
300 300 yield repo.changelog.node(r)
301 301 yield _DONESIGNAL
302 302 bundler.newpart(_CHANGESPECPART, data=genkills())
303 303 newvisit, newfull, newellipsis = _computeellipsis(
304 304 repo, set(), common, known, newmatch)
305 305 if newvisit:
306 306 cg = _packellipsischangegroup(
307 307 repo, common, newmatch, newfull, newellipsis,
308 308 newvisit, depth, source, version)
309 309 part = bundler.newpart('changegroup', data=cg)
310 310 part.addparam('version', version)
311 311 if 'treemanifest' in repo.requirements:
312 312 part.addparam('treemanifest', '1')
313 313
314 314 visitnodes, relevant_nodes, ellipsisroots = _computeellipsis(
315 315 repo, common, heads, set(), newmatch, depth=depth)
316 316
317 317 repo.ui.debug('Found %d relevant revs\n' % len(relevant_nodes))
318 318 if visitnodes:
319 319 cg = _packellipsischangegroup(
320 320 repo, common, newmatch, relevant_nodes, ellipsisroots,
321 321 visitnodes, depth, source, version)
322 322 part = bundler.newpart('changegroup', data=cg)
323 323 part.addparam('version', version)
324 324 if 'treemanifest' in repo.requirements:
325 325 part.addparam('treemanifest', '1')
326 326
327 def applyacl_narrow(repo, kwargs):
328 ui = repo.ui
329 username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
330 user_includes = ui.configlist(
331 _NARROWACL_SECTION, username + '.includes',
332 ui.configlist(_NARROWACL_SECTION, 'default.includes'))
333 user_excludes = ui.configlist(
334 _NARROWACL_SECTION, username + '.excludes',
335 ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
336 if not user_includes:
337 raise error.Abort(_("{} configuration for user {} is empty")
338 .format(_NARROWACL_SECTION, username))
339
340 user_includes = [
341 'path:.' if p == '*' else 'path:' + p for p in user_includes]
342 user_excludes = [
343 'path:.' if p == '*' else 'path:' + p for p in user_excludes]
344
345 req_includes = set(kwargs.get(r'includepats', []))
346 req_excludes = set(kwargs.get(r'excludepats', []))
347
348 req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
349 req_includes, req_excludes, user_includes, user_excludes)
350
351 if invalid_includes:
352 raise error.Abort(
353 _("The following includes are not accessible for {}: {}")
354 .format(username, invalid_includes))
355
356 new_args = {}
357 new_args.update(kwargs)
358 new_args['includepats'] = req_includes
359 if req_excludes:
360 new_args['excludepats'] = req_excludes
361 return new_args
362
363 327 @bundle2.parthandler(_SPECPART, (_SPECPART_INCLUDE, _SPECPART_EXCLUDE))
364 328 def _handlechangespec_2(op, inpart):
365 329 includepats = set(inpart.params.get(_SPECPART_INCLUDE, '').splitlines())
366 330 excludepats = set(inpart.params.get(_SPECPART_EXCLUDE, '').splitlines())
367 331 if not changegroup.NARROW_REQUIREMENT in op.repo.requirements:
368 332 op.repo.requirements.add(changegroup.NARROW_REQUIREMENT)
369 333 op.repo._writerequirements()
370 334 op.repo.setnarrowpats(includepats, excludepats)
371 335
372 336 @bundle2.parthandler(_CHANGESPECPART)
373 337 def _handlechangespec(op, inpart):
374 338 repo = op.repo
375 339 cl = repo.changelog
376 340
377 341 # changesets which need to be stripped entirely. Either they're no longer
378 342 # needed in the new narrow spec, or the server is sending a replacement
379 343 # in the changegroup part.
380 344 clkills = set()
381 345
382 346 # A changespec part contains all the updates to ellipsis nodes
383 347 # that will happen as a result of widening or narrowing a
384 348 # repo. All the changes that this block encounters are ellipsis
385 349 # nodes or flags to kill an existing ellipsis.
386 350 chunksignal = changegroup.readexactly(inpart, 4)
387 351 while chunksignal != _DONESIGNAL:
388 352 if chunksignal == _KILLNODESIGNAL:
389 353 # a node used to be an ellipsis but isn't anymore
390 354 ck = changegroup.readexactly(inpart, 20)
391 355 if cl.hasnode(ck):
392 356 clkills.add(ck)
393 357 else:
394 358 raise error.Abort(
395 359 _('unexpected changespec node chunk type: %s') % chunksignal)
396 360 chunksignal = changegroup.readexactly(inpart, 4)
397 361
398 362 if clkills:
399 363 # preserve bookmarks that repair.strip() would otherwise strip
400 364 bmstore = repo._bookmarks
401 365 class dummybmstore(dict):
402 366 def applychanges(self, repo, tr, changes):
403 367 pass
404 368 def recordchange(self, tr): # legacy version
405 369 pass
406 370 repo._bookmarks = dummybmstore()
407 371 chgrpfile = repair.strip(op.ui, repo, list(clkills), backup=True,
408 372 topic='widen')
409 373 repo._bookmarks = bmstore
410 374 if chgrpfile:
411 375 op._widen_uninterr = repo.ui.uninterruptable()
412 376 op._widen_uninterr.__enter__()
413 377 # presence of _widen_bundle attribute activates widen handler later
414 378 op._widen_bundle = chgrpfile
415 379 # Set the new narrowspec if we're widening. The setnewnarrowpats() method
416 380 # will currently always be there when using the core+narrowhg server, but
417 381 # other servers may include a changespec part even when not widening (e.g.
418 382 # because we're deepening a shallow repo).
419 383 if util.safehasattr(repo, 'setnewnarrowpats'):
420 384 repo.setnewnarrowpats()
421 385
422 386 def handlechangegroup_widen(op, inpart):
423 387 """Changegroup exchange handler which restores temporarily-stripped nodes"""
424 388 # We saved a bundle with stripped node data we must now restore.
425 389 # This approach is based on mercurial/repair.py@6ee26a53c111.
426 390 repo = op.repo
427 391 ui = op.ui
428 392
429 393 chgrpfile = op._widen_bundle
430 394 del op._widen_bundle
431 395 vfs = repo.vfs
432 396
433 397 ui.note(_("adding branch\n"))
434 398 f = vfs.open(chgrpfile, "rb")
435 399 try:
436 400 gen = exchange.readbundle(ui, f, chgrpfile, vfs)
437 401 if not ui.verbose:
438 402 # silence internal shuffling chatter
439 403 ui.pushbuffer()
440 404 if isinstance(gen, bundle2.unbundle20):
441 405 with repo.transaction('strip') as tr:
442 406 bundle2.processbundle(repo, gen, lambda: tr)
443 407 else:
444 408 gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True)
445 409 if not ui.verbose:
446 410 ui.popbuffer()
447 411 finally:
448 412 f.close()
449 413
450 414 # remove undo files
451 415 for undovfs, undofile in repo.undofiles():
452 416 try:
453 417 undovfs.unlink(undofile)
454 418 except OSError as e:
455 419 if e.errno != errno.ENOENT:
456 420 ui.warn(_('error removing %s: %s\n') %
457 421 (undovfs.join(undofile), stringutil.forcebytestr(e)))
458 422
459 423 # Remove partial backup only if there were no exceptions
460 424 op._widen_uninterr.__exit__(None, None, None)
461 425 vfs.unlink(chgrpfile)
462 426
463 427 def setup():
464 428 """Enable narrow repo support in bundle2-related extension points."""
465 429 extensions.wrapfunction(bundle2, 'getrepocaps', getrepocaps_narrow)
466 430
467 431 getbundleargs = wireprototypes.GETBUNDLE_ARGUMENTS
468 432
469 433 getbundleargs['narrow'] = 'boolean'
470 434 getbundleargs['depth'] = 'plain'
471 435 getbundleargs['oldincludepats'] = 'csv'
472 436 getbundleargs['oldexcludepats'] = 'csv'
473 437 getbundleargs['includepats'] = 'csv'
474 438 getbundleargs['excludepats'] = 'csv'
475 439 getbundleargs['known'] = 'csv'
476 440
477 441 # Extend changegroup serving to handle requests from narrow clients.
478 442 origcgfn = exchange.getbundle2partsmapping['changegroup']
479 443 def wrappedcgfn(*args, **kwargs):
480 444 repo = args[1]
481 445 if repo.ui.has_section(_NARROWACL_SECTION):
482 446 getbundlechangegrouppart_narrow(
483 *args, **applyacl_narrow(repo, kwargs))
447 *args, **exchange.applynarrowacl(repo, kwargs))
484 448 elif kwargs.get(r'narrow', False):
485 449 getbundlechangegrouppart_narrow(*args, **kwargs)
486 450 else:
487 451 origcgfn(*args, **kwargs)
488 452 exchange.getbundle2partsmapping['changegroup'] = wrappedcgfn
489 453
490 454 # Extend changegroup receiver so client can fixup after widen requests.
491 455 origcghandler = bundle2.parthandlermapping['changegroup']
492 456 def wrappedcghandler(op, inpart):
493 457 origcghandler(op, inpart)
494 458 if util.safehasattr(op, '_widen_bundle'):
495 459 handlechangegroup_widen(op, inpart)
496 460 wrappedcghandler.params = origcghandler.params
497 461 bundle2.parthandlermapping['changegroup'] = wrappedcghandler
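The _CHANGESPECPART payload consumed by _handlechangespec above is a flat stream of 4-byte signals: each _KILLNODESIGNAL ('KILL') is followed by a 20-byte changelog node to strip, and _DONESIGNAL ('DONE') terminates the stream (see genkills on the server side). A minimal stand-alone sketch of that framing, not Mercurial's implementation; readexactly here mirrors changegroup.readexactly:

    import io

    _KILLNODESIGNAL = b'KILL'
    _DONESIGNAL = b'DONE'

    def readexactly(stream, n):
        # read exactly n bytes or fail, like changegroup.readexactly
        data = stream.read(n)
        if len(data) != n:
            raise EOFError('stream ended unexpectedly')
        return data

    def parsekills(stream):
        # yield each 20-byte node the server asks the client to strip
        signal = readexactly(stream, 4)
        while signal != _DONESIGNAL:
            if signal != _KILLNODESIGNAL:
                raise ValueError('unexpected chunk type: %r' % signal)
            yield readexactly(stream, 20)
            signal = readexactly(stream, 4)

    # one kill for a dummy node, then the terminator
    payload = _KILLNODESIGNAL + b'\x11' * 20 + _DONESIGNAL
    assert list(parsekills(io.BytesIO(payload))) == [b'\x11' * 20]

On the real client the collected nodes become clkills and are handed to repair.strip() with a dummy bookmark store swapped in, so stripping does not disturb bookmarks.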
@@ -1,2428 +1,2471 @@
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import hashlib
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 bin,
16 16 hex,
17 17 nullid,
18 18 )
19 19 from .thirdparty import (
20 20 attr,
21 21 )
22 22 from . import (
23 23 bookmarks as bookmod,
24 24 bundle2,
25 25 changegroup,
26 26 discovery,
27 27 error,
28 28 lock as lockmod,
29 29 logexchange,
30 narrowspec,
30 31 obsolete,
31 32 phases,
32 33 pushkey,
33 34 pycompat,
34 35 scmutil,
35 36 sslutil,
36 37 streamclone,
37 38 url as urlmod,
38 39 util,
39 40 )
40 41 from .utils import (
41 42 stringutil,
42 43 )
43 44
44 45 urlerr = util.urlerr
45 46 urlreq = util.urlreq
46 47
47 48 _NARROWACL_SECTION = 'narrowhgacl'
48 49
49 50 # Maps bundle version human names to changegroup versions.
50 51 _bundlespeccgversions = {'v1': '01',
51 52 'v2': '02',
52 53 'packed1': 's1',
53 54 'bundle2': '02', #legacy
54 55 }
55 56
56 57 # Maps bundle version with content opts to choose which part to bundle
57 58 _bundlespeccontentopts = {
58 59 'v1': {
59 60 'changegroup': True,
60 61 'cg.version': '01',
61 62 'obsolescence': False,
62 63 'phases': False,
63 64 'tagsfnodescache': False,
64 65 'revbranchcache': False
65 66 },
66 67 'v2': {
67 68 'changegroup': True,
68 69 'cg.version': '02',
69 70 'obsolescence': False,
70 71 'phases': False,
71 72 'tagsfnodescache': True,
72 73 'revbranchcache': True
73 74 },
74 75 'packed1' : {
75 76 'cg.version': 's1'
76 77 }
77 78 }
78 79 _bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']
79 80
80 81 _bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True,
81 82 "tagsfnodescache": False,
82 83 "revbranchcache": False}}
83 84
84 85 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
85 86 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
86 87
87 88 @attr.s
88 89 class bundlespec(object):
89 90 compression = attr.ib()
90 91 wirecompression = attr.ib()
91 92 version = attr.ib()
92 93 wireversion = attr.ib()
93 94 params = attr.ib()
94 95 contentopts = attr.ib()
95 96
96 97 def parsebundlespec(repo, spec, strict=True):
97 98 """Parse a bundle string specification into parts.
98 99
99 100 Bundle specifications denote a well-defined bundle/exchange format.
100 101 The content of a given specification should not change over time in
101 102 order to ensure that bundles produced by a newer version of Mercurial are
102 103 readable from an older version.
103 104
104 105 The string currently has the form:
105 106
106 107 <compression>-<type>[;<parameter0>[;<parameter1>]]
107 108
108 109 Where <compression> is one of the supported compression formats
109 110 and <type> is (currently) a version string. A ";" can follow the type and
110 111 all text afterwards is interpreted as URI encoded, ";" delimited key=value
111 112 pairs.
112 113
113 114 If ``strict`` is True (the default) <compression> is required. Otherwise,
114 115 it is optional.
115 116
116 117 Returns a bundlespec object of (compression, version, parameters).
117 118 Compression will be ``None`` if not in strict mode and a compression isn't
118 119 defined.
119 120
120 121 An ``InvalidBundleSpecification`` is raised when the specification is
121 122 not syntactically well formed.
122 123
123 124 An ``UnsupportedBundleSpecification`` is raised when the compression or
124 125 bundle type/version is not recognized.
125 126
126 127 Note: this function will likely eventually return a more complex data
127 128 structure, including bundle2 part information.
128 129 """
129 130 def parseparams(s):
130 131 if ';' not in s:
131 132 return s, {}
132 133
133 134 params = {}
134 135 version, paramstr = s.split(';', 1)
135 136
136 137 for p in paramstr.split(';'):
137 138 if '=' not in p:
138 139 raise error.InvalidBundleSpecification(
139 140 _('invalid bundle specification: '
140 141 'missing "=" in parameter: %s') % p)
141 142
142 143 key, value = p.split('=', 1)
143 144 key = urlreq.unquote(key)
144 145 value = urlreq.unquote(value)
145 146 params[key] = value
146 147
147 148 return version, params
148 149
149 150
150 151 if strict and '-' not in spec:
151 152 raise error.InvalidBundleSpecification(
152 153 _('invalid bundle specification; '
153 154 'must be prefixed with compression: %s') % spec)
154 155
155 156 if '-' in spec:
156 157 compression, version = spec.split('-', 1)
157 158
158 159 if compression not in util.compengines.supportedbundlenames:
159 160 raise error.UnsupportedBundleSpecification(
160 161 _('%s compression is not supported') % compression)
161 162
162 163 version, params = parseparams(version)
163 164
164 165 if version not in _bundlespeccgversions:
165 166 raise error.UnsupportedBundleSpecification(
166 167 _('%s is not a recognized bundle version') % version)
167 168 else:
168 169 # Value could be just the compression or just the version, in which
169 170 # case some defaults are assumed (but only when not in strict mode).
170 171 assert not strict
171 172
172 173 spec, params = parseparams(spec)
173 174
174 175 if spec in util.compengines.supportedbundlenames:
175 176 compression = spec
176 177 version = 'v1'
177 178 # Generaldelta repos require v2.
178 179 if 'generaldelta' in repo.requirements:
179 180 version = 'v2'
180 181 # Modern compression engines require v2.
181 182 if compression not in _bundlespecv1compengines:
182 183 version = 'v2'
183 184 elif spec in _bundlespeccgversions:
184 185 if spec == 'packed1':
185 186 compression = 'none'
186 187 else:
187 188 compression = 'bzip2'
188 189 version = spec
189 190 else:
190 191 raise error.UnsupportedBundleSpecification(
191 192 _('%s is not a recognized bundle specification') % spec)
192 193
193 194 # Bundle version 1 only supports a known set of compression engines.
194 195 if version == 'v1' and compression not in _bundlespecv1compengines:
195 196 raise error.UnsupportedBundleSpecification(
196 197 _('compression engine %s is not supported on v1 bundles') %
197 198 compression)
198 199
199 200 # The specification for packed1 can optionally declare the data formats
200 201 # required to apply it. If we see this metadata, compare against what the
201 202 # repo supports and error if the bundle isn't compatible.
202 203 if version == 'packed1' and 'requirements' in params:
203 204 requirements = set(params['requirements'].split(','))
204 205 missingreqs = requirements - repo.supportedformats
205 206 if missingreqs:
206 207 raise error.UnsupportedBundleSpecification(
207 208 _('missing support for repository features: %s') %
208 209 ', '.join(sorted(missingreqs)))
209 210
210 211 # Compute contentopts based on the version
211 212 contentopts = _bundlespeccontentopts.get(version, {}).copy()
212 213
213 214 # Process the variants
214 215 if "stream" in params and params["stream"] == "v2":
215 216 variant = _bundlespecvariants["streamv2"]
216 217 contentopts.update(variant)
217 218
218 219 engine = util.compengines.forbundlename(compression)
219 220 compression, wirecompression = engine.bundletype()
220 221 wireversion = _bundlespeccgversions[version]
221 222
222 223 return bundlespec(compression, wirecompression, version, wireversion,
223 224 params, contentopts)
224 225
225 226 def readbundle(ui, fh, fname, vfs=None):
226 227 header = changegroup.readexactly(fh, 4)
227 228
228 229 alg = None
229 230 if not fname:
230 231 fname = "stream"
231 232 if not header.startswith('HG') and header.startswith('\0'):
232 233 fh = changegroup.headerlessfixup(fh, header)
233 234 header = "HG10"
234 235 alg = 'UN'
235 236 elif vfs:
236 237 fname = vfs.join(fname)
237 238
238 239 magic, version = header[0:2], header[2:4]
239 240
240 241 if magic != 'HG':
241 242 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
242 243 if version == '10':
243 244 if alg is None:
244 245 alg = changegroup.readexactly(fh, 2)
245 246 return changegroup.cg1unpacker(fh, alg)
246 247 elif version.startswith('2'):
247 248 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
248 249 elif version == 'S1':
249 250 return streamclone.streamcloneapplier(fh)
250 251 else:
251 252 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
252 253
253 254 def getbundlespec(ui, fh):
254 255 """Infer the bundlespec from a bundle file handle.
255 256
256 257 The input file handle is seeked and the original seek position is not
257 258 restored.
258 259 """
259 260 def speccompression(alg):
260 261 try:
261 262 return util.compengines.forbundletype(alg).bundletype()[0]
262 263 except KeyError:
263 264 return None
264 265
265 266 b = readbundle(ui, fh, None)
266 267 if isinstance(b, changegroup.cg1unpacker):
267 268 alg = b._type
268 269 if alg == '_truncatedBZ':
269 270 alg = 'BZ'
270 271 comp = speccompression(alg)
271 272 if not comp:
272 273 raise error.Abort(_('unknown compression algorithm: %s') % alg)
273 274 return '%s-v1' % comp
274 275 elif isinstance(b, bundle2.unbundle20):
275 276 if 'Compression' in b.params:
276 277 comp = speccompression(b.params['Compression'])
277 278 if not comp:
278 279 raise error.Abort(_('unknown compression algorithm: %s') % comp)
279 280 else:
280 281 comp = 'none'
281 282
282 283 version = None
283 284 for part in b.iterparts():
284 285 if part.type == 'changegroup':
285 286 version = part.params['version']
286 287 if version in ('01', '02'):
287 288 version = 'v2'
288 289 else:
289 290 raise error.Abort(_('changegroup version %s does not have '
290 291 'a known bundlespec') % version,
291 292 hint=_('try upgrading your Mercurial '
292 293 'client'))
293 294 elif part.type == 'stream2' and version is None:
294 295 # A stream2 part must be part of a v2 bundle
295 296 version = "v2"
296 297 requirements = urlreq.unquote(part.params['requirements'])
297 298 splitted = requirements.split()
298 299 params = bundle2._formatrequirementsparams(splitted)
299 300 return 'none-v2;stream=v2;%s' % params
300 301
301 302 if not version:
302 303 raise error.Abort(_('could not identify changegroup version in '
303 304 'bundle'))
304 305
305 306 return '%s-%s' % (comp, version)
306 307 elif isinstance(b, streamclone.streamcloneapplier):
307 308 requirements = streamclone.readbundle1header(fh)[2]
308 309 formatted = bundle2._formatrequirementsparams(requirements)
309 310 return 'none-packed1;%s' % formatted
310 311 else:
311 312 raise error.Abort(_('unknown bundle type: %s') % b)
312 313
313 314 def _computeoutgoing(repo, heads, common):
314 315 """Computes which revs are outgoing given a set of common
315 316 and a set of heads.
316 317
317 318 This is a separate function so extensions can have access to
318 319 the logic.
319 320
320 321 Returns a discovery.outgoing object.
321 322 """
322 323 cl = repo.changelog
323 324 if common:
324 325 hasnode = cl.hasnode
325 326 common = [n for n in common if hasnode(n)]
326 327 else:
327 328 common = [nullid]
328 329 if not heads:
329 330 heads = cl.heads()
330 331 return discovery.outgoing(repo, common, heads)
331 332
332 333 def _forcebundle1(op):
333 334 """return true if a pull/push must use bundle1
334 335
335 336 This function is used to allow testing of the older bundle version"""
336 337 ui = op.repo.ui
337 338 # The goal of this config is to allow developers to choose the bundle
338 339 # version used during exchange. This is especially handy for tests.
339 340 # Value is a list of bundle versions to pick from; the highest version
340 341 # should be used.
341 342 #
342 343 # developer config: devel.legacy.exchange
343 344 exchange = ui.configlist('devel', 'legacy.exchange')
344 345 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
345 346 return forcebundle1 or not op.remote.capable('bundle2')
346 347
347 348 class pushoperation(object):
348 349 """A object that represent a single push operation
349 350
350 351 Its purpose is to carry push related state and very common operations.
351 352
352 353 A new pushoperation should be created at the beginning of each push and
353 354 discarded afterward.
354 355 """
355 356
356 357 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
357 358 bookmarks=(), pushvars=None):
358 359 # repo we push from
359 360 self.repo = repo
360 361 self.ui = repo.ui
361 362 # repo we push to
362 363 self.remote = remote
363 364 # force option provided
364 365 self.force = force
365 366 # revs to be pushed (None is "all")
366 367 self.revs = revs
367 368 # bookmark explicitly pushed
368 369 self.bookmarks = bookmarks
369 370 # allow push of new branch
370 371 self.newbranch = newbranch
372 373 # steps already performed
373 374 # (used to check what steps have already been performed through bundle2)
373 374 self.stepsdone = set()
374 375 # Integer version of the changegroup push result
375 376 # - None means nothing to push
376 377 # - 0 means HTTP error
377 378 # - 1 means we pushed and remote head count is unchanged *or*
378 379 # we have outgoing changesets but refused to push
379 380 # - other values as described by addchangegroup()
380 381 self.cgresult = None
381 382 # Boolean value for the bookmark push
382 383 self.bkresult = None
383 384 # discovery.outgoing object (contains common and outgoing data)
384 385 self.outgoing = None
385 386 # all remote topological heads before the push
386 387 self.remoteheads = None
387 388 # Details of the remote branch pre and post push
388 389 #
389 390 # mapping: {'branch': ([remoteheads],
390 391 # [newheads],
391 392 # [unsyncedheads],
392 393 # [discardedheads])}
393 394 # - branch: the branch name
394 395 # - remoteheads: the list of remote heads known locally
395 396 # None if the branch is new
396 397 # - newheads: the new remote heads (known locally) with outgoing pushed
397 398 # - unsyncedheads: the list of remote heads unknown locally.
398 399 # - discardedheads: the list of remote heads made obsolete by the push
399 400 self.pushbranchmap = None
400 401 # testable as a boolean indicating if any nodes are missing locally.
401 402 self.incoming = None
402 403 # summary of the remote phase situation
403 404 self.remotephases = None
404 405 # phase changes that must be pushed alongside the changesets
405 406 self.outdatedphases = None
406 407 # phase changes that must be pushed if the changeset push fails
407 408 self.fallbackoutdatedphases = None
408 409 # outgoing obsmarkers
409 410 self.outobsmarkers = set()
410 411 # outgoing bookmarks
411 412 self.outbookmarks = []
412 413 # transaction manager
413 414 self.trmanager = None
414 415 # map { pushkey partid -> callback handling failure}
415 416 # used to handle exception from mandatory pushkey part failure
416 417 self.pkfailcb = {}
417 418 # an iterable of pushvars or None
418 419 self.pushvars = pushvars
419 420
420 421 @util.propertycache
421 422 def futureheads(self):
422 423 """future remote heads if the changeset push succeeds"""
423 424 return self.outgoing.missingheads
424 425
425 426 @util.propertycache
426 427 def fallbackheads(self):
427 428 """future remote heads if the changeset push fails"""
428 429 if self.revs is None:
429 430 # no targets to push; all common heads are relevant
430 431 return self.outgoing.commonheads
431 432 unfi = self.repo.unfiltered()
432 433 # I want cheads = heads(::missingheads and ::commonheads)
433 434 # (missingheads is revs with secret changeset filtered out)
434 435 #
435 436 # This can be expressed as:
436 437 # cheads = ( (missingheads and ::commonheads)
437 438 # + (commonheads and ::missingheads))"
438 439 # )
439 440 #
440 441 # while trying to push we already computed the following:
441 442 # common = (::commonheads)
442 443 # missing = ((commonheads::missingheads) - commonheads)
443 444 #
444 445 # We can pick:
445 446 # * missingheads part of common (::commonheads)
446 447 common = self.outgoing.common
447 448 nm = self.repo.changelog.nodemap
448 449 cheads = [node for node in self.revs if nm[node] in common]
449 450 # and
450 451 # * commonheads parents on missing
451 452 revset = unfi.set('%ln and parents(roots(%ln))',
452 453 self.outgoing.commonheads,
453 454 self.outgoing.missing)
454 455 cheads.extend(c.node() for c in revset)
455 456 return cheads
456 457
457 458 @property
458 459 def commonheads(self):
459 460 """set of all common heads after changeset bundle push"""
460 461 if self.cgresult:
461 462 return self.futureheads
462 463 else:
463 464 return self.fallbackheads
464 465
465 466 # mapping of message used when pushing bookmark
466 467 bookmsgmap = {'update': (_("updating bookmark %s\n"),
467 468 _('updating bookmark %s failed!\n')),
468 469 'export': (_("exporting bookmark %s\n"),
469 470 _('exporting bookmark %s failed!\n')),
470 471 'delete': (_("deleting remote bookmark %s\n"),
471 472 _('deleting remote bookmark %s failed!\n')),
472 473 }
473 474
474 475
475 476 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
476 477 opargs=None):
477 478 '''Push outgoing changesets (limited by revs) from a local
478 479 repository to remote. Return an integer:
479 480 - None means nothing to push
480 481 - 0 means HTTP error
481 482 - 1 means we pushed and remote head count is unchanged *or*
482 483 we have outgoing changesets but refused to push
483 484 - other values as described by addchangegroup()
484 485 '''
485 486 if opargs is None:
486 487 opargs = {}
487 488 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
488 489 **pycompat.strkwargs(opargs))
489 490 if pushop.remote.local():
490 491 missing = (set(pushop.repo.requirements)
491 492 - pushop.remote.local().supported)
492 493 if missing:
493 494 msg = _("required features are not"
494 495 " supported in the destination:"
495 496 " %s") % (', '.join(sorted(missing)))
496 497 raise error.Abort(msg)
497 498
498 499 if not pushop.remote.canpush():
499 500 raise error.Abort(_("destination does not support push"))
500 501
501 502 if not pushop.remote.capable('unbundle'):
502 503 raise error.Abort(_('cannot push: destination does not support the '
503 504 'unbundle wire protocol command'))
504 505
505 506 # get lock as we might write phase data
506 507 wlock = lock = None
507 508 try:
508 509 # bundle2 push may receive a reply bundle touching bookmarks or other
509 510 # things requiring the wlock. Take it now to ensure proper ordering.
510 511 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
511 512 if (not _forcebundle1(pushop)) and maypushback:
512 513 wlock = pushop.repo.wlock()
513 514 lock = pushop.repo.lock()
514 515 pushop.trmanager = transactionmanager(pushop.repo,
515 516 'push-response',
516 517 pushop.remote.url())
517 518 except error.LockUnavailable as err:
518 519 # source repo cannot be locked.
519 520 # We do not abort the push, but just disable the local phase
520 521 # synchronisation.
521 522 msg = 'cannot lock source repository: %s\n' % err
522 523 pushop.ui.debug(msg)
523 524
524 525 with wlock or util.nullcontextmanager(), \
525 526 lock or util.nullcontextmanager(), \
526 527 pushop.trmanager or util.nullcontextmanager():
527 528 pushop.repo.checkpush(pushop)
528 529 _pushdiscovery(pushop)
529 530 if not _forcebundle1(pushop):
530 531 _pushbundle2(pushop)
531 532 _pushchangeset(pushop)
532 533 _pushsyncphase(pushop)
533 534 _pushobsolete(pushop)
534 535 _pushbookmark(pushop)
535 536
536 537 if repo.ui.configbool('experimental', 'remotenames'):
537 538 logexchange.pullremotenames(repo, remote)
538 539
539 540 return pushop
540 541
541 542 # list of steps to perform discovery before push
542 543 pushdiscoveryorder = []
543 544
544 545 # Mapping between step name and function
545 546 #
546 547 # This exists to help extensions wrap steps if necessary
547 548 pushdiscoverymapping = {}
548 549
549 550 def pushdiscovery(stepname):
550 551 """decorator for function performing discovery before push
551 552
552 553 The function is added to the step -> function mapping and appended to the
553 554 list of steps. Beware that decorated functions will be added in order (this
554 555 may matter).
555 556
556 557 You can only use this decorator for a new step; if you want to wrap a step
557 558 from an extension, change the pushdiscovery dictionary directly."""
558 559 def dec(func):
559 560 assert stepname not in pushdiscoverymapping
560 561 pushdiscoverymapping[stepname] = func
561 562 pushdiscoveryorder.append(stepname)
562 563 return func
563 564 return dec
564 565
565 566 def _pushdiscovery(pushop):
566 567 """Run all discovery steps"""
567 568 for stepname in pushdiscoveryorder:
568 569 step = pushdiscoverymapping[stepname]
569 570 step(pushop)
570 571
571 572 @pushdiscovery('changeset')
572 573 def _pushdiscoverychangeset(pushop):
573 574 """discover the changeset that need to be pushed"""
574 575 fci = discovery.findcommonincoming
575 576 if pushop.revs:
576 577 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
577 578 ancestorsof=pushop.revs)
578 579 else:
579 580 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
580 581 common, inc, remoteheads = commoninc
581 582 fco = discovery.findcommonoutgoing
582 583 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
583 584 commoninc=commoninc, force=pushop.force)
584 585 pushop.outgoing = outgoing
585 586 pushop.remoteheads = remoteheads
586 587 pushop.incoming = inc
587 588
588 589 @pushdiscovery('phase')
589 590 def _pushdiscoveryphase(pushop):
590 591 """discover the phase that needs to be pushed
591 592
592 593 (computed for both success and failure case for changesets push)"""
593 594 outgoing = pushop.outgoing
594 595 unfi = pushop.repo.unfiltered()
595 596 remotephases = listkeys(pushop.remote, 'phases')
596 597
597 598 if (pushop.ui.configbool('ui', '_usedassubrepo')
598 599 and remotephases # server supports phases
599 600 and not pushop.outgoing.missing # no changesets to be pushed
600 601 and remotephases.get('publishing', False)):
601 602 # When:
602 603 # - this is a subrepo push
603 604 # - and the remote supports phases
604 605 # - and no changesets are to be pushed
605 606 # - and the remote is publishing
606 607 # We may be in the issue 3781 case!
607 608 # We drop the possible phase synchronisation done by
608 609 # courtesy, which would publish changesets that are possibly
609 610 # still draft on the remote.
610 611 pushop.outdatedphases = []
611 612 pushop.fallbackoutdatedphases = []
612 613 return
613 614
614 615 pushop.remotephases = phases.remotephasessummary(pushop.repo,
615 616 pushop.fallbackheads,
616 617 remotephases)
617 618 droots = pushop.remotephases.draftroots
618 619
619 620 extracond = ''
620 621 if not pushop.remotephases.publishing:
621 622 extracond = ' and public()'
622 623 revset = 'heads((%%ln::%%ln) %s)' % extracond
623 624 # Get the list of all revs draft on remote but public here.
624 625 # XXX Beware that this revset breaks if droots are not strictly
625 626 # XXX roots; we may want to ensure they are, but that is costly
626 627 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
627 628 if not outgoing.missing:
628 629 future = fallback
629 630 else:
630 631 # adds changeset we are going to push as draft
631 632 #
632 633 # should not be necessary for a publishing server, but because of an
633 634 # issue fixed in xxxxx we have to do it anyway.
634 635 fdroots = list(unfi.set('roots(%ln + %ln::)',
635 636 outgoing.missing, droots))
636 637 fdroots = [f.node() for f in fdroots]
637 638 future = list(unfi.set(revset, fdroots, pushop.futureheads))
638 639 pushop.outdatedphases = future
639 640 pushop.fallbackoutdatedphases = fallback
640 641
641 642 @pushdiscovery('obsmarker')
642 643 def _pushdiscoveryobsmarkers(pushop):
643 644 if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
644 645 return
645 646
646 647 if not pushop.repo.obsstore:
647 648 return
648 649
649 650 if 'obsolete' not in listkeys(pushop.remote, 'namespaces'):
650 651 return
651 652
652 653 repo = pushop.repo
653 654 # very naive computation, that can be quite expensive on big repo.
654 655 # However: evolution is currently slow on them anyway.
655 656 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
656 657 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
657 658
658 659 @pushdiscovery('bookmarks')
659 660 def _pushdiscoverybookmarks(pushop):
660 661 ui = pushop.ui
661 662 repo = pushop.repo.unfiltered()
662 663 remote = pushop.remote
663 664 ui.debug("checking for updated bookmarks\n")
664 665 ancestors = ()
665 666 if pushop.revs:
666 667 revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
667 668 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
668 669
669 670 remotebookmark = listkeys(remote, 'bookmarks')
670 671
671 672 explicit = set([repo._bookmarks.expandname(bookmark)
672 673 for bookmark in pushop.bookmarks])
673 674
674 675 remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
675 676 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
676 677
677 678 def safehex(x):
678 679 if x is None:
679 680 return x
680 681 return hex(x)
681 682
682 683 def hexifycompbookmarks(bookmarks):
683 684 return [(b, safehex(scid), safehex(dcid))
684 685 for (b, scid, dcid) in bookmarks]
685 686
686 687 comp = [hexifycompbookmarks(marks) for marks in comp]
687 688 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
688 689
689 690 def _processcompared(pushop, pushed, explicit, remotebms, comp):
690 691 """take decision on bookmark to pull from the remote bookmark
691 692
692 693 Exists to help extensions that want to alter this behavior.
693 694 """
694 695 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
695 696
696 697 repo = pushop.repo
697 698
698 699 for b, scid, dcid in advsrc:
699 700 if b in explicit:
700 701 explicit.remove(b)
701 702 if not pushed or repo[scid].rev() in pushed:
702 703 pushop.outbookmarks.append((b, dcid, scid))
703 704 # search added bookmark
704 705 for b, scid, dcid in addsrc:
705 706 if b in explicit:
706 707 explicit.remove(b)
707 708 pushop.outbookmarks.append((b, '', scid))
708 709 # search for overwritten bookmark
709 710 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
710 711 if b in explicit:
711 712 explicit.remove(b)
712 713 pushop.outbookmarks.append((b, dcid, scid))
713 714 # search for bookmark to delete
714 715 for b, scid, dcid in adddst:
715 716 if b in explicit:
716 717 explicit.remove(b)
717 718 # treat as "deleted locally"
718 719 pushop.outbookmarks.append((b, dcid, ''))
719 720 # identical bookmarks shouldn't get reported
720 721 for b, scid, dcid in same:
721 722 if b in explicit:
722 723 explicit.remove(b)
723 724
724 725 if explicit:
725 726 explicit = sorted(explicit)
726 727 # we should probably list all of them
727 728 pushop.ui.warn(_('bookmark %s does not exist on the local '
728 729 'or remote repository!\n') % explicit[0])
729 730 pushop.bkresult = 2
730 731
731 732 pushop.outbookmarks.sort()
732 733
733 734 def _pushcheckoutgoing(pushop):
734 735 outgoing = pushop.outgoing
735 736 unfi = pushop.repo.unfiltered()
736 737 if not outgoing.missing:
737 738 # nothing to push
738 739 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
739 740 return False
740 741 # something to push
741 742 if not pushop.force:
742 743 # if repo.obsstore == False --> no obsolete
743 744 # then, save the iteration
744 745 if unfi.obsstore:
745 746 # these messages are defined here for 80-char line length reasons
746 747 mso = _("push includes obsolete changeset: %s!")
747 748 mspd = _("push includes phase-divergent changeset: %s!")
748 749 mscd = _("push includes content-divergent changeset: %s!")
749 750 mst = {"orphan": _("push includes orphan changeset: %s!"),
750 751 "phase-divergent": mspd,
751 752 "content-divergent": mscd}
752 753 # If we are pushing and there is at least one
753 754 # obsolete or unstable changeset in missing, at
754 755 # least one of the missing heads will be obsolete or
755 756 # unstable, so checking heads only is ok.
756 757 for node in outgoing.missingheads:
757 758 ctx = unfi[node]
758 759 if ctx.obsolete():
759 760 raise error.Abort(mso % ctx)
760 761 elif ctx.isunstable():
761 762 # TODO print more than one instability in the abort
762 763 # message
763 764 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
764 765
765 766 discovery.checkheads(pushop)
766 767 return True
767 768
768 769 # List of names of steps to perform for an outgoing bundle2, order matters.
769 770 b2partsgenorder = []
770 771
771 772 # Mapping between step name and function
772 773 #
773 774 # This exists to help extensions wrap steps if necessary
774 775 b2partsgenmapping = {}
775 776
776 777 def b2partsgenerator(stepname, idx=None):
777 778 """decorator for function generating bundle2 part
778 779
779 780 The function is added to the step -> function mapping and appended to the
780 781 list of steps. Beware that decorated functions will be added in order
781 782 (this may matter).
782 783
783 784 You can only use this decorator for new steps; if you want to wrap a step
784 785 from an extension, change the b2partsgenmapping dictionary directly."""
785 786 def dec(func):
786 787 assert stepname not in b2partsgenmapping
787 788 b2partsgenmapping[stepname] = func
788 789 if idx is None:
789 790 b2partsgenorder.append(stepname)
790 791 else:
791 792 b2partsgenorder.insert(idx, stepname)
792 793 return func
793 794 return dec
794 795
795 796 def _pushb2ctxcheckheads(pushop, bundler):
796 797 """Generate race condition checking parts
797 798
798 799 Exists as an independent function to aid extensions
799 800 """
800 801 # * 'force' does not check for push races,
801 802 # * if we don't push anything, there is nothing to check.
802 803 if not pushop.force and pushop.outgoing.missingheads:
803 804 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
804 805 emptyremote = pushop.pushbranchmap is None
805 806 if not allowunrelated or emptyremote:
806 807 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
807 808 else:
808 809 affected = set()
809 810 for branch, heads in pushop.pushbranchmap.iteritems():
810 811 remoteheads, newheads, unsyncedheads, discardedheads = heads
811 812 if remoteheads is not None:
812 813 remote = set(remoteheads)
813 814 affected |= set(discardedheads) & remote
814 815 affected |= remote - set(newheads)
815 816 if affected:
816 817 data = iter(sorted(affected))
817 818 bundler.newpart('check:updated-heads', data=data)
818 819
819 820 def _pushing(pushop):
820 821 """return True if we are pushing anything"""
821 822 return bool(pushop.outgoing.missing
822 823 or pushop.outdatedphases
823 824 or pushop.outobsmarkers
824 825 or pushop.outbookmarks)
825 826
826 827 @b2partsgenerator('check-bookmarks')
827 828 def _pushb2checkbookmarks(pushop, bundler):
828 829 """insert bookmark move checking"""
829 830 if not _pushing(pushop) or pushop.force:
830 831 return
831 832 b2caps = bundle2.bundle2caps(pushop.remote)
832 833 hasbookmarkcheck = 'bookmarks' in b2caps
833 834 if not (pushop.outbookmarks and hasbookmarkcheck):
834 835 return
835 836 data = []
836 837 for book, old, new in pushop.outbookmarks:
837 838 old = bin(old)
838 839 data.append((book, old))
839 840 checkdata = bookmod.binaryencode(data)
840 841 bundler.newpart('check:bookmarks', data=checkdata)
841 842
842 843 @b2partsgenerator('check-phases')
843 844 def _pushb2checkphases(pushop, bundler):
844 845 """insert phase move checking"""
845 846 if not _pushing(pushop) or pushop.force:
846 847 return
847 848 b2caps = bundle2.bundle2caps(pushop.remote)
848 849 hasphaseheads = 'heads' in b2caps.get('phases', ())
849 850 if pushop.remotephases is not None and hasphaseheads:
850 851 # check that the remote phase has not changed
851 852 checks = [[] for p in phases.allphases]
852 853 checks[phases.public].extend(pushop.remotephases.publicheads)
853 854 checks[phases.draft].extend(pushop.remotephases.draftroots)
854 855 if any(checks):
855 856 for nodes in checks:
856 857 nodes.sort()
857 858 checkdata = phases.binaryencode(checks)
858 859 bundler.newpart('check:phases', data=checkdata)
859 860
860 861 @b2partsgenerator('changeset')
861 862 def _pushb2ctx(pushop, bundler):
862 863 """handle changegroup push through bundle2
863 864
864 865 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
865 866 """
866 867 if 'changesets' in pushop.stepsdone:
867 868 return
868 869 pushop.stepsdone.add('changesets')
869 870 # Send known heads to the server for race detection.
870 871 if not _pushcheckoutgoing(pushop):
871 872 return
872 873 pushop.repo.prepushoutgoinghooks(pushop)
873 874
874 875 _pushb2ctxcheckheads(pushop, bundler)
875 876
876 877 b2caps = bundle2.bundle2caps(pushop.remote)
877 878 version = '01'
878 879 cgversions = b2caps.get('changegroup')
879 880 if cgversions: # 3.1 and 3.2 ship with an empty value
880 881 cgversions = [v for v in cgversions
881 882 if v in changegroup.supportedoutgoingversions(
882 883 pushop.repo)]
883 884 if not cgversions:
884 885 raise ValueError(_('no common changegroup version'))
885 886 version = max(cgversions)
886 887 cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
887 888 'push')
888 889 cgpart = bundler.newpart('changegroup', data=cgstream)
889 890 if cgversions:
890 891 cgpart.addparam('version', version)
891 892 if 'treemanifest' in pushop.repo.requirements:
892 893 cgpart.addparam('treemanifest', '1')
893 894 def handlereply(op):
894 895 """extract addchangegroup returns from server reply"""
895 896 cgreplies = op.records.getreplies(cgpart.id)
896 897 assert len(cgreplies['changegroup']) == 1
897 898 pushop.cgresult = cgreplies['changegroup'][0]['return']
898 899 return handlereply
899 900
900 901 @b2partsgenerator('phase')
901 902 def _pushb2phases(pushop, bundler):
902 903 """handle phase push through bundle2"""
903 904 if 'phases' in pushop.stepsdone:
904 905 return
905 906 b2caps = bundle2.bundle2caps(pushop.remote)
906 907 ui = pushop.repo.ui
907 908
908 909 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
909 910 haspushkey = 'pushkey' in b2caps
910 911 hasphaseheads = 'heads' in b2caps.get('phases', ())
911 912
912 913 if hasphaseheads and not legacyphase:
913 914 return _pushb2phaseheads(pushop, bundler)
914 915 elif haspushkey:
915 916 return _pushb2phasespushkey(pushop, bundler)
916 917
917 918 def _pushb2phaseheads(pushop, bundler):
918 919 """push phase information through a bundle2 - binary part"""
919 920 pushop.stepsdone.add('phases')
920 921 if pushop.outdatedphases:
921 922 updates = [[] for p in phases.allphases]
922 923 updates[0].extend(h.node() for h in pushop.outdatedphases)
923 924 phasedata = phases.binaryencode(updates)
924 925 bundler.newpart('phase-heads', data=phasedata)
925 926
926 927 def _pushb2phasespushkey(pushop, bundler):
927 928 """push phase information through a bundle2 - pushkey part"""
928 929 pushop.stepsdone.add('phases')
929 930 part2node = []
930 931
931 932 def handlefailure(pushop, exc):
932 933 targetid = int(exc.partid)
933 934 for partid, node in part2node:
934 935 if partid == targetid:
935 936 raise error.Abort(_('updating %s to public failed') % node)
936 937
937 938 enc = pushkey.encode
938 939 for newremotehead in pushop.outdatedphases:
939 940 part = bundler.newpart('pushkey')
940 941 part.addparam('namespace', enc('phases'))
941 942 part.addparam('key', enc(newremotehead.hex()))
942 943 part.addparam('old', enc('%d' % phases.draft))
943 944 part.addparam('new', enc('%d' % phases.public))
944 945 part2node.append((part.id, newremotehead))
945 946 pushop.pkfailcb[part.id] = handlefailure
946 947
947 948 def handlereply(op):
948 949 for partid, node in part2node:
949 950 partrep = op.records.getreplies(partid)
950 951 results = partrep['pushkey']
951 952 assert len(results) <= 1
952 953 msg = None
953 954 if not results:
954 955 msg = _('server ignored update of %s to public!\n') % node
955 956 elif not int(results[0]['return']):
956 957 msg = _('updating %s to public failed!\n') % node
957 958 if msg is not None:
958 959 pushop.ui.warn(msg)
959 960 return handlereply
960 961
961 962 @b2partsgenerator('obsmarkers')
962 963 def _pushb2obsmarkers(pushop, bundler):
963 964 if 'obsmarkers' in pushop.stepsdone:
964 965 return
965 966 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
966 967 if obsolete.commonversion(remoteversions) is None:
967 968 return
968 969 pushop.stepsdone.add('obsmarkers')
969 970 if pushop.outobsmarkers:
970 971 markers = sorted(pushop.outobsmarkers)
971 972 bundle2.buildobsmarkerspart(bundler, markers)
972 973
973 974 @b2partsgenerator('bookmarks')
974 975 def _pushb2bookmarks(pushop, bundler):
975 976 """handle bookmark push through bundle2"""
976 977 if 'bookmarks' in pushop.stepsdone:
977 978 return
978 979 b2caps = bundle2.bundle2caps(pushop.remote)
979 980
980 981 legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
981 982 legacybooks = 'bookmarks' in legacy
982 983
983 984 if not legacybooks and 'bookmarks' in b2caps:
984 985 return _pushb2bookmarkspart(pushop, bundler)
985 986 elif 'pushkey' in b2caps:
986 987 return _pushb2bookmarkspushkey(pushop, bundler)
987 988
988 989 def _bmaction(old, new):
989 990 """small utility for bookmark pushing"""
990 991 if not old:
991 992 return 'export'
992 993 elif not new:
993 994 return 'delete'
994 995 return 'update'
995 996
996 997 def _pushb2bookmarkspart(pushop, bundler):
997 998 pushop.stepsdone.add('bookmarks')
998 999 if not pushop.outbookmarks:
999 1000 return
1000 1001
1001 1002 allactions = []
1002 1003 data = []
1003 1004 for book, old, new in pushop.outbookmarks:
1004 1005 new = bin(new)
1005 1006 data.append((book, new))
1006 1007 allactions.append((book, _bmaction(old, new)))
1007 1008 checkdata = bookmod.binaryencode(data)
1008 1009 bundler.newpart('bookmarks', data=checkdata)
1009 1010
1010 1011 def handlereply(op):
1011 1012 ui = pushop.ui
1012 1013 # if the push succeeded, report each bookmark action
1013 1014 for book, action in allactions:
1014 1015 ui.status(bookmsgmap[action][0] % book)
1015 1016
1016 1017 return handlereply
1017 1018
1018 1019 def _pushb2bookmarkspushkey(pushop, bundler):
1019 1020 pushop.stepsdone.add('bookmarks')
1020 1021 part2book = []
1021 1022 enc = pushkey.encode
1022 1023
1023 1024 def handlefailure(pushop, exc):
1024 1025 targetid = int(exc.partid)
1025 1026 for partid, book, action in part2book:
1026 1027 if partid == targetid:
1027 1028 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
1028 1029 # we should not be called for parts we did not generate
1029 1030 assert False
1030 1031
1031 1032 for book, old, new in pushop.outbookmarks:
1032 1033 part = bundler.newpart('pushkey')
1033 1034 part.addparam('namespace', enc('bookmarks'))
1034 1035 part.addparam('key', enc(book))
1035 1036 part.addparam('old', enc(old))
1036 1037 part.addparam('new', enc(new))
1037 1038 action = 'update'
1038 1039 if not old:
1039 1040 action = 'export'
1040 1041 elif not new:
1041 1042 action = 'delete'
1042 1043 part2book.append((part.id, book, action))
1043 1044 pushop.pkfailcb[part.id] = handlefailure
1044 1045
1045 1046 def handlereply(op):
1046 1047 ui = pushop.ui
1047 1048 for partid, book, action in part2book:
1048 1049 partrep = op.records.getreplies(partid)
1049 1050 results = partrep['pushkey']
1050 1051 assert len(results) <= 1
1051 1052 if not results:
1052 1053 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
1053 1054 else:
1054 1055 ret = int(results[0]['return'])
1055 1056 if ret:
1056 1057 ui.status(bookmsgmap[action][0] % book)
1057 1058 else:
1058 1059 ui.warn(bookmsgmap[action][1] % book)
1059 1060 if pushop.bkresult is not None:
1060 1061 pushop.bkresult = 1
1061 1062 return handlereply
1062 1063
1063 1064 @b2partsgenerator('pushvars', idx=0)
1064 1065 def _getbundlesendvars(pushop, bundler):
1065 1066 '''send shellvars via bundle2'''
1066 1067 pushvars = pushop.pushvars
1067 1068 if pushvars:
1068 1069 shellvars = {}
1069 1070 for raw in pushvars:
1070 1071 if '=' not in raw:
1071 1072 msg = ("unable to parse variable '%s', should follow "
1072 1073 "'KEY=VALUE' or 'KEY=' format")
1073 1074 raise error.Abort(msg % raw)
1074 1075 k, v = raw.split('=', 1)
1075 1076 shellvars[k] = v
1076 1077
1077 1078 part = bundler.newpart('pushvars')
1078 1079
1079 1080 for key, value in shellvars.iteritems():
1080 1081 part.addparam(key, value, mandatory=False)
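
# Illustrative usage sketch (not part of the original module; variable names
# are hypothetical). A client attaches variables on the command line, and each
# one becomes a parameter of the 'pushvars' part built above. Assuming the
# server-side bundle2 handler exposes them to hooks with a USERVAR_ prefix
# (plus the usual HG_ hook prefix), a server hook could consult them:
#
#   $ hg push --pushvars "DEBUG=1" --pushvars "BYPASS_REVIEW="
#
#   # in a server-side hook:
#   $ echo $HG_USERVAR_DEBUG    # -> 1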
1081 1082
1082 1083 def _pushbundle2(pushop):
1083 1084 """push data to the remote using bundle2
1084 1085
1085 1086 The only currently supported type of data is changegroup, but this will
1086 1087 evolve in the future."""
1087 1088 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1088 1089 pushback = (pushop.trmanager
1089 1090 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
1090 1091
1091 1092 # create reply capability
1092 1093 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
1093 1094 allowpushback=pushback,
1094 1095 role='client'))
1095 1096 bundler.newpart('replycaps', data=capsblob)
1096 1097 replyhandlers = []
1097 1098 for partgenname in b2partsgenorder:
1098 1099 partgen = b2partsgenmapping[partgenname]
1099 1100 ret = partgen(pushop, bundler)
1100 1101 if callable(ret):
1101 1102 replyhandlers.append(ret)
1102 1103 # do not push if nothing to push
1103 1104 if bundler.nbparts <= 1:
1104 1105 return
1105 1106 stream = util.chunkbuffer(bundler.getchunks())
1106 1107 try:
1107 1108 try:
1108 1109 with pushop.remote.commandexecutor() as e:
1109 1110 reply = e.callcommand('unbundle', {
1110 1111 'bundle': stream,
1111 1112 'heads': ['force'],
1112 1113 'url': pushop.remote.url(),
1113 1114 }).result()
1114 1115 except error.BundleValueError as exc:
1115 1116 raise error.Abort(_('missing support for %s') % exc)
1116 1117 try:
1117 1118 trgetter = None
1118 1119 if pushback:
1119 1120 trgetter = pushop.trmanager.transaction
1120 1121 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1121 1122 except error.BundleValueError as exc:
1122 1123 raise error.Abort(_('missing support for %s') % exc)
1123 1124 except bundle2.AbortFromPart as exc:
1124 1125 pushop.ui.status(_('remote: %s\n') % exc)
1125 1126 if exc.hint is not None:
1126 1127 pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
1127 1128 raise error.Abort(_('push failed on remote'))
1128 1129 except error.PushkeyFailed as exc:
1129 1130 partid = int(exc.partid)
1130 1131 if partid not in pushop.pkfailcb:
1131 1132 raise
1132 1133 pushop.pkfailcb[partid](pushop, exc)
1133 1134 for rephand in replyhandlers:
1134 1135 rephand(op)
1135 1136
1136 1137 def _pushchangeset(pushop):
1137 1138 """Make the actual push of changeset bundle to remote repo"""
1138 1139 if 'changesets' in pushop.stepsdone:
1139 1140 return
1140 1141 pushop.stepsdone.add('changesets')
1141 1142 if not _pushcheckoutgoing(pushop):
1142 1143 return
1143 1144
1144 1145 # Should have verified this in push().
1145 1146 assert pushop.remote.capable('unbundle')
1146 1147
1147 1148 pushop.repo.prepushoutgoinghooks(pushop)
1148 1149 outgoing = pushop.outgoing
1149 1150 # TODO: get bundlecaps from remote
1150 1151 bundlecaps = None
1151 1152 # create a changegroup from local
1152 1153 if pushop.revs is None and not (outgoing.excluded
1153 1154 or pushop.repo.changelog.filteredrevs):
1154 1155 # push everything,
1155 1156 # use the fast path, no race possible on push
1156 1157 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
1157 1158 fastpath=True, bundlecaps=bundlecaps)
1158 1159 else:
1159 1160 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
1160 1161 'push', bundlecaps=bundlecaps)
1161 1162
1162 1163 # apply changegroup to remote
1163 1164 # local repo finds heads on server, finds out what
1164 1165 # revs it must push. once revs transferred, if server
1165 1166 # finds it has different heads (someone else won
1166 1167 # commit/push race), server aborts.
1167 1168 if pushop.force:
1168 1169 remoteheads = ['force']
1169 1170 else:
1170 1171 remoteheads = pushop.remoteheads
1171 1172 # ssh: return remote's addchangegroup()
1172 1173 # http: return remote's addchangegroup() or 0 for error
1173 1174 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
1174 1175 pushop.repo.url())
1175 1176
1176 1177 def _pushsyncphase(pushop):
1177 1178 """synchronise phase information locally and remotely"""
1178 1179 cheads = pushop.commonheads
1179 1180 # even when we don't push, exchanging phase data is useful
1180 1181 remotephases = listkeys(pushop.remote, 'phases')
1181 1182 if (pushop.ui.configbool('ui', '_usedassubrepo')
1182 1183 and remotephases # server supports phases
1183 1184 and pushop.cgresult is None # nothing was pushed
1184 1185 and remotephases.get('publishing', False)):
1185 1186 # When:
1186 1187 # - this is a subrepo push
1187 1188 # - and remote supports phases
1188 1189 # - and no changeset was pushed
1189 1190 # - and remote is publishing
1190 1191 # We may be in the issue 3871 case!
1191 1192 # We drop the courtesy phase synchronisation that would
1192 1193 # otherwise publish changesets that are locally draft but
1193 1194 # already present on the remote.
1194 1195 remotephases = {'publishing': 'True'}
1195 1196 if not remotephases: # old server or public-only reply from non-publishing
1196 1197 _localphasemove(pushop, cheads)
1197 1198 # don't push any phase data as there is nothing to push
1198 1199 else:
1199 1200 ana = phases.analyzeremotephases(pushop.repo, cheads,
1200 1201 remotephases)
1201 1202 pheads, droots = ana
1202 1203 ### Apply remote phase on local
1203 1204 if remotephases.get('publishing', False):
1204 1205 _localphasemove(pushop, cheads)
1205 1206 else: # publish = False
1206 1207 _localphasemove(pushop, pheads)
1207 1208 _localphasemove(pushop, cheads, phases.draft)
1208 1209 ### Apply local phase on remote
1209 1210
1210 1211 if pushop.cgresult:
1211 1212 if 'phases' in pushop.stepsdone:
1212 1213 # phases already pushed through bundle2
1213 1214 return
1214 1215 outdated = pushop.outdatedphases
1215 1216 else:
1216 1217 outdated = pushop.fallbackoutdatedphases
1217 1218
1218 1219 pushop.stepsdone.add('phases')
1219 1220
1220 1221 # filter heads already turned public by the push
1221 1222 outdated = [c for c in outdated if c.node() not in pheads]
1222 1223 # fallback to independent pushkey command
1223 1224 for newremotehead in outdated:
1224 1225 with pushop.remote.commandexecutor() as e:
1225 1226 r = e.callcommand('pushkey', {
1226 1227 'namespace': 'phases',
1227 1228 'key': newremotehead.hex(),
1228 1229 'old': '%d' % phases.draft,
1229 1230 'new': '%d' % phases.public
1230 1231 }).result()
1231 1232
1232 1233 if not r:
1233 1234 pushop.ui.warn(_('updating %s to public failed!\n')
1234 1235 % newremotehead)
1235 1236
1236 1237 def _localphasemove(pushop, nodes, phase=phases.public):
1237 1238 """move <nodes> to <phase> in the local source repo"""
1238 1239 if pushop.trmanager:
1239 1240 phases.advanceboundary(pushop.repo,
1240 1241 pushop.trmanager.transaction(),
1241 1242 phase,
1242 1243 nodes)
1243 1244 else:
1244 1245 # repo is not locked, do not change any phases!
1245 1246 # Inform the user that phases should have been moved when
1246 1247 # applicable.
1247 1248 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1248 1249 phasestr = phases.phasenames[phase]
1249 1250 if actualmoves:
1250 1251 pushop.ui.status(_('cannot lock source repo, skipping '
1251 1252 'local %s phase update\n') % phasestr)
1252 1253
1253 1254 def _pushobsolete(pushop):
1254 1255 """utility function to push obsolete markers to a remote"""
1255 1256 if 'obsmarkers' in pushop.stepsdone:
1256 1257 return
1257 1258 repo = pushop.repo
1258 1259 remote = pushop.remote
1259 1260 pushop.stepsdone.add('obsmarkers')
1260 1261 if pushop.outobsmarkers:
1261 1262 pushop.ui.debug('try to push obsolete markers to remote\n')
1262 1263 rslts = []
1263 1264 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1264 1265 for key in sorted(remotedata, reverse=True):
1265 1266 # reverse sort to ensure we end with dump0
1266 1267 data = remotedata[key]
1267 1268 rslts.append(remote.pushkey('obsolete', key, '', data))
1268 1269 if [r for r in rslts if not r]:
1269 1270 msg = _('failed to push some obsolete markers!\n')
1270 1271 repo.ui.warn(msg)
1271 1272
1272 1273 def _pushbookmark(pushop):
1273 1274 """Update bookmark position on remote"""
1274 1275 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1275 1276 return
1276 1277 pushop.stepsdone.add('bookmarks')
1277 1278 ui = pushop.ui
1278 1279 remote = pushop.remote
1279 1280
1280 1281 for b, old, new in pushop.outbookmarks:
1281 1282 action = 'update'
1282 1283 if not old:
1283 1284 action = 'export'
1284 1285 elif not new:
1285 1286 action = 'delete'
1286 1287
1287 1288 with remote.commandexecutor() as e:
1288 1289 r = e.callcommand('pushkey', {
1289 1290 'namespace': 'bookmarks',
1290 1291 'key': b,
1291 1292 'old': old,
1292 1293 'new': new,
1293 1294 }).result()
1294 1295
1295 1296 if r:
1296 1297 ui.status(bookmsgmap[action][0] % b)
1297 1298 else:
1298 1299 ui.warn(bookmsgmap[action][1] % b)
1299 1300 # discovery may have set the value from an invalid entry
1300 1301 if pushop.bkresult is not None:
1301 1302 pushop.bkresult = 1
1302 1303
1303 1304 class pulloperation(object):
1304 1305 """An object that represents a single pull operation
1305 1306
1306 1307 Its purpose is to carry pull-related state and very common operations.
1307 1308
1308 1309 A new one should be created at the beginning of each pull and discarded
1309 1310 afterward.
1310 1311 """
1311 1312
1312 1313 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1313 1314 remotebookmarks=None, streamclonerequested=None):
1314 1315 # repo we pull into
1315 1316 self.repo = repo
1316 1317 # repo we pull from
1317 1318 self.remote = remote
1318 1319 # revision we try to pull (None is "all")
1319 1320 self.heads = heads
1320 1321 # bookmark pulled explicitly
1321 1322 self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
1322 1323 for bookmark in bookmarks]
1323 1324 # do we force pull?
1324 1325 self.force = force
1325 1326 # whether a streaming clone was requested
1326 1327 self.streamclonerequested = streamclonerequested
1327 1328 # transaction manager
1328 1329 self.trmanager = None
1329 1330 # set of common changesets between local and remote before pull
1330 1331 self.common = None
1331 1332 # set of pulled heads
1332 1333 self.rheads = None
1333 1334 # list of missing changesets to fetch remotely
1334 1335 self.fetch = None
1335 1336 # remote bookmarks data
1336 1337 self.remotebookmarks = remotebookmarks
1337 1338 # result of changegroup pulling (used as return code by pull)
1338 1339 self.cgresult = None
1339 1340 # list of steps already done
1340 1341 self.stepsdone = set()
1341 1342 # Whether we attempted a clone from pre-generated bundles.
1342 1343 self.clonebundleattempted = False
1343 1344
1344 1345 @util.propertycache
1345 1346 def pulledsubset(self):
1346 1347 """heads of the set of changesets targeted by the pull"""
1347 1348 # compute target subset
1348 1349 if self.heads is None:
1349 1350 # We pulled everything possible
1350 1351 # sync on everything common
1351 1352 c = set(self.common)
1352 1353 ret = list(self.common)
1353 1354 for n in self.rheads:
1354 1355 if n not in c:
1355 1356 ret.append(n)
1356 1357 return ret
1357 1358 else:
1358 1359 # We pulled a specific subset
1359 1360 # sync on this subset
1360 1361 return self.heads
1361 1362
1362 1363 @util.propertycache
1363 1364 def canusebundle2(self):
1364 1365 return not _forcebundle1(self)
1365 1366
1366 1367 @util.propertycache
1367 1368 def remotebundle2caps(self):
1368 1369 return bundle2.bundle2caps(self.remote)
1369 1370
1370 1371 def gettransaction(self):
1371 1372 # deprecated; talk to trmanager directly
1372 1373 return self.trmanager.transaction()
1373 1374
1374 1375 class transactionmanager(util.transactional):
1375 1376 """An object to manage the life cycle of a transaction
1376 1377
1377 1378 It creates the transaction on demand and calls the appropriate hooks when
1378 1379 closing the transaction."""
1379 1380 def __init__(self, repo, source, url):
1380 1381 self.repo = repo
1381 1382 self.source = source
1382 1383 self.url = url
1383 1384 self._tr = None
1384 1385
1385 1386 def transaction(self):
1386 1387 """Return an open transaction object, constructing if necessary"""
1387 1388 if not self._tr:
1388 1389 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1389 1390 self._tr = self.repo.transaction(trname)
1390 1391 self._tr.hookargs['source'] = self.source
1391 1392 self._tr.hookargs['url'] = self.url
1392 1393 return self._tr
1393 1394
1394 1395 def close(self):
1395 1396 """close transaction if created"""
1396 1397 if self._tr is not None:
1397 1398 self._tr.close()
1398 1399
1399 1400 def release(self):
1400 1401 """release transaction if created"""
1401 1402 if self._tr is not None:
1402 1403 self._tr.release()
1403 1404
1404 1405 def listkeys(remote, namespace):
1405 1406 with remote.commandexecutor() as e:
1406 1407 return e.callcommand('listkeys', {'namespace': namespace}).result()
1407 1408
1408 1409 def _fullpullbundle2(repo, pullop):
1409 1410 # The server may send a partial reply, i.e. when inlining
1410 1411 # pre-computed bundles. In that case, update the common
1411 1412 # set based on the results and pull another bundle.
1412 1413 #
1413 1414 # There are two indicators that the process is finished:
1414 1415 # - no changeset has been added, or
1415 1416 # - all remote heads are known locally.
1416 1417 # The head check must use the unfiltered view as obsoletion
1417 1418 # markers can hide heads.
1418 1419 unfi = repo.unfiltered()
1419 1420 unficl = unfi.changelog
1420 1421 def headsofdiff(h1, h2):
1421 1422 """Returns heads(h1 % h2)"""
1422 1423 res = unfi.set('heads(%ln %% %ln)', h1, h2)
1423 1424 return set(ctx.node() for ctx in res)
1424 1425 def headsofunion(h1, h2):
1425 1426 """Returns heads((h1 + h2) - null)"""
1426 1427 res = unfi.set('heads((%ln + %ln - null))', h1, h2)
1427 1428 return set(ctx.node() for ctx in res)
1428 1429 while True:
1429 1430 old_heads = unficl.heads()
1430 1431 clstart = len(unficl)
1431 1432 _pullbundle2(pullop)
1432 1433 if changegroup.NARROW_REQUIREMENT in repo.requirements:
1433 1434 # XXX narrow clones filter the heads on the server side during
1434 1435 # XXX getbundle and result in partial replies as well.
1435 1436 # XXX Disable pull bundles in this case as band aid to avoid
1436 1437 # XXX extra round trips.
1437 1438 break
1438 1439 if clstart == len(unficl):
1439 1440 break
1440 1441 if all(unficl.hasnode(n) for n in pullop.rheads):
1441 1442 break
1442 1443 new_heads = headsofdiff(unficl.heads(), old_heads)
1443 1444 pullop.common = headsofunion(new_heads, pullop.common)
1444 1445 pullop.rheads = set(pullop.rheads) - pullop.common
1445 1446
1446 1447 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1447 1448 streamclonerequested=None):
1448 1449 """Fetch repository data from a remote.
1449 1450
1450 1451 This is the main function used to retrieve data from a remote repository.
1451 1452
1452 1453 ``repo`` is the local repository to clone into.
1453 1454 ``remote`` is a peer instance.
1454 1455 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1455 1456 default) means to pull everything from the remote.
1456 1457 ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
1457 1458 default, all remote bookmarks are pulled.
1458 1459 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1459 1460 initialization.
1460 1461 ``streamclonerequested`` is a boolean indicating whether a "streaming
1461 1462 clone" is requested. A "streaming clone" is essentially a raw file copy
1462 1463 of revlogs from the server. This only works when the local repository is
1463 1464 empty. The default value of ``None`` means to respect the server
1464 1465 configuration for preferring stream clones.
1465 1466
1466 1467 Returns the ``pulloperation`` created for this pull.
1467 1468 """
1468 1469 if opargs is None:
1469 1470 opargs = {}
1470 1471 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1471 1472 streamclonerequested=streamclonerequested,
1472 1473 **pycompat.strkwargs(opargs))
1473 1474
1474 1475 peerlocal = pullop.remote.local()
1475 1476 if peerlocal:
1476 1477 missing = set(peerlocal.requirements) - pullop.repo.supported
1477 1478 if missing:
1478 1479 msg = _("required features are not"
1479 1480 " supported in the destination:"
1480 1481 " %s") % (', '.join(sorted(missing)))
1481 1482 raise error.Abort(msg)
1482 1483
1483 1484 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1484 1485 with repo.wlock(), repo.lock(), pullop.trmanager:
1485 1486 # This should ideally be in _pullbundle2(). However, it needs to run
1486 1487 # before discovery to avoid extra work.
1487 1488 _maybeapplyclonebundle(pullop)
1488 1489 streamclone.maybeperformlegacystreamclone(pullop)
1489 1490 _pulldiscovery(pullop)
1490 1491 if pullop.canusebundle2:
1491 1492 _fullpullbundle2(repo, pullop)
1492 1493 _pullchangeset(pullop)
1493 1494 _pullphase(pullop)
1494 1495 _pullbookmarks(pullop)
1495 1496 _pullobsolete(pullop)
1496 1497
1497 1498 # storing remotenames
1498 1499 if repo.ui.configbool('experimental', 'remotenames'):
1499 1500 logexchange.pullremotenames(repo, remote)
1500 1501
1501 1502 return pullop
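
# Illustrative usage sketch (a sketch only; the URL is hypothetical and an
# existing local `repo` is assumed). This is how calling code typically
# drives a pull:
#
#   from mercurial import hg
#   peer = hg.peer(repo.ui, {}, 'https://hg.example.com/project')
#   pullop = pull(repo, peer)            # heads=None pulls everything
#   if pullop.cgresult:
#       repo.ui.status('changegroup applied\n')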
1502 1503
1503 1504 # list of steps to perform discovery before pull
1504 1505 pulldiscoveryorder = []
1505 1506
1506 1507 # Mapping between step name and function
1507 1508 #
1508 1509 # This exists to help extensions wrap steps if necessary
1509 1510 pulldiscoverymapping = {}
1510 1511
1511 1512 def pulldiscovery(stepname):
1512 1513 """decorator for function performing discovery before pull
1513 1514
1514 1515 The function is added to the step -> function mapping and appended to the
1515 1516 list of steps. Beware that decorated functions will be added in order (this
1516 1517 may matter).
1517 1518
1518 1519 You can only use this decorator for a new step; if you want to wrap a step
1519 1520 from an extension, change the pulldiscoverymapping dictionary directly."""
1520 1521 def dec(func):
1521 1522 assert stepname not in pulldiscoverymapping
1522 1523 pulldiscoverymapping[stepname] = func
1523 1524 pulldiscoveryorder.append(stepname)
1524 1525 return func
1525 1526 return dec
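
# Minimal sketch of registering an extra discovery step with the decorator
# above (the step name and pushkey namespace are hypothetical, for
# illustration only; steps run in registration order):
#
#   @pulldiscovery('myext:labels')
#   def _pulldiscoverylabels(pullop):
#       # runs during discovery, before any data is pulled
#       pullop.remotelabels = listkeys(pullop.remote, 'labels')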
1526 1527
1527 1528 def _pulldiscovery(pullop):
1528 1529 """Run all discovery steps"""
1529 1530 for stepname in pulldiscoveryorder:
1530 1531 step = pulldiscoverymapping[stepname]
1531 1532 step(pullop)
1532 1533
1533 1534 @pulldiscovery('b1:bookmarks')
1534 1535 def _pullbookmarkbundle1(pullop):
1535 1536 """fetch bookmark data in bundle1 case
1536 1537
1537 1538 If not using bundle2, we have to fetch bookmarks before changeset
1538 1539 discovery to reduce the chance and impact of race conditions."""
1539 1540 if pullop.remotebookmarks is not None:
1540 1541 return
1541 1542 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1542 1543 # all known bundle2 servers now support listkeys, but let's be nice with
1543 1544 # new implementations.
1544 1545 return
1545 1546 books = listkeys(pullop.remote, 'bookmarks')
1546 1547 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1547 1548
1548 1549
1549 1550 @pulldiscovery('changegroup')
1550 1551 def _pulldiscoverychangegroup(pullop):
1551 1552 """discovery phase for the pull
1552 1553
1553 1554 Currently handles changeset discovery only; will change to handle all
1554 1555 discovery at some point."""
1555 1556 tmp = discovery.findcommonincoming(pullop.repo,
1556 1557 pullop.remote,
1557 1558 heads=pullop.heads,
1558 1559 force=pullop.force)
1559 1560 common, fetch, rheads = tmp
1560 1561 nm = pullop.repo.unfiltered().changelog.nodemap
1561 1562 if fetch and rheads:
1562 1563 # If a remote head is filtered locally, put it back in common.
1563 1564 #
1564 1565 # This is a hackish solution to catch most "common but locally
1565 1566 # hidden" situations. We do not perform discovery on the unfiltered
1566 1567 # repository because it would end up doing a pathological number of
1567 1568 # round trips for a huge number of changesets we do not care about.
1568 1569 #
1569 1570 # If a set of such "common but filtered" changesets exists on the server
1570 1571 # but does not include a remote head, we'll not be able to detect it.
1571 1572 scommon = set(common)
1572 1573 for n in rheads:
1573 1574 if n in nm:
1574 1575 if n not in scommon:
1575 1576 common.append(n)
1576 1577 if set(rheads).issubset(set(common)):
1577 1578 fetch = []
1578 1579 pullop.common = common
1579 1580 pullop.fetch = fetch
1580 1581 pullop.rheads = rheads
1581 1582
1582 1583 def _pullbundle2(pullop):
1583 1584 """pull data using bundle2
1584 1585
1585 1586 For now, the only supported data is the changegroup."""
1586 1587 kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}
1587 1588
1588 1589 # make ui easier to access
1589 1590 ui = pullop.repo.ui
1590 1591
1591 1592 # At the moment we don't do stream clones over bundle2. If that is
1592 1593 # implemented then here's where the check for that will go.
1593 1594 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1594 1595
1595 1596 # declare pull perimeters
1596 1597 kwargs['common'] = pullop.common
1597 1598 kwargs['heads'] = pullop.heads or pullop.rheads
1598 1599
1599 1600 if streaming:
1600 1601 kwargs['cg'] = False
1601 1602 kwargs['stream'] = True
1602 1603 pullop.stepsdone.add('changegroup')
1603 1604 pullop.stepsdone.add('phases')
1604 1605
1605 1606 else:
1606 1607 # pulling changegroup
1607 1608 pullop.stepsdone.add('changegroup')
1608 1609
1609 1610 kwargs['cg'] = pullop.fetch
1610 1611
1611 1612 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
1612 1613 hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
1613 1614 if (not legacyphase and hasbinaryphase):
1614 1615 kwargs['phases'] = True
1615 1616 pullop.stepsdone.add('phases')
1616 1617
1617 1618 if 'listkeys' in pullop.remotebundle2caps:
1618 1619 if 'phases' not in pullop.stepsdone:
1619 1620 kwargs['listkeys'] = ['phases']
1620 1621
1621 1622 bookmarksrequested = False
1622 1623 legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
1623 1624 hasbinarybook = 'bookmarks' in pullop.remotebundle2caps
1624 1625
1625 1626 if pullop.remotebookmarks is not None:
1626 1627 pullop.stepsdone.add('request-bookmarks')
1627 1628
1628 1629 if ('request-bookmarks' not in pullop.stepsdone
1629 1630 and pullop.remotebookmarks is None
1630 1631 and not legacybookmark and hasbinarybook):
1631 1632 kwargs['bookmarks'] = True
1632 1633 bookmarksrequested = True
1633 1634
1634 1635 if 'listkeys' in pullop.remotebundle2caps:
1635 1636 if 'request-bookmarks' not in pullop.stepsdone:
1636 1637 # make sure to always include bookmark data when migrating
1637 1638 # `hg incoming --bundle` to using this function.
1638 1639 pullop.stepsdone.add('request-bookmarks')
1639 1640 kwargs.setdefault('listkeys', []).append('bookmarks')
1640 1641
1641 1642 # If this is a full pull / clone and the server supports the clone bundles
1642 1643 # feature, tell the server whether we attempted a clone bundle. The
1643 1644 # presence of this flag indicates the client supports clone bundles. This
1644 1645 # will enable the server to treat clients that support clone bundles
1645 1646 # differently from those that don't.
1646 1647 if (pullop.remote.capable('clonebundles')
1647 1648 and pullop.heads is None and list(pullop.common) == [nullid]):
1648 1649 kwargs['cbattempted'] = pullop.clonebundleattempted
1649 1650
1650 1651 if streaming:
1651 1652 pullop.repo.ui.status(_('streaming all changes\n'))
1652 1653 elif not pullop.fetch:
1653 1654 pullop.repo.ui.status(_("no changes found\n"))
1654 1655 pullop.cgresult = 0
1655 1656 else:
1656 1657 if pullop.heads is None and list(pullop.common) == [nullid]:
1657 1658 pullop.repo.ui.status(_("requesting all changes\n"))
1658 1659 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1659 1660 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1660 1661 if obsolete.commonversion(remoteversions) is not None:
1661 1662 kwargs['obsmarkers'] = True
1662 1663 pullop.stepsdone.add('obsmarkers')
1663 1664 _pullbundle2extraprepare(pullop, kwargs)
1664 1665
1665 1666 with pullop.remote.commandexecutor() as e:
1666 1667 args = dict(kwargs)
1667 1668 args['source'] = 'pull'
1668 1669 bundle = e.callcommand('getbundle', args).result()
1669 1670
1670 1671 try:
1671 1672 op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction,
1672 1673 source='pull')
1673 1674 op.modes['bookmarks'] = 'records'
1674 1675 bundle2.processbundle(pullop.repo, bundle, op=op)
1675 1676 except bundle2.AbortFromPart as exc:
1676 1677 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1677 1678 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1678 1679 except error.BundleValueError as exc:
1679 1680 raise error.Abort(_('missing support for %s') % exc)
1680 1681
1681 1682 if pullop.fetch:
1682 1683 pullop.cgresult = bundle2.combinechangegroupresults(op)
1683 1684
1684 1685 # processing phases change
1685 1686 for namespace, value in op.records['listkeys']:
1686 1687 if namespace == 'phases':
1687 1688 _pullapplyphases(pullop, value)
1688 1689
1689 1690 # processing bookmark update
1690 1691 if bookmarksrequested:
1691 1692 books = {}
1692 1693 for record in op.records['bookmarks']:
1693 1694 books[record['bookmark']] = record["node"]
1694 1695 pullop.remotebookmarks = books
1695 1696 else:
1696 1697 for namespace, value in op.records['listkeys']:
1697 1698 if namespace == 'bookmarks':
1698 1699 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1699 1700
1700 1701 # bookmark data were either already there or pulled in the bundle
1701 1702 if pullop.remotebookmarks is not None:
1702 1703 _pullbookmarks(pullop)
1703 1704
1704 1705 def _pullbundle2extraprepare(pullop, kwargs):
1705 1706 """hook function so that extensions can extend the getbundle call"""
1706 1707
1707 1708 def _pullchangeset(pullop):
1708 1709 """pull changeset from unbundle into the local repo"""
1709 1710 # We delay opening the transaction as long as possible so we
1710 1711 # don't open a transaction for nothing and don't break a future
1711 1712 # useful rollback call.
1712 1713 if 'changegroup' in pullop.stepsdone:
1713 1714 return
1714 1715 pullop.stepsdone.add('changegroup')
1715 1716 if not pullop.fetch:
1716 1717 pullop.repo.ui.status(_("no changes found\n"))
1717 1718 pullop.cgresult = 0
1718 1719 return
1719 1720 tr = pullop.gettransaction()
1720 1721 if pullop.heads is None and list(pullop.common) == [nullid]:
1721 1722 pullop.repo.ui.status(_("requesting all changes\n"))
1722 1723 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1723 1724 # issue1320, avoid a race if remote changed after discovery
1724 1725 pullop.heads = pullop.rheads
1725 1726
1726 1727 if pullop.remote.capable('getbundle'):
1727 1728 # TODO: get bundlecaps from remote
1728 1729 cg = pullop.remote.getbundle('pull', common=pullop.common,
1729 1730 heads=pullop.heads or pullop.rheads)
1730 1731 elif pullop.heads is None:
1731 1732 with pullop.remote.commandexecutor() as e:
1732 1733 cg = e.callcommand('changegroup', {
1733 1734 'nodes': pullop.fetch,
1734 1735 'source': 'pull',
1735 1736 }).result()
1736 1737
1737 1738 elif not pullop.remote.capable('changegroupsubset'):
1738 1739 raise error.Abort(_("partial pull cannot be done because "
1739 1740 "other repository doesn't support "
1740 1741 "changegroupsubset."))
1741 1742 else:
1742 1743 with pullop.remote.commandexecutor() as e:
1743 1744 cg = e.callcommand('changegroupsubset', {
1744 1745 'bases': pullop.fetch,
1745 1746 'heads': pullop.heads,
1746 1747 'source': 'pull',
1747 1748 }).result()
1748 1749
1749 1750 bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
1750 1751 pullop.remote.url())
1751 1752 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1752 1753
1753 1754 def _pullphase(pullop):
1754 1755 # Get remote phases data from remote
1755 1756 if 'phases' in pullop.stepsdone:
1756 1757 return
1757 1758 remotephases = listkeys(pullop.remote, 'phases')
1758 1759 _pullapplyphases(pullop, remotephases)
1759 1760
1760 1761 def _pullapplyphases(pullop, remotephases):
1761 1762 """apply phase movement from observed remote state"""
1762 1763 if 'phases' in pullop.stepsdone:
1763 1764 return
1764 1765 pullop.stepsdone.add('phases')
1765 1766 publishing = bool(remotephases.get('publishing', False))
1766 1767 if remotephases and not publishing:
1767 1768 # remote is new and non-publishing
1768 1769 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1769 1770 pullop.pulledsubset,
1770 1771 remotephases)
1771 1772 dheads = pullop.pulledsubset
1772 1773 else:
1773 1774 # Remote is old or publishing; all common changesets
1774 1775 # should be seen as public
1775 1776 pheads = pullop.pulledsubset
1776 1777 dheads = []
1777 1778 unfi = pullop.repo.unfiltered()
1778 1779 phase = unfi._phasecache.phase
1779 1780 rev = unfi.changelog.nodemap.get
1780 1781 public = phases.public
1781 1782 draft = phases.draft
1782 1783
1783 1784 # exclude changesets already public locally and update the others
1784 1785 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1785 1786 if pheads:
1786 1787 tr = pullop.gettransaction()
1787 1788 phases.advanceboundary(pullop.repo, tr, public, pheads)
1788 1789
1789 1790 # exclude changesets already draft locally and update the others
1790 1791 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1791 1792 if dheads:
1792 1793 tr = pullop.gettransaction()
1793 1794 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1794 1795
1795 1796 def _pullbookmarks(pullop):
1796 1797 """process the remote bookmark information to update the local one"""
1797 1798 if 'bookmarks' in pullop.stepsdone:
1798 1799 return
1799 1800 pullop.stepsdone.add('bookmarks')
1800 1801 repo = pullop.repo
1801 1802 remotebookmarks = pullop.remotebookmarks
1802 1803 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1803 1804 pullop.remote.url(),
1804 1805 pullop.gettransaction,
1805 1806 explicit=pullop.explicitbookmarks)
1806 1807
1807 1808 def _pullobsolete(pullop):
1808 1809 """utility function to pull obsolete markers from a remote
1809 1810
1810 1811 The `gettransaction` function returns the pull transaction, creating
1811 1812 one if necessary. We return the transaction to inform the calling code that
1812 1813 a new transaction has been created (when applicable).
1813 1814
1814 1815 Exists mostly to allow overriding for experimentation purposes."""
1815 1816 if 'obsmarkers' in pullop.stepsdone:
1816 1817 return
1817 1818 pullop.stepsdone.add('obsmarkers')
1818 1819 tr = None
1819 1820 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1820 1821 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1821 1822 remoteobs = listkeys(pullop.remote, 'obsolete')
1822 1823 if 'dump0' in remoteobs:
1823 1824 tr = pullop.gettransaction()
1824 1825 markers = []
1825 1826 for key in sorted(remoteobs, reverse=True):
1826 1827 if key.startswith('dump'):
1827 1828 data = util.b85decode(remoteobs[key])
1828 1829 version, newmarks = obsolete._readmarkers(data)
1829 1830 markers += newmarks
1830 1831 if markers:
1831 1832 pullop.repo.obsstore.add(tr, markers)
1832 1833 pullop.repo.invalidatevolatilesets()
1833 1834 return tr
1834 1835
1836 def applynarrowacl(repo, kwargs):
1837 """Apply narrow fetch access control.
1838
1839 This massages the named arguments for getbundle wire protocol commands
1840 so requested data is filtered through access control rules.
1841 """
1842 ui = repo.ui
1843 # TODO this assumes existence of HTTP and is a layering violation.
1844 username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
1845 user_includes = ui.configlist(
1846 _NARROWACL_SECTION, username + '.includes',
1847 ui.configlist(_NARROWACL_SECTION, 'default.includes'))
1848 user_excludes = ui.configlist(
1849 _NARROWACL_SECTION, username + '.excludes',
1850 ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
1851 if not user_includes:
1852 raise error.Abort(_("{} configuration for user {} is empty")
1853 .format(_NARROWACL_SECTION, username))
1854
1855 user_includes = [
1856 'path:.' if p == '*' else 'path:' + p for p in user_includes]
1857 user_excludes = [
1858 'path:.' if p == '*' else 'path:' + p for p in user_excludes]
1859
1860 req_includes = set(kwargs.get(r'includepats', []))
1861 req_excludes = set(kwargs.get(r'excludepats', []))
1862
1863 req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
1864 req_includes, req_excludes, user_includes, user_excludes)
1865
1866 if invalid_includes:
1867 raise error.Abort(
1868 _("The following includes are not accessible for {}: {}")
1869 .format(username, invalid_includes))
1870
1871 new_args = {}
1872 new_args.update(kwargs)
1873 new_args['includepats'] = req_includes
1874 if req_excludes:
1875 new_args['excludepats'] = req_excludes
1876 return new_args
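
# Example server configuration for the ACL applied above (the usernames and
# paths are hypothetical). Patterns are rooted at the repository root, and
# '*' grants access to everything (it is translated to 'path:.' above):
#
#   [narrowhgacl]
#   default.includes = lib/common
#   alice.includes = apps/web, lib/common
#   alice.excludes = apps/web/secrets
#   bob.includes = *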
1877
1835 1878 def caps20to10(repo, role):
1836 1879 """return a set with appropriate options to use bundle20 during getbundle"""
1837 1880 caps = {'HG20'}
1838 1881 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
1839 1882 caps.add('bundle2=' + urlreq.quote(capsblob))
1840 1883 return caps
1841 1884
1842 1885 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1843 1886 getbundle2partsorder = []
1844 1887
1845 1888 # Mapping between step name and function
1846 1889 #
1847 1890 # This exists to help extensions wrap steps if necessary
1848 1891 getbundle2partsmapping = {}
1849 1892
1850 1893 def getbundle2partsgenerator(stepname, idx=None):
1851 1894 """decorator for function generating bundle2 part for getbundle
1852 1895
1853 1896 The function is added to the step -> function mapping and appended to the
1854 1897 list of steps. Beware that decorated functions will be added in order
1855 1898 (this may matter).
1856 1899
1857 1900 You can only use this decorator for new steps; if you want to wrap a step
1858 1901 from an extension, modify the getbundle2partsmapping dictionary directly."""
1859 1902 def dec(func):
1860 1903 assert stepname not in getbundle2partsmapping
1861 1904 getbundle2partsmapping[stepname] = func
1862 1905 if idx is None:
1863 1906 getbundle2partsorder.append(stepname)
1864 1907 else:
1865 1908 getbundle2partsorder.insert(idx, stepname)
1866 1909 return func
1867 1910 return dec
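
# Minimal sketch of registering an additional part generator with the
# decorator above (the part name, capability, and data source are all
# hypothetical):
#
#   @getbundle2partsgenerator('myext:metadata')
#   def _getbundlemyextpart(bundler, repo, source, b2caps=None, **kwargs):
#       if 'myext' in (b2caps or {}):
#           bundler.newpart('myext:metadata',
#                           data=repo.vfs.tryread('myext'),
#                           mandatory=False)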
1868 1911
1869 1912 def bundle2requested(bundlecaps):
1870 1913 if bundlecaps is not None:
1871 1914 return any(cap.startswith('HG2') for cap in bundlecaps)
1872 1915 return False
1873 1916
1874 1917 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
1875 1918 **kwargs):
1876 1919 """Return chunks constituting a bundle's raw data.
1877 1920
1878 1921 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1879 1922 passed.
1880 1923
1881 1924 Returns a 2-tuple of a dict with metadata about the generated bundle
1882 1925 and an iterator over raw chunks (of varying sizes).
1883 1926 """
1884 1927 kwargs = pycompat.byteskwargs(kwargs)
1885 1928 info = {}
1886 1929 usebundle2 = bundle2requested(bundlecaps)
1887 1930 # bundle10 case
1888 1931 if not usebundle2:
1889 1932 if bundlecaps and not kwargs.get('cg', True):
1890 1933 raise ValueError(_('request for bundle10 must include changegroup'))
1891 1934
1892 1935 if kwargs:
1893 1936 raise ValueError(_('unsupported getbundle arguments: %s')
1894 1937 % ', '.join(sorted(kwargs.keys())))
1895 1938 outgoing = _computeoutgoing(repo, heads, common)
1896 1939 info['bundleversion'] = 1
1897 1940 return info, changegroup.makestream(repo, outgoing, '01', source,
1898 1941 bundlecaps=bundlecaps)
1899 1942
1900 1943 # bundle20 case
1901 1944 info['bundleversion'] = 2
1902 1945 b2caps = {}
1903 1946 for bcaps in bundlecaps:
1904 1947 if bcaps.startswith('bundle2='):
1905 1948 blob = urlreq.unquote(bcaps[len('bundle2='):])
1906 1949 b2caps.update(bundle2.decodecaps(blob))
1907 1950 bundler = bundle2.bundle20(repo.ui, b2caps)
1908 1951
1909 1952 kwargs['heads'] = heads
1910 1953 kwargs['common'] = common
1911 1954
1912 1955 for name in getbundle2partsorder:
1913 1956 func = getbundle2partsmapping[name]
1914 1957 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1915 1958 **pycompat.strkwargs(kwargs))
1916 1959
1917 1960 info['prefercompressed'] = bundler.prefercompressed
1918 1961
1919 1962 return info, bundler.getchunks()
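
# Illustrative use (a sketch, not from the original source): generate a full
# HG20 bundle for a client that advertises no extra capabilities, and write
# the raw chunks to disk. getchunks() emits the complete stream, including
# the HG20 header, so the output is a valid bundle file:
#
#   info, chunks = getbundlechunks(repo, 'serve', heads=repo.heads(),
#                                  common=[nullid], bundlecaps={'HG20'})
#   with open('all.hg', 'wb') as fh:
#       for chunk in chunks:
#           fh.write(chunk)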
1920 1963
1921 1964 @getbundle2partsgenerator('stream2')
1922 1965 def _getbundlestream2(bundler, repo, *args, **kwargs):
1923 1966 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
1924 1967
1925 1968 @getbundle2partsgenerator('changegroup')
1926 1969 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1927 1970 b2caps=None, heads=None, common=None, **kwargs):
1928 1971 """add a changegroup part to the requested bundle"""
1929 1972 cgstream = None
1930 1973 if kwargs.get(r'cg', True):
1931 1974 # build changegroup bundle here.
1932 1975 version = '01'
1933 1976 cgversions = b2caps.get('changegroup')
1934 1977 if cgversions: # 3.1 and 3.2 ship with an empty value
1935 1978 cgversions = [v for v in cgversions
1936 1979 if v in changegroup.supportedoutgoingversions(repo)]
1937 1980 if not cgversions:
1938 1981 raise ValueError(_('no common changegroup version'))
1939 1982 version = max(cgversions)
1940 1983 outgoing = _computeoutgoing(repo, heads, common)
1941 1984 if outgoing.missing:
1942 1985 cgstream = changegroup.makestream(repo, outgoing, version, source,
1943 1986 bundlecaps=bundlecaps)
1944 1987
1945 1988 if cgstream:
1946 1989 part = bundler.newpart('changegroup', data=cgstream)
1947 1990 if cgversions:
1948 1991 part.addparam('version', version)
1949 1992 part.addparam('nbchanges', '%d' % len(outgoing.missing),
1950 1993 mandatory=False)
1951 1994 if 'treemanifest' in repo.requirements:
1952 1995 part.addparam('treemanifest', '1')
1953 1996
1954 1997 @getbundle2partsgenerator('bookmarks')
1955 1998 def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
1956 1999 b2caps=None, **kwargs):
1957 2000 """add a bookmark part to the requested bundle"""
1958 2001 if not kwargs.get(r'bookmarks', False):
1959 2002 return
1960 2003 if 'bookmarks' not in b2caps:
1961 2004 raise ValueError(_('no common bookmarks exchange method'))
1962 2005 books = bookmod.listbinbookmarks(repo)
1963 2006 data = bookmod.binaryencode(books)
1964 2007 if data:
1965 2008 bundler.newpart('bookmarks', data=data)
1966 2009
1967 2010 @getbundle2partsgenerator('listkeys')
1968 2011 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1969 2012 b2caps=None, **kwargs):
1970 2013 """add parts containing listkeys namespaces to the requested bundle"""
1971 2014 listkeys = kwargs.get(r'listkeys', ())
1972 2015 for namespace in listkeys:
1973 2016 part = bundler.newpart('listkeys')
1974 2017 part.addparam('namespace', namespace)
1975 2018 keys = repo.listkeys(namespace).items()
1976 2019 part.data = pushkey.encodekeys(keys)
1977 2020
1978 2021 @getbundle2partsgenerator('obsmarkers')
1979 2022 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1980 2023 b2caps=None, heads=None, **kwargs):
1981 2024 """add an obsolescence markers part to the requested bundle"""
1982 2025 if kwargs.get(r'obsmarkers', False):
1983 2026 if heads is None:
1984 2027 heads = repo.heads()
1985 2028 subset = [c.node() for c in repo.set('::%ln', heads)]
1986 2029 markers = repo.obsstore.relevantmarkers(subset)
1987 2030 markers = sorted(markers)
1988 2031 bundle2.buildobsmarkerspart(bundler, markers)
1989 2032
1990 2033 @getbundle2partsgenerator('phases')
1991 2034 def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
1992 2035 b2caps=None, heads=None, **kwargs):
1993 2036 """add phase heads part to the requested bundle"""
1994 2037 if kwargs.get(r'phases', False):
1995 2038 if 'heads' not in b2caps.get('phases'):
1996 2039 raise ValueError(_('no common phases exchange method'))
1997 2040 if heads is None:
1998 2041 heads = repo.heads()
1999 2042
2000 2043 headsbyphase = collections.defaultdict(set)
2001 2044 if repo.publishing():
2002 2045 headsbyphase[phases.public] = heads
2003 2046 else:
2004 2047 # find the appropriate heads to move
2005 2048
2006 2049 phase = repo._phasecache.phase
2007 2050 node = repo.changelog.node
2008 2051 rev = repo.changelog.rev
2009 2052 for h in heads:
2010 2053 headsbyphase[phase(repo, rev(h))].add(h)
2011 2054 seenphases = list(headsbyphase.keys())
2012 2055
2013 2056 # We do not handle anything but public and draft phases for now
2014 2057 if seenphases:
2015 2058 assert max(seenphases) <= phases.draft
2016 2059
2017 2060 # if client is pulling non-public changesets, we need to find
2018 2061 # intermediate public heads.
2019 2062 draftheads = headsbyphase.get(phases.draft, set())
2020 2063 if draftheads:
2021 2064 publicheads = headsbyphase.get(phases.public, set())
2022 2065
2023 2066 revset = 'heads(only(%ln, %ln) and public())'
2024 2067 extraheads = repo.revs(revset, draftheads, publicheads)
2025 2068 for r in extraheads:
2026 2069 headsbyphase[phases.public].add(node(r))
2027 2070
2028 2071 # transform data into a format used by the encoding function
2029 2072 phasemapping = []
2030 2073 for phase in phases.allphases:
2031 2074 phasemapping.append(sorted(headsbyphase[phase]))
2032 2075
2033 2076 # generate the actual part
2034 2077 phasedata = phases.binaryencode(phasemapping)
2035 2078 bundler.newpart('phase-heads', data=phasedata)
2036 2079
2037 2080 @getbundle2partsgenerator('hgtagsfnodes')
2038 2081 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
2039 2082 b2caps=None, heads=None, common=None,
2040 2083 **kwargs):
2041 2084 """Transfer the .hgtags filenodes mapping.
2042 2085
2043 2086 Only values for heads in this bundle will be transferred.
2044 2087
2045 2088 The part data consists of pairs of 20 byte changeset node and .hgtags
2046 2089 filenodes raw values.
2047 2090 """
2048 2091 # Don't send unless:
2049 2092 # - changesets are being exchanged,
2050 2093 # - the client supports it.
2051 2094 if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
2052 2095 return
2053 2096
2054 2097 outgoing = _computeoutgoing(repo, heads, common)
2055 2098 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2056 2099
2057 2100 @getbundle2partsgenerator('cache:rev-branch-cache')
2058 2101 def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
2059 2102 b2caps=None, heads=None, common=None,
2060 2103 **kwargs):
2061 2104 """Transfer the rev-branch-cache mapping
2062 2105
2063 2106 The payload is a series of data related to each branch
2064 2107
2065 2108 1) branch name length
2066 2109 2) number of open heads
2067 2110 3) number of closed heads
2068 2111 4) open heads nodes
2069 2112 5) closed heads nodes
2070 2113 """
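# A minimal decoding sketch for the payload (an assumption for illustration:
# each record starts with a big-endian '>III' header carrying the three
# lengths/counts above, followed by the branch name and raw 20-byte nodes;
# see bundle2.addpartrevbranchcache for the authoritative encoder):
#
#   import struct
#   header = struct.Struct('>III')
#   def readrecord(fh):
#       namelen, nbopen, nbclosed = header.unpack(fh.read(header.size))
#       branch = fh.read(namelen)
#       openheads = [fh.read(20) for _i in range(nbopen)]
#       closedheads = [fh.read(20) for _i in range(nbclosed)]
#       return branch, openheads, closedheads
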
2071 2114 # Don't send unless:
2072 2115 # - changesets are being exchanged,
2073 2116 # - the client supports it.
2074 2117 # - narrow bundle isn't in play (not currently compatible).
2075 2118 if (not kwargs.get(r'cg', True)
2076 2119 or 'rev-branch-cache' not in b2caps
2077 2120 or kwargs.get(r'narrow', False)
2078 2121 or repo.ui.has_section(_NARROWACL_SECTION)):
2079 2122 return
2080 2123
2081 2124 outgoing = _computeoutgoing(repo, heads, common)
2082 2125 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2083 2126
2084 2127 def check_heads(repo, their_heads, context):
2085 2128 """check if the heads of a repo have been modified
2086 2129
2087 2130 Used by peer for unbundling.
2088 2131 """
2089 2132 heads = repo.heads()
2090 2133 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
2091 2134 if not (their_heads == ['force'] or their_heads == heads or
2092 2135 their_heads == ['hashed', heads_hash]):
2093 2136 # someone else committed/pushed/unbundled while we
2094 2137 # were transferring data
2095 2138 raise error.PushRaced('repository changed while %s - '
2096 2139 'please try again' % context)
2097 2140
2098 2141 def unbundle(repo, cg, heads, source, url):
2099 2142 """Apply a bundle to a repo.
2100 2143
2101 2144 This function makes sure the repo is locked during the application and has
2102 2145 a mechanism to check that no push race occurred between the creation of the
2103 2146 bundle and its application.
2104 2147
2105 2148 If the push was raced, a PushRaced exception is raised."""
2106 2149 r = 0
2107 2150 # need a transaction when processing a bundle2 stream
2108 2151 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2109 2152 lockandtr = [None, None, None]
2110 2153 recordout = None
2111 2154 # quick fix for output mismatch with bundle2 in 3.4
2112 2155 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
2113 2156 if url.startswith('remote:http:') or url.startswith('remote:https:'):
2114 2157 captureoutput = True
2115 2158 try:
2116 2159 # note: outside bundle1, 'heads' is expected to be empty and this
2117 2160 # 'check_heads' call will be a no-op
2118 2161 check_heads(repo, heads, 'uploading changes')
2119 2162 # push can proceed
2120 2163 if not isinstance(cg, bundle2.unbundle20):
2121 2164 # legacy case: bundle1 (changegroup 01)
2122 2165 txnname = "\n".join([source, util.hidepassword(url)])
2123 2166 with repo.lock(), repo.transaction(txnname) as tr:
2124 2167 op = bundle2.applybundle(repo, cg, tr, source, url)
2125 2168 r = bundle2.combinechangegroupresults(op)
2126 2169 else:
2127 2170 r = None
2128 2171 try:
2129 2172 def gettransaction():
2130 2173 if not lockandtr[2]:
2131 2174 lockandtr[0] = repo.wlock()
2132 2175 lockandtr[1] = repo.lock()
2133 2176 lockandtr[2] = repo.transaction(source)
2134 2177 lockandtr[2].hookargs['source'] = source
2135 2178 lockandtr[2].hookargs['url'] = url
2136 2179 lockandtr[2].hookargs['bundle2'] = '1'
2137 2180 return lockandtr[2]
2138 2181
2139 2182 # Do greedy locking by default until we're satisfied with lazy
2140 2183 # locking.
2141 2184 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
2142 2185 gettransaction()
2143 2186
2144 2187 op = bundle2.bundleoperation(repo, gettransaction,
2145 2188 captureoutput=captureoutput,
2146 2189 source='push')
2147 2190 try:
2148 2191 op = bundle2.processbundle(repo, cg, op=op)
2149 2192 finally:
2150 2193 r = op.reply
2151 2194 if captureoutput and r is not None:
2152 2195 repo.ui.pushbuffer(error=True, subproc=True)
2153 2196 def recordout(output):
2154 2197 r.newpart('output', data=output, mandatory=False)
2155 2198 if lockandtr[2] is not None:
2156 2199 lockandtr[2].close()
2157 2200 except BaseException as exc:
2158 2201 exc.duringunbundle2 = True
2159 2202 if captureoutput and r is not None:
2160 2203 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2161 2204 def recordout(output):
2162 2205 part = bundle2.bundlepart('output', data=output,
2163 2206 mandatory=False)
2164 2207 parts.append(part)
2165 2208 raise
2166 2209 finally:
2167 2210 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2168 2211 if recordout is not None:
2169 2212 recordout(repo.ui.popbuffer())
2170 2213 return r
2171 2214
2172 2215 def _maybeapplyclonebundle(pullop):
2173 2216 """Apply a clone bundle from a remote, if possible."""
2174 2217
2175 2218 repo = pullop.repo
2176 2219 remote = pullop.remote
2177 2220
2178 2221 if not repo.ui.configbool('ui', 'clonebundles'):
2179 2222 return
2180 2223
2181 2224 # Only run if local repo is empty.
2182 2225 if len(repo):
2183 2226 return
2184 2227
2185 2228 if pullop.heads:
2186 2229 return
2187 2230
2188 2231 if not remote.capable('clonebundles'):
2189 2232 return
2190 2233
2191 2234 with remote.commandexecutor() as e:
2192 2235 res = e.callcommand('clonebundles', {}).result()
2193 2236
2194 2237 # If we call the wire protocol command, that's good enough to record the
2195 2238 # attempt.
2196 2239 pullop.clonebundleattempted = True
2197 2240
2198 2241 entries = parseclonebundlesmanifest(repo, res)
2199 2242 if not entries:
2200 2243 repo.ui.note(_('no clone bundles available on remote; '
2201 2244 'falling back to regular clone\n'))
2202 2245 return
2203 2246
2204 2247 entries = filterclonebundleentries(
2205 2248 repo, entries, streamclonerequested=pullop.streamclonerequested)
2206 2249
2207 2250 if not entries:
2208 2251 # There is a thundering herd concern here. However, if a server
2209 2252 # operator doesn't advertise bundles appropriate for its clients,
2210 2253 # they deserve what's coming. Furthermore, from a client's
2211 2254 # perspective, no automatic fallback would mean not being able to
2212 2255 # clone!
2213 2256 repo.ui.warn(_('no compatible clone bundles available on server; '
2214 2257 'falling back to regular clone\n'))
2215 2258 repo.ui.warn(_('(you may want to report this to the server '
2216 2259 'operator)\n'))
2217 2260 return
2218 2261
2219 2262 entries = sortclonebundleentries(repo.ui, entries)
2220 2263
2221 2264 url = entries[0]['URL']
2222 2265 repo.ui.status(_('applying clone bundle from %s\n') % url)
2223 2266 if trypullbundlefromurl(repo.ui, repo, url):
2224 2267 repo.ui.status(_('finished applying clone bundle\n'))
2225 2268 # Bundle failed.
2226 2269 #
2227 2270 # We abort by default to avoid the thundering herd of
2228 2271 # clients flooding a server that was expecting expensive
2229 2272 # clone load to be offloaded.
2230 2273 elif repo.ui.configbool('ui', 'clonebundlefallback'):
2231 2274 repo.ui.warn(_('falling back to normal clone\n'))
2232 2275 else:
2233 2276 raise error.Abort(_('error applying bundle'),
2234 2277 hint=_('if this error persists, consider contacting '
2235 2278 'the server operator or disable clone '
2236 2279 'bundles via '
2237 2280 '"--config ui.clonebundles=false"'))
2238 2281
2239 2282 def parseclonebundlesmanifest(repo, s):
2240 2283 """Parses the raw text of a clone bundles manifest.
2241 2284
2242 2285 Returns a list of dicts. The dicts have a ``URL`` key corresponding
2243 2286 to the URL and other keys are the attributes for the entry.
2244 2287 """
2245 2288 m = []
2246 2289 for line in s.splitlines():
2247 2290 fields = line.split()
2248 2291 if not fields:
2249 2292 continue
2250 2293 attrs = {'URL': fields[0]}
2251 2294 for rawattr in fields[1:]:
2252 2295 key, value = rawattr.split('=', 1)
2253 2296 key = urlreq.unquote(key)
2254 2297 value = urlreq.unquote(value)
2255 2298 attrs[key] = value
2256 2299
2257 2300 # Parse BUNDLESPEC into components. This makes client-side
2258 2301 # preferences easier to specify since you can prefer a single
2259 2302 # component of the BUNDLESPEC.
2260 2303 if key == 'BUNDLESPEC':
2261 2304 try:
2262 2305 bundlespec = parsebundlespec(repo, value)
2263 2306 attrs['COMPRESSION'] = bundlespec.compression
2264 2307 attrs['VERSION'] = bundlespec.version
2265 2308 except error.InvalidBundleSpecification:
2266 2309 pass
2267 2310 except error.UnsupportedBundleSpecification:
2268 2311 pass
2269 2312
2270 2313 m.append(attrs)
2271 2314
2272 2315 return m
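
# An illustrative manifest (hypothetical URLs; attribute values are
# percent-encoded on the wire):
#
#   https://cdn.example.com/full.zstd.hg BUNDLESPEC=zstd-v2
#   https://cdn.example.com/full.gzip.hg BUNDLESPEC=gzip-v1 REQUIRESNI=true
#
# would parse into two dicts, the first one being (assuming the client's
# parsebundlespec recognizes the spec):
#
#   {'URL': 'https://cdn.example.com/full.zstd.hg',
#    'BUNDLESPEC': 'zstd-v2', 'COMPRESSION': 'zstd', 'VERSION': 'v2'}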
2273 2316
2274 2317 def isstreamclonespec(bundlespec):
2275 2318 # Stream clone v1
2276 2319 if (bundlespec.wirecompression == 'UN' and bundlespec.wireversion == 's1'):
2277 2320 return True
2278 2321
2279 2322 # Stream clone v2
2280 2323 if (bundlespec.wirecompression == 'UN' and
2281 2324 bundlespec.wireversion == '02' and
2282 2325 bundlespec.contentopts.get('streamv2')):
2283 2326 return True
2284 2327
2285 2328 return False
2286 2329
def filterclonebundleentries(repo, entries, streamclonerequested=False):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                bundlespec = parsebundlespec(repo, spec, strict=True)

                # If a stream clone was requested, filter out non-streamclone
                # entries.
                if streamclonerequested and not isstreamclonespec(bundlespec):
                    repo.ui.debug('filtering %s because not a stream clone\n' %
                                  entry['URL'])
                    continue

            except error.InvalidBundleSpecification as e:
                repo.ui.debug(stringutil.forcebytestr(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (
                                  entry['URL'], stringutil.forcebytestr(e)))
                continue
        # If we don't have a spec and requested a stream clone, we don't know
        # what the entry is so don't attempt to apply it.
        elif streamclonerequested:
            repo.ui.debug('filtering %s because cannot determine if a stream '
                          'clone bundle\n' % entry['URL'])
            continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries

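# Filtering sketch (illustrative values): given entries whose BUNDLESPECs
# are 'gzip-v2' and 'none-packed1', a call with streamclonerequested=True
# keeps only the 'none-packed1' entry; a regular clone keeps both,
# provided any REQUIRESNI constraint is satisfied.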
class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        for prefkey, prefvalue in self.prefers:
            avalue = self.value.get(prefkey)
            bvalue = other.value.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0

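# Comparison sketch (hypothetical values): with
# prefers = [('COMPRESSION', 'zstd')], an entry whose COMPRESSION is
# 'zstd' sorts before one whose COMPRESSION is 'gzip'. Entries that tie
# on every preferred attribute return 0 from _cmp() and keep their
# manifest (index) order, since Python's sort is stable.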
def sortclonebundleentries(ui, entries):
    prefers = ui.configlist('ui', 'clonebundleprefers')
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    items = sorted(clonebundleentry(v, prefers) for v in entries)
    return [i.value for i in items]

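# Example configuration driving the sort above (illustrative values):
#
#   [ui]
#   clonebundleprefers = VERSION=v2, COMPRESSION=zstd
#
# Each list item is split on the first '=' into a (key, value) preference
# pair; pairs listed earlier take precedence over later ones.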
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL."""
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, 'stream')

            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') %
                    stringutil.forcebytestr(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') %
                    stringutil.forcebytestr(e.reason))

        return False
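# Note: fetch failures (HTTP or other URL errors) return False rather than
# raising, which lets the caller above choose between falling back to a
# normal clone and aborting, per ui.clonebundlefallback.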