clonebundles: filter on SNI requirement...
Gregory Szorc
r26645:2faa7671 default
@@ -1,85 +1,98 b''
1 1 # This software may be used and distributed according to the terms of the
2 2 # GNU General Public License version 2 or any later version.
3 3
4 4 """server side extension to advertise pre-generated bundles to seed clones.
5 5
6 6 The extension essentially serves the content of a .hg/clonebundles.manifest
7 7 file to clients that request it.
8 8
9 9 The clonebundles.manifest file contains a list of URLs and attributes. URLs
10 10 hold pre-generated bundles that a client fetches and applies. After applying
11 11 the pre-generated bundle, the client will connect back to the original server
12 12 and pull data not in the pre-generated bundle.
13 13
14 14 Manifest File Format:
15 15
16 16 The manifest file contains a newline (\n) delimited list of entries.
17 17
18 18 Each line in this file defines an available bundle. Lines have the format:
19 19
20 20 <URL> [<key>=<value>]
21 21
22 22 That is, a URL followed by extra metadata describing it. Metadata keys and
23 23 values should be URL encoded.
24 24
25 25 This metadata is optional. It is up to server operators to populate this
26 26 metadata.
27 27
28 28 Keys in UPPERCASE are reserved for use by Mercurial. All non-uppercase keys
29 29 can be used by site installations.
30 30
31 31 The server operator is responsible for generating the bundle manifest file.
32 32
33 33 Metadata Attributes:
34 34
35 35 BUNDLESPEC
36 36 A "bundle specification" string that describes the type of the bundle.
37 37
38 38 These are string values that are accepted by the "--type" argument of
39 39 `hg bundle`.
40 40
41 41 The values are parsed in strict mode, which means they must be of the
42 42 "<compression>-<type>" form. See
43 43 mercurial.exchange.parsebundlespec() for more details.
44 44
45 45 Clients will automatically filter out specifications that are unknown or
46 46 unsupported so they won't attempt to download something that likely won't
47 47 apply.
48 48
49 49 The actual value doesn't impact client behavior beyond filtering:
50 50 clients will still sniff the bundle type from the header of downloaded
51 51 files.
52
53 REQUIRESNI
54 Whether Server Name Indication (SNI) is required to connect to the URL.
55 SNI allows servers to use multiple certificates on the same IP. It is
56 somewhat common in CDNs and other hosting providers. Older Python
57 versions do not support SNI. Defining this attribute enables clients
58 with older Python versions to filter this entry.
59
60 If this is defined, it is important to advertise a non-SNI fallback
61 URL or clients running old Python releases may not be able to clone
62 with the clonebundles facility.
63
64 Value should be "true".
52 65 """
53 66
54 67 from mercurial import (
55 68 extensions,
56 69 wireproto,
57 70 )
58 71
59 72 testedwith = 'internal'
60 73
61 74 def capabilities(orig, repo, proto):
62 75 caps = orig(repo, proto)
63 76
64 77 # Only advertise if a manifest exists. This does add some I/O to requests.
65 78 # But this should be cheaper than a wasted network round trip due to a
66 79 # missing file.
67 80 if repo.opener.exists('clonebundles.manifest'):
68 81 caps.append('clonebundles')
69 82
70 83 return caps
71 84
72 85 @wireproto.wireprotocommand('clonebundles', '')
73 86 def bundles(repo, proto):
74 87 """Server command for returning info for available bundles to seed clones.
75 88
76 89 Clients will parse this response and determine what bundle to fetch.
77 90
78 91 Other extensions may wrap this command to filter or dynamically emit
79 92 data depending on the request. e.g. you could advertise URLs for
80 93 the closest data center given the client's IP address.
81 94 """
82 95 return repo.opener.tryread('clonebundles.manifest')
83 96
84 97 def extsetup(ui):
85 98 extensions.wrapfunction(wireproto, '_capabilities', capabilities)
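This is the server half; the commit title refers to clients filtering manifest entries on the REQUIRESNI attribute. A minimal sketch of that kind of filtering, assuming entries have already been parsed into (url, attrs) pairs — the helper names here are illustrative, not Mercurial's actual client code; only ssl.HAS_SNI is a real stdlib flag:

    import ssl

    def clientsupportssni():
        # ssl.HAS_SNI is present on Python builds that support SNI;
        # older interpreters lack the attribute entirely.
        return getattr(ssl, 'HAS_SNI', False)

    def filtermanifest(entries):
        # entries: list of (url, attrs-dict) pairs parsed from the manifest.
        # Drop entries requiring SNI when this interpreter cannot provide it.
        keep = []
        for url, attrs in entries:
            if attrs.get('REQUIRESNI') == 'true' and not clientsupportssni():
                continue
            keep.append((url, attrs))
        return keep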
@@ -1,1702 +1,1708 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex, nullid
10 10 import errno, urllib, urllib2
11 11 import util, scmutil, changegroup, base85, error
12 12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 13 import lock as lockmod
14 14 import streamclone
15 import sslutil
15 16 import tags
16 17 import url as urlmod
17 18
18 19 # Maps bundle compression human names to internal representation.
19 20 _bundlespeccompressions = {'none': None,
20 21 'bzip2': 'BZ',
21 22 'gzip': 'GZ',
22 23 }
23 24
24 25 # Maps bundle version human names to changegroup versions.
25 26 _bundlespeccgversions = {'v1': '01',
26 27 'v2': '02',
27 28 'bundle2': '02', #legacy
28 29 }
29 30
30 31 def parsebundlespec(repo, spec, strict=True):
31 32 """Parse a bundle string specification into parts.
32 33
33 34 Bundle specifications denote a well-defined bundle/exchange format.
34 35 The content of a given specification should not change over time in
35 36 order to ensure that bundles produced by a newer version of Mercurial are
36 37 readable from an older version.
37 38
38 39 The string currently has the form:
39 40
40 41 <compression>-<type>
41 42
42 43 Where <compression> is one of the supported compression formats
43 44 and <type> is (currently) a version string.
44 45
45 46 If ``strict`` is True (the default) <compression> is required. Otherwise,
46 47 it is optional.
47 48
48 49 Returns a 2-tuple of (compression, version). Compression will be ``None``
49 50 if not in strict mode and a compression isn't defined.
50 51
51 52 An ``InvalidBundleSpecification`` is raised when the specification is
52 53 not syntactically well formed.
53 54
54 55 An ``UnsupportedBundleSpecification`` is raised when the compression or
55 56 bundle type/version is not recognized.
56 57
57 58 Note: this function will likely eventually return a more complex data
58 59 structure, including bundle2 part information.
59 60 """
60 61 if strict and '-' not in spec:
61 62 raise error.InvalidBundleSpecification(
62 63 _('invalid bundle specification; '
63 64 'must be prefixed with compression: %s') % spec)
64 65
65 66 if '-' in spec:
66 67 compression, version = spec.split('-', 1)
67 68
68 69 if compression not in _bundlespeccompressions:
69 70 raise error.UnsupportedBundleSpecification(
70 71 _('%s compression is not supported') % compression)
71 72
72 73 if version not in _bundlespeccgversions:
73 74 raise error.UnsupportedBundleSpecification(
74 75 _('%s is not a recognized bundle version') % version)
75 76 else:
76 77 # Value could be just the compression or just the version, in which
77 78 # case some defaults are assumed (but only when not in strict mode).
78 79 assert not strict
79 80
80 81 if spec in _bundlespeccompressions:
81 82 compression = spec
82 83 version = 'v1'
83 84 if 'generaldelta' in repo.requirements:
84 85 version = 'v2'
85 86 elif spec in _bundlespeccgversions:
86 87 compression = 'bzip2'
87 88 version = spec
88 89 else:
89 90 raise error.UnsupportedBundleSpecification(
90 91 _('%s is not a recognized bundle specification') % spec)
91 92
92 93 compression = _bundlespeccompressions[compression]
93 94 version = _bundlespeccgversions[version]
94 95 return compression, version
95 96
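To make the strict "<compression>-<type>" grammar concrete, here is a standalone sketch that mirrors the two lookup tables above (not the mercurial.exchange API itself):

    # Standalone sketch of the strict-mode parse using the same tables.
    compressions = {'none': None, 'bzip2': 'BZ', 'gzip': 'GZ'}
    cgversions = {'v1': '01', 'v2': '02', 'bundle2': '02'}

    def parsestrict(spec):
        if '-' not in spec:
            raise ValueError('must be prefixed with compression: %s' % spec)
        comp, version = spec.split('-', 1)
        return compressions[comp], cgversions[version]

    assert parsestrict('gzip-v1') == ('GZ', '01')
    assert parsestrict('none-v2') == (None, '02')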
96 97 def readbundle(ui, fh, fname, vfs=None):
97 98 header = changegroup.readexactly(fh, 4)
98 99
99 100 alg = None
100 101 if not fname:
101 102 fname = "stream"
102 103 if not header.startswith('HG') and header.startswith('\0'):
103 104 fh = changegroup.headerlessfixup(fh, header)
104 105 header = "HG10"
105 106 alg = 'UN'
106 107 elif vfs:
107 108 fname = vfs.join(fname)
108 109
109 110 magic, version = header[0:2], header[2:4]
110 111
111 112 if magic != 'HG':
112 113 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
113 114 if version == '10':
114 115 if alg is None:
115 116 alg = changegroup.readexactly(fh, 2)
116 117 return changegroup.cg1unpacker(fh, alg)
117 118 elif version.startswith('2'):
118 119 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
119 120 else:
120 121 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
121 122
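To illustrate the dispatch above, a self-contained sketch of the same header sniffing on a raw 4-byte prefix (file handling and the real unpackers elided):

    def sniffheader(header):
        # header: the first 4 bytes of a bundle stream, as in readbundle().
        if not header.startswith('HG') and header.startswith('\0'):
            return 'HG10UN'  # headerless changegroup, treated as uncompressed
        magic, version = header[0:2], header[2:4]
        if magic != 'HG':
            raise ValueError('not a Mercurial bundle')
        if version == '10':
            return 'HG10'    # compression algorithm follows in the next 2 bytes
        if version.startswith('2'):
            return 'HG2x'    # bundle2, handled by bundle2.getunbundler()
        raise ValueError('unknown bundle version %s' % version)

    assert sniffheader('HG10') == 'HG10'
    assert sniffheader('HG20') == 'HG2x'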
122 123 def buildobsmarkerspart(bundler, markers):
123 124 """add an obsmarker part to the bundler with <markers>
124 125
125 126 No part is created if markers is empty.
126 127 Raises ValueError if the bundler doesn't support any known obsmarker format.
127 128 """
128 129 if markers:
129 130 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
130 131 version = obsolete.commonversion(remoteversions)
131 132 if version is None:
132 133 raise ValueError('bundler does not support common obsmarker format')
133 134 stream = obsolete.encodemarkers(markers, True, version=version)
134 135 return bundler.newpart('obsmarkers', data=stream)
135 136 return None
136 137
137 138 def _canusebundle2(op):
138 139 """return true if a pull/push can use bundle2
139 140
140 141 Feel free to nuke this function when we drop the experimental option"""
141 142 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
142 143 and op.remote.capable('bundle2'))
143 144
144 145
145 146 class pushoperation(object):
146 147 """A object that represent a single push operation
147 148
148 149 It purpose is to carry push related state and very common operation.
149 150
150 151 A new should be created at the beginning of each push and discarded
151 152 afterward.
152 153 """
153 154
154 155 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
155 156 bookmarks=()):
156 157 # repo we push from
157 158 self.repo = repo
158 159 self.ui = repo.ui
159 160 # repo we push to
160 161 self.remote = remote
161 162 # force option provided
162 163 self.force = force
163 164 # revs to be pushed (None is "all")
164 165 self.revs = revs
165 166 # bookmark explicitly pushed
166 167 self.bookmarks = bookmarks
167 168 # allow push of new branch
168 169 self.newbranch = newbranch
169 170 # did a local lock get acquired?
170 171 self.locallocked = None
171 172 # step already performed
172 173 # (used to check what steps have been already performed through bundle2)
173 174 self.stepsdone = set()
174 175 # Integer version of the changegroup push result
175 176 # - None means nothing to push
176 177 # - 0 means HTTP error
177 178 # - 1 means we pushed and remote head count is unchanged *or*
178 179 # we have outgoing changesets but refused to push
179 180 # - other values as described by addchangegroup()
180 181 self.cgresult = None
181 182 # Boolean value for the bookmark push
182 183 self.bkresult = None
183 184 # discover.outgoing object (contains common and outgoing data)
184 185 self.outgoing = None
185 186 # all remote heads before the push
186 187 self.remoteheads = None
187 188 # testable as a boolean indicating if any nodes are missing locally.
188 189 self.incoming = None
189 190 # phase changes that must be pushed alongside the changesets
190 191 self.outdatedphases = None
191 192 # phase changes that must be pushed if the changeset push fails
192 193 self.fallbackoutdatedphases = None
193 194 # outgoing obsmarkers
194 195 self.outobsmarkers = set()
195 196 # outgoing bookmarks
196 197 self.outbookmarks = []
197 198 # transaction manager
198 199 self.trmanager = None
199 200 # map { pushkey partid -> callback handling failure}
200 201 # used to handle exception from mandatory pushkey part failure
201 202 self.pkfailcb = {}
202 203
203 204 @util.propertycache
204 205 def futureheads(self):
205 206 """future remote heads if the changeset push succeeds"""
206 207 return self.outgoing.missingheads
207 208
208 209 @util.propertycache
209 210 def fallbackheads(self):
210 211 """future remote heads if the changeset push fails"""
211 212 if self.revs is None:
212 213 # no revs targeted for push; all common heads are relevant
213 214 return self.outgoing.commonheads
214 215 unfi = self.repo.unfiltered()
215 216 # I want cheads = heads(::missingheads and ::commonheads)
216 217 # (missingheads is revs with secret changeset filtered out)
217 218 #
218 219 # This can be expressed as:
219 220 # cheads = ( (missingheads and ::commonheads)
220 221 # + (commonheads and ::missingheads))"
221 222 # )
222 223 #
223 224 # while trying to push we already computed the following:
224 225 # common = (::commonheads)
225 226 # missing = ((commonheads::missingheads) - commonheads)
226 227 #
227 228 # We can pick:
228 229 # * missingheads part of common (::commonheads)
229 230 common = self.outgoing.common
230 231 nm = self.repo.changelog.nodemap
231 232 cheads = [node for node in self.revs if nm[node] in common]
232 233 # and
233 234 # * commonheads parents on missing
234 235 revset = unfi.set('%ln and parents(roots(%ln))',
235 236 self.outgoing.commonheads,
236 237 self.outgoing.missing)
237 238 cheads.extend(c.node() for c in revset)
238 239 return cheads
239 240
240 241 @property
241 242 def commonheads(self):
242 243 """set of all common heads after changeset bundle push"""
243 244 if self.cgresult:
244 245 return self.futureheads
245 246 else:
246 247 return self.fallbackheads
247 248
248 249 # mapping of message used when pushing bookmark
249 250 bookmsgmap = {'update': (_("updating bookmark %s\n"),
250 251 _('updating bookmark %s failed!\n')),
251 252 'export': (_("exporting bookmark %s\n"),
252 253 _('exporting bookmark %s failed!\n')),
253 254 'delete': (_("deleting remote bookmark %s\n"),
254 255 _('deleting remote bookmark %s failed!\n')),
255 256 }
256 257
257 258
258 259 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
259 260 '''Push outgoing changesets (limited by revs) from a local
260 261 repository to remote. Return an integer:
261 262 - None means nothing to push
262 263 - 0 means HTTP error
263 264 - 1 means we pushed and remote head count is unchanged *or*
264 265 we have outgoing changesets but refused to push
265 266 - other values as described by addchangegroup()
266 267 '''
267 268 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
268 269 if pushop.remote.local():
269 270 missing = (set(pushop.repo.requirements)
270 271 - pushop.remote.local().supported)
271 272 if missing:
272 273 msg = _("required features are not"
273 274 " supported in the destination:"
274 275 " %s") % (', '.join(sorted(missing)))
275 276 raise error.Abort(msg)
276 277
277 278 # there are two ways to push to remote repo:
278 279 #
279 280 # addchangegroup assumes local user can lock remote
280 281 # repo (local filesystem, old ssh servers).
281 282 #
282 283 # unbundle assumes local user cannot lock remote repo (new ssh
283 284 # servers, http servers).
284 285
285 286 if not pushop.remote.canpush():
286 287 raise error.Abort(_("destination does not support push"))
287 288 # get local lock as we might write phase data
288 289 localwlock = locallock = None
289 290 try:
290 291 # bundle2 push may receive a reply bundle touching bookmarks or other
291 292 # things requiring the wlock. Take it now to ensure proper ordering.
292 293 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
293 294 if _canusebundle2(pushop) and maypushback:
294 295 localwlock = pushop.repo.wlock()
295 296 locallock = pushop.repo.lock()
296 297 pushop.locallocked = True
297 298 except IOError as err:
298 299 pushop.locallocked = False
299 300 if err.errno != errno.EACCES:
300 301 raise
301 302 # source repo cannot be locked.
302 303 # We do not abort the push, but just disable the local phase
303 304 # synchronisation.
304 305 msg = 'cannot lock source repository: %s\n' % err
305 306 pushop.ui.debug(msg)
306 307 try:
307 308 if pushop.locallocked:
308 309 pushop.trmanager = transactionmanager(repo,
309 310 'push-response',
310 311 pushop.remote.url())
311 312 pushop.repo.checkpush(pushop)
312 313 lock = None
313 314 unbundle = pushop.remote.capable('unbundle')
314 315 if not unbundle:
315 316 lock = pushop.remote.lock()
316 317 try:
317 318 _pushdiscovery(pushop)
318 319 if _canusebundle2(pushop):
319 320 _pushbundle2(pushop)
320 321 _pushchangeset(pushop)
321 322 _pushsyncphase(pushop)
322 323 _pushobsolete(pushop)
323 324 _pushbookmark(pushop)
324 325 finally:
325 326 if lock is not None:
326 327 lock.release()
327 328 if pushop.trmanager:
328 329 pushop.trmanager.close()
329 330 finally:
330 331 if pushop.trmanager:
331 332 pushop.trmanager.release()
332 333 if locallock is not None:
333 334 locallock.release()
334 335 if localwlock is not None:
335 336 localwlock.release()
336 337
337 338 return pushop
338 339
339 340 # list of steps to perform discovery before push
340 341 pushdiscoveryorder = []
341 342
342 343 # Mapping between step name and function
343 344 #
344 345 # This exists to help extensions wrap steps if necessary
345 346 pushdiscoverymapping = {}
346 347
347 348 def pushdiscovery(stepname):
348 349 """decorator for function performing discovery before push
349 350
350 351 The function is added to the step -> function mapping and appended to the
351 352 list of steps. Beware that decorated functions will be added in order (this
352 353 may matter).
353 354
354 355 You can only use this decorator for a new step; if you want to wrap a step
355 356 from an extension, change the pushdiscovery dictionary directly."""
356 357 def dec(func):
357 358 assert stepname not in pushdiscoverymapping
358 359 pushdiscoverymapping[stepname] = func
359 360 pushdiscoveryorder.append(stepname)
360 361 return func
361 362 return dec
362 363
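As a concrete illustration of the registration pattern, a hypothetical extension step might be declared like this (the step name and body are invented):

    @pushdiscovery('constraints')
    def _pushdiscoveryconstraints(pushop):
        # Appended to pushdiscoveryorder, so it runs after the
        # built-in steps registered before it.
        pushop.ui.debug('running hypothetical discovery step\n')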
363 364 def _pushdiscovery(pushop):
364 365 """Run all discovery steps"""
365 366 for stepname in pushdiscoveryorder:
366 367 step = pushdiscoverymapping[stepname]
367 368 step(pushop)
368 369
369 370 @pushdiscovery('changeset')
370 371 def _pushdiscoverychangeset(pushop):
371 372 """discover the changeset that need to be pushed"""
372 373 fci = discovery.findcommonincoming
373 374 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
374 375 common, inc, remoteheads = commoninc
375 376 fco = discovery.findcommonoutgoing
376 377 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
377 378 commoninc=commoninc, force=pushop.force)
378 379 pushop.outgoing = outgoing
379 380 pushop.remoteheads = remoteheads
380 381 pushop.incoming = inc
381 382
382 383 @pushdiscovery('phase')
383 384 def _pushdiscoveryphase(pushop):
384 385 """discover the phase that needs to be pushed
385 386
386 387 (computed for both success and failure case for changesets push)"""
387 388 outgoing = pushop.outgoing
388 389 unfi = pushop.repo.unfiltered()
389 390 remotephases = pushop.remote.listkeys('phases')
390 391 publishing = remotephases.get('publishing', False)
391 392 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
392 393 and remotephases # server supports phases
393 394 and not pushop.outgoing.missing # no changesets to be pushed
394 395 and publishing):
395 396 # When:
396 397 # - this is a subrepo push
397 398 # - and the remote supports phases
398 399 # - and no changesets are to be pushed
399 400 # - and the remote is publishing
400 401 # We may be in the issue 3871 case!
401 402 # We drop the phase synchronisation that would otherwise be done as
402 403 # a courtesy, so that changesets that may be draft on the remote get
403 404 # published.
404 405 remotephases = {'publishing': 'True'}
405 406 ana = phases.analyzeremotephases(pushop.repo,
406 407 pushop.fallbackheads,
407 408 remotephases)
408 409 pheads, droots = ana
409 410 extracond = ''
410 411 if not publishing:
411 412 extracond = ' and public()'
412 413 revset = 'heads((%%ln::%%ln) %s)' % extracond
413 414 # Get the list of all revs draft on the remote but public here.
414 415 # XXX Beware that the revset breaks if droots is not strictly
415 416 # XXX roots; we may want to ensure it is, but that is costly.
416 417 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
417 418 if not outgoing.missing:
418 419 future = fallback
419 420 else:
420 421 # adds changeset we are going to push as draft
421 422 #
422 423 # should not be necessary for a publishing server, but because of an
423 424 # issue fixed in xxxxx we have to do it anyway.
424 425 fdroots = list(unfi.set('roots(%ln + %ln::)',
425 426 outgoing.missing, droots))
426 427 fdroots = [f.node() for f in fdroots]
427 428 future = list(unfi.set(revset, fdroots, pushop.futureheads))
428 429 pushop.outdatedphases = future
429 430 pushop.fallbackoutdatedphases = fallback
430 431
431 432 @pushdiscovery('obsmarker')
432 433 def _pushdiscoveryobsmarkers(pushop):
433 434 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
434 435 and pushop.repo.obsstore
435 436 and 'obsolete' in pushop.remote.listkeys('namespaces')):
436 437 repo = pushop.repo
437 438 # very naive computation that can be quite expensive on big repos.
438 439 # However, evolution is currently slow on them anyway.
439 440 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
440 441 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
441 442
442 443 @pushdiscovery('bookmarks')
443 444 def _pushdiscoverybookmarks(pushop):
444 445 ui = pushop.ui
445 446 repo = pushop.repo.unfiltered()
446 447 remote = pushop.remote
447 448 ui.debug("checking for updated bookmarks\n")
448 449 ancestors = ()
449 450 if pushop.revs:
450 451 revnums = map(repo.changelog.rev, pushop.revs)
451 452 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
452 453 remotebookmark = remote.listkeys('bookmarks')
453 454
454 455 explicit = set(pushop.bookmarks)
455 456
456 457 comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
457 458 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
458 459 for b, scid, dcid in advsrc:
459 460 if b in explicit:
460 461 explicit.remove(b)
461 462 if not ancestors or repo[scid].rev() in ancestors:
462 463 pushop.outbookmarks.append((b, dcid, scid))
463 464 # search added bookmark
464 465 for b, scid, dcid in addsrc:
465 466 if b in explicit:
466 467 explicit.remove(b)
467 468 pushop.outbookmarks.append((b, '', scid))
468 469 # search for overwritten bookmark
469 470 for b, scid, dcid in advdst + diverge + differ:
470 471 if b in explicit:
471 472 explicit.remove(b)
472 473 pushop.outbookmarks.append((b, dcid, scid))
473 474 # search for bookmark to delete
474 475 for b, scid, dcid in adddst:
475 476 if b in explicit:
476 477 explicit.remove(b)
477 478 # treat as "deleted locally"
478 479 pushop.outbookmarks.append((b, dcid, ''))
479 480 # identical bookmarks shouldn't get reported
480 481 for b, scid, dcid in same:
481 482 if b in explicit:
482 483 explicit.remove(b)
483 484
484 485 if explicit:
485 486 explicit = sorted(explicit)
486 487 # we should probably list all of them
487 488 ui.warn(_('bookmark %s does not exist on the local '
488 489 'or remote repository!\n') % explicit[0])
489 490 pushop.bkresult = 2
490 491
491 492 pushop.outbookmarks.sort()
492 493
493 494 def _pushcheckoutgoing(pushop):
494 495 outgoing = pushop.outgoing
495 496 unfi = pushop.repo.unfiltered()
496 497 if not outgoing.missing:
497 498 # nothing to push
498 499 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
499 500 return False
500 501 # something to push
501 502 if not pushop.force:
502 503 # if repo.obsstore is falsy --> no obsolete markers;
503 504 # we can then skip the iteration
504 505 if unfi.obsstore:
505 506 # these messages are here because of the 80-char line limit
506 507 mso = _("push includes obsolete changeset: %s!")
507 508 mst = {"unstable": _("push includes unstable changeset: %s!"),
508 509 "bumped": _("push includes bumped changeset: %s!"),
509 510 "divergent": _("push includes divergent changeset: %s!")}
510 511 # If we are to push and there is at least one
511 512 # obsolete or unstable changeset in missing, at
512 513 # least one of the missingheads will be obsolete or
513 514 # unstable. So checking only the heads is OK.
514 515 for node in outgoing.missingheads:
515 516 ctx = unfi[node]
516 517 if ctx.obsolete():
517 518 raise error.Abort(mso % ctx)
518 519 elif ctx.troubled():
519 520 raise error.Abort(mst[ctx.troubles()[0]] % ctx)
520 521
521 522 # internal config: bookmarks.pushing
522 523 newbm = pushop.ui.configlist('bookmarks', 'pushing')
523 524 discovery.checkheads(unfi, pushop.remote, outgoing,
524 525 pushop.remoteheads,
525 526 pushop.newbranch,
526 527 bool(pushop.incoming),
527 528 newbm)
528 529 return True
529 530
530 531 # List of names of steps to perform for an outgoing bundle2, order matters.
531 532 b2partsgenorder = []
532 533
533 534 # Mapping between step name and function
534 535 #
535 536 # This exists to help extensions wrap steps if necessary
536 537 b2partsgenmapping = {}
537 538
538 539 def b2partsgenerator(stepname, idx=None):
539 540 """decorator for function generating bundle2 part
540 541
541 542 The function is added to the step -> function mapping and appended to the
542 543 list of steps. Beware that decorated functions will be added in order
543 544 (this may matter).
544 545
545 546 You can only use this decorator for new steps; if you want to wrap a step
546 547 from an extension, change the b2partsgenmapping dictionary directly."""
547 548 def dec(func):
548 549 assert stepname not in b2partsgenmapping
549 550 b2partsgenmapping[stepname] = func
550 551 if idx is None:
551 552 b2partsgenorder.append(stepname)
552 553 else:
553 554 b2partsgenorder.insert(idx, stepname)
554 555 return func
555 556 return dec
556 557
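Compared to pushdiscovery above, this decorator also accepts an idx for explicit ordering. A hypothetical part generator inserted ahead of the built-in steps (name and body invented):

    @b2partsgenerator('mypart', idx=0)
    def _pushb2mypart(pushop, bundler):
        # idx=0 inserts the step at the front of b2partsgenorder, so this
        # part is generated before the built-in 'changeset' part.
        if 'mypart' in pushop.stepsdone:
            return
        pushop.stepsdone.add('mypart')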
557 558 def _pushb2ctxcheckheads(pushop, bundler):
558 559 """Generate race condition checking parts
559 560
560 561 Exists as an independent function to aid extensions
561 562 """
562 563 if not pushop.force:
563 564 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
564 565
565 566 @b2partsgenerator('changeset')
566 567 def _pushb2ctx(pushop, bundler):
567 568 """handle changegroup push through bundle2
568 569
569 570 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
570 571 """
571 572 if 'changesets' in pushop.stepsdone:
572 573 return
573 574 pushop.stepsdone.add('changesets')
574 575 # Send known heads to the server for race detection.
575 576 if not _pushcheckoutgoing(pushop):
576 577 return
577 578 pushop.repo.prepushoutgoinghooks(pushop.repo,
578 579 pushop.remote,
579 580 pushop.outgoing)
580 581
581 582 _pushb2ctxcheckheads(pushop, bundler)
582 583
583 584 b2caps = bundle2.bundle2caps(pushop.remote)
584 585 version = None
585 586 cgversions = b2caps.get('changegroup')
586 587 if not cgversions: # 3.1 and 3.2 ship with an empty value
587 588 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
588 589 pushop.outgoing)
589 590 else:
590 591 cgversions = [v for v in cgversions if v in changegroup.packermap]
591 592 if not cgversions:
592 593 raise ValueError(_('no common changegroup version'))
593 594 version = max(cgversions)
594 595 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
595 596 pushop.outgoing,
596 597 version=version)
597 598 cgpart = bundler.newpart('changegroup', data=cg)
598 599 if version is not None:
599 600 cgpart.addparam('version', version)
600 601 def handlereply(op):
601 602 """extract addchangegroup returns from server reply"""
602 603 cgreplies = op.records.getreplies(cgpart.id)
603 604 assert len(cgreplies['changegroup']) == 1
604 605 pushop.cgresult = cgreplies['changegroup'][0]['return']
605 606 return handlereply
606 607
607 608 @b2partsgenerator('phase')
608 609 def _pushb2phases(pushop, bundler):
609 610 """handle phase push through bundle2"""
610 611 if 'phases' in pushop.stepsdone:
611 612 return
612 613 b2caps = bundle2.bundle2caps(pushop.remote)
613 614 if 'pushkey' not in b2caps:
614 615 return
615 616 pushop.stepsdone.add('phases')
616 617 part2node = []
617 618
618 619 def handlefailure(pushop, exc):
619 620 targetid = int(exc.partid)
620 621 for partid, node in part2node:
621 622 if partid == targetid:
622 623 raise error.Abort(_('updating %s to public failed') % node)
623 624
624 625 enc = pushkey.encode
625 626 for newremotehead in pushop.outdatedphases:
626 627 part = bundler.newpart('pushkey')
627 628 part.addparam('namespace', enc('phases'))
628 629 part.addparam('key', enc(newremotehead.hex()))
629 630 part.addparam('old', enc(str(phases.draft)))
630 631 part.addparam('new', enc(str(phases.public)))
631 632 part2node.append((part.id, newremotehead))
632 633 pushop.pkfailcb[part.id] = handlefailure
633 634
634 635 def handlereply(op):
635 636 for partid, node in part2node:
636 637 partrep = op.records.getreplies(partid)
637 638 results = partrep['pushkey']
638 639 assert len(results) <= 1
639 640 msg = None
640 641 if not results:
641 642 msg = _('server ignored update of %s to public!\n') % node
642 643 elif not int(results[0]['return']):
643 644 msg = _('updating %s to public failed!\n') % node
644 645 if msg is not None:
645 646 pushop.ui.warn(msg)
646 647 return handlereply
647 648
648 649 @b2partsgenerator('obsmarkers')
649 650 def _pushb2obsmarkers(pushop, bundler):
650 651 if 'obsmarkers' in pushop.stepsdone:
651 652 return
652 653 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
653 654 if obsolete.commonversion(remoteversions) is None:
654 655 return
655 656 pushop.stepsdone.add('obsmarkers')
656 657 if pushop.outobsmarkers:
657 658 markers = sorted(pushop.outobsmarkers)
658 659 buildobsmarkerspart(bundler, markers)
659 660
660 661 @b2partsgenerator('bookmarks')
661 662 def _pushb2bookmarks(pushop, bundler):
662 663 """handle bookmark push through bundle2"""
663 664 if 'bookmarks' in pushop.stepsdone:
664 665 return
665 666 b2caps = bundle2.bundle2caps(pushop.remote)
666 667 if 'pushkey' not in b2caps:
667 668 return
668 669 pushop.stepsdone.add('bookmarks')
669 670 part2book = []
670 671 enc = pushkey.encode
671 672
672 673 def handlefailure(pushop, exc):
673 674 targetid = int(exc.partid)
674 675 for partid, book, action in part2book:
675 676 if partid == targetid:
676 677 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
677 678 # we should not be called for parts we did not generate
678 679 assert False
679 680
680 681 for book, old, new in pushop.outbookmarks:
681 682 part = bundler.newpart('pushkey')
682 683 part.addparam('namespace', enc('bookmarks'))
683 684 part.addparam('key', enc(book))
684 685 part.addparam('old', enc(old))
685 686 part.addparam('new', enc(new))
686 687 action = 'update'
687 688 if not old:
688 689 action = 'export'
689 690 elif not new:
690 691 action = 'delete'
691 692 part2book.append((part.id, book, action))
692 693 pushop.pkfailcb[part.id] = handlefailure
693 694
694 695 def handlereply(op):
695 696 ui = pushop.ui
696 697 for partid, book, action in part2book:
697 698 partrep = op.records.getreplies(partid)
698 699 results = partrep['pushkey']
699 700 assert len(results) <= 1
700 701 if not results:
701 702 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
702 703 else:
703 704 ret = int(results[0]['return'])
704 705 if ret:
705 706 ui.status(bookmsgmap[action][0] % book)
706 707 else:
707 708 ui.warn(bookmsgmap[action][1] % book)
708 709 if pushop.bkresult is not None:
709 710 pushop.bkresult = 1
710 711 return handlereply
711 712
712 713
713 714 def _pushbundle2(pushop):
714 715 """push data to the remote using bundle2
715 716
716 717 The only currently supported type of data is changegroup but this will
717 718 evolve in the future."""
718 719 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
719 720 pushback = (pushop.trmanager
720 721 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
721 722
722 723 # create reply capability
723 724 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
724 725 allowpushback=pushback))
725 726 bundler.newpart('replycaps', data=capsblob)
726 727 replyhandlers = []
727 728 for partgenname in b2partsgenorder:
728 729 partgen = b2partsgenmapping[partgenname]
729 730 ret = partgen(pushop, bundler)
730 731 if callable(ret):
731 732 replyhandlers.append(ret)
732 733 # do not push if nothing to push
733 734 if bundler.nbparts <= 1:
734 735 return
735 736 stream = util.chunkbuffer(bundler.getchunks())
736 737 try:
737 738 try:
738 739 reply = pushop.remote.unbundle(stream, ['force'], 'push')
739 740 except error.BundleValueError as exc:
740 741 raise error.Abort('missing support for %s' % exc)
741 742 try:
742 743 trgetter = None
743 744 if pushback:
744 745 trgetter = pushop.trmanager.transaction
745 746 op = bundle2.processbundle(pushop.repo, reply, trgetter)
746 747 except error.BundleValueError as exc:
747 748 raise error.Abort('missing support for %s' % exc)
748 749 except error.PushkeyFailed as exc:
749 750 partid = int(exc.partid)
750 751 if partid not in pushop.pkfailcb:
751 752 raise
752 753 pushop.pkfailcb[partid](pushop, exc)
753 754 for rephand in replyhandlers:
754 755 rephand(op)
755 756
756 757 def _pushchangeset(pushop):
757 758 """Make the actual push of changeset bundle to remote repo"""
758 759 if 'changesets' in pushop.stepsdone:
759 760 return
760 761 pushop.stepsdone.add('changesets')
761 762 if not _pushcheckoutgoing(pushop):
762 763 return
763 764 pushop.repo.prepushoutgoinghooks(pushop.repo,
764 765 pushop.remote,
765 766 pushop.outgoing)
766 767 outgoing = pushop.outgoing
767 768 unbundle = pushop.remote.capable('unbundle')
768 769 # TODO: get bundlecaps from remote
769 770 bundlecaps = None
770 771 # create a changegroup from local
771 772 if pushop.revs is None and not (outgoing.excluded
772 773 or pushop.repo.changelog.filteredrevs):
773 774 # push everything,
774 775 # use the fast path, no race possible on push
775 776 bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
776 777 cg = changegroup.getsubset(pushop.repo,
777 778 outgoing,
778 779 bundler,
779 780 'push',
780 781 fastpath=True)
781 782 else:
782 783 cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
783 784 bundlecaps)
784 785
785 786 # apply changegroup to remote
786 787 if unbundle:
787 788 # The local repo finds heads on the server, finds out what
788 789 # revs it must push. Once revs are transferred, if the server
789 790 # finds it has different heads (someone else won the
790 791 # commit/push race), the server aborts.
791 792 if pushop.force:
792 793 remoteheads = ['force']
793 794 else:
794 795 remoteheads = pushop.remoteheads
795 796 # ssh: return remote's addchangegroup()
796 797 # http: return remote's addchangegroup() or 0 for error
797 798 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
798 799 pushop.repo.url())
799 800 else:
800 801 # we return an integer indicating remote head count
801 802 # change
802 803 pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
803 804 pushop.repo.url())
804 805
805 806 def _pushsyncphase(pushop):
806 807 """synchronise phase information locally and remotely"""
807 808 cheads = pushop.commonheads
808 809 # even when we don't push, exchanging phase data is useful
809 810 remotephases = pushop.remote.listkeys('phases')
810 811 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
811 812 and remotephases # server supports phases
812 813 and pushop.cgresult is None # nothing was pushed
813 814 and remotephases.get('publishing', False)):
814 815 # When:
815 816 # - this is a subrepo push
816 817 # - and the remote supports phases
817 818 # - and no changeset was pushed
818 819 # - and the remote is publishing
819 820 # We may be in the issue 3871 case!
820 821 # We drop the phase synchronisation that would otherwise be done as
821 822 # a courtesy, so that changesets that may be draft on the remote get
822 823 # published.
823 824 remotephases = {'publishing': 'True'}
824 825 if not remotephases: # old server, or public-only reply from a non-publishing one
825 826 _localphasemove(pushop, cheads)
826 827 # don't push any phase data as there is nothing to push
827 828 else:
828 829 ana = phases.analyzeremotephases(pushop.repo, cheads,
829 830 remotephases)
830 831 pheads, droots = ana
831 832 ### Apply remote phase on local
832 833 if remotephases.get('publishing', False):
833 834 _localphasemove(pushop, cheads)
834 835 else: # publish = False
835 836 _localphasemove(pushop, pheads)
836 837 _localphasemove(pushop, cheads, phases.draft)
837 838 ### Apply local phase on remote
838 839
839 840 if pushop.cgresult:
840 841 if 'phases' in pushop.stepsdone:
841 842 # phases already pushed through bundle2
842 843 return
843 844 outdated = pushop.outdatedphases
844 845 else:
845 846 outdated = pushop.fallbackoutdatedphases
846 847
847 848 pushop.stepsdone.add('phases')
848 849
849 850 # filter heads already turned public by the push
850 851 outdated = [c for c in outdated if c.node() not in pheads]
851 852 # fallback to independent pushkey command
852 853 for newremotehead in outdated:
853 854 r = pushop.remote.pushkey('phases',
854 855 newremotehead.hex(),
855 856 str(phases.draft),
856 857 str(phases.public))
857 858 if not r:
858 859 pushop.ui.warn(_('updating %s to public failed!\n')
859 860 % newremotehead)
860 861
861 862 def _localphasemove(pushop, nodes, phase=phases.public):
862 863 """move <nodes> to <phase> in the local source repo"""
863 864 if pushop.trmanager:
864 865 phases.advanceboundary(pushop.repo,
865 866 pushop.trmanager.transaction(),
866 867 phase,
867 868 nodes)
868 869 else:
869 870 # repo is not locked, do not change any phases!
870 871 # Informs the user that phases should have been moved when
871 872 # applicable.
872 873 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
873 874 phasestr = phases.phasenames[phase]
874 875 if actualmoves:
875 876 pushop.ui.status(_('cannot lock source repo, skipping '
876 877 'local %s phase update\n') % phasestr)
877 878
878 879 def _pushobsolete(pushop):
879 880 """utility function to push obsolete markers to a remote"""
880 881 if 'obsmarkers' in pushop.stepsdone:
881 882 return
882 883 repo = pushop.repo
883 884 remote = pushop.remote
884 885 pushop.stepsdone.add('obsmarkers')
885 886 if pushop.outobsmarkers:
886 887 pushop.ui.debug('try to push obsolete markers to remote\n')
887 888 rslts = []
888 889 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
889 890 for key in sorted(remotedata, reverse=True):
890 891 # reverse sort to ensure we end with dump0
891 892 data = remotedata[key]
892 893 rslts.append(remote.pushkey('obsolete', key, '', data))
893 894 if [r for r in rslts if not r]:
894 895 msg = _('failed to push some obsolete markers!\n')
895 896 repo.ui.warn(msg)
896 897
897 898 def _pushbookmark(pushop):
898 899 """Update bookmark position on remote"""
899 900 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
900 901 return
901 902 pushop.stepsdone.add('bookmarks')
902 903 ui = pushop.ui
903 904 remote = pushop.remote
904 905
905 906 for b, old, new in pushop.outbookmarks:
906 907 action = 'update'
907 908 if not old:
908 909 action = 'export'
909 910 elif not new:
910 911 action = 'delete'
911 912 if remote.pushkey('bookmarks', b, old, new):
912 913 ui.status(bookmsgmap[action][0] % b)
913 914 else:
914 915 ui.warn(bookmsgmap[action][1] % b)
915 916 # discovery can have set the value from an invalid entry
916 917 if pushop.bkresult is not None:
917 918 pushop.bkresult = 1
918 919
919 920 class pulloperation(object):
920 921 """A object that represent a single pull operation
921 922
922 923 It purpose is to carry pull related state and very common operation.
923 924
924 925 A new should be created at the beginning of each pull and discarded
925 926 afterward.
926 927 """
927 928
928 929 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
929 930 remotebookmarks=None, streamclonerequested=None):
930 931 # repo we pull into
931 932 self.repo = repo
932 933 # repo we pull from
933 934 self.remote = remote
934 935 # revision we try to pull (None is "all")
935 936 self.heads = heads
936 937 # bookmark pulled explicitly
937 938 self.explicitbookmarks = bookmarks
938 939 # do we force pull?
939 940 self.force = force
940 941 # whether a streaming clone was requested
941 942 self.streamclonerequested = streamclonerequested
942 943 # transaction manager
943 944 self.trmanager = None
944 945 # set of common changesets between local and remote before pull
945 946 self.common = None
946 947 # set of pulled heads
947 948 self.rheads = None
948 949 # list of missing changesets to fetch remotely
949 950 self.fetch = None
950 951 # remote bookmarks data
951 952 self.remotebookmarks = remotebookmarks
952 953 # result of changegroup pulling (used as return code by pull)
953 954 self.cgresult = None
954 955 # list of steps already done
955 956 self.stepsdone = set()
956 957
957 958 @util.propertycache
958 959 def pulledsubset(self):
959 960 """heads of the set of changeset target by the pull"""
960 961 # compute target subset
961 962 if self.heads is None:
962 963 # We pulled everything possible
963 964 # sync on everything common
964 965 c = set(self.common)
965 966 ret = list(self.common)
966 967 for n in self.rheads:
967 968 if n not in c:
968 969 ret.append(n)
969 970 return ret
970 971 else:
971 972 # We pulled a specific subset
972 973 # sync on this subset
973 974 return self.heads
974 975
975 976 @util.propertycache
976 977 def canusebundle2(self):
977 978 return _canusebundle2(self)
978 979
979 980 @util.propertycache
980 981 def remotebundle2caps(self):
981 982 return bundle2.bundle2caps(self.remote)
982 983
983 984 def gettransaction(self):
984 985 # deprecated; talk to trmanager directly
985 986 return self.trmanager.transaction()
986 987
987 988 class transactionmanager(object):
988 989 """An object to manage the life cycle of a transaction
989 990
990 991 It creates the transaction on demand and calls the appropriate hooks when
991 992 closing the transaction."""
992 993 def __init__(self, repo, source, url):
993 994 self.repo = repo
994 995 self.source = source
995 996 self.url = url
996 997 self._tr = None
997 998
998 999 def transaction(self):
999 1000 """Return an open transaction object, constructing if necessary"""
1000 1001 if not self._tr:
1001 1002 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1002 1003 self._tr = self.repo.transaction(trname)
1003 1004 self._tr.hookargs['source'] = self.source
1004 1005 self._tr.hookargs['url'] = self.url
1005 1006 return self._tr
1006 1007
1007 1008 def close(self):
1008 1009 """close transaction if created"""
1009 1010 if self._tr is not None:
1010 1011 self._tr.close()
1011 1012
1012 1013 def release(self):
1013 1014 """release transaction if created"""
1014 1015 if self._tr is not None:
1015 1016 self._tr.release()
1016 1017
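The intended lifecycle, condensed from how pull() below drives it (repo and remote are assumed to exist already):

    trmanager = transactionmanager(repo, 'pull', remote.url())
    try:
        tr = trmanager.transaction()  # opened lazily, on first use
        # ... apply changegroups, phases and bookmarks under tr ...
        trmanager.close()             # commits only if a transaction was opened
    finally:
        trmanager.release()           # rolls back if still open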
1017 1018 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1018 1019 streamclonerequested=None):
1019 1020 """Fetch repository data from a remote.
1020 1021
1021 1022 This is the main function used to retrieve data from a remote repository.
1022 1023
1023 1024 ``repo`` is the local repository to clone into.
1024 1025 ``remote`` is a peer instance.
1025 1026 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1026 1027 default) means to pull everything from the remote.
1027 1028 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1028 1029 default, all remote bookmarks are pulled.
1029 1030 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1030 1031 initialization.
1031 1032 ``streamclonerequested`` is a boolean indicating whether a "streaming
1032 1033 clone" is requested. A "streaming clone" is essentially a raw file copy
1033 1034 of revlogs from the server. This only works when the local repository is
1034 1035 empty. The default value of ``None`` means to respect the server
1035 1036 configuration for preferring stream clones.
1036 1037
1037 1038 Returns the ``pulloperation`` created for this pull.
1038 1039 """
1039 1040 if opargs is None:
1040 1041 opargs = {}
1041 1042 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1042 1043 streamclonerequested=streamclonerequested, **opargs)
1043 1044 if pullop.remote.local():
1044 1045 missing = set(pullop.remote.requirements) - pullop.repo.supported
1045 1046 if missing:
1046 1047 msg = _("required features are not"
1047 1048 " supported in the destination:"
1048 1049 " %s") % (', '.join(sorted(missing)))
1049 1050 raise error.Abort(msg)
1050 1051
1051 1052 lock = pullop.repo.lock()
1052 1053 try:
1053 1054 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1054 1055 streamclone.maybeperformlegacystreamclone(pullop)
1055 1056 # This should ideally be in _pullbundle2(). However, it needs to run
1056 1057 # before discovery to avoid extra work.
1057 1058 _maybeapplyclonebundle(pullop)
1058 1059 _pulldiscovery(pullop)
1059 1060 if pullop.canusebundle2:
1060 1061 _pullbundle2(pullop)
1061 1062 _pullchangeset(pullop)
1062 1063 _pullphase(pullop)
1063 1064 _pullbookmarks(pullop)
1064 1065 _pullobsolete(pullop)
1065 1066 pullop.trmanager.close()
1066 1067 finally:
1067 1068 pullop.trmanager.release()
1068 1069 lock.release()
1069 1070
1070 1071 return pullop
1071 1072
1072 1073 # list of steps to perform discovery before pull
1073 1074 pulldiscoveryorder = []
1074 1075
1075 1076 # Mapping between step name and function
1076 1077 #
1077 1078 # This exists to help extensions wrap steps if necessary
1078 1079 pulldiscoverymapping = {}
1079 1080
1080 1081 def pulldiscovery(stepname):
1081 1082 """decorator for function performing discovery before pull
1082 1083
1083 1084 The function is added to the step -> function mapping and appended to the
1084 1085 list of steps. Beware that decorated functions will be added in order (this
1085 1086 may matter).
1086 1087
1087 1088 You can only use this decorator for a new step; if you want to wrap a step
1088 1089 from an extension, change the pulldiscovery dictionary directly."""
1089 1090 def dec(func):
1090 1091 assert stepname not in pulldiscoverymapping
1091 1092 pulldiscoverymapping[stepname] = func
1092 1093 pulldiscoveryorder.append(stepname)
1093 1094 return func
1094 1095 return dec
1095 1096
1096 1097 def _pulldiscovery(pullop):
1097 1098 """Run all discovery steps"""
1098 1099 for stepname in pulldiscoveryorder:
1099 1100 step = pulldiscoverymapping[stepname]
1100 1101 step(pullop)
1101 1102
1102 1103 @pulldiscovery('b1:bookmarks')
1103 1104 def _pullbookmarkbundle1(pullop):
1104 1105 """fetch bookmark data in bundle1 case
1105 1106
1106 1107 If not using bundle2, we have to fetch bookmarks before changeset
1107 1108 discovery to reduce the chance and impact of race conditions."""
1108 1109 if pullop.remotebookmarks is not None:
1109 1110 return
1110 1111 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1111 1112 # all known bundle2 servers now support listkeys, but let's be nice to
1112 1113 # new implementations.
1113 1114 return
1114 1115 pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1115 1116
1116 1117
1117 1118 @pulldiscovery('changegroup')
1118 1119 def _pulldiscoverychangegroup(pullop):
1119 1120 """discovery phase for the pull
1120 1121
1121 1122 Currently handles changeset discovery only; will change to handle all
1122 1123 discovery at some point."""
1123 1124 tmp = discovery.findcommonincoming(pullop.repo,
1124 1125 pullop.remote,
1125 1126 heads=pullop.heads,
1126 1127 force=pullop.force)
1127 1128 common, fetch, rheads = tmp
1128 1129 nm = pullop.repo.unfiltered().changelog.nodemap
1129 1130 if fetch and rheads:
1130 1131 # If a remote head is filtered locally, let's drop it from the unknown
1131 1132 # remote heads and put it back in common.
1132 1133 #
1133 1134 # This is a hackish solution to catch most of the "common but locally
1134 1135 # hidden" situations. We do not perform discovery on the unfiltered
1135 1136 # repository because it ends up doing a pathological number of round
1136 1137 # trips for a huge amount of changesets we do not care about.
1137 1138 #
1138 1139 # If a set of such "common but filtered" changesets exists on the server
1139 1140 # but does not include a remote head, we'll not be able to detect it,
1140 1141 scommon = set(common)
1141 1142 filteredrheads = []
1142 1143 for n in rheads:
1143 1144 if n in nm:
1144 1145 if n not in scommon:
1145 1146 common.append(n)
1146 1147 else:
1147 1148 filteredrheads.append(n)
1148 1149 if not filteredrheads:
1149 1150 fetch = []
1150 1151 rheads = filteredrheads
1151 1152 pullop.common = common
1152 1153 pullop.fetch = fetch
1153 1154 pullop.rheads = rheads
1154 1155
1155 1156 def _pullbundle2(pullop):
1156 1157 """pull data using bundle2
1157 1158
1158 1159 For now, the only supported data is the changegroup."""
1159 1160 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1160 1161
1161 1162 streaming, streamreqs = streamclone.canperformstreamclone(pullop)
1162 1163
1163 1164 # pulling changegroup
1164 1165 pullop.stepsdone.add('changegroup')
1165 1166
1166 1167 kwargs['common'] = pullop.common
1167 1168 kwargs['heads'] = pullop.heads or pullop.rheads
1168 1169 kwargs['cg'] = pullop.fetch
1169 1170 if 'listkeys' in pullop.remotebundle2caps:
1170 1171 kwargs['listkeys'] = ['phase']
1171 1172 if pullop.remotebookmarks is None:
1172 1173 # make sure to always include bookmark data when migrating
1173 1174 # `hg incoming --bundle` to using this function.
1174 1175 kwargs['listkeys'].append('bookmarks')
1175 1176 if streaming:
1176 1177 pullop.repo.ui.status(_('streaming all changes\n'))
1177 1178 elif not pullop.fetch:
1178 1179 pullop.repo.ui.status(_("no changes found\n"))
1179 1180 pullop.cgresult = 0
1180 1181 else:
1181 1182 if pullop.heads is None and list(pullop.common) == [nullid]:
1182 1183 pullop.repo.ui.status(_("requesting all changes\n"))
1183 1184 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1184 1185 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1185 1186 if obsolete.commonversion(remoteversions) is not None:
1186 1187 kwargs['obsmarkers'] = True
1187 1188 pullop.stepsdone.add('obsmarkers')
1188 1189 _pullbundle2extraprepare(pullop, kwargs)
1189 1190 bundle = pullop.remote.getbundle('pull', **kwargs)
1190 1191 try:
1191 1192 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1192 1193 except error.BundleValueError as exc:
1193 1194 raise error.Abort('missing support for %s' % exc)
1194 1195
1195 1196 if pullop.fetch:
1196 1197 results = [cg['return'] for cg in op.records['changegroup']]
1197 1198 pullop.cgresult = changegroup.combineresults(results)
1198 1199
1199 1200 # processing phases change
1200 1201 for namespace, value in op.records['listkeys']:
1201 1202 if namespace == 'phases':
1202 1203 _pullapplyphases(pullop, value)
1203 1204
1204 1205 # processing bookmark update
1205 1206 for namespace, value in op.records['listkeys']:
1206 1207 if namespace == 'bookmarks':
1207 1208 pullop.remotebookmarks = value
1208 1209
1209 1210 # bookmark data were either already there or pulled in the bundle
1210 1211 if pullop.remotebookmarks is not None:
1211 1212 _pullbookmarks(pullop)
1212 1213
1213 1214 def _pullbundle2extraprepare(pullop, kwargs):
1214 1215 """hook function so that extensions can extend the getbundle call"""
1215 1216 pass
1216 1217
1217 1218 def _pullchangeset(pullop):
1218 1219 """pull changeset from unbundle into the local repo"""
1219 1220 # We delay opening the transaction as late as possible so we
1220 1221 # don't open a transaction for nothing and don't break future useful
1221 1222 # rollback calls
1222 1223 if 'changegroup' in pullop.stepsdone:
1223 1224 return
1224 1225 pullop.stepsdone.add('changegroup')
1225 1226 if not pullop.fetch:
1226 1227 pullop.repo.ui.status(_("no changes found\n"))
1227 1228 pullop.cgresult = 0
1228 1229 return
1229 1230 pullop.gettransaction()
1230 1231 if pullop.heads is None and list(pullop.common) == [nullid]:
1231 1232 pullop.repo.ui.status(_("requesting all changes\n"))
1232 1233 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1233 1234 # issue1320, avoid a race if remote changed after discovery
1234 1235 pullop.heads = pullop.rheads
1235 1236
1236 1237 if pullop.remote.capable('getbundle'):
1237 1238 # TODO: get bundlecaps from remote
1238 1239 cg = pullop.remote.getbundle('pull', common=pullop.common,
1239 1240 heads=pullop.heads or pullop.rheads)
1240 1241 elif pullop.heads is None:
1241 1242 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1242 1243 elif not pullop.remote.capable('changegroupsubset'):
1243 1244 raise error.Abort(_("partial pull cannot be done because "
1244 1245 "other repository doesn't support "
1245 1246 "changegroupsubset."))
1246 1247 else:
1247 1248 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1248 1249 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
1249 1250 pullop.remote.url())
1250 1251
1251 1252 def _pullphase(pullop):
1252 1253 # Get remote phases data from remote
1253 1254 if 'phases' in pullop.stepsdone:
1254 1255 return
1255 1256 remotephases = pullop.remote.listkeys('phases')
1256 1257 _pullapplyphases(pullop, remotephases)
1257 1258
1258 1259 def _pullapplyphases(pullop, remotephases):
1259 1260 """apply phase movement from observed remote state"""
1260 1261 if 'phases' in pullop.stepsdone:
1261 1262 return
1262 1263 pullop.stepsdone.add('phases')
1263 1264 publishing = bool(remotephases.get('publishing', False))
1264 1265 if remotephases and not publishing:
1265 1266 # remote is new and unpublishing
1266 1267 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1267 1268 pullop.pulledsubset,
1268 1269 remotephases)
1269 1270 dheads = pullop.pulledsubset
1270 1271 else:
1271 1272 # Remote is old or publishing; all common changesets
1272 1273 # should be seen as public
1273 1274 pheads = pullop.pulledsubset
1274 1275 dheads = []
1275 1276 unfi = pullop.repo.unfiltered()
1276 1277 phase = unfi._phasecache.phase
1277 1278 rev = unfi.changelog.nodemap.get
1278 1279 public = phases.public
1279 1280 draft = phases.draft
1280 1281
1281 1282 # exclude changesets already public locally and update the others
1282 1283 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1283 1284 if pheads:
1284 1285 tr = pullop.gettransaction()
1285 1286 phases.advanceboundary(pullop.repo, tr, public, pheads)
1286 1287
1287 1288 # exclude changesets already draft locally and update the others
1288 1289 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1289 1290 if dheads:
1290 1291 tr = pullop.gettransaction()
1291 1292 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1292 1293
1293 1294 def _pullbookmarks(pullop):
1294 1295 """process the remote bookmark information to update the local one"""
1295 1296 if 'bookmarks' in pullop.stepsdone:
1296 1297 return
1297 1298 pullop.stepsdone.add('bookmarks')
1298 1299 repo = pullop.repo
1299 1300 remotebookmarks = pullop.remotebookmarks
1300 1301 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1301 1302 pullop.remote.url(),
1302 1303 pullop.gettransaction,
1303 1304 explicit=pullop.explicitbookmarks)
1304 1305
1305 1306 def _pullobsolete(pullop):
1306 1307 """utility function to pull obsolete markers from a remote
1307 1308
1308 1309 `gettransaction` is a function that returns the pull transaction, creating
1309 1310 one if necessary. We return the transaction to inform the calling code that
1310 1311 a new transaction has been created (when applicable).
1311 1312
1312 1313 Exists mostly to allow overriding for experimentation purposes."""
1313 1314 if 'obsmarkers' in pullop.stepsdone:
1314 1315 return
1315 1316 pullop.stepsdone.add('obsmarkers')
1316 1317 tr = None
1317 1318 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1318 1319 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1319 1320 remoteobs = pullop.remote.listkeys('obsolete')
1320 1321 if 'dump0' in remoteobs:
1321 1322 tr = pullop.gettransaction()
1322 1323 for key in sorted(remoteobs, reverse=True):
1323 1324 if key.startswith('dump'):
1324 1325 data = base85.b85decode(remoteobs[key])
1325 1326 pullop.repo.obsstore.mergemarkers(tr, data)
1326 1327 pullop.repo.invalidatevolatilesets()
1327 1328 return tr
1328 1329
1329 1330 def caps20to10(repo):
1330 1331 """return a set with appropriate options to use bundle20 during getbundle"""
1331 1332 caps = set(['HG20'])
1332 1333 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1333 1334 caps.add('bundle2=' + urllib.quote(capsblob))
1334 1335 return caps
1335 1336
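Concretely, the returned set carries the HG20 marker plus the repo's bundle2 capability blob URL-quoted into a single token; a sketch with a hypothetical capability blob (urllib.quote is the Python 2 API this module itself uses):

    import urllib

    capsblob = 'HG20\nchangegroup=01,02'  # hypothetical encodecaps() output
    caps = set(['HG20', 'bundle2=' + urllib.quote(capsblob)])
    # caps is now set(['HG20', 'bundle2=HG20%0Achangegroup%3D01%2C02'])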
1336 1337 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1337 1338 getbundle2partsorder = []
1338 1339
1339 1340 # Mapping between step name and function
1340 1341 #
1341 1342 # This exists to help extensions wrap steps if necessary
1342 1343 getbundle2partsmapping = {}
1343 1344
1344 1345 def getbundle2partsgenerator(stepname, idx=None):
1345 1346 """decorator for functions generating bundle2 parts for getbundle
1346 1347
1347 1348 The function is added to the step -> function mapping and appended to the
1348 1349 list of steps. Beware that decorated functions will be added in order
1349 1350 (this may matter).
1350 1351
1351 1352 You can only use this decorator for new steps; if you want to wrap a step
1352 1353 from an extension, modify the getbundle2partsmapping dictionary directly."""
1353 1354 def dec(func):
1354 1355 assert stepname not in getbundle2partsmapping
1355 1356 getbundle2partsmapping[stepname] = func
1356 1357 if idx is None:
1357 1358 getbundle2partsorder.append(stepname)
1358 1359 else:
1359 1360 getbundle2partsorder.insert(idx, stepname)
1360 1361 return func
1361 1362 return dec
1362 1363
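A minimal sketch of both usages the docstring describes: registering a new step with the decorator, and wrapping an existing step through the mapping. The 'mypart' step name and the wrapper are hypothetical:

    @getbundle2partsgenerator('mypart')
    def _getbundlemypart(bundler, repo, source, bundlecaps=None,
                         b2caps=None, **kwargs):
        # hypothetical new step: attach a custom part to generated bundles
        bundler.newpart('mypart', data='hello')

    # wrapping an existing step from an extension goes through the mapping:
    origcg = getbundle2partsmapping['changegroup']
    def wrappedcgpart(bundler, repo, source, **kwargs):
        repo.ui.debug('generating changegroup part\n')
        return origcg(bundler, repo, source, **kwargs)
    getbundle2partsmapping['changegroup'] = wrappedcgpart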
1363 1364 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1364 1365 **kwargs):
1365 1366 """return a full bundle (with potentially multiple kinds of parts)
1366 1367
1367 1368 Could be a bundle HG10 or a bundle HG20 depending on the bundlecaps
1368 1369 passed. For now, the bundle can contain only a changegroup, but this will
1369 1370 change when more part types become available for bundle2.
1370 1371
1371 1372 This is different from changegroup.getchangegroup, which only returns an
1372 1373 HG10 changegroup bundle. They may eventually get reunited once we have a
1373 1374 clearer idea of the API we want to use to query different data.
1374 1375
1375 1376 The implementation is at a very early stage and will get a massive rework
1376 1377 when the bundle API is refined.
1377 1378 """
1378 1379 # bundle10 case
1379 1380 usebundle2 = False
1380 1381 if bundlecaps is not None:
1381 1382 usebundle2 = any((cap.startswith('HG2') for cap in bundlecaps))
1382 1383 if not usebundle2:
1383 1384 if bundlecaps and not kwargs.get('cg', True):
1384 1385 raise ValueError(_('request for bundle10 must include changegroup'))
1385 1386
1386 1387 if kwargs:
1387 1388 raise ValueError(_('unsupported getbundle arguments: %s')
1388 1389 % ', '.join(sorted(kwargs.keys())))
1389 1390 return changegroup.getchangegroup(repo, source, heads=heads,
1390 1391 common=common, bundlecaps=bundlecaps)
1391 1392
1392 1393 # bundle20 case
1393 1394 b2caps = {}
1394 1395 for bcaps in bundlecaps:
1395 1396 if bcaps.startswith('bundle2='):
1396 1397 blob = urllib.unquote(bcaps[len('bundle2='):])
1397 1398 b2caps.update(bundle2.decodecaps(blob))
1398 1399 bundler = bundle2.bundle20(repo.ui, b2caps)
1399 1400
1400 1401 kwargs['heads'] = heads
1401 1402 kwargs['common'] = common
1402 1403
1403 1404 for name in getbundle2partsorder:
1404 1405 func = getbundle2partsmapping[name]
1405 1406 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1406 1407 **kwargs)
1407 1408
1408 1409 return util.chunkbuffer(bundler.getchunks())
1409 1410
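A hedged usage sketch combining caps20to10() and getbundle(); the 'pull' source label mirrors how pull code tags transfers, while the heads/common values here are illustrative:

    from mercurial.node import nullid

    caps = caps20to10(repo)  # e.g. set(['HG20', 'bundle2=...'])
    fh = getbundle(repo, 'pull', heads=repo.heads(), common=[nullid],
                   bundlecaps=caps)
    data = fh.read()  # util.chunkbuffer is file-like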
1410 1411 @getbundle2partsgenerator('changegroup')
1411 1412 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1412 1413 b2caps=None, heads=None, common=None, **kwargs):
1413 1414 """add a changegroup part to the requested bundle"""
1414 1415 cg = None
1415 1416 if kwargs.get('cg', True):
1416 1417 # build changegroup bundle here.
1417 1418 version = None
1418 1419 cgversions = b2caps.get('changegroup')
1419 1420 getcgkwargs = {}
1420 1421 if cgversions: # 3.1 and 3.2 ship with an empty value
1421 1422 cgversions = [v for v in cgversions if v in changegroup.packermap]
1422 1423 if not cgversions:
1423 1424 raise ValueError(_('no common changegroup version'))
1424 1425 version = getcgkwargs['version'] = max(cgversions)
1425 1426 outgoing = changegroup.computeoutgoing(repo, heads, common)
1426 1427 cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
1427 1428 bundlecaps=bundlecaps,
1428 1429 **getcgkwargs)
1429 1430
1430 1431 if cg:
1431 1432 part = bundler.newpart('changegroup', data=cg)
1432 1433 if version is not None:
1433 1434 part.addparam('version', version)
1434 1435 part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
1435 1436
1436 1437 @getbundle2partsgenerator('listkeys')
1437 1438 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1438 1439 b2caps=None, **kwargs):
1439 1440 """add parts containing listkeys namespaces to the requested bundle"""
1440 1441 listkeys = kwargs.get('listkeys', ())
1441 1442 for namespace in listkeys:
1442 1443 part = bundler.newpart('listkeys')
1443 1444 part.addparam('namespace', namespace)
1444 1445 keys = repo.listkeys(namespace).items()
1445 1446 part.data = pushkey.encodekeys(keys)
1446 1447
1447 1448 @getbundle2partsgenerator('obsmarkers')
1448 1449 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1449 1450 b2caps=None, heads=None, **kwargs):
1450 1451 """add an obsolescence markers part to the requested bundle"""
1451 1452 if kwargs.get('obsmarkers', False):
1452 1453 if heads is None:
1453 1454 heads = repo.heads()
1454 1455 subset = [c.node() for c in repo.set('::%ln', heads)]
1455 1456 markers = repo.obsstore.relevantmarkers(subset)
1456 1457 markers = sorted(markers)
1457 1458 buildobsmarkerspart(bundler, markers)
1458 1459
1459 1460 @getbundle2partsgenerator('hgtagsfnodes')
1460 1461 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1461 1462 b2caps=None, heads=None, common=None,
1462 1463 **kwargs):
1463 1464 """Transfer the .hgtags filenodes mapping.
1464 1465
1465 1466 Only values for heads in this bundle will be transferred.
1466 1467
1467 1468 The part data consists of pairs of a 20-byte changeset node and the raw
1468 1469 20-byte .hgtags filenode value.
1469 1470 """
1470 1471 # Don't send unless:
1471 1472 # - changesets are being exchanged,
1472 1473 # - the client supports it.
1473 1474 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1474 1475 return
1475 1476
1476 1477 outgoing = changegroup.computeoutgoing(repo, heads, common)
1477 1478
1478 1479 if not outgoing.missingheads:
1479 1480 return
1480 1481
1481 1482 cache = tags.hgtagsfnodescache(repo.unfiltered())
1482 1483 chunks = []
1483 1484
1484 1485 # .hgtags fnodes are only relevant for head changesets. While we could
1485 1486 # transfer values for all known nodes, there will likely be little to
1486 1487 # no benefit.
1487 1488 #
1488 1489 # We don't bother using a generator to produce output data because
1489 1490 # a) we only have 40 bytes per head and even esoteric numbers of heads
1490 1491 # consume little memory (1M heads is 40MB) b) we don't want to send the
1491 1492 # part if we don't have entries and knowing if we have entries requires
1492 1493 # cache lookups.
1493 1494 for node in outgoing.missingheads:
1494 1495 # Don't compute missing, as this may slow down serving.
1495 1496 fnode = cache.getfnode(node, computemissing=False)
1496 1497 if fnode is not None:
1497 1498 chunks.extend([node, fnode])
1498 1499
1499 1500 if chunks:
1500 1501 bundler.newpart('hgtagsfnodes', data=''.join(chunks))
1501 1502
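Because the part data is a flat sequence of fixed-width 40-byte records, a receiver can rebuild the mapping with a simple loop; a sketch (the helper name is ours, not part of this change):

    def decodehgtagsfnodes(data):
        # map each 20-byte changeset node to its 20-byte .hgtags filenode
        mapping = {}
        for offset in range(0, len(data), 40):
            node = data[offset:offset + 20]
            fnode = data[offset + 20:offset + 40]
            mapping[node] = fnode
        return mapping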
1502 1503 def check_heads(repo, their_heads, context):
1503 1504 """check if the heads of a repo have been modified
1504 1505
1505 1506 Used by peer for unbundling.
1506 1507 """
1507 1508 heads = repo.heads()
1508 1509 heads_hash = util.sha1(''.join(sorted(heads))).digest()
1509 1510 if not (their_heads == ['force'] or their_heads == heads or
1510 1511 their_heads == ['hashed', heads_hash]):
1511 1512 # someone else committed/pushed/unbundled while we
1512 1513 # were transferring data
1513 1514 raise error.PushRaced('repository changed while %s - '
1514 1515 'please try again' % context)
1515 1516
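For reference, the 'hashed' heads value a client can send is computed the same way as heads_hash above; a short sketch using the same util.sha1 helper:

    expectedheads = ['hashed',
                     util.sha1(''.join(sorted(repo.heads()))).digest()]
    check_heads(repo, expectedheads, 'uploading changes')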
1516 1517 def unbundle(repo, cg, heads, source, url):
1517 1518 """Apply a bundle to a repo.
1518 1519
1519 1520 This function makes sure the repo is locked during the application and has
1520 1521 a mechanism to check that no push race occurred between the creation of the
1521 1522 bundle and its application.
1522 1523
1523 1524 If the push was raced, a PushRaced exception is raised."""
1524 1525 r = 0
1525 1526 # need a transaction when processing a bundle2 stream
1526 1527 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
1527 1528 lockandtr = [None, None, None]
1528 1529 recordout = None
1529 1530 # quick fix for output mismatch with bundle2 in 3.4
1530 1531 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
1531 1532 False)
1532 1533 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1533 1534 captureoutput = True
1534 1535 try:
1535 1536 check_heads(repo, heads, 'uploading changes')
1536 1537 # push can proceed
1537 1538 if util.safehasattr(cg, 'params'):
1538 1539 r = None
1539 1540 try:
1540 1541 def gettransaction():
1541 1542 if not lockandtr[2]:
1542 1543 lockandtr[0] = repo.wlock()
1543 1544 lockandtr[1] = repo.lock()
1544 1545 lockandtr[2] = repo.transaction(source)
1545 1546 lockandtr[2].hookargs['source'] = source
1546 1547 lockandtr[2].hookargs['url'] = url
1547 1548 lockandtr[2].hookargs['bundle2'] = '1'
1548 1549 return lockandtr[2]
1549 1550
1550 1551 # Do greedy locking by default until we're satisfied with lazy
1551 1552 # locking.
1552 1553 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
1553 1554 gettransaction()
1554 1555
1555 1556 op = bundle2.bundleoperation(repo, gettransaction,
1556 1557 captureoutput=captureoutput)
1557 1558 try:
1558 1559 op = bundle2.processbundle(repo, cg, op=op)
1559 1560 finally:
1560 1561 r = op.reply
1561 1562 if captureoutput and r is not None:
1562 1563 repo.ui.pushbuffer(error=True, subproc=True)
1563 1564 def recordout(output):
1564 1565 r.newpart('output', data=output, mandatory=False)
1565 1566 if lockandtr[2] is not None:
1566 1567 lockandtr[2].close()
1567 1568 except BaseException as exc:
1568 1569 exc.duringunbundle2 = True
1569 1570 if captureoutput and r is not None:
1570 1571 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1571 1572 def recordout(output):
1572 1573 part = bundle2.bundlepart('output', data=output,
1573 1574 mandatory=False)
1574 1575 parts.append(part)
1575 1576 raise
1576 1577 else:
1577 1578 lockandtr[1] = repo.lock()
1578 1579 r = changegroup.addchangegroup(repo, cg, source, url)
1579 1580 finally:
1580 1581 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
1581 1582 if recordout is not None:
1582 1583 recordout(repo.ui.popbuffer())
1583 1584 return r
1584 1585
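A sketch of a server-side invocation, assuming a file-like stream fh and a client address remoteip (both illustrative); readbundle() is the same helper used by trypullbundlefromurl below, and heads=['force'] skips the race check in check_heads:

    cg = readbundle(repo.ui, fh, 'stream')
    r = unbundle(repo, cg, ['force'], 'serve',
                 'remote:http:' + remoteip)  # prefix enables output capture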
1585 1586 def _maybeapplyclonebundle(pullop):
1586 1587 """Apply a clone bundle from a remote, if possible."""
1587 1588
1588 1589 repo = pullop.repo
1589 1590 remote = pullop.remote
1590 1591
1591 1592 if not repo.ui.configbool('experimental', 'clonebundles', False):
1592 1593 return
1593 1594
1594 1595 if pullop.heads:
1595 1596 return
1596 1597
1597 1598 if not remote.capable('clonebundles'):
1598 1599 return
1599 1600
1600 1601 res = remote._call('clonebundles')
1601 1602 entries = parseclonebundlesmanifest(res)
1602 1603 if not entries:
1603 1604 repo.ui.note(_('no clone bundles available on remote; '
1604 1605 'falling back to regular clone\n'))
1605 1606 return
1606 1607
1607 1608 entries = filterclonebundleentries(repo, entries)
1608 1609 if not entries:
1609 1610 # There is a thundering herd concern here. However, if a server
1610 1611 # operator doesn't advertise bundles appropriate for its clients,
1611 1612 # they deserve what's coming. Furthermore, from a client's
1612 1613 # perspective, no automatic fallback would mean not being able to
1613 1614 # clone!
1614 1615 repo.ui.warn(_('no compatible clone bundles available on server; '
1615 1616 'falling back to regular clone\n'))
1616 1617 repo.ui.warn(_('(you may want to report this to the server '
1617 1618 'operator)\n'))
1618 1619 return
1619 1620
1620 1621 # TODO sort entries by user preferences.
1621 1622
1622 1623 url = entries[0]['URL']
1623 1624 repo.ui.status(_('applying clone bundle from %s\n') % url)
1624 1625 if trypullbundlefromurl(repo.ui, repo, url):
1625 1626 repo.ui.status(_('finished applying clone bundle\n'))
1626 1627 # Bundle failed.
1627 1628 #
1628 1629 # We abort by default to avoid the thundering herd of
1629 1630 # clients flooding a server that was expecting expensive
1630 1631 # clone load to be offloaded.
1631 1632 elif repo.ui.configbool('ui', 'clonebundlefallback', False):
1632 1633 repo.ui.warn(_('falling back to normal clone\n'))
1633 1634 else:
1634 1635 raise error.Abort(_('error applying bundle'),
1635 1636 hint=_('consider contacting the server '
1636 1637 'operator if this error persists'))
1637 1638
1638 1639 def parseclonebundlesmanifest(s):
1639 1640 """Parses the raw text of a clone bundles manifest.
1640 1641
1641 1642 Returns a list of dicts. Each dict has a ``URL`` key corresponding
1642 1643 to the URL; the remaining keys are the attributes for the entry.
1643 1644 """
1644 1645 m = []
1645 1646 for line in s.splitlines():
1646 1647 fields = line.split()
1647 1648 if not fields:
1648 1649 continue
1649 1650 attrs = {'URL': fields[0]}
1650 1651 for rawattr in fields[1:]:
1651 1652 key, value = rawattr.split('=', 1)
1652 1653 attrs[urllib.unquote(key)] = urllib.unquote(value)
1653 1654
1654 1655 m.append(attrs)
1655 1656
1656 1657 return m
1657 1658
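For example, a two-entry manifest (URLs hypothetical) parses as follows, modulo dict key ordering:

    >>> parseclonebundlesmanifest(
    ...     'http://a.example/full.hg BUNDLESPEC=gzip-v2\n'
    ...     'http://b.example/full.hg REQUIRESNI=true\n')
    [{'URL': 'http://a.example/full.hg', 'BUNDLESPEC': 'gzip-v2'},
     {'URL': 'http://b.example/full.hg', 'REQUIRESNI': 'true'}]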
1658 1659 def filterclonebundleentries(repo, entries):
1659 1660 newentries = []
1660 1661 for entry in entries:
1661 1662 spec = entry.get('BUNDLESPEC')
1662 1663 if spec:
1663 1664 try:
1664 1665 parsebundlespec(repo, spec, strict=True)
1665 1666 except error.InvalidBundleSpecification as e:
1666 1667 repo.ui.debug(str(e) + '\n')
1667 1668 continue
1668 1669 except error.UnsupportedBundleSpecification as e:
1669 1670 repo.ui.debug('filtering %s because unsupported bundle '
1670 1671 'spec: %s\n' % (entry['URL'], str(e)))
1671 1672 continue
1672 1673
1674 if 'REQUIRESNI' in entry and not sslutil.hassni:
1675 repo.ui.debug('filtering %s because SNI not supported\n' %
1676 entry['URL'])
1677 continue
1678
1673 1679 newentries.append(entry)
1674 1680
1675 1681 return newentries
1676 1682
1677 1683 def trypullbundlefromurl(ui, repo, url):
1678 1684 """Attempt to apply a bundle from a URL."""
1679 1685 lock = repo.lock()
1680 1686 try:
1681 1687 tr = repo.transaction('bundleurl')
1682 1688 try:
1683 1689 try:
1684 1690 fh = urlmod.open(ui, url)
1685 1691 cg = readbundle(ui, fh, 'stream')
1686 1692
1687 1693 if isinstance(cg, bundle2.unbundle20):
1688 1694 bundle2.processbundle(repo, cg, lambda: tr)
1689 1695 else:
1690 1696 changegroup.addchangegroup(repo, cg, 'clonebundles', url)
1691 1697 tr.close()
1692 1698 return True
1693 1699 except urllib2.HTTPError as e:
1694 1700 ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
1695 1701 except urllib2.URLError as e:
1696 1702 ui.warn(_('error fetching bundle: %s\n') % e.reason)
1697 1703
1698 1704 return False
1699 1705 finally:
1700 1706 tr.release()
1701 1707 finally:
1702 1708 lock.release()
@@ -1,229 +1,263 b''
1 1 Set up a server
2 2
3 3 $ hg init server
4 4 $ cd server
5 5 $ cat >> .hg/hgrc << EOF
6 6 > [extensions]
7 7 > clonebundles =
8 8 > EOF
9 9
10 10 $ touch foo
11 11 $ hg -q commit -A -m 'add foo'
12 12 $ touch bar
13 13 $ hg -q commit -A -m 'add bar'
14 14
15 15 $ hg serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
16 16 $ cat hg.pid >> $DAEMON_PIDS
17 17 $ cd ..
18 18
19 19 Feature disabled by default
20 20 (client should not request manifest)
21 21
22 22 $ hg clone -U http://localhost:$HGPORT feature-disabled
23 23 requesting all changes
24 24 adding changesets
25 25 adding manifests
26 26 adding file changes
27 27 added 2 changesets with 2 changes to 2 files
28 28
29 29 $ cat server/access.log
30 30 * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
31 31 * - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
32 32 * - - [*] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=aaff8d2ffbbf07a46dd1f05d8ae7877e3f56e2a2&listkeys=phase%2Cbookmarks (glob)
33 33 * - - [*] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases (glob)
34 34
35 35 $ cat >> $HGRCPATH << EOF
36 36 > [experimental]
37 37 > clonebundles = true
38 38 > EOF
39 39
40 40 Missing manifest should not result in server lookup
41 41
42 42 $ hg --verbose clone -U http://localhost:$HGPORT no-manifest
43 43 requesting all changes
44 44 adding changesets
45 45 adding manifests
46 46 adding file changes
47 47 added 2 changesets with 2 changes to 2 files
48 48
49 49 $ tail -4 server/access.log
50 50 * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
51 51 * - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
52 52 * - - [*] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=aaff8d2ffbbf07a46dd1f05d8ae7877e3f56e2a2&listkeys=phase%2Cbookmarks (glob)
53 53 * - - [*] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases (glob)
54 54
55 55 Empty manifest file results in retrieval
56 56 (the extension only checks if the manifest file exists)
57 57
58 58 $ touch server/.hg/clonebundles.manifest
59 59 $ hg --verbose clone -U http://localhost:$HGPORT empty-manifest
60 60 no clone bundles available on remote; falling back to regular clone
61 61 requesting all changes
62 62 adding changesets
63 63 adding manifests
64 64 adding file changes
65 65 added 2 changesets with 2 changes to 2 files
66 66
67 67 Manifest file with invalid URL aborts
68 68
69 69 $ echo 'http://does.not.exist/bundle.hg' > server/.hg/clonebundles.manifest
70 70 $ hg clone http://localhost:$HGPORT 404-url
71 71 applying clone bundle from http://does.not.exist/bundle.hg
72 72 error fetching bundle: [Errno -2] Name or service not known
73 73 abort: error applying bundle
74 74 (consider contacting the server operator if this error persists)
75 75 [255]
76 76
77 77 Server not running aborts
78 78
79 79 $ echo "http://localhost:$HGPORT1/bundle.hg" > server/.hg/clonebundles.manifest
80 80 $ hg clone http://localhost:$HGPORT server-not-runner
81 81 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
82 82 error fetching bundle: [Errno 111] Connection refused
83 83 abort: error applying bundle
84 84 (consider contacting the server operator if this error persists)
85 85 [255]
86 86
87 87 Server returns 404
88 88
89 89 $ python $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
90 90 $ cat http.pid >> $DAEMON_PIDS
91 91 $ hg clone http://localhost:$HGPORT running-404
92 92 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
93 93 HTTP error fetching bundle: HTTP Error 404: File not found
94 94 abort: error applying bundle
95 95 (consider contacting the server operator if this error persists)
96 96 [255]
97 97
98 98 We can override failure to fall back to regular clone
99 99
100 100 $ hg --config ui.clonebundlefallback=true clone -U http://localhost:$HGPORT 404-fallback
101 101 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
102 102 HTTP error fetching bundle: HTTP Error 404: File not found
103 103 falling back to normal clone
104 104 requesting all changes
105 105 adding changesets
106 106 adding manifests
107 107 adding file changes
108 108 added 2 changesets with 2 changes to 2 files
109 109
110 110 Bundle with partial content works
111 111
112 112 $ hg -R server bundle --type gzip-v1 --base null -r 53245c60e682 partial.hg
113 113 1 changesets found
114 114
115 115 We verify exact bundle content as an extra check against accidental future
116 116 changes. If this output changes, we could break old clients.
117 117
118 118 $ f --size --hexdump partial.hg
119 119 partial.hg: size=208
120 120 0000: 48 47 31 30 47 5a 78 9c 63 60 60 98 17 ac 12 93 |HG10GZx.c``.....|
121 121 0010: f0 ac a9 23 45 70 cb bf 0d 5f 59 4e 4a 7f 79 21 |...#Ep..._YNJ.y!|
122 122 0020: 9b cc 40 24 20 a0 d7 ce 2c d1 38 25 cd 24 25 d5 |..@$ ...,.8%.$%.|
123 123 0030: d8 c2 22 cd 38 d9 24 cd 22 d5 c8 22 cd 24 cd 32 |..".8.$."..".$.2|
124 124 0040: d1 c2 d0 c4 c8 d2 32 d1 38 39 29 c9 34 cd d4 80 |......2.89).4...|
125 125 0050: ab 24 b5 b8 84 cb 40 c1 80 2b 2d 3f 9f 8b 2b 31 |.$....@..+-?..+1|
126 126 0060: 25 45 01 c8 80 9a d2 9b 65 fb e5 9e 45 bf 8d 7f |%E......e...E...|
127 127 0070: 9f c6 97 9f 2b 44 34 67 d9 ec 8e 0f a0 92 0b 75 |....+D4g.......u|
128 128 0080: 41 d6 24 59 18 a4 a4 9a a6 18 1a 5b 98 9b 5a 98 |A.$Y.......[..Z.|
129 129 0090: 9a 18 26 9b a6 19 98 1a 99 99 26 a6 18 9a 98 24 |..&.......&....$|
130 130 00a0: 26 59 a6 25 5a 98 a5 18 a6 24 71 41 35 b1 43 dc |&Y.%Z....$qA5.C.|
131 131 00b0: 96 b0 83 f7 e9 45 8b d2 56 c7 a3 1f 82 52 d7 8a |.....E..V....R..|
132 132 00c0: 78 ed fc d5 76 f1 36 95 dc 05 07 00 ad 39 5e d3 |x...v.6......9^.|
133 133
134 134 $ echo "http://localhost:$HGPORT1/partial.hg" > server/.hg/clonebundles.manifest
135 135 $ hg clone -U http://localhost:$HGPORT partial-bundle
136 136 applying clone bundle from http://localhost:$HGPORT1/partial.hg
137 137 adding changesets
138 138 adding manifests
139 139 adding file changes
140 140 added 1 changesets with 1 changes to 1 files
141 141 finished applying clone bundle
142 142 searching for changes
143 143 adding changesets
144 144 adding manifests
145 145 adding file changes
146 146 added 1 changesets with 1 changes to 1 files
147 147
148 148 Bundle with full content works
149 149
150 150 $ hg -R server bundle --type gzip-v2 --base null -r tip full.hg
151 151 2 changesets found
152 152
153 153 Again, we perform an extra check against bundle content changes. If this content
154 154 changes, clone bundles produced by new Mercurial versions may not be readable
155 155 by old clients.
156 156
157 157 $ f --size --hexdump full.hg
158 158 full.hg: size=408
159 159 0000: 48 47 32 30 00 00 00 0e 43 6f 6d 70 72 65 73 73 |HG20....Compress|
160 160 0010: 69 6f 6e 3d 47 5a 78 9c 63 60 60 90 e5 76 f6 70 |ion=GZx.c``..v.p|
161 161 0020: f4 73 77 75 0f f2 0f 0d 60 00 02 46 06 76 a6 b2 |.swu....`..F.v..|
162 162 0030: d4 a2 e2 cc fc 3c 03 23 06 06 e6 7d 40 b1 4d c1 |.....<.#...}@.M.|
163 163 0040: 2a 31 09 cf 9a 3a 52 04 b7 fc db f0 95 e5 a4 f4 |*1...:R.........|
164 164 0050: 97 17 b2 c9 0c 14 00 02 e6 d9 99 25 1a a7 a4 99 |...........%....|
165 165 0060: a4 a4 1a 5b 58 a4 19 27 9b a4 59 a4 1a 59 a4 99 |...[X..'..Y..Y..|
166 166 0070: a4 59 26 5a 18 9a 18 59 5a 26 1a 27 27 25 99 a6 |.Y&Z...YZ&.''%..|
167 167 0080: 99 1a 70 95 a4 16 97 70 19 28 18 70 a5 e5 e7 73 |..p....p.(.p...s|
168 168 0090: 71 25 a6 a4 28 00 19 40 13 0e ac fa df ab ff 7b |q%..(..@.......{|
169 169 00a0: 3f fb 92 dc 8b 1f 62 bb 9e b7 d7 d9 87 3d 5a 44 |?.....b......=ZD|
170 170 00b0: ac 2f b0 a9 c3 66 1e 54 b9 26 08 a7 1a 1b 1a a7 |./...f.T.&......|
171 171 00c0: 25 1b 9a 1b 99 19 9a 5a 18 9b a6 18 19 00 dd 67 |%......Z.......g|
172 172 00d0: 61 61 98 06 f4 80 49 4a 8a 65 52 92 41 9a 81 81 |aa....IJ.eR.A...|
173 173 00e0: a5 11 17 50 31 30 58 19 cc 80 98 25 29 b1 08 c4 |...P10X....%)...|
174 174 00f0: 37 07 79 19 88 d9 41 ee 07 8a 41 cd 5d 98 65 fb |7.y...A...A.].e.|
175 175 0100: e5 9e 45 bf 8d 7f 9f c6 97 9f 2b 44 34 67 d9 ec |..E.......+D4g..|
176 176 0110: 8e 0f a0 61 a8 eb 82 82 2e c9 c2 20 25 d5 34 c5 |...a....... %.4.|
177 177 0120: d0 d8 c2 dc d4 c2 d4 c4 30 d9 34 cd c0 d4 c8 cc |........0.4.....|
178 178 0130: 34 31 c5 d0 c4 24 31 c9 32 2d d1 c2 2c c5 30 25 |41...$1.2-..,.0%|
179 179 0140: 09 e4 ee 85 8f 85 ff 88 ab 89 36 c7 2a c4 47 34 |..........6.*.G4|
180 180 0150: fe f8 ec 7b 73 37 3f c3 24 62 1d 8d 4d 1d 9e 40 |...{s7?.$b..M..@|
181 181 0160: 06 3b 10 14 36 a4 38 10 04 d8 21 01 5a b2 83 f7 |.;..6.8...!.Z...|
182 182 0170: e9 45 8b d2 56 c7 a3 1f 82 52 d7 8a 78 ed fc d5 |.E..V....R..x...|
183 183 0180: 76 f1 36 25 81 49 c0 ad 30 c0 0e 49 8f 54 b7 9e |v.6%.I..0..I.T..|
184 184 0190: d4 1c 09 00 bb 8d f0 bd |........|
185 185
186 186 $ echo "http://localhost:$HGPORT1/full.hg" > server/.hg/clonebundles.manifest
187 187 $ hg clone -U http://localhost:$HGPORT full-bundle
188 188 applying clone bundle from http://localhost:$HGPORT1/full.hg
189 189 adding changesets
190 190 adding manifests
191 191 adding file changes
192 192 added 2 changesets with 2 changes to 2 files
193 193 finished applying clone bundle
194 194 searching for changes
195 195 no changes found
196 196
197 197 Entry with unknown BUNDLESPEC is filtered and not used
198 198
199 199 $ cat > server/.hg/clonebundles.manifest << EOF
200 200 > http://bad.entry1 BUNDLESPEC=UNKNOWN
201 201 > http://bad.entry2 BUNDLESPEC=xz-v1
202 202 > http://bad.entry3 BUNDLESPEC=none-v100
203 203 > http://localhost:$HGPORT1/full.hg BUNDLESPEC=gzip-v2
204 204 > EOF
205 205
206 206 $ hg clone -U http://localhost:$HGPORT filter-unknown-type
207 207 applying clone bundle from http://localhost:$HGPORT1/full.hg
208 208 adding changesets
209 209 adding manifests
210 210 adding file changes
211 211 added 2 changesets with 2 changes to 2 files
212 212 finished applying clone bundle
213 213 searching for changes
214 214 no changes found
215 215
216 216 Automatic fallback when all entries are filtered
217 217
218 218 $ cat > server/.hg/clonebundles.manifest << EOF
219 219 > http://bad.entry BUNDLESPEC=UNKNOWN
220 220 > EOF
221 221
222 222 $ hg clone -U http://localhost:$HGPORT filter-all
223 223 no compatible clone bundles available on server; falling back to regular clone
224 224 (you may want to report this to the server operator)
225 225 requesting all changes
226 226 adding changesets
227 227 adding manifests
228 228 adding file changes
229 229 added 2 changesets with 2 changes to 2 files
230
231 URLs requiring SNI are filtered in Python <2.7.9
232
233 $ cp full.hg sni.hg
234 $ cat > server/.hg/clonebundles.manifest << EOF
235 > http://localhost:$HGPORT1/sni.hg REQUIRESNI=true
236 > http://localhost:$HGPORT1/full.hg
237 > EOF
238
239 #if sslcontext
240 Python 2.7.9+ supports SNI
241
242 $ hg clone -U http://localhost:$HGPORT sni-supported
243 applying clone bundle from http://localhost:$HGPORT1/sni.hg
244 adding changesets
245 adding manifests
246 adding file changes
247 added 2 changesets with 2 changes to 2 files
248 finished applying clone bundle
249 searching for changes
250 no changes found
251 #else
252 Python <2.7.9 will filter SNI URLs
253
254 $ hg clone -U http://localhost:$HGPORT sni-unsupported
255 applying clone bundle from http://localhost:$HGPORT1/full.hg
256 adding changesets
257 adding manifests
258 adding file changes
259 added 2 changesets with 2 changes to 2 files
260 finished applying clone bundle
261 searching for changes
262 no changes found
263 #endif