##// END OF EJS Templates
bundle2: add an 'idx' argument to the 'b2partsgenerator'...
Pierre-Yves David -
r24731:88a36ede default
parent child Browse files
Show More
@@ -1,1294 +1,1297 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex, nullid
10 10 import errno, urllib
11 11 import util, scmutil, changegroup, base85, error
12 12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 13
def readbundle(ui, fh, fname, vfs=None):
    """Return an unbundler object for the bundle read from ``fh``.

    ``fname`` is used in error messages ("stream" when empty); when ``vfs``
    is given and a name is supplied, the name is joined to the vfs root.
    Headerless streams (starting with '\\0') are fixed up as HG10UN.
    Raises util.Abort for non-Mercurial data or unknown bundle versions.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            # headerless fixup: re-attach the already-read bytes and assume
            # an uncompressed HG10 stream
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic = header[0:2]
    version = header[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            # compression marker follows the HG10 magic
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    if version.startswith('2'):
        return bundle2.getunbundler(ui, fh, header=magic + version)
    raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39 39
def buildobsmarkerspart(bundler, markers):
    """add an obsmarker part to the bundler with <markers>

    Returns the new part, or None when ``markers`` is empty.
    Raises ValueError if the bundler doesn't support any known obsmarker
    format.
    """
    if not markers:
        return None
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(remoteversions)
    if version is None:
        raise ValueError('bundler do not support common obsmarker format')
    stream = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart('obsmarkers', data=stream)
54 54
55 55 def _canusebundle2(op):
56 56 """return true if a pull/push can use bundle2
57 57
58 58 Feel free to nuke this function when we drop the experimental option"""
59 59 return (op.repo.ui.configbool('experimental', 'bundle2-exp', False)
60 60 and op.remote.capable('bundle2'))
61 61
62 62
class pushoperation(object):
    """A object that represent a single push operation

    It purpose is to carry push related state and very common operation.

    A new should be created at the beginning of each push and discarded
    afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # step already performed
        # (used to check what steps have been already performed through
        # bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(self.outgoing.common)
        nodemap = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nodemap[node] in common]
        # and
        # * commonheads parents on missing
        parentset = unfi.set('%ln and parents(roots(%ln))',
                             self.outgoing.commonheads,
                             self.outgoing.missing)
        cheads.extend(ctx.node() for ctx in parentset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        return self.fallbackheads

    # mapping of message used when pushing bookmark
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }
171 171
172 172
173 173 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
174 174 '''Push outgoing changesets (limited by revs) from a local
175 175 repository to remote. Return an integer:
176 176 - None means nothing to push
177 177 - 0 means HTTP error
178 178 - 1 means we pushed and remote head count is unchanged *or*
179 179 we have outgoing changesets but refused to push
180 180 - other values as described by addchangegroup()
181 181 '''
182 182 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
183 183 if pushop.remote.local():
184 184 missing = (set(pushop.repo.requirements)
185 185 - pushop.remote.local().supported)
186 186 if missing:
187 187 msg = _("required features are not"
188 188 " supported in the destination:"
189 189 " %s") % (', '.join(sorted(missing)))
190 190 raise util.Abort(msg)
191 191
192 192 # there are two ways to push to remote repo:
193 193 #
194 194 # addchangegroup assumes local user can lock remote
195 195 # repo (local filesystem, old ssh servers).
196 196 #
197 197 # unbundle assumes local user cannot lock remote repo (new ssh
198 198 # servers, http servers).
199 199
200 200 if not pushop.remote.canpush():
201 201 raise util.Abort(_("destination does not support push"))
202 202 # get local lock as we might write phase data
203 203 locallock = None
204 204 try:
205 205 locallock = pushop.repo.lock()
206 206 pushop.locallocked = True
207 207 except IOError, err:
208 208 pushop.locallocked = False
209 209 if err.errno != errno.EACCES:
210 210 raise
211 211 # source repo cannot be locked.
212 212 # We do not abort the push, but just disable the local phase
213 213 # synchronisation.
214 214 msg = 'cannot lock source repository: %s\n' % err
215 215 pushop.ui.debug(msg)
216 216 try:
217 217 if pushop.locallocked:
218 218 pushop.trmanager = transactionmanager(repo,
219 219 'push-response',
220 220 pushop.remote.url())
221 221 pushop.repo.checkpush(pushop)
222 222 lock = None
223 223 unbundle = pushop.remote.capable('unbundle')
224 224 if not unbundle:
225 225 lock = pushop.remote.lock()
226 226 try:
227 227 _pushdiscovery(pushop)
228 228 if _canusebundle2(pushop):
229 229 _pushbundle2(pushop)
230 230 _pushchangeset(pushop)
231 231 _pushsyncphase(pushop)
232 232 _pushobsolete(pushop)
233 233 _pushbookmark(pushop)
234 234 finally:
235 235 if lock is not None:
236 236 lock.release()
237 237 if pushop.trmanager:
238 238 pushop.trmanager.close()
239 239 finally:
240 240 if pushop.trmanager:
241 241 pushop.trmanager.release()
242 242 if locallock is not None:
243 243 locallock.release()
244 244
245 245 return pushop
246 246
# ordered list of discovery step names to run before a push
pushdiscoveryorder = []

# step name -> implementing function
#
# Kept separate from the order list so extensions can wrap individual steps.
pushdiscoverymapping = {}
254 254
def pushdiscovery(stepname):
    """decorator for function performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated function will be added in order (this
    may matter).

    You can only use this decorator for a new step, if you want to wrap a step
    from an extension, change the pushdiscovery dictionary directly."""
    def dec(func):
        # each step name may only be registered once
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return dec
270 270
def _pushdiscovery(pushop):
    """Run all discovery steps"""
    for stepname in pushdiscoveryorder:
        # dispatch through the mapping so extensions can wrap steps
        pushdiscoverymapping[stepname](pushop)
276 276
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changeset that need to be pushed"""
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
289 289
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    pheads, droots = phases.analyzeremotephases(pushop.repo,
                                                pushop.fallbackheads,
                                                remotephases)
    extracond = ''
    if not publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
324 324
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """collect obsolescence markers relevant to the pushed set"""
    if not (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
            and pushop.repo.obsstore
            and 'obsolete' in pushop.remote.listkeys('namespaces')):
        return
    repo = pushop.repo
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
335 335
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """figure out which bookmarks to add, update or delete on the remote"""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # only bookmarks pointing into the pushed subset are relevant
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    explicit = set(pushop.bookmarks)

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
    # bookmarks that advanced locally
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in advdst + diverge + differ:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
386 386
def _pushcheckoutgoing(pushop):
    """Validate the outgoing set; return False when there is nothing to push.

    Aborts when obsolete/troubled changesets would be pushed without --force,
    and runs the standard new-head checks.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = {"unstable": _("push includes unstable changeset: %s!"),
                   "bumped": _("push includes bumped changeset: %s!"),
                   "divergent": _("push includes divergent changeset: %s!")}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(mst[ctx.troubles()[0]] % ctx)
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
421 421
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# step name -> part-generating function
#
# Kept separate from the order list so extensions can wrap individual steps.
b2partsgenmapping = {}
429 429
def b2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    ``idx`` is optional for backward compatibility: when None the step is
    appended at the end of the order list, otherwise it is inserted at that
    position so a part can run before previously-registered ones.

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the b2partsgenmapping dictionary directly."""
    def dec(func):
        # each step name may only be registered once
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return dec
445 448
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    if not pushop.force:
        # Send known heads to the server for race detection.
        bundler.newpart('check:heads', data=iter(pushop.remoteheads))
    caps = bundle2.bundle2caps(pushop.remote)
    version = None
    supported = caps.get('changegroup')
    if not supported:  # 3.1 and 3.2 ship with an empty value
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing)
    else:
        # negotiate the newest changegroup version both ends understand
        supported = [v for v in supported if v in changegroup.packermap]
        if not supported:
            raise ValueError(_('no common changegroup version'))
        version = max(supported)
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing,
                                                version=version)
    cgpart = bundler.newpart('changegroup', data=cg)
    if version is not None:
        cgpart.addparam('version', version)

    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
486 489
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in caps:
        return
    pushop.stepsdone.add('phases')
    part2node = []
    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        # one pushkey part per head turning public
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
518 521
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """handle obsolescence-marker push through bundle2"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        # no obsmarker format understood by both ends
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        buildobsmarkerspart(bundler, pushop.outobsmarkers)
529 532
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in caps:
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode
    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # classify for reply messages: no old -> new bookmark, no new ->
        # deletion, otherwise a plain move
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply
572 575
573 576
574 577 def _pushbundle2(pushop):
575 578 """push data to the remote using bundle2
576 579
577 580 The only currently supported type of data is changegroup but this will
578 581 evolve in the future."""
579 582 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
580 583 pushback = (pushop.trmanager
581 584 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
582 585
583 586 # create reply capability
584 587 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
585 588 allowpushback=pushback))
586 589 bundler.newpart('replycaps', data=capsblob)
587 590 replyhandlers = []
588 591 for partgenname in b2partsgenorder:
589 592 partgen = b2partsgenmapping[partgenname]
590 593 ret = partgen(pushop, bundler)
591 594 if callable(ret):
592 595 replyhandlers.append(ret)
593 596 # do not push if nothing to push
594 597 if bundler.nbparts <= 1:
595 598 return
596 599 stream = util.chunkbuffer(bundler.getchunks())
597 600 try:
598 601 reply = pushop.remote.unbundle(stream, ['force'], 'push')
599 602 except error.BundleValueError, exc:
600 603 raise util.Abort('missing support for %s' % exc)
601 604 try:
602 605 trgetter = None
603 606 if pushback:
604 607 trgetter = pushop.trmanager.transaction
605 608 op = bundle2.processbundle(pushop.repo, reply, trgetter)
606 609 except error.BundleValueError, exc:
607 610 raise util.Abort('missing support for %s' % exc)
608 611 for rephand in replyhandlers:
609 612 rephand(op)
610 613
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        packer = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   packer,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
659 662
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        pheads, droots = phases.analyzeremotephases(pushop.repo, cheads,
                                                    remotephases)
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
715 718
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
        return
    # repo is not locked, do not change any phases!
    # Informs the user that phases should have been moved when
    # applicable.
    actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
    if actualmoves:
        phasestr = phases.phasenames[phase]
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n') % phasestr)
732 735
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        results = []
        remotedata = obsolete._pushkeyescape(pushop.outobsmarkers)
        # reverse sort to ensure we end with dump0
        for key in sorted(remotedata, reverse=True):
            results.append(remote.pushkey('obsolete', key, '',
                                          remotedata[key]))
        if [r for r in results if not r]:
            repo.ui.warn(_('failed to push some obsolete markers!\n'))
751 754
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        # classify: no old -> new bookmark, no new -> deletion, else a move
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value form invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
773 776
class pulloperation(object):
    """A object that represent a single pull operation

    It purpose is to carry pull related state and very common operation.

    A new should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=()):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = bookmarks
        # do we force pull?
        self.force = force
        # transaction manager
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        if self.heads is not None:
            # We pulled a specific subset
            # sync on this subset
            return self.heads
        # We pulled every thing possible
        # sync on everything common
        known = set(self.common)
        subset = list(self.common)
        subset.extend(n for n in self.rheads if n not in known)
        return subset

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
830 833
class transactionmanager(object):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        # the transaction is created lazily by transaction()
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if self._tr is None:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
860 863
def pull(repo, remote, heads=None, force=False, bookmarks=()):
    """pull changesets (and related data) from `remote` into `repo`

    Runs discovery, then the changegroup / phase / bookmark / obsmarker
    steps (via bundle2 when both sides support it), all under the repo
    lock and a single transaction manager.

    Returns the pulloperation object describing the pull; its `cgresult`
    attribute carries the changegroup application result."""
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks)
    if pullop.remote.local():
        # a local peer lets us check requirements up front instead of
        # failing midway through the pull
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    pullop.remotebookmarks = remote.listkeys('bookmarks')
    lock = pullop.repo.lock()
    try:
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        _pulldiscovery(pullop)
        if _canusebundle2(pullop):
            _pullbundle2(pullop)
        # the legacy steps below are no-ops for anything bundle2 already
        # handled (they check pullop.stepsdone)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        # trmanager is assigned inside the try block; guard against it
        # never having been set so we don't mask the original exception
        # with an AttributeError on None
        if pullop.trmanager is not None:
            pullop.trmanager.release()
        lock.release()

    return pullop
888 891
# Ordered list of discovery step names to perform before a pull
# (steps run in this order).
pulldiscoveryorder = []

# Mapping between a step name and the function implementing that step.
#
# This exists to help extensions wrap steps if necessary.
pulldiscoverymapping = {}
896 899
def pulldiscovery(stepname):
    """decorator registering a function as a pre-pull discovery step

    The decorated function is recorded in the step -> function mapping and
    its name is appended to the list of steps.  Note that decorated
    functions are registered in definition order (this may matter).

    Only use this decorator for brand new steps; to wrap a step provided
    by another extension, modify the pulldiscoverymapping dictionary
    directly."""
    def register(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return register
912 915
def _pulldiscovery(pullop):
    """Run every registered discovery step, in registration order"""
    for stepname in pulldiscoveryorder:
        pulldiscoverymapping[stepname](pullop)
918 921
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will eventually handle
    all discovery at some point.

    Stores its results on the pulloperation: `common`, `fetch` and
    `rheads`."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    # nodemap of the *unfiltered* repo, so we can recognize remote heads
    # that exist locally but are hidden by filtering
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, drop it from the unknown
        # remote heads and put it back in common.
        #
        # This is a hackish solution to catch most of the "common but
        # locally hidden" situations.  We do not perform discovery on the
        # unfiltered repository because that ends up doing a pathological
        # amount of round trips for a huge amount of changesets we do not
        # care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we'll not be able to
        # detect it.
        scommon = set(common)
        filteredrheads = []
        for n in rheads:
            if n in nm:
                if n not in scommon:
                    common.append(n)
            else:
                filteredrheads.append(n)
        if not filteredrheads:
            # every remote head is already known locally: nothing to fetch
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
956 959
def _pullbundle2(pullop):
    """pull data using bundle2

    Requests a single bundle2 container carrying the changegroup and,
    when the remote advertises support, 'listkeys' results for phases and
    bookmarks, plus obsolescence markers.  Each piece of data handled
    here is recorded in pullop.stepsdone so the legacy per-item pull
    functions skip it."""
    remotecaps = bundle2.bundle2caps(pullop.remote)
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}
    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch
    if 'listkeys' in remotecaps:
        kwargs['listkeys'] = ['phase', 'bookmarks']
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        # only request obsmarkers when both sides share a marker format
        remoteversions = bundle2.obsmarkersversion(remotecaps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    # let extensions add/adjust arguments before the request goes out
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)

    if pullop.fetch:
        # one result per changegroup part; fold them into a single code
        results = [cg['return'] for cg in op.records['changegroup']]
        pullop.cgresult = changegroup.combineresults(results)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value
            _pullbookmarks(pullop)
1003 1006
1004 1007 def _pullbundle2extraprepare(pullop, kwargs):
1005 1008 """hook function so that extensions can extend the getbundle call"""
1006 1009 pass
1007 1010
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo

    No-op when the 'changegroup' step was already handled (e.g. by
    bundle2).  Picks the richest wire command the remote supports:
    getbundle, then changegroupsubset, then plain changegroup."""
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        # old-style protocol: fetch everything the remote lists as missing
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 pullop.remote.url())
1041 1044
1042 1045 def _pullphase(pullop):
1043 1046 # Get remote phases data from remote
1044 1047 if 'phases' in pullop.stepsdone:
1045 1048 return
1046 1049 remotephases = pullop.remote.listkeys('phases')
1047 1050 _pullapplyphases(pullop, remotephases)
1048 1051
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state

    `remotephases` is the remote's 'phases' pushkey namespace.  Phases
    only ever advance here (e.g. draft -> public), via
    phases.advanceboundary."""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and unpublishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    # hoist frequently used lookups out of the comprehensions below
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
1083 1086
1084 1087 def _pullbookmarks(pullop):
1085 1088 """process the remote bookmark information to update the local one"""
1086 1089 if 'bookmarks' in pullop.stepsdone:
1087 1090 return
1088 1091 pullop.stepsdone.add('bookmarks')
1089 1092 repo = pullop.repo
1090 1093 remotebookmarks = pullop.remotebookmarks
1091 1094 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1092 1095 pullop.remote.url(),
1093 1096 pullop.gettransaction,
1094 1097 explicit=pullop.explicitbookmarks)
1095 1098
1096 1099 def _pullobsolete(pullop):
1097 1100 """utility function to pull obsolete markers from a remote
1098 1101
1099 1102 The `gettransaction` is function that return the pull transaction, creating
1100 1103 one if necessary. We return the transaction to inform the calling code that
1101 1104 a new transaction have been created (when applicable).
1102 1105
1103 1106 Exists mostly to allow overriding for experimentation purpose"""
1104 1107 if 'obsmarkers' in pullop.stepsdone:
1105 1108 return
1106 1109 pullop.stepsdone.add('obsmarkers')
1107 1110 tr = None
1108 1111 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1109 1112 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1110 1113 remoteobs = pullop.remote.listkeys('obsolete')
1111 1114 if 'dump0' in remoteobs:
1112 1115 tr = pullop.gettransaction()
1113 1116 for key in sorted(remoteobs, reverse=True):
1114 1117 if key.startswith('dump'):
1115 1118 data = base85.b85decode(remoteobs[key])
1116 1119 pullop.repo.obsstore.mergemarkers(tr, data)
1117 1120 pullop.repo.invalidatevolatilesets()
1118 1121 return tr
1119 1122
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    return set(['HG20', 'bundle2=' + urllib.quote(capsblob)])
1126 1129
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between a step name and the function generating that bundle2 part.
#
# This exists to help extensions wrap steps if necessary.
getbundle2partsmapping = {}
1134 1137
def getbundle2partsgenerator(stepname):
    """decorator registering a bundle2 part generator used by getbundle

    The decorated function is recorded in the step -> function mapping and
    its name is appended to the list of steps.  Note that decorated
    functions are registered in definition order (this may matter).

    Only use this decorator for brand new steps; to wrap a step provided
    by an extension, modify the getbundle2partsmapping dictionary
    directly."""
    def register(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        getbundle2partsorder.append(stepname)
        return func
    return register
1150 1153
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kinds of parts)

    This can be a bundle HG10 or a bundle HG20 depending on the
    `bundlecaps` passed.  For now the bundle can contain only a
    changegroup, but this will change when more part types become
    available for bundle2.

    This is different from changegroup.getchangegroup, which only returns
    an HG10 changegroup bundle.  They may eventually get reunited in the
    future once we have a clearer idea of the API we want for querying
    different data.

    The implementation is at a very early stage and will get massive
    rework when the bundle API is refined.
    """
    # bundle10 case
    usebundle2 = False
    if bundlecaps is not None:
        # the client advertises bundle2 support through an 'HG2x' capability
        usebundle2 = util.any((cap.startswith('HG2') for cap in bundlecaps))
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            # bundle10 cannot carry any of the extra part arguments
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return changegroup.getchangegroup(repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps)

    # bundle20 case
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    # run every registered part generator, in order
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **kwargs)

    return util.chunkbuffer(bundler.getchunks())
1197 1200
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = None
        cgversions = b2caps.get('changegroup')
        if not cgversions:  # 3.1 and 3.2 ship with an empty value
            # no version advertised: fall back to the default format
            cg = changegroup.getchangegroupraw(repo, source, heads=heads,
                                               common=common,
                                               bundlecaps=bundlecaps)
        else:
            # negotiate: use the highest version supported by both sides
            cgversions = [v for v in cgversions if v in changegroup.packermap]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = max(cgversions)
            cg = changegroup.getchangegroupraw(repo, source, heads=heads,
                                               common=common,
                                               bundlecaps=bundlecaps,
                                               version=version)

    if cg:
        part = bundler.newpart('changegroup', data=cg)
        if version is not None:
            part.addparam('version', version)
1225 1228
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add one 'listkeys' part per requested pushkey namespace"""
    for namespace in kwargs.get('listkeys', ()):
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        part.data = pushkey.encodekeys(repo.listkeys(namespace).items())
1236 1239
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if not kwargs.get('obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    # markers relevant to everything reachable from the requested heads
    subset = [c.node() for c in repo.set('::%ln', heads)]
    buildobsmarkerspart(bundler, repo.obsstore.relevantmarkers(subset))
1247 1250
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling."""
    currentheads = repo.heads()
    hashedheads = util.sha1(''.join(sorted(currentheads))).digest()
    # the client may send 'force', the literal head list, or a hash of it
    if their_heads in (['force'], currentheads, ['hashed', hashedheads]):
        return
    # someone else committed/pushed/unbundled while we
    # were transferring data
    raise error.PushRaced('repository changed while %s - '
                          'please try again' % context)
1261 1264
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application
    and has a mechanism to check that no push race occurred between the
    creation of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    tr = None
    lock = repo.lock()
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # only bundle2 unbundler objects carry a 'params' attribute
            try:
                tr = repo.transaction('unbundle')
                tr.hookargs['source'] = source
                tr.hookargs['url'] = url
                tr.hookargs['bundle2'] = '1'
                r = bundle2.processbundle(repo, cg, lambda: tr).reply
                tr.close()
            except Exception, exc:
                # tag the exception so upper layers know it happened
                # during bundle2 processing
                exc.duringunbundle2 = True
                raise
        else:
            # plain changegroup (bundle10) stream
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        if tr is not None:
            tr.release()
        lock.release()
    return r
General Comments 0
You need to be logged in to leave comments. Login now