##// END OF EJS Templates
bundle2: drop the experimental hooks...
Pierre-Yves David -
r24697:52ff737c default
parent child Browse files
Show More
@@ -1,1311 +1,1294 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex, nullid
10 10 import errno, urllib
11 11 import util, scmutil, changegroup, base85, error
12 12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 13
def readbundle(ui, fh, fname, vfs=None):
    """Sniff the 4-byte bundle header on ``fh`` and return a matching unbundler.

    A nameless stream whose first byte is '\\0' is assumed to be a headerless
    HG10 bundle with no compression; otherwise ``fname`` is resolved through
    ``vfs`` when one is given. Raises util.Abort on a non-Mercurial stream or
    an unknown bundle version.
    """
    hdr = changegroup.readexactly(fh, 4)

    compression = None
    if not fname:
        fname = "stream"
        if not hdr.startswith('HG') and hdr.startswith('\0'):
            # put the consumed bytes back and treat as uncompressed HG10
            fh = changegroup.headerlessfixup(fh, hdr)
            hdr = "HG10"
            compression = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic = hdr[0:2]
    version = hdr[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if compression is None:
            # HG10 carries a two-byte compression marker after the magic
            compression = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, compression)
    if version.startswith('2'):
        return bundle2.getunbundler(ui, fh, header=magic + version)
    raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39 39
def buildobsmarkerspart(bundler, markers):
    """add an obsmarker part to the bundler with <markers>

    Returns the new part, or None when ``markers`` is empty.
    Raises ValueError if the bundler doesn't support any known obsmarker
    format.
    """
    if not markers:
        return None
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(remoteversions)
    if version is None:
        raise ValueError('bundler do not support common obsmarker format')
    stream = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart('obsmarkers', data=stream)
54 54
55 55 def _canusebundle2(op):
56 56 """return true if a pull/push can use bundle2
57 57
58 58 Feel free to nuke this function when we drop the experimental option"""
59 59 return (op.repo.ui.configbool('experimental', 'bundle2-exp', False)
60 60 and op.remote.capable('bundle2'))
61 61
62 62
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push-related state and very common operations.

    A new one should be created at the beginning of each push and discarded
    afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed (names, e.g. from `hg push -B`)
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired? (None until the attempt is made)
        self.locallocked = None
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks, as (name, old-node-hex, new-node-hex) tuples
        self.outbookmarks = []
        # transaction manager (set by push() when the local repo is locked)
        self.trmanager = None

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(self.outgoing.common)
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads
162 162
# mapping of message used when pushing bookmark
# action -> (success message, failure message); each template takes the
# bookmark name as its single % argument.
bookmsgmap = {'update': (_("updating bookmark %s\n"),
                         _('updating bookmark %s failed!\n')),
              'export': (_("exporting bookmark %s\n"),
                         _('exporting bookmark %s failed!\n')),
              'delete': (_("deleting remote bookmark %s\n"),
                         _('deleting remote bookmark %s failed!\n')),
              }
171 171
172 172
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote.

    Returns the pushoperation object; its ``cgresult`` attribute carries the
    integer changegroup result:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
    if pushop.remote.local():
        # local-to-local push: refuse if the destination cannot host our
        # repository requirements
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    locallock = None
    try:
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        if pushop.locallocked:
            # the transaction manager carries the server's bundle2 reply
            # back into the local repository when pushback is allowed
            pushop.trmanager = transactionmanager(repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # old-style push: we must hold the remote lock ourselves
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if _canusebundle2(pushop):
                _pushbundle2(pushop)
            # the legacy steps below are no-ops for anything bundle2
            # already recorded in pushop.stepsdone
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
            if pushop.trmanager:
                pushop.trmanager.close()
    finally:
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()

    return pushop
246 246
# Ordered list of discovery step names; steps run in registration order.
pushdiscoveryorder = []

# step name -> implementing function
#
# Kept separate from the order list so extensions can wrap individual steps.
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator for function performing discovery before push

    The decorated function is recorded under ``stepname`` and appended to
    the ordered step list, so registration order is execution order.

    Only usable for brand new steps; to wrap an existing step, extensions
    must change the pushdiscovery dictionary directly."""
    def register(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoveryorder.append(stepname)
        pushdiscoverymapping[stepname] = func
        return func
    return register
270 270
def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order."""
    for name in pushdiscoveryorder:
        pushdiscoverymapping[name](pushop)
276 276
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """figure out which changesets need to be pushed"""
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
289 289
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    # pheads: remote public heads; droots: remote draft roots
    pheads, droots = ana
    extracond = ''
    if not publishing:
        # non-publishing remote: only locally-public changesets need moving
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote but public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                       outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    # heads to turn public if the changeset push succeeds / fails
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
324 324
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """collect the obsolescence markers relevant to the outgoing heads"""
    repo = pushop.repo
    if not obsolete.isenabled(repo, obsolete.exchangeopt):
        return
    if not repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (ctx.node() for ctx in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
335 335
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """compare local and remote bookmarks and fill pushop.outbookmarks

    Each entry is an (name, old-remote-value, new-value) tuple; an empty old
    value means creation, an empty new value means deletion.  Bookmarks the
    user named explicitly but which match nowhere set bkresult to 2.
    """
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # restrict bookmark moves to ancestors of the pushed revs
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    # bookmarks explicitly requested on the command line; names are removed
    # from this set as they are matched, leftovers are reported as errors
    explicit = set(pushop.bookmarks)

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in advdst + diverge + differ:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
386 386
def _pushcheckoutgoing(pushop):
    """validate the outgoing changesets before pushing them

    Returns False when there is nothing to push.  Unless --force was given,
    raises util.Abort when the push would propagate obsolete or troubled
    changesets, or would create unexpected new remote heads.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = {"unstable": _("push includes unstable changeset: %s!"),
                   "bumped": _("push includes bumped changeset: %s!"),
                   "divergent": _("push includes divergent changeset: %s!")}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(mst[ctx.troubles()[0]] % ctx)
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
421 421
# Ordered list of part-generator step names for an outgoing bundle2;
# generators run in registration order, so order matters.
b2partsgenorder = []

# step name -> part-generating function
#
# Kept separate from the order list so extensions can wrap individual steps.
b2partsgenmapping = {}

def b2partsgenerator(stepname):
    """decorator for function generating bundle2 part

    The decorated function is recorded under ``stepname`` and appended to
    the ordered step list, so registration order is generation order.

    Only usable for brand new steps; to wrap an existing step, extensions
    must change the b2partsgenmapping dictionary directly."""
    def register(func):
        assert stepname not in b2partsgenmapping
        b2partsgenorder.append(stepname)
        b2partsgenmapping[stepname] = func
        return func
    return register
445 445
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    if not pushop.force:
        # ask the server to abort if its heads changed since discovery
        bundler.newpart('check:heads', data=iter(pushop.remoteheads))
    b2caps = bundle2.bundle2caps(pushop.remote)
    # pick the newest changegroup version both sides understand
    version = None
    cgversions = b2caps.get('changegroup')
    if not cgversions: # 3.1 and 3.2 ship with an empty value
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing)
    else:
        cgversions = [v for v in cgversions if v in changegroup.packermap]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing,
                                                version=version)
    cgpart = bundler.newpart('changegroup', data=cg)
    if version is not None:
        cgpart.addparam('version', version)
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
486 486
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    One pushkey part is added per head that must be turned public on the
    remote; the returned handler checks each part's reply and warns about
    ignored or failed updates.
    """
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    # 'pushkey not in b2caps' spelled idiomatically (was: not 'pushkey' in),
    # matching the test in _pushb2bookmarks
    if 'pushkey' not in b2caps:
        # remote cannot process pushkey parts; leave the step for the
        # legacy pushkey command path (_pushsyncphase)
        return
    pushop.stepsdone.add('phases')
    part2node = []
    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
    def handlereply(op):
        """warn for every phase update the server ignored or rejected"""
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
518 518
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add an obsmarkers part when the remote understands a common format"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    supported = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(supported) is None:
        # no shared marker format: leave the step for the legacy
        # pushkey-based exchange
        return
    pushop.stepsdone.add('obsmarkers')
    markers = pushop.outobsmarkers
    if markers:
        buildobsmarkerspart(bundler, markers)
529 529
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        # remote cannot process pushkey parts; leave the step for the
        # legacy pushkey command path (_pushbookmark)
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode
    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # empty old value means creation, empty new value means deletion
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))


    def handlereply(op):
        """report the outcome of each bookmark pushkey part"""
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1
    return handlereply
572 572
573 573
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    Every generator registered in ``b2partsgenorder`` gets a chance to add
    parts (changegroup, phases, obsmarkers, bookmarks, ...); any reply
    handler a generator returns is run against the server's response."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push (only the replycaps part was added)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    try:
        trgetter = None
        if pushback:
            # allow the server reply to open a local transaction
            trgetter = pushop.trmanager.transaction
        op = bundle2.processbundle(pushop.repo, reply, trgetter)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    for rephand in replyhandlers:
        rephand(op)
610 610
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    Legacy (non-bundle2) path; a no-op when bundle2 already pushed the
    changesets.  Stores the integer result in ``pushop.cgresult``."""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
659 659
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        # pheads: remote public heads; droots: remote draft roots
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
715 715
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        # we hold a transaction: actually advance the phase boundary
        phases.advanceboundary(pushop.repo, pushop.trmanager.transaction(),
                               phase, nodes)
        return
    # repo is not locked, do not change any phases!
    # Inform the user that phases should have been moved when applicable.
    repo = pushop.repo
    actualmoves = [n for n in nodes if phase < repo[n].phase()]
    if actualmoves:
        phasestr = phases.phasenames[phase]
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n') % phasestr)
732 732
def _pushobsolete(pushop):
    """push obsolete markers through the legacy pushkey protocol

    A no-op when bundle2 already took care of the markers."""
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    remotedata = obsolete._pushkeyescape(pushop.outobsmarkers)
    # reverse sort to ensure we end with dump0
    results = [remote.pushkey('obsolete', key, '', remotedata[key])
               for key in sorted(remotedata, reverse=True)]
    if not all(results):
        repo.ui.warn(_('failed to push some obsolete markers!\n'))
751 751
def _pushbookmark(pushop):
    """Update bookmark position on remote via the legacy pushkey protocol"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        # changeset push failed outright, or bundle2 already handled it
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for name, oldnode, newnode in pushop.outbookmarks:
        # an empty "old" means creation, an empty "new" means deletion
        if not oldnode:
            action = 'export'
        elif not newnode:
            action = 'delete'
        else:
            action = 'update'
        if remote.pushkey('bookmarks', name, oldnode, newnode):
            ui.status(bookmsgmap[action][0] % name)
        else:
            ui.warn(bookmsgmap[action][1] % name)
    # discovery can have set the value from an invalid entry
    if pushop.bkresult is not None:
        pushop.bkresult = 1
773 773
class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=()):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = bookmarks
        # do we force pull?
        self.force = force
        # transaction manager (set by pull() once the repo lock is held)
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
830 830
class transactionmanager(object):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction.

    NOTE(review): the source here carried unresolved diff residue in close()
    — both the removed experimental b2x-*transactionclose hook calls and the
    kept close() call; per the changeset description ("bundle2: drop the
    experimental hooks") this resolves to the hook-free version."""
    def __init__(self, repo, source, url):
        # repo the transaction operates on
        self.repo = repo
        # operation source, e.g. 'pull' or 'push-response'
        self.source = source
        # remote url, embedded in the transaction name (password hidden)
        self.url = url
        # lazily-created transaction object
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
869 860
def pull(repo, remote, heads=None, force=False, bookmarks=()):
    """Pull changesets (and optionally bookmarks) from ``remote`` into ``repo``.

    Returns the pulloperation object; its ``cgresult`` attribute carries the
    changegroup application result."""
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks)
    if pullop.remote.local():
        # a local peer exposes its requirements; refuse early if we cannot
        # read what the source repository needs
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    pullop.remotebookmarks = remote.listkeys('bookmarks')
    lock = pullop.repo.lock()
    try:
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        _pulldiscovery(pullop)
        if _canusebundle2(pullop):
            _pullbundle2(pullop)
        # each step checks pullop.stepsdone, so steps already covered by
        # bundle2 above are skipped here
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        pullop.trmanager.release()
        lock.release()

    return pullop
897 888
# list of names of steps to perform discovery before pull, in order
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
905 896
def pulldiscovery(stepname):
    """decorator for functions performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a
    step from an extension, change the pulldiscoverymapping dictionary
    directly."""
    def dec(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return dec
921 912
def _pulldiscovery(pullop):
    """Run all registered discovery steps, in registration order."""
    for stepname in pulldiscoveryorder:
        step = pulldiscoverymapping[stepname]
        step(pullop)
927 918
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will eventually handle all
    discovery at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, drop it from the unknown
        # remote heads and put it back in common.
        #
        # This is a hackish solution to catch most of the "common but
        # locally hidden" situations. We do not perform discovery on an
        # unfiltered repository because it ends up doing a pathological
        # amount of round trips for a huge amount of changesets we do not
        # care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but is not including a remote head, we'll not be able to
        # detect it.
        scommon = set(common)
        filteredrheads = []
        for n in rheads:
            if n in nm:
                # known locally: move to common if not already there
                if n not in scommon:
                    common.append(n)
            else:
                filteredrheads.append(n)
        if not filteredrheads:
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
965 956
966 957 def _pullbundle2(pullop):
967 958 """pull data using bundle2
968 959
969 960 For now, the only supported data are changegroup."""
970 961 remotecaps = bundle2.bundle2caps(pullop.remote)
971 962 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
972 963 # pulling changegroup
973 964 pullop.stepsdone.add('changegroup')
974 965
975 966 kwargs['common'] = pullop.common
976 967 kwargs['heads'] = pullop.heads or pullop.rheads
977 968 kwargs['cg'] = pullop.fetch
978 969 if 'listkeys' in remotecaps:
979 970 kwargs['listkeys'] = ['phase', 'bookmarks']
980 971 if not pullop.fetch:
981 972 pullop.repo.ui.status(_("no changes found\n"))
982 973 pullop.cgresult = 0
983 974 else:
984 975 if pullop.heads is None and list(pullop.common) == [nullid]:
985 976 pullop.repo.ui.status(_("requesting all changes\n"))
986 977 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
987 978 remoteversions = bundle2.obsmarkersversion(remotecaps)
988 979 if obsolete.commonversion(remoteversions) is not None:
989 980 kwargs['obsmarkers'] = True
990 981 pullop.stepsdone.add('obsmarkers')
991 982 _pullbundle2extraprepare(pullop, kwargs)
992 983 bundle = pullop.remote.getbundle('pull', **kwargs)
993 984 try:
994 985 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
995 986 except error.BundleValueError, exc:
996 987 raise util.Abort('missing support for %s' % exc)
997 988
998 989 if pullop.fetch:
999 990 results = [cg['return'] for cg in op.records['changegroup']]
1000 991 pullop.cgresult = changegroup.combineresults(results)
1001 992
1002 993 # processing phases change
1003 994 for namespace, value in op.records['listkeys']:
1004 995 if namespace == 'phases':
1005 996 _pullapplyphases(pullop, value)
1006 997
1007 998 # processing bookmark update
1008 999 for namespace, value in op.records['listkeys']:
1009 1000 if namespace == 'bookmarks':
1010 1001 pullop.remotebookmarks = value
1011 1002 _pullbookmarks(pullop)
1012 1003
1013 1004 def _pullbundle2extraprepare(pullop, kwargs):
1014 1005 """hook function so that extensions can extend the getbundle call"""
1015 1006 pass
1016 1007
def _pullchangeset(pullop):
    """pull changesets from unbundle into the local repo"""
    # We delay the open of the transaction as late as possible so we
    # don't open a transaction for nothing or break a future useful
    # rollback call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 pullop.remote.url())
1050 1041
def _pullphase(pullop):
    """Fetch remote phase data and apply it locally (skipped if already done)."""
    if 'phases' in pullop.stepsdone:
        return
    remotephases = pullop.remote.listkeys('phases')
    _pullapplyphases(pullop, remotephases)
1057 1048
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing: all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
1092 1083
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)
1104 1095
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` function returns the pull transaction, creating one
    if necessary. We return the transaction to inform the calling code that
    a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            # markers are pushkey-encoded in base85 chunks named dumpN;
            # merge them newest-key first
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = base85.b85decode(remoteobs[key])
                    pullop.repo.obsstore.mergemarkers(tr, data)
            pullop.repo.invalidatevolatilesets()
    return tr
1128 1119
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = set(['HG20'])
    # advertise local bundle2 capabilities, URL-quoted into a single token
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    caps.add('bundle2=' + urllib.quote(capsblob))
    return caps
1135 1126
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}
1143 1134
def getbundle2partsgenerator(stepname):
    """decorator for functions generating bundle2 parts for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, change the getbundle2partsmapping dictionary directly."""
    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        getbundle2partsorder.append(stepname)
        return func
    return dec
1159 1150
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kinds of parts)

    Could be a bundle HG10 or a bundle HG20 depending on the bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    change when more part types become available for bundle2.

    This is different from changegroup.getchangegroup, which only returns an
    HG10 changegroup bundle. They may eventually get reunited in the future
    when we have a clearer idea of the API we want to query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    # bundle10 case
    usebundle2 = False
    if bundlecaps is not None:
        usebundle2 = util.any((cap.startswith('HG2') for cap in bundlecaps))
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return changegroup.getchangegroup(repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps)

    # bundle20 case
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            # decode the URL-quoted capability blob advertised by the client
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **kwargs)

    return util.chunkbuffer(bundler.getchunks())
1206 1197
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = None
        cgversions = b2caps.get('changegroup')
        if not cgversions:  # 3.1 and 3.2 ship with an empty value
            cg = changegroup.getchangegroupraw(repo, source, heads=heads,
                                               common=common,
                                               bundlecaps=bundlecaps)
        else:
            # negotiate the highest changegroup version both sides support
            cgversions = [v for v in cgversions if v in changegroup.packermap]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = max(cgversions)
            cg = changegroup.getchangegroupraw(repo, source, heads=heads,
                                               common=common,
                                               bundlecaps=bundlecaps,
                                               version=version)

    if cg:
        part = bundler.newpart('changegroup', data=cg)
        if version is not None:
            part.addparam('version', version)
1234 1225
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get('listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)
1245 1236
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get('obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        # only markers relevant to the requested changesets are shipped
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        buildobsmarkerspart(bundler, markers)
1256 1247
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = util.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)
1270 1261
1271 1262 def unbundle(repo, cg, heads, source, url):
1272 1263 """Apply a bundle to a repo.
1273 1264
1274 1265 this function makes sure the repo is locked during the application and have
1275 1266 mechanism to check that no push race occurred between the creation of the
1276 1267 bundle and its application.
1277 1268
1278 1269 If the push was raced as PushRaced exception is raised."""
1279 1270 r = 0
1280 1271 # need a transaction when processing a bundle2 stream
1281 1272 tr = None
1282 1273 lock = repo.lock()
1283 1274 try:
1284 1275 check_heads(repo, heads, 'uploading changes')
1285 1276 # push can proceed
1286 1277 if util.safehasattr(cg, 'params'):
1287 1278 try:
1288 1279 tr = repo.transaction('unbundle')
1289 1280 tr.hookargs['source'] = source
1290 1281 tr.hookargs['url'] = url
1291 1282 tr.hookargs['bundle2'] = '1'
1292 1283 r = bundle2.processbundle(repo, cg, lambda: tr).reply
1293 p = lambda: tr.writepending() and repo.root or ""
1294 repo.hook('b2x-pretransactionclose', throw=True, pending=p,
1295 **tr.hookargs)
1296 hookargs = dict(tr.hookargs)
1297 def runhooks():
1298 repo.hook('b2x-transactionclose', **hookargs)
1299 tr.addpostclose('b2x-hook-transactionclose',
1300 lambda tr: repo._afterlock(runhooks))
1301 1284 tr.close()
1302 1285 except Exception, exc:
1303 1286 exc.duringunbundle2 = True
1304 1287 raise
1305 1288 else:
1306 1289 r = changegroup.addchangegroup(repo, cg, source, url)
1307 1290 finally:
1308 1291 if tr is not None:
1309 1292 tr.release()
1310 1293 lock.release()
1311 1294 return r
General Comments 0
You need to be logged in to leave comments. Login now