##// END OF EJS Templates
bundle2: add an 'idx' argument to the 'getbundle2partsgenerator'...
Pierre-Yves David -
r24732:8f70b529 default
parent child Browse files
Show More
@@ -1,1297 +1,1300
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex, nullid
10 10 import errno, urllib
11 11 import util, scmutil, changegroup, base85, error
12 12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 13
def readbundle(ui, fh, fname, vfs=None):
    """Sniff the 4-byte bundle header on ``fh`` and return a matching unpacker.

    ``fname`` is the bundle's name for error reporting (falls back to
    "stream" when empty); ``vfs``, when given, is used to expand ``fname``
    to a full path.

    Returns a ``changegroup.cg1unpacker`` for HG10 bundles or a bundle2
    unbundler for HG2* bundles.  Raises util.Abort on an unknown magic or
    version.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        # A nameless stream that does not start with 'HG' but starts with a
        # NUL byte is assumed to be a raw, headerless changegroup: re-attach
        # an HG10 header with the 'UN' (uncompressed) algorithm.
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        # HG10 is followed by a 2-byte compression marker, unless we already
        # fixed it up to 'UN' above.
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith('2'):
        return bundle2.getunbundler(ui, fh, header=magic + version)
    else:
        raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39 39
def buildobsmarkerspart(bundler, markers):
    """add an obsmarker part to the bundler with <markers>

    No part is created if markers is empty.
    Raises ValueError if the bundler doesn't support any known obsmarker
    format.
    """
    if not markers:
        return None
    # pick the highest obsmarker format understood by both sides
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(remoteversions)
    if version is None:
        raise ValueError('bundler do not support common obsmarker format')
    stream = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart('obsmarkers', data=stream)
54 54
55 55 def _canusebundle2(op):
56 56 """return true if a pull/push can use bundle2
57 57
58 58 Feel free to nuke this function when we drop the experimental option"""
59 59 return (op.repo.ui.configbool('experimental', 'bundle2-exp', False)
60 60 and op.remote.capable('bundle2'))
61 61
62 62
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push related state and very common operations.

    A new one should be created at the beginning of each push and discarded
    afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmarks explicitly pushed by name
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired? (None until the attempt is made)
        self.locallocked = None
        # steps already performed
        # (used to check what steps have been already performed through
        # bundle2, so the non-bundle2 fallbacks can skip them)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks, as (name, old-node, new-node) triples
        self.outbookmarks = []
        # transaction manager (set by push() when the local lock is held)
        self.trmanager = None

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(self.outgoing.common)
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads
162 162
# mapping of bookmark push action -> (success message, failure message);
# used by both the bundle2 (_pushb2bookmarks) and the plain pushkey
# (_pushbookmark) code paths
bookmsgmap = {'update': (_("updating bookmark %s\n"),
                         _('updating bookmark %s failed!\n')),
              'export': (_("exporting bookmark %s\n"),
                         _('exporting bookmark %s failed!\n')),
              'delete': (_("deleting remote bookmark %s\n"),
                         _('deleting remote bookmark %s failed!\n')),
              }
171 171
172 172
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote.  Returns the ``pushoperation`` object; its
    ``cgresult`` attribute holds the integer changegroup result:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
    # refuse to push to a local repo lacking features we require
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    locallock = None
    try:
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        # a transaction manager is only useful if we hold the local lock,
        # since the server's reply may want to write back into this repo
        if pushop.locallocked:
            pushop.trmanager = transactionmanager(repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        # old-style push requires locking the remote repo; unbundle-capable
        # servers manage their own locking
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if _canusebundle2(pushop):
                _pushbundle2(pushop)
            # each of these is a no-op for steps already done through bundle2
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
            if pushop.trmanager:
                pushop.trmanager.close()
    finally:
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()

    return pushop
246 246
# list of steps to perform discovery before push, in execution order
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}
254 254
def pushdiscovery(stepname):
    """decorator registering a discovery step to run before push

    The decorated function is recorded under ``stepname`` in the
    step -> function mapping and the name is appended to the ordered step
    list, so registration order is significant.

    You can only use this decorator for a new step; if you want to wrap a
    step from an extension, change the pushdiscovery dictionary directly."""
    def register(discoveryfunc):
        # each step name may be registered only once
        assert stepname not in pushdiscoverymapping
        pushdiscoveryorder.append(stepname)
        pushdiscoverymapping[stepname] = discoveryfunc
        return discoveryfunc
    return register
270 270
def _pushdiscovery(pushop):
    """Execute every registered push discovery step, in registration order."""
    for name in pushdiscoveryorder:
        pushdiscoverymapping[name](pushop)
276 276
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed

    Fills in ``pushop.outgoing``, ``pushop.remoteheads`` and
    ``pushop.incoming``.
    """
    fci = discovery.findcommonincoming
    commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    # reuse the incoming discovery result to avoid a second exchange
    outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
289 289
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both success and failure case for changesets push)

    Fills in ``pushop.outdatedphases`` (success case) and
    ``pushop.fallbackoutdatedphases`` (failure case).
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    # on a non-publishing remote, only public changesets need a phase update
    extracond = ''
    if not publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote but public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changesets we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
324 324
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """discover the obsolescence markers relevant to the push

    Only runs when obsmarker exchange is enabled, the local obsstore is
    non-empty, and the remote advertises the 'obsolete' pushkey namespace.
    Fills in ``pushop.outobsmarkers``.
    """
    if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
        and pushop.repo.obsstore
        and 'obsolete' in pushop.remote.listkeys('namespaces')):
        repo = pushop.repo
        # very naive computation, that can be quite expensive on big repo.
        # However: evolution is currently slow on them anyway.
        nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
        pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
335 335
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """discover which bookmarks to update, export or delete on the remote

    Fills in ``pushop.outbookmarks`` with (name, old, new) triples and sets
    ``pushop.bkresult`` to 2 when an explicitly requested bookmark exists on
    neither side.
    """
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    # when pushing a subset of revs, only advance bookmarks that point
    # inside the pushed subset
    if pushop.revs:
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    # bookmarks the user named on the command line; each category below
    # removes the ones it accounts for, so leftovers are unknown names
    explicit = set(pushop.bookmarks)

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
    # bookmarks that simply advanced locally
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in advdst + diverge + differ:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
386 386
def _pushcheckoutgoing(pushop):
    """Validate the outgoing changesets; return True if a push should happen.

    Returns False when there is nothing to push.  When not forcing, aborts
    on obsolete/troubled outgoing heads and runs the new-head checks.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = {"unstable": _("push includes unstable changeset: %s!"),
                   "bumped": _("push includes bumped changeset: %s!"),
                   "divergent": _("push includes divergent changeset: %s!")}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(mst[ctx.troubles()[0]] % ctx)
        # bookmarks configured as implying an allowed new head
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
421 421
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}
429 429
def b2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 parts

    The function is added to the step -> function mapping.  When ``idx`` is
    None the step is appended at the end of the ordered step list; otherwise
    it is inserted at position ``idx``.  Beware that decorated functions
    will be added in order (this may matter).

    You can only use this decorator for new steps; if you want to wrap a
    step from an extension, change the b2partsgenmapping dictionary
    directly."""
    def dec(func):
        # a step name may be registered only once
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return dec
448 448
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    Returns a reply handler, or None when the step was skipped.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    if not pushop.force:
        bundler.newpart('check:heads', data=iter(pushop.remoteheads))
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = None
    cgversions = b2caps.get('changegroup')
    if not cgversions:  # 3.1 and 3.2 ship with an empty value
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing)
    else:
        # negotiate the highest changegroup version both sides support
        cgversions = [v for v in cgversions if v in changegroup.packermap]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing,
                                                version=version)
    cgpart = bundler.newpart('changegroup', data=cg)
    if version is not None:
        cgpart.addparam('version', version)
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
489 489
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    Adds one pushkey part per outdated phase root and returns a reply
    handler that warns about any update the server rejected or ignored.
    Returns None when phases were already handled or the remote lacks
    pushkey support.
    """
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    # idiom fix: 'x not in y' instead of 'not x in y', matching
    # _pushb2bookmarks
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('phases')
    part2node = []
    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        # one pushkey part per head to turn public on the remote
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
521 521
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add an obsolescence-markers part to the bundle2 push when possible"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    # bail out when no obsmarker format is shared with the remote
    supported = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(supported) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        buildobsmarkerspart(bundler, pushop.outobsmarkers)
532 532
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    Adds one pushkey part per outgoing bookmark and returns a reply handler
    that reports success or failure per bookmark.  Returns None when
    bookmarks were already handled or the remote lacks pushkey support.
    """
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode
    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # empty old -> creation, empty new -> deletion, otherwise update
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))


    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply
575 575
576 576
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # allow the server to push data back only when we can write it into a
    # local transaction
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    # run every registered part generator; collect their reply handlers
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push (only the replycaps part was added)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    try:
        trgetter = None
        if pushback:
            trgetter = pushop.trmanager.transaction
        op = bundle2.processbundle(pushop.repo, reply, trgetter)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    # dispatch the server's reply to each part's handler
    for rephand in replyhandlers:
        rephand(op)
613 613
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    Non-bundle2 fallback; a no-op if the changesets step was already done
    (e.g. through bundle2).  Stores the result in ``pushop.cgresult``.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
662 662
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely

    Applies remote phase knowledge to the local repo, then (unless bundle2
    already pushed phases) advances outdated remote phase roots one-by-one
    with independent pushkey calls.
    """
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
718 718
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo

    Requires the local lock (via ``pushop.trmanager``); without it, only
    informs the user that phases should have been moved.
    """
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)
735 735
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote

    Non-bundle2 fallback using the 'obsolete' pushkey namespace; a no-op
    when the obsmarkers step was already done through bundle2.
    """
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        rslts = []
        # markers are split across several pushkey payloads when too large
        remotedata = obsolete._pushkeyescape(pushop.outobsmarkers)
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)
754 754
def _pushbookmark(pushop):
    """Update bookmark position on remote

    Non-bundle2 fallback using one pushkey call per bookmark; a no-op when
    the bookmarks step was already done or the changegroup push errored.
    """
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        # empty old -> creation, empty new -> deletion, otherwise update
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value from an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
776 776
class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=()):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = bookmarks
        # do we force pull?
        self.force = force
        # transaction manager (set by pull() once the local lock is held)
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            # append pulled heads not already in common, preserving order
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
833 833
class transactionmanager(object):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks
    when closing the transaction."""

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return the open transaction, creating it on first use."""
        if self._tr:
            return self._tr
        # the transaction name carries the operation source and a
        # password-scrubbed url, both also exposed to hooks
        trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
        tr = self.repo.transaction(trname)
        tr.hookargs['source'] = self.source
        tr.hookargs['url'] = self.url
        self._tr = tr
        return tr

    def close(self):
        """close transaction if created"""
        if self._tr is None:
            return
        self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is None:
            return
        self._tr.release()
863 863
864 864 def pull(repo, remote, heads=None, force=False, bookmarks=()):
865 865 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks)
866 866 if pullop.remote.local():
867 867 missing = set(pullop.remote.requirements) - pullop.repo.supported
868 868 if missing:
869 869 msg = _("required features are not"
870 870 " supported in the destination:"
871 871 " %s") % (', '.join(sorted(missing)))
872 872 raise util.Abort(msg)
873 873
874 874 pullop.remotebookmarks = remote.listkeys('bookmarks')
875 875 lock = pullop.repo.lock()
876 876 try:
877 877 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
878 878 _pulldiscovery(pullop)
879 879 if _canusebundle2(pullop):
880 880 _pullbundle2(pullop)
881 881 _pullchangeset(pullop)
882 882 _pullphase(pullop)
883 883 _pullbookmarks(pullop)
884 884 _pullobsolete(pullop)
885 885 pullop.trmanager.close()
886 886 finally:
887 887 pullop.trmanager.release()
888 888 lock.release()
889 889
890 890 return pullop
891 891
892 892 # list of steps to perform discovery before pull
893 893 pulldiscoveryorder = []
894 894
895 895 # Mapping between step name and function
896 896 #
897 897 # This exists to help extensions wrap steps if necessary
898 898 pulldiscoverymapping = {}
899 899
900 900 def pulldiscovery(stepname):
901 901 """decorator for function performing discovery before pull
902 902
903 903 The function is added to the step -> function mapping and appended to the
904 904 list of steps. Beware that decorated function will be added in order (this
905 905 may matter).
906 906
907 907 You can only use this decorator for a new step, if you want to wrap a step
908 908 from an extension, change the pulldiscovery dictionary directly."""
909 909 def dec(func):
910 910 assert stepname not in pulldiscoverymapping
911 911 pulldiscoverymapping[stepname] = func
912 912 pulldiscoveryorder.append(stepname)
913 913 return func
914 914 return dec
915 915
916 916 def _pulldiscovery(pullop):
917 917 """Run all discovery steps"""
918 918 for stepname in pulldiscoveryorder:
919 919 step = pulldiscoverymapping[stepname]
920 920 step(pullop)
921 921
922 922 @pulldiscovery('changegroup')
923 923 def _pulldiscoverychangegroup(pullop):
924 924 """discovery phase for the pull
925 925
926 926 Current handle changeset discovery only, will change handle all discovery
927 927 at some point."""
928 928 tmp = discovery.findcommonincoming(pullop.repo,
929 929 pullop.remote,
930 930 heads=pullop.heads,
931 931 force=pullop.force)
932 932 common, fetch, rheads = tmp
933 933 nm = pullop.repo.unfiltered().changelog.nodemap
934 934 if fetch and rheads:
935 935 # If a remote heads in filtered locally, lets drop it from the unknown
936 936 # remote heads and put in back in common.
937 937 #
938 938 # This is a hackish solution to catch most of "common but locally
939 939 # hidden situation". We do not performs discovery on unfiltered
940 940 # repository because it end up doing a pathological amount of round
941 941 # trip for w huge amount of changeset we do not care about.
942 942 #
943 943 # If a set of such "common but filtered" changeset exist on the server
944 944 # but are not including a remote heads, we'll not be able to detect it,
945 945 scommon = set(common)
946 946 filteredrheads = []
947 947 for n in rheads:
948 948 if n in nm:
949 949 if n not in scommon:
950 950 common.append(n)
951 951 else:
952 952 filteredrheads.append(n)
953 953 if not filteredrheads:
954 954 fetch = []
955 955 rheads = filteredrheads
956 956 pullop.common = common
957 957 pullop.fetch = fetch
958 958 pullop.rheads = rheads
959 959
960 960 def _pullbundle2(pullop):
961 961 """pull data using bundle2
962 962
963 963 For now, the only supported data are changegroup."""
964 964 remotecaps = bundle2.bundle2caps(pullop.remote)
965 965 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
966 966 # pulling changegroup
967 967 pullop.stepsdone.add('changegroup')
968 968
969 969 kwargs['common'] = pullop.common
970 970 kwargs['heads'] = pullop.heads or pullop.rheads
971 971 kwargs['cg'] = pullop.fetch
972 972 if 'listkeys' in remotecaps:
973 973 kwargs['listkeys'] = ['phase', 'bookmarks']
974 974 if not pullop.fetch:
975 975 pullop.repo.ui.status(_("no changes found\n"))
976 976 pullop.cgresult = 0
977 977 else:
978 978 if pullop.heads is None and list(pullop.common) == [nullid]:
979 979 pullop.repo.ui.status(_("requesting all changes\n"))
980 980 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
981 981 remoteversions = bundle2.obsmarkersversion(remotecaps)
982 982 if obsolete.commonversion(remoteversions) is not None:
983 983 kwargs['obsmarkers'] = True
984 984 pullop.stepsdone.add('obsmarkers')
985 985 _pullbundle2extraprepare(pullop, kwargs)
986 986 bundle = pullop.remote.getbundle('pull', **kwargs)
987 987 try:
988 988 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
989 989 except error.BundleValueError, exc:
990 990 raise util.Abort('missing support for %s' % exc)
991 991
992 992 if pullop.fetch:
993 993 results = [cg['return'] for cg in op.records['changegroup']]
994 994 pullop.cgresult = changegroup.combineresults(results)
995 995
996 996 # processing phases change
997 997 for namespace, value in op.records['listkeys']:
998 998 if namespace == 'phases':
999 999 _pullapplyphases(pullop, value)
1000 1000
1001 1001 # processing bookmark update
1002 1002 for namespace, value in op.records['listkeys']:
1003 1003 if namespace == 'bookmarks':
1004 1004 pullop.remotebookmarks = value
1005 1005 _pullbookmarks(pullop)
1006 1006
1007 1007 def _pullbundle2extraprepare(pullop, kwargs):
1008 1008 """hook function so that extensions can extend the getbundle call"""
1009 1009 pass
1010 1010
1011 1011 def _pullchangeset(pullop):
1012 1012 """pull changeset from unbundle into the local repo"""
1013 1013 # We delay the open of the transaction as late as possible so we
1014 1014 # don't open transaction for nothing or you break future useful
1015 1015 # rollback call
1016 1016 if 'changegroup' in pullop.stepsdone:
1017 1017 return
1018 1018 pullop.stepsdone.add('changegroup')
1019 1019 if not pullop.fetch:
1020 1020 pullop.repo.ui.status(_("no changes found\n"))
1021 1021 pullop.cgresult = 0
1022 1022 return
1023 1023 pullop.gettransaction()
1024 1024 if pullop.heads is None and list(pullop.common) == [nullid]:
1025 1025 pullop.repo.ui.status(_("requesting all changes\n"))
1026 1026 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1027 1027 # issue1320, avoid a race if remote changed after discovery
1028 1028 pullop.heads = pullop.rheads
1029 1029
1030 1030 if pullop.remote.capable('getbundle'):
1031 1031 # TODO: get bundlecaps from remote
1032 1032 cg = pullop.remote.getbundle('pull', common=pullop.common,
1033 1033 heads=pullop.heads or pullop.rheads)
1034 1034 elif pullop.heads is None:
1035 1035 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1036 1036 elif not pullop.remote.capable('changegroupsubset'):
1037 1037 raise util.Abort(_("partial pull cannot be done because "
1038 1038 "other repository doesn't support "
1039 1039 "changegroupsubset."))
1040 1040 else:
1041 1041 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1042 1042 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
1043 1043 pullop.remote.url())
1044 1044
1045 1045 def _pullphase(pullop):
1046 1046 # Get remote phases data from remote
1047 1047 if 'phases' in pullop.stepsdone:
1048 1048 return
1049 1049 remotephases = pullop.remote.listkeys('phases')
1050 1050 _pullapplyphases(pullop, remotephases)
1051 1051
1052 1052 def _pullapplyphases(pullop, remotephases):
1053 1053 """apply phase movement from observed remote state"""
1054 1054 if 'phases' in pullop.stepsdone:
1055 1055 return
1056 1056 pullop.stepsdone.add('phases')
1057 1057 publishing = bool(remotephases.get('publishing', False))
1058 1058 if remotephases and not publishing:
1059 1059 # remote is new and unpublishing
1060 1060 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1061 1061 pullop.pulledsubset,
1062 1062 remotephases)
1063 1063 dheads = pullop.pulledsubset
1064 1064 else:
1065 1065 # Remote is old or publishing all common changesets
1066 1066 # should be seen as public
1067 1067 pheads = pullop.pulledsubset
1068 1068 dheads = []
1069 1069 unfi = pullop.repo.unfiltered()
1070 1070 phase = unfi._phasecache.phase
1071 1071 rev = unfi.changelog.nodemap.get
1072 1072 public = phases.public
1073 1073 draft = phases.draft
1074 1074
1075 1075 # exclude changesets already public locally and update the others
1076 1076 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1077 1077 if pheads:
1078 1078 tr = pullop.gettransaction()
1079 1079 phases.advanceboundary(pullop.repo, tr, public, pheads)
1080 1080
1081 1081 # exclude changesets already draft locally and update the others
1082 1082 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1083 1083 if dheads:
1084 1084 tr = pullop.gettransaction()
1085 1085 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1086 1086
1087 1087 def _pullbookmarks(pullop):
1088 1088 """process the remote bookmark information to update the local one"""
1089 1089 if 'bookmarks' in pullop.stepsdone:
1090 1090 return
1091 1091 pullop.stepsdone.add('bookmarks')
1092 1092 repo = pullop.repo
1093 1093 remotebookmarks = pullop.remotebookmarks
1094 1094 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1095 1095 pullop.remote.url(),
1096 1096 pullop.gettransaction,
1097 1097 explicit=pullop.explicitbookmarks)
1098 1098
1099 1099 def _pullobsolete(pullop):
1100 1100 """utility function to pull obsolete markers from a remote
1101 1101
1102 1102 The `gettransaction` is function that return the pull transaction, creating
1103 1103 one if necessary. We return the transaction to inform the calling code that
1104 1104 a new transaction have been created (when applicable).
1105 1105
1106 1106 Exists mostly to allow overriding for experimentation purpose"""
1107 1107 if 'obsmarkers' in pullop.stepsdone:
1108 1108 return
1109 1109 pullop.stepsdone.add('obsmarkers')
1110 1110 tr = None
1111 1111 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1112 1112 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1113 1113 remoteobs = pullop.remote.listkeys('obsolete')
1114 1114 if 'dump0' in remoteobs:
1115 1115 tr = pullop.gettransaction()
1116 1116 for key in sorted(remoteobs, reverse=True):
1117 1117 if key.startswith('dump'):
1118 1118 data = base85.b85decode(remoteobs[key])
1119 1119 pullop.repo.obsstore.mergemarkers(tr, data)
1120 1120 pullop.repo.invalidatevolatilesets()
1121 1121 return tr
1122 1122
1123 1123 def caps20to10(repo):
1124 1124 """return a set with appropriate options to use bundle20 during getbundle"""
1125 1125 caps = set(['HG20'])
1126 1126 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1127 1127 caps.add('bundle2=' + urllib.quote(capsblob))
1128 1128 return caps
1129 1129
1130 1130 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1131 1131 getbundle2partsorder = []
1132 1132
1133 1133 # Mapping between step name and function
1134 1134 #
1135 1135 # This exists to help extensions wrap steps if necessary
1136 1136 getbundle2partsmapping = {}
1137 1137
1138 def getbundle2partsgenerator(stepname):
1138 def getbundle2partsgenerator(stepname, idx=None):
1139 1139 """decorator for function generating bundle2 part for getbundle
1140 1140
1141 1141 The function is added to the step -> function mapping and appended to the
1142 1142 list of steps. Beware that decorated functions will be added in order
1143 1143 (this may matter).
1144 1144
1145 1145 You can only use this decorator for new steps, if you want to wrap a step
1146 1146 from an extension, attack the getbundle2partsmapping dictionary directly."""
1147 1147 def dec(func):
1148 1148 assert stepname not in getbundle2partsmapping
1149 1149 getbundle2partsmapping[stepname] = func
1150 if idx is None:
1150 1151 getbundle2partsorder.append(stepname)
1152 else:
1153 getbundle2partsorder.insert(idx, stepname)
1151 1154 return func
1152 1155 return dec
1153 1156
1154 1157 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1155 1158 **kwargs):
1156 1159 """return a full bundle (with potentially multiple kind of parts)
1157 1160
1158 1161 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1159 1162 passed. For now, the bundle can contain only changegroup, but this will
1160 1163 changes when more part type will be available for bundle2.
1161 1164
1162 1165 This is different from changegroup.getchangegroup that only returns an HG10
1163 1166 changegroup bundle. They may eventually get reunited in the future when we
1164 1167 have a clearer idea of the API we what to query different data.
1165 1168
1166 1169 The implementation is at a very early stage and will get massive rework
1167 1170 when the API of bundle is refined.
1168 1171 """
1169 1172 # bundle10 case
1170 1173 usebundle2 = False
1171 1174 if bundlecaps is not None:
1172 1175 usebundle2 = util.any((cap.startswith('HG2') for cap in bundlecaps))
1173 1176 if not usebundle2:
1174 1177 if bundlecaps and not kwargs.get('cg', True):
1175 1178 raise ValueError(_('request for bundle10 must include changegroup'))
1176 1179
1177 1180 if kwargs:
1178 1181 raise ValueError(_('unsupported getbundle arguments: %s')
1179 1182 % ', '.join(sorted(kwargs.keys())))
1180 1183 return changegroup.getchangegroup(repo, source, heads=heads,
1181 1184 common=common, bundlecaps=bundlecaps)
1182 1185
1183 1186 # bundle20 case
1184 1187 b2caps = {}
1185 1188 for bcaps in bundlecaps:
1186 1189 if bcaps.startswith('bundle2='):
1187 1190 blob = urllib.unquote(bcaps[len('bundle2='):])
1188 1191 b2caps.update(bundle2.decodecaps(blob))
1189 1192 bundler = bundle2.bundle20(repo.ui, b2caps)
1190 1193
1191 1194 kwargs['heads'] = heads
1192 1195 kwargs['common'] = common
1193 1196
1194 1197 for name in getbundle2partsorder:
1195 1198 func = getbundle2partsmapping[name]
1196 1199 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1197 1200 **kwargs)
1198 1201
1199 1202 return util.chunkbuffer(bundler.getchunks())
1200 1203
1201 1204 @getbundle2partsgenerator('changegroup')
1202 1205 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1203 1206 b2caps=None, heads=None, common=None, **kwargs):
1204 1207 """add a changegroup part to the requested bundle"""
1205 1208 cg = None
1206 1209 if kwargs.get('cg', True):
1207 1210 # build changegroup bundle here.
1208 1211 version = None
1209 1212 cgversions = b2caps.get('changegroup')
1210 1213 if not cgversions: # 3.1 and 3.2 ship with an empty value
1211 1214 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1212 1215 common=common,
1213 1216 bundlecaps=bundlecaps)
1214 1217 else:
1215 1218 cgversions = [v for v in cgversions if v in changegroup.packermap]
1216 1219 if not cgversions:
1217 1220 raise ValueError(_('no common changegroup version'))
1218 1221 version = max(cgversions)
1219 1222 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1220 1223 common=common,
1221 1224 bundlecaps=bundlecaps,
1222 1225 version=version)
1223 1226
1224 1227 if cg:
1225 1228 part = bundler.newpart('changegroup', data=cg)
1226 1229 if version is not None:
1227 1230 part.addparam('version', version)
1228 1231
1229 1232 @getbundle2partsgenerator('listkeys')
1230 1233 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1231 1234 b2caps=None, **kwargs):
1232 1235 """add parts containing listkeys namespaces to the requested bundle"""
1233 1236 listkeys = kwargs.get('listkeys', ())
1234 1237 for namespace in listkeys:
1235 1238 part = bundler.newpart('listkeys')
1236 1239 part.addparam('namespace', namespace)
1237 1240 keys = repo.listkeys(namespace).items()
1238 1241 part.data = pushkey.encodekeys(keys)
1239 1242
1240 1243 @getbundle2partsgenerator('obsmarkers')
1241 1244 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1242 1245 b2caps=None, heads=None, **kwargs):
1243 1246 """add an obsolescence markers part to the requested bundle"""
1244 1247 if kwargs.get('obsmarkers', False):
1245 1248 if heads is None:
1246 1249 heads = repo.heads()
1247 1250 subset = [c.node() for c in repo.set('::%ln', heads)]
1248 1251 markers = repo.obsstore.relevantmarkers(subset)
1249 1252 buildobsmarkerspart(bundler, markers)
1250 1253
1251 1254 def check_heads(repo, their_heads, context):
1252 1255 """check if the heads of a repo have been modified
1253 1256
1254 1257 Used by peer for unbundling.
1255 1258 """
1256 1259 heads = repo.heads()
1257 1260 heads_hash = util.sha1(''.join(sorted(heads))).digest()
1258 1261 if not (their_heads == ['force'] or their_heads == heads or
1259 1262 their_heads == ['hashed', heads_hash]):
1260 1263 # someone else committed/pushed/unbundled while we
1261 1264 # were transferring data
1262 1265 raise error.PushRaced('repository changed while %s - '
1263 1266 'please try again' % context)
1264 1267
1265 1268 def unbundle(repo, cg, heads, source, url):
1266 1269 """Apply a bundle to a repo.
1267 1270
1268 1271 this function makes sure the repo is locked during the application and have
1269 1272 mechanism to check that no push race occurred between the creation of the
1270 1273 bundle and its application.
1271 1274
1272 1275 If the push was raced as PushRaced exception is raised."""
1273 1276 r = 0
1274 1277 # need a transaction when processing a bundle2 stream
1275 1278 tr = None
1276 1279 lock = repo.lock()
1277 1280 try:
1278 1281 check_heads(repo, heads, 'uploading changes')
1279 1282 # push can proceed
1280 1283 if util.safehasattr(cg, 'params'):
1281 1284 try:
1282 1285 tr = repo.transaction('unbundle')
1283 1286 tr.hookargs['source'] = source
1284 1287 tr.hookargs['url'] = url
1285 1288 tr.hookargs['bundle2'] = '1'
1286 1289 r = bundle2.processbundle(repo, cg, lambda: tr).reply
1287 1290 tr.close()
1288 1291 except Exception, exc:
1289 1292 exc.duringunbundle2 = True
1290 1293 raise
1291 1294 else:
1292 1295 r = changegroup.addchangegroup(repo, cg, source, url)
1293 1296 finally:
1294 1297 if tr is not None:
1295 1298 tr.release()
1296 1299 lock.release()
1297 1300 return r
General Comments 0
You need to be logged in to leave comments. Login now