##// END OF EJS Templates
getbundle: send highest changegroup format supported by both side...
Pierre-Yves David -
r23179:6bb9533f default
parent child Browse files
Show More
@@ -1,1271 +1,1286
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex, nullid
10 10 import errno, urllib
11 11 import util, scmutil, changegroup, base85, error
12 12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 13
def readbundle(ui, fh, fname, vfs=None):
    """Read a bundle header from ``fh`` and return the matching unpacker.

    Handles headerless streams (raw, uncompressed changegroup data whose
    first byte is NUL) by wrapping them as an uncompressed HG10 bundle.
    When ``vfs`` is provided, ``fname`` is resolved through it for error
    reporting. Raises util.Abort for non-bundle or unknown-version data.
    """
    magicheader = changegroup.readexactly(fh, 4)

    compression = None
    if not fname:
        fname = "stream"
        if not magicheader.startswith('HG') and magicheader.startswith('\0'):
            # headerless stream: treat as uncompressed HG10 data
            fh = changegroup.headerlessfixup(fh, magicheader)
            magicheader = "HG10"
            compression = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic = magicheader[0:2]
    version = magicheader[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if compression is None:
            # compression marker follows the header on the wire
            compression = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, compression)
    if version == '2Y':
        return bundle2.unbundle20(ui, fh, header=magic + version)
    raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39 39
def buildobsmarkerspart(bundler, markers):
    """add an obsmarker part to the bundler with <markers>

    No part is created if markers is empty.
    Raises ValueError if the bundler doesn't support any known obsmarker format.
    """
    if not markers:
        return None
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(remoteversions)
    if version is None:
        raise ValueError('bundler do not support common obsmarker format')
    stream = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart('B2X:OBSMARKERS', data=stream)
54 54
class pushoperation(object):
    """State container for a single push operation.

    It carries push related state and implements a few very common
    push operations.

    A new object should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmarks explicitly pushed by name
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired? (None until the attempt is made)
        self.locallocked = None
        # steps already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks, as (name, old-node-hex, new-node-hex) tuples
        self.outbookmarks = []

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(self.outgoing.common)
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads
152 152
# mapping of bookmark-push action name -> (success message, failure message),
# used by both the bundle2 and the pushkey bookmark push paths
bookmsgmap = {'update': (_("updating bookmark %s\n"),
                         _('updating bookmark %s failed!\n')),
              'export': (_("exporting bookmark %s\n"),
                         _('exporting bookmark %s failed!\n')),
              'delete': (_("deleting remote bookmark %s\n"),
                         _('deleting remote bookmark %s failed!\n')),
              }
161 161
162 162
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote.

    Returns the pushoperation object; its ``cgresult`` attribute holds the
    changegroup push outcome:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
    if pushop.remote.local():
        # local destination: ensure it understands all our requirements
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    locallock = None
    try:
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # old-style push requires locking the remote repository
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            # bundle2 path marks completed steps in pushop.stepsdone so the
            # legacy steps below turn into no-ops for what it handled
            if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
                                          False)
                and pushop.remote.capable('bundle2-exp')):
                _pushbundle2(pushop)
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
    finally:
        if locallock is not None:
            locallock.release()

    return pushop
230 230
# Ordered list of discovery step names to perform before push
# (populated by the ``pushdiscovery`` decorator below)
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}
238 238
def pushdiscovery(stepname):
    """Decorator registering a discovery function to run before push.

    The decorated function is stored in the step -> function mapping and
    the step name is appended to the ordered step list. Decoration order
    is therefore significant.

    Only use this decorator for brand new steps; to wrap a step from an
    extension, modify the pushdiscoverymapping dictionary directly.
    """
    def register(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return register
254 254
def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order."""
    for stepname in pushdiscoveryorder:
        pushdiscoverymapping[stepname](pushop)
260 260
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed

    Fills in pushop.outgoing, pushop.remoteheads and pushop.incoming."""
    unfi = pushop.repo.unfiltered()
    fci = discovery.findcommonincoming
    commoninc = fci(unfi, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    # reuse the incoming discovery result to avoid a second exchange
    outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
274 274
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both success and failure case for changesets push)

    Fills pushop.outdatedphases (used when the changeset push succeeds) and
    pushop.fallbackoutdatedphases (used when it fails)."""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    extracond = ''
    if not publishing:
        # non-publishing server: only public changesets need phase updates
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs that are draft on remote but public here.
    # XXX Beware that revset breaks if droots is not strictly
    # XXX roots; we may want to ensure it is, but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changesets we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                       outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
309 309
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """Collect the obsolescence markers relevant to the outgoing heads."""
    repo = pushop.repo
    if not obsolete.isenabled(repo, obsolete.exchangeopt):
        return
    if not repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
320 320
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """discover the bookmarks that need to be pushed

    Fills pushop.outbookmarks with (name, old-remote-hex, new-hex) tuples;
    sets pushop.bkresult to 2 when an explicitly named bookmark is unknown
    on both sides."""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # restrict bookmark moves to the pushed subset
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    # names explicitly requested on the command line; entries are removed
    # as they are matched, anything left over is an error
    explicit = set(pushop.bookmarks)

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
    # bookmarks that advanced locally: push if within the pushed subset
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in advdst + diverge + differ:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
371 371
def _pushcheckoutgoing(pushop):
    """Validate the outgoing changesets before pushing.

    Returns False when there is nothing to push. Aborts when the push
    would propagate obsolete/troubled changesets or create unexpected
    remote heads (unless --force)."""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are assigned to locals for 80-char-limit reasons
            mso = _("push includes obsolete changeset: %s!")
            mst = {"unstable": _("push includes unstable changeset: %s!"),
                   "bumped": _("push includes bumped changeset: %s!"),
                   "divergent": _("push includes divergent changeset: %s!")}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missingheads will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(mst[ctx.troubles()[0]] % ctx)
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
406 406
# List of names of steps to perform for an outgoing bundle2, order matters.
# (populated by the ``b2partsgenerator`` decorator below)
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}
414 414
def b2partsgenerator(stepname):
    """Decorator registering a bundle2 part generator for push.

    The decorated function is stored in the step -> function mapping and
    the step name is appended to the ordered step list. Decoration order
    is therefore significant.

    Only use this decorator for brand new steps; to wrap a step from an
    extension, modify the b2partsgenmapping dictionary directly.
    """
    def register(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        b2partsgenorder.append(stepname)
        return func
    return register
430 430
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    Returns a reply handler that extracts that result from the server reply.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    if not pushop.force:
        # the CHECK:HEADS part makes the server abort on a push race
        bundler.newpart('B2X:CHECK:HEADS', data=iter(pushop.remoteheads))
    cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                            pushop.outgoing)
    cgpart = bundler.newpart('B2X:CHANGEGROUP', data=cg)
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
457 457
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    Adds one b2x:pushkey part per outdated head to turn public; returns a
    reply handler that reports heads the server refused or ignored."""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    # use the idiomatic 'not in' (also consistent with _pushb2bookmarks)
    if 'b2x:pushkey' not in b2caps:
        return
    pushop.stepsdone.add('phases')
    part2node = []
    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('b2x:pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
    def handlereply(op):
        """warn about every phase update the server did not apply"""
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
489 489
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """Add an obsolescence markers part to the bundle2 push, when the
    remote advertises a marker format we also understand."""
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    common = obsolete.commonversion(remoteversions)
    if common is None:
        # no shared obsmarker wire format; leave the step for other paths
        return
    pushop.stepsdone.add('obsmarkers')
    markers = pushop.outobsmarkers
    if markers:
        buildobsmarkerspart(bundler, markers)
500 500
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    Adds one b2x:pushkey part per outgoing bookmark; returns a reply
    handler that reports success/failure per bookmark and sets
    pushop.bkresult."""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'b2x:pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode
    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('b2x:pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # empty old means creation, empty new means deletion
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))


    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply
543 543
544 544
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    Each registered part generator (see ``b2partsgenerator``) gets a chance
    to add parts to the bundle; a callable return value is collected and
    later invoked on the server reply."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo))
    bundler.newpart('b2x:replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    # (<= 1 because the replycaps part above is always present)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    try:
        op = bundle2.processbundle(pushop.repo, reply)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    for rephand in replyhandlers:
        rephand(op)
574 574
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    Legacy (non-bundle2) path; no-op when the changesets step was already
    handled through bundle2. Stores the result in pushop.cgresult."""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
623 623
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely

    First applies remote phase information locally, then pushes local
    phase changes (outdated heads turned public) to the remote, via
    batched bundle2 pushkey parts when supported, otherwise via
    individual pushkey calls."""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        b2caps = bundle2.bundle2caps(pushop.remote)
        if 'b2x:pushkey' in b2caps:
            # server supports bundle2, let's do a batched push through it
            #
            # This will eventually be unified with the changesets bundle2 push
            bundler = bundle2.bundle20(pushop.ui, b2caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo))
            bundler.newpart('b2x:replycaps', data=capsblob)
            part2node = []
            enc = pushkey.encode
            for newremotehead in outdated:
                part = bundler.newpart('b2x:pushkey')
                part.addparam('namespace', enc('phases'))
                part.addparam('key', enc(newremotehead.hex()))
                part.addparam('old', enc(str(phases.draft)))
                part.addparam('new', enc(str(phases.public)))
                part2node.append((part.id, newremotehead))
            stream = util.chunkbuffer(bundler.getchunks())
            try:
                reply = pushop.remote.unbundle(stream, ['force'], 'push')
                op = bundle2.processbundle(pushop.repo, reply)
            except error.BundleValueError, exc:
                raise util.Abort('missing support for %s' % exc)
            for partid, node in part2node:
                partrep = op.records.getreplies(partid)
                results = partrep['pushkey']
                assert len(results) <= 1
                msg = None
                if not results:
                    msg = _('server ignored update of %s to public!\n') % node
                elif not int(results[0]['return']):
                    msg = _('updating %s to public failed!\n') % node
                if msg is not None:
                    pushop.ui.warn(msg)

        else:
            # fallback to independent pushkey command
            for newremotehead in outdated:
                r = pushop.remote.pushkey('phases',
                                          newremotehead.hex(),
                                          str(phases.draft),
                                          str(phases.public))
                if not r:
                    pushop.ui.warn(_('updating %s to public failed!\n')
                                   % newremotehead)
715 715
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    repo = pushop.repo
    if not pushop.locallocked:
        # repo is not locked, do not change any phases!
        # Only inform the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)
        return
    tr = repo.transaction('push-phase-sync')
    try:
        phases.advanceboundary(repo, tr, phase, nodes)
        tr.close()
    finally:
        tr.release()
734 734
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    remotedata = obsolete._pushkeyescape(pushop.outobsmarkers)
    # reverse sort to ensure we end with dump0
    results = [remote.pushkey('obsolete', key, '', remotedata[key])
               for key in sorted(remotedata, reverse=True)]
    if not all(results):
        repo.ui.warn(_('failed to push some obsolete markers!\n'))
753 753
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    # skip after a failed changegroup push, or when bundle2 already did it
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for book, old, new in pushop.outbookmarks:
        # empty old means creation, empty new means deletion
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        else:
            action = 'update'
        ok = remote.pushkey('bookmarks', book, old, new)
        if ok:
            ui.status(bookmsgmap[action][0] % book)
        else:
            ui.warn(bookmsgmap[action][1] % book)
            # discovery can have set the value form invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
775 775
class pulloperation(object):
    """State container for a single pull operation.

    It carries pull related state and common pull operations.

    A new object should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=()):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = bookmarks
        # do we force pull?
        self.force = force
        # the name the pull transaction
        self._trname = 'pull\n' + util.hidepassword(remote.url())
        # hold the transaction once created
        self._tr = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps already done
        self.stepsdone = set()

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled everything possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        """get appropriate pull transaction, creating it if needed"""
        if self._tr is None:
            self._tr = self.repo.transaction(self._trname)
            self._tr.hookargs['source'] = 'pull'
            self._tr.hookargs['url'] = self.remote.url()
        return self._tr

    def closetransaction(self):
        """close transaction if created"""
        if self._tr is not None:
            repo = self.repo
            cl = repo.unfiltered().changelog
            # fix: this statement was previously duplicated, calling
            # writepending() a redundant second time
            p = cl.writepending() and repo.root or ""
            repo.hook('b2x-pretransactionclose', throw=True, pending=p,
                      **self._tr.hookargs)
            self._tr.close()
            hookargs = dict(self._tr.hookargs)
            def runhooks():
                repo.hook('b2x-transactionclose', **hookargs)
            repo._afterlock(runhooks)

    def releasetransaction(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
858 858
def pull(repo, remote, heads=None, force=False, bookmarks=()):
    """pull changesets (and related data) from ``remote`` into ``repo``

    Returns the pulloperation object describing the pull that was done.
    """
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks)
    if pullop.remote.local():
        # refuse to pull from a local repo that requires features we lack
        unsupported = set(pullop.remote.requirements) - pullop.repo.supported
        if unsupported:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(unsupported)))
            raise util.Abort(msg)

    pullop.remotebookmarks = remote.listkeys('bookmarks')
    lock = pullop.repo.lock()
    try:
        _pulldiscovery(pullop)
        # use bundle2 only when explicitly enabled and supported by the peer
        bundle2requested = (
            pullop.repo.ui.configbool('experimental', 'bundle2-exp', False)
            and pullop.remote.capable('bundle2-exp'))
        if bundle2requested:
            _pullbundle2(pullop)
        # each step checks pullop.stepsdone and skips work already covered
        # by the bundle2 exchange above
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.closetransaction()
    finally:
        pullop.releasetransaction()
        lock.release()

    return pullop
886 886
# list of steps to perform discovery before pull (order of execution)
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
894 894
def pulldiscovery(stepname):
    """decorator for function performing discovery before pull

    The decorated function is recorded under ``stepname`` in the
    step -> function mapping and the step is appended to the ordered list
    of steps (so declaration order matters).

    Only use this decorator for a brand new step; to wrap an existing step
    from an extension, mutate the pulldiscovery dictionary directly."""
    def register(func):
        # a step name may only ever be registered once
        assert stepname not in pulldiscoverymapping
        pulldiscoveryorder.append(stepname)
        pulldiscoverymapping[stepname] = func
        return func
    return register
910 910
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    # execute each registered step in declaration order
    for stepname in pulldiscoveryorder:
        pulldiscoverymapping[stepname](pullop)
916 916
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Current handle changeset discovery only, will change handle all discovery
    at some point."""
    common, fetch, rheads = discovery.findcommonincoming(
        pullop.repo.unfiltered(),
        pullop.remote,
        heads=pullop.heads,
        force=pullop.force)
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
928 928
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    remotecaps = bundle2.bundle2caps(pullop.remote)
    # advertise our own bundle20 capabilities to the server
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}
    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    # 'cg' is truthy when discovery found changesets to fetch
    kwargs['cg'] = pullop.fetch
    if 'b2x:listkeys' in remotecaps:
        # piggyback phases and bookmarks on the same round-trip
        kwargs['listkeys'] = ['phase', 'bookmarks']
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    # request obsolescence markers only when both sides share a format
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(remotecaps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    # let extensions add extra arguments to the getbundle call
    _pullbundle2extraprepare(pullop, kwargs)
    if kwargs.keys() == ['format']:
        return # nothing to pull
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)

    if pullop.fetch:
        # fold the per-changegroup return codes into a single pull result:
        # 0 = failure, 1 = success with no head change, >1/-1... encode the
        # number of heads added/removed (see addchangegroup convention)
        changedheads = 0
        pullop.cgresult = 1
        for cg in op.records['changegroup']:
            ret = cg['return']
            # If any changegroup result is 0, return 0
            if ret == 0:
                pullop.cgresult = 0
                break
            if ret < -1:
                changedheads += ret + 1
            elif ret > 1:
                changedheads += ret - 1
        if changedheads > 0:
            pullop.cgresult = 1 + changedheads
        elif changedheads < 0:
            pullop.cgresult = -1 + changedheads

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value
            _pullbookmarks(pullop)
991 991
992 992 def _pullbundle2extraprepare(pullop, kwargs):
993 993 """hook function so that extensions can extend the getbundle call"""
994 994 pass
995 995
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if 'changegroup' in pullop.stepsdone:
        # already handled (e.g. by the bundle2 code path)
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    # pick the richest protocol the remote supports, in decreasing order
    # of preference: getbundle > changegroup (full pull only) >
    # changegroupsubset
    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 pullop.remote.url())
1029 1029
def _pullphase(pullop):
    # Get remote phases data from remote
    if 'phases' not in pullop.stepsdone:
        remotephases = pullop.remote.listkeys('phases')
        _pullapplyphases(pullop, remotephases)
1036 1036
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and unpublishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    # (public advance above must happen before the draft advance here)
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
1071 1071
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' not in pullop.stepsdone:
        pullop.stepsdone.add('bookmarks')
        repo = pullop.repo
        bookmod.updatefromremote(repo.ui, repo, pullop.remotebookmarks,
                                 pullop.remote.url(),
                                 pullop.gettransaction,
                                 explicit=pullop.explicitbookmarks)
1083 1083
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is function that return the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code that
    a new transaction have been created (when applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if not obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        return tr
    pullop.repo.ui.debug('fetching remote obsolete markers\n')
    remoteobs = pullop.remote.listkeys('obsolete')
    # 'dump0' present means the remote actually has markers to offer
    if 'dump0' in remoteobs:
        tr = pullop.gettransaction()
        for markerkey in sorted(remoteobs, reverse=True):
            if markerkey.startswith('dump'):
                rawdata = base85.b85decode(remoteobs[markerkey])
                pullop.repo.obsstore.mergemarkers(tr, rawdata)
        pullop.repo.invalidatevolatilesets()
    return tr
1107 1107
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    # advertise the bundle20 magic plus our url-quoted capability blob
    return set(['HG2Y', 'bundle2=' + urllib.quote(capsblob)])
1114 1114
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}
1122 1122
def getbundle2partsgenerator(stepname):
    """decorator for function generating bundle2 part for getbundle

    The decorated function is recorded under ``stepname`` in the
    step -> function mapping and the step is appended to the ordered list
    of steps (so declaration order matters).

    Only use this decorator for a brand new step; to wrap an existing step
    from an extension, mutate the getbundle2partsmapping dictionary
    directly."""
    def register(func):
        # each step name may only ever be registered once
        assert stepname not in getbundle2partsmapping
        getbundle2partsorder.append(stepname)
        getbundle2partsmapping[stepname] = func
        return func
    return register
1138 1138
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kind of parts)

    Could be a bundle HG10 or a bundle HG2Y depending on bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    changes when more part type will be available for bundle2.

    This is different from changegroup.getchangegroup that only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when we
    have a clearer idea of the API we what to query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    # bundle10 case: no bundle2 capability advertised by the client
    if bundlecaps is None or 'HG2Y' not in bundlecaps:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        # bundle10 cannot carry anything beyond the changegroup itself
        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return changegroup.getchangegroup(repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps)

    # bundle20 case
    # decode the bundle2 capabilities the client url-quoted into bundlecaps
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    # let every registered part generator contribute to the bundle, in order
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        kwargs['heads'] = heads
        kwargs['common'] = common
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **kwargs)

    return util.chunkbuffer(bundler.getchunks())
1181 1181
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = None
        cgversions = b2caps.get('b2x:changegroup')
        if cgversions is None:
            # client did not advertise changegroup versions: fall back to the
            # default (version-less) changegroup format
            cg = changegroup.getchangegroupraw(repo, source, heads=heads,
                                               common=common,
                                               bundlecaps=bundlecaps)
        else:
            # send the highest changegroup format supported by both sides
            cgversions = [v for v in cgversions if v in changegroup.packermap]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = max(cgversions)
            cg = changegroup.getchangegroupraw(repo, source, heads=heads,
                                               common=common,
                                               bundlecaps=bundlecaps,
                                               version=version)

    if cg:
        part = bundler.newpart('b2x:changegroup', data=cg)
        # only advertise an explicit version when one was negotiated
        if version is not None:
            part.addparam('version', version)
1194 1209
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    # one b2x:listkeys part per requested pushkey namespace
    for namespace in kwargs.get('listkeys', ()):
        part = bundler.newpart('b2x:listkeys')
        part.addparam('namespace', namespace)
        part.data = pushkey.encodekeys(repo.listkeys(namespace).items())
1205 1220
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if not kwargs.get('obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    # markers relevant to the ancestors of the requested heads
    subset = [ctx.node() for ctx in repo.set('::%ln', heads)]
    buildobsmarkerspart(bundler, repo.obsstore.relevantmarkers(subset))
1216 1231
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = util.sha1(''.join(sorted(heads))).digest()
    unchanged = (their_heads == ['force']
                 or their_heads == heads
                 or their_heads == ['hashed', heads_hash])
    if not unchanged:
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)
1230 1245
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced as PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    tr = None
    lock = repo.lock()
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        # a 'params' attribute marks cg as a bundle2 stream rather than a
        # plain changegroup
        if util.safehasattr(cg, 'params'):
            try:
                tr = repo.transaction('unbundle')
                tr.hookargs['source'] = source
                tr.hookargs['url'] = url
                tr.hookargs['bundle2-exp'] = '1'
                r = bundle2.processbundle(repo, cg, lambda: tr).reply
                cl = repo.unfiltered().changelog
                # expose pending changelog data to the pre-close hook
                p = cl.writepending() and repo.root or ""
                repo.hook('b2x-pretransactionclose', throw=True, pending=p,
                          **tr.hookargs)
                tr.close()
                # snapshot hookargs before the transaction is released
                hookargs = dict(tr.hookargs)
                def runhooks():
                    repo.hook('b2x-transactionclose', **hookargs)
                repo._afterlock(runhooks)
            except Exception, exc:
                # tag the exception so the wire protocol layer can report
                # that it happened during bundle2 processing
                exc.duringunbundle2 = True
                raise
        else:
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        if tr is not None:
            tr.release()
        lock.release()
    return r
General Comments 0
You need to be logged in to leave comments. Login now