##// END OF EJS Templates
push: send highest changegroup format supported by both sides
Pierre-Yves David -
r23180:116b80d6 default
parent child Browse files
Show More
@@ -1,1286 +1,1300 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex, nullid
10 10 import errno, urllib
11 11 import util, scmutil, changegroup, base85, error
12 12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 13
def readbundle(ui, fh, fname, vfs=None):
    """Sniff the 4-byte header on ``fh`` and return a matching unpacker.

    HG10 bundles yield a ``changegroup.cg1unpacker``; HG2Y bundles yield
    a ``bundle2.unbundle20``.  ``fname`` is only used for error messages;
    when it is empty, a headerless raw changegroup is tolerated and
    treated as an uncompressed HG10 stream.
    """
    header = changegroup.readexactly(fh, 4)

    compression = None
    if fname:
        if vfs:
            fname = vfs.join(fname)
    else:
        fname = "stream"
        # raw changegroup with no 'HG' magic: splice a header back in
        if header.startswith('\0') and not header.startswith('HG'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            compression = 'UN'

    magic = header[0:2]
    version = header[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if compression is None:
            # HG10 carries the compression scheme in the next two bytes
            compression = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, compression)
    if version == '2Y':
        return bundle2.unbundle20(ui, fh, header=magic + version)
    raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39 39
def buildobsmarkerspart(bundler, markers):
    """Append a B2X:OBSMARKERS part carrying ``markers`` to ``bundler``.

    Returns the newly created part, or None when ``markers`` is empty
    (no part is added in that case).  Raises ValueError when no obsmarker
    format is understood by both sides.
    """
    if not markers:
        return None
    supported = bundle2.obsmarkersversion(bundler.capabilities)
    fmt = obsolete.commonversion(supported)
    if fmt is None:
        raise ValueError('bundler do not support common obsmarker format')
    data = obsolete.encodemarkers(markers, True, version=fmt)
    return bundler.newpart('B2X:OBSMARKERS', data=data)
54 54
class pushoperation(object):
    """An object representing a single push operation.

    Its purpose is to carry push related state and very common operations.

    A new one should be created at the beginning of each push and discarded
    afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discovery.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(self.outgoing.common)
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        # cgresult truthy means the changegroup actually landed, so the
        # optimistic head set applies; otherwise fall back.
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

    # mapping of message used when pushing bookmark
    # (success message, failure message) per action
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }
161 161
162 162
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote.

    Returns the ``pushoperation`` object for this push; its ``cgresult``
    attribute holds an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
    if pushop.remote.local():
        # local-to-local push: refuse when the destination cannot read
        # the source's repository requirements
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    locallock = None
    try:
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # old-style push requires locking the remote repository
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
                                          False)
                and pushop.remote.capable('bundle2-exp')):
                _pushbundle2(pushop)
            # the legacy steps below check pushop.stepsdone and skip
            # whatever bundle2 already handled
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
    finally:
        if locallock is not None:
            locallock.release()

    return pushop
230 230
# Ordered list of step names: discovery steps run in this order before push.
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}
238 238
def pushdiscovery(stepname):
    """Decorator registering a discovery step run before every push.

    The decorated function is recorded in ``pushdiscoverymapping`` and
    its name appended to ``pushdiscoveryorder``; steps execute in
    registration order, so declaration order matters.

    Only use this for brand new steps.  An extension wrapping an
    existing step should patch the ``pushdiscoverymapping`` dictionary
    directly instead.
    """
    def register(func):
        # a step name may only be claimed once
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return register
254 254
def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order."""
    for name in pushdiscoveryorder:
        pushdiscoverymapping[name](pushop)
260 260
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """Figure out which changesets need to be pushed to the remote."""
    unfi = pushop.repo.unfiltered()
    commoninc = discovery.findcommonincoming(unfi, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(unfi, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
274 274
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both success and failure case for changesets push)

    Fills ``pushop.outdatedphases`` (heads to turn public if the
    changeset push succeeds) and ``pushop.fallbackoutdatedphases``
    (heads to turn public if it fails)."""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    # on a non-publishing server, only changesets public locally can
    # actually be outdated-draft on the remote
    extracond = ''
    if not publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                       outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
309 309
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """Collect obsolescence markers relevant to the outgoing heads."""
    repo = pushop.repo
    if not obsolete.isenabled(repo, obsolete.exchangeopt):
        return
    if not repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
320 320
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """discover the bookmarks that need to be pushed

    Fills ``pushop.outbookmarks`` with (name, old-remote-value,
    new-remote-value) triples, and sets ``pushop.bkresult`` to 2 when an
    explicitly requested bookmark exists on neither side."""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    # when pushing a subset of revs, only bookmarks on the pushed subset
    # are candidates for update
    ancestors = ()
    if pushop.revs:
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    # bookmarks the user explicitly asked to push; names are removed as
    # they are matched, so leftovers are unknown on both sides
    explicit = set(pushop.bookmarks)

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
    # bookmarks that advanced locally
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in advdst + diverge + differ:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
371 371
def _pushcheckoutgoing(pushop):
    """Validate the outgoing set before pushing.

    Returns False when there is nothing to push.  Unless --force was
    given, aborts when the push would propagate obsolete/troubled
    changesets or create unexpected remote heads."""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = {"unstable": _("push includes unstable changeset: %s!"),
                   "bumped": _("push includes bumped changeset: %s!"),
                   "divergent": _("push includes divergent changeset: %s!")}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(mst[ctx.troubles()[0]] % ctx)
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
406 406
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}
414 414
def b2partsgenerator(stepname):
    """Decorator registering a bundle2 part generator for push.

    The decorated function is recorded in ``b2partsgenmapping`` and its
    name appended to ``b2partsgenorder``; generators are invoked in
    registration order, so declaration order matters.

    Only use this for brand new steps.  An extension wrapping an
    existing step should patch the ``b2partsgenmapping`` dictionary
    directly instead.
    """
    def register(func):
        # a step name may only be claimed once
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        b2partsgenorder.append(stepname)
        return func
    return register
430 430
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    Negotiates the highest changegroup version supported by both sides
    (falling back to the implicit cg1 format when the remote advertises
    no 'b2x:changegroup' capability) and adds a B2X:CHANGEGROUP part.

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    # NOTE: the SOURCE span interleaved the pre- and post-change lines of
    # a rendered diff; keeping both would build the changegroup twice and
    # discard the version negotiation. This is the resolved (new) code.
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    if not pushop.force:
        bundler.newpart('B2X:CHECK:HEADS', data=iter(pushop.remoteheads))
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = None
    cgversions = b2caps.get('b2x:changegroup')
    if cgversions is None:
        # remote predates explicit changegroup versioning: implicit cg1
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing)
    else:
        # pick the highest format both sides can handle
        cgversions = [v for v in cgversions if v in changegroup.packermap]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing,
                                                version=version)
    cgpart = bundler.newpart('B2X:CHANGEGROUP', data=cg)
    if version is not None:
        cgpart.addparam('version', version)
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
457 471
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    One b2x:pushkey part is added per head to turn public; the reply
    handler checks the server's answer for each of them."""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if not 'b2x:pushkey' in b2caps:
        # remote cannot do pushkey over bundle2; leave the step for the
        # legacy _pushsyncphase path
        return
    pushop.stepsdone.add('phases')
    part2node = []
    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('b2x:pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
    def handlereply(op):
        # check the outcome of every pushkey part we emitted
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
489 503
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """Add an obsmarkers part to the bundle2 push when supported."""
    if 'obsmarkers' in pushop.stepsdone:
        return
    supported = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(supported) is None:
        # no obsmarker format in common: leave the step for the legacy
        # pushkey-based exchange
        return
    pushop.stepsdone.add('obsmarkers')
    markers = pushop.outobsmarkers
    if markers:
        buildobsmarkerspart(bundler, markers)
500 514
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    One b2x:pushkey part is added per outgoing bookmark; the reply
    handler reports success or failure per bookmark and records the
    overall outcome in ``pushop.bkresult``."""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'b2x:pushkey' not in b2caps:
        # remote cannot do pushkey over bundle2; leave the step for the
        # legacy _pushbookmark path
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode
    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('b2x:pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # classify for the user-facing message: empty old value means the
        # bookmark is new to the remote, empty new value means deletion
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply
543 557
544 558
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future.

    Runs every registered part generator, sends the resulting bundle in a
    single round trip, and dispatches the server reply to each generator's
    reply handler."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo))
    bundler.newpart('b2x:replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    # (the replycaps part alone accounts for one part, hence <= 1)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    try:
        op = bundle2.processbundle(pushop.repo, reply)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    for rephand in replyhandlers:
        rephand(op)
574 588
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    Legacy (non-bundle2) path; skipped when bundle2 already handled the
    'changesets' step.  Stores the result in ``pushop.cgresult``."""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                            or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
623 637
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely

    First applies remotely-known phase information to the local repo,
    then pushes local phase changes to the remote (through bundle2
    pushkey when available, falling back to individual pushkey calls)."""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        b2caps = bundle2.bundle2caps(pushop.remote)
        if 'b2x:pushkey' in b2caps:
            # server supports bundle2, let's do a batched push through it
            #
            # This will eventually be unified with the changesets bundle2 push
            bundler = bundle2.bundle20(pushop.ui, b2caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo))
            bundler.newpart('b2x:replycaps', data=capsblob)
            part2node = []
            enc = pushkey.encode
            for newremotehead in outdated:
                part = bundler.newpart('b2x:pushkey')
                part.addparam('namespace', enc('phases'))
                part.addparam('key', enc(newremotehead.hex()))
                part.addparam('old', enc(str(phases.draft)))
                part.addparam('new', enc(str(phases.public)))
                part2node.append((part.id, newremotehead))
            stream = util.chunkbuffer(bundler.getchunks())
            try:
                reply = pushop.remote.unbundle(stream, ['force'], 'push')
                op = bundle2.processbundle(pushop.repo, reply)
            except error.BundleValueError, exc:
                raise util.Abort('missing support for %s' % exc)
            # check the server's answer for every pushkey part we emitted
            for partid, node in part2node:
                partrep = op.records.getreplies(partid)
                results = partrep['pushkey']
                assert len(results) <= 1
                msg = None
                if not results:
                    msg = _('server ignored update of %s to public!\n') % node
                elif not int(results[0]['return']):
                    msg = _('updating %s to public failed!\n') % node
                if msg is not None:
                    pushop.ui.warn(msg)

        else:
            # fallback to independent pushkey command
            for newremotehead in outdated:
                r = pushop.remote.pushkey('phases',
                                          newremotehead.hex(),
                                          str(phases.draft),
                                          str(phases.public))
                if not r:
                    pushop.ui.warn(_('updating %s to public failed!\n')
                                   % newremotehead)
715 729
def _localphasemove(pushop, nodes, phase=phases.public):
    """Advance ``nodes`` to ``phase`` in the local (source) repository.

    When the source repo could not be locked, no phase is touched; the
    user is only informed when a move would actually have happened.
    """
    repo = pushop.repo
    if not pushop.locallocked:
        # repo is not locked, do not change any phases!
        # Inform the user that phases should have been moved when
        # applicable.
        skipped = [n for n in nodes if phase < repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if skipped:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)
        return
    tr = repo.transaction('push-phase-sync')
    try:
        phases.advanceboundary(repo, tr, phase, nodes)
        tr.close()
    finally:
        tr.release()
734 748
def _pushobsolete(pushop):
    """Push obsolescence markers through the legacy pushkey protocol."""
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    remotedata = obsolete._pushkeyescape(pushop.outobsmarkers)
    results = []
    # reverse sort to ensure we end with dump0
    for key in sorted(remotedata, reverse=True):
        results.append(remote.pushkey('obsolete', key, '', remotedata[key]))
    if not all(results):
        repo.ui.warn(_('failed to push some obsolete markers!\n'))
753 767
def _pushbookmark(pushop):
    """Update bookmark positions on the remote via individual pushkey calls."""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        # changegroup push failed, or bundle2 already did the bookmarks
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for book, old, new in pushop.outbookmarks:
        # classify for the user-facing message
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        else:
            action = 'update'
        if remote.pushkey('bookmarks', book, old, new):
            ui.status(bookmsgmap[action][0] % book)
        else:
            ui.warn(bookmsgmap[action][1] % book)
            # discovery can have set the value form invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
775 789
class pulloperation(object):
    """An object representing a single pull operation.

    Its purpose is to carry pull related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=()):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = bookmarks
        # do we force pull?
        self.force = force
        # the name the pull transaction
        self._trname = 'pull\n' + util.hidepassword(remote.url())
        # hold the transaction once created
        self._tr = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        """get appropriate pull transaction, creating it if needed"""
        if self._tr is None:
            self._tr = self.repo.transaction(self._trname)
            self._tr.hookargs['source'] = 'pull'
            self._tr.hookargs['url'] = self.remote.url()
        return self._tr

    def closetransaction(self):
        """close transaction if created"""
        if self._tr is not None:
            repo = self.repo
            cl = repo.unfiltered().changelog
            # flush pending changelog data exactly once so pre-close hooks
            # can see it (the original called writepending() twice, which
            # was redundant)
            p = cl.writepending() and repo.root or ""
            repo.hook('b2x-pretransactionclose', throw=True, pending=p,
                      **self._tr.hookargs)
            self._tr.close()
            hookargs = dict(self._tr.hookargs)
            def runhooks():
                repo.hook('b2x-transactionclose', **hookargs)
            repo._afterlock(runhooks)

    def releasetransaction(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
858 872
def pull(repo, remote, heads=None, force=False, bookmarks=()):
    """Pull changesets (and related data) from ``remote`` into ``repo``.

    Runs discovery, then the changegroup/phase/bookmark/obsmarker steps in
    order, under the repo lock. Returns the ``pulloperation`` object so the
    caller can inspect the results (``cgresult`` in particular).
    """
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks)
    if pullop.remote.local():
        # local peer: make sure we support everything the source requires
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            raise util.Abort(_("required features are not"
                               " supported in the destination:"
                               " %s") % ', '.join(sorted(missing)))

    pullop.remotebookmarks = remote.listkeys('bookmarks')
    repolock = pullop.repo.lock()
    try:
        _pulldiscovery(pullop)
        useb2 = (pullop.repo.ui.configbool('experimental', 'bundle2-exp',
                                           False)
                 and pullop.remote.capable('bundle2-exp'))
        if useb2:
            _pullbundle2(pullop)
        # each step below is a no-op if bundle2 already performed it
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.closetransaction()
    finally:
        pullop.releasetransaction()
        repolock.release()
    return pullop
886 900
# list of steps to perform discovery before pull, in execution order
# (populated by the @pulldiscovery decorator)
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
894 908
def pulldiscovery(stepname):
    """decorator registering a pre-pull discovery step under ``stepname``

    The decorated function is stored in ``pulldiscoverymapping`` and its
    name appended to ``pulldiscoveryorder``, so decoration order decides
    execution order.

    Only use this for brand new steps; to wrap an existing step from an
    extension, modify the pulldiscoverymapping dictionary directly.
    """
    def register(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return register
910 924
def _pulldiscovery(pullop):
    """run every registered discovery step, in registration order"""
    for name in pulldiscoveryorder:
        pulldiscoverymapping[name](pullop)
916 930
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; eventually all discovery
    should happen here."""
    common, fetch, rheads = discovery.findcommonincoming(
        pullop.repo.unfiltered(), pullop.remote,
        heads=pullop.heads, force=pullop.force)
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
928 942
def _pullbundle2(pullop):
    """pull data using bundle2

    Fetches changegroup (and, when possible, phases/bookmarks/obsmarkers)
    in a single getbundle round-trip, then folds the bundle2 part results
    into the pull operation state."""
    remotecaps = bundle2.bundle2caps(pullop.remote)
    # arguments for the getbundle wire call; advertising our bundle2 caps
    # makes the server answer with a bundle2 stream
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}
    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch
    if 'b2x:listkeys' in remotecaps:
        # piggyback phase and bookmark data on the same round-trip
        kwargs['listkeys'] = ['phase', 'bookmarks']
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        # only request obsolescence markers when both sides share a common
        # marker format version
        remoteversions = bundle2.obsmarkersversion(remotecaps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    # let extensions tweak the getbundle arguments
    _pullbundle2extraprepare(pullop, kwargs)
    if kwargs.keys() == ['format']:
        return # nothing to pull
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)

    if pullop.fetch:
        # fold the per-changegroup return codes into a single pull result:
        # 0 = failure, 1 = no head change, 1+n / -1-n = heads added/removed
        changedheads = 0
        pullop.cgresult = 1
        for cg in op.records['changegroup']:
            ret = cg['return']
            # If any changegroup result is 0, return 0
            if ret == 0:
                pullop.cgresult = 0
                break
            if ret < -1:
                changedheads += ret + 1
            elif ret > 1:
                changedheads += ret - 1
        if changedheads > 0:
            pullop.cgresult = 1 + changedheads
        elif changedheads < 0:
            pullop.cgresult = -1 + changedheads

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value
            _pullbookmarks(pullop)
991 1005
def _pullbundle2extraprepare(pullop, kwargs):
    """extension hook: adjust ``kwargs`` before the getbundle request

    The default implementation does nothing; extensions wrap or replace
    this function to extend the bundle2 getbundle call.
    """
995 1009
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo

    Picks the newest wire-protocol command the remote supports (getbundle,
    then changegroupsubset, then changegroup) and applies the result."""
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        # legacy server without getbundle: fetch everything it has
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 pullop.remote.url())
1029 1043
def _pullphase(pullop):
    """query remote phase data and apply the resulting phase movements"""
    if 'phases' in pullop.stepsdone:
        return
    _pullapplyphases(pullop, pullop.remote.listkeys('phases'))
1036 1050
def _pullapplyphases(pullop, remotephases):
    """move local phase boundaries to match the observed remote state

    ``remotephases`` is the raw 'phases' pushkey namespace listing."""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # the remote is recent and non-publishing: honour the phase
        # information it advertises
        publicheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                      pullop.pulledsubset,
                                                      remotephases)
        draftheads = pullop.pulledsubset
    else:
        # old remote, or a publishing one: every common changeset
        # should be seen as public
        publicheads = pullop.pulledsubset
        draftheads = []
    unfi = pullop.repo.unfiltered()
    phaseof = unfi._phasecache.phase
    noderev = unfi.changelog.nodemap.get

    # exclude changesets already public locally and advance the others
    newpublic = [pn for pn in publicheads
                 if phaseof(unfi, noderev(pn)) > phases.public]
    if newpublic:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, phases.public, newpublic)

    # exclude changesets already draft locally and advance the others
    newdraft = [pn for pn in draftheads
                if phaseof(unfi, noderev(pn)) > phases.draft]
    if newdraft:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, phases.draft, newdraft)
1071 1085
def _pullbookmarks(pullop):
    """update local bookmarks from the remote bookmark data gathered earlier"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    bookmod.updatefromremote(pullop.repo.ui, pullop.repo,
                             pullop.remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)
1083 1097
def _pullobsolete(pullop):
    """pull obsolescence markers from the remote, when exchange is enabled

    Returns the pull transaction when one was created/used, so calling code
    knows a new transaction exists (when applicable). Exists mostly to allow
    overriding for experimentation purposes."""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            # merge each dump* entry, newest key first
            dumpkeys = [k for k in sorted(remoteobs, reverse=True)
                        if k.startswith('dump')]
            for key in dumpkeys:
                pullop.repo.obsstore.mergemarkers(
                    tr, base85.b85decode(remoteobs[key]))
            pullop.repo.invalidatevolatilesets()
    return tr
1107 1121
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    return set(['HG2Y', 'bundle2=' + urllib.quote(capsblob)])
1114 1128
# List of names of steps to perform for a bundle2 for getbundle, order matters.
# (populated by the @getbundle2partsgenerator decorator)
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}
1122 1136
def getbundle2partsgenerator(stepname):
    """decorator registering a bundle2 part generator for getbundle

    The decorated function is stored in ``getbundle2partsmapping`` and its
    name appended to ``getbundle2partsorder``, so decoration order decides
    part generation order.

    Only use this for brand new steps; to wrap a step from an extension,
    modify the getbundle2partsmapping dictionary directly.
    """
    def register(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        getbundle2partsorder.append(stepname)
        return func
    return register
1138 1152
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kinds of parts)

    Could be a bundle HG10 or a bundle HG2Y depending on the bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    change when more part types become available for bundle2.

    This is different from changegroup.getchangegroup that only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when we
    have a clearer idea of the API we want to use to query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    # bundle10 case: the client did not advertise bundle2 support
    if bundlecaps is None or 'HG2Y' not in bundlecaps:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return changegroup.getchangegroup(repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps)

    # bundle20 case: decode the client's bundle2 capability blob
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    # run every registered part generator with the full argument set
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        kwargs['heads'] = heads
        kwargs['common'] = common
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **kwargs)

    return util.chunkbuffer(bundler.getchunks())
1181 1195
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    if not kwargs.get('cg', True):
        return
    # build the changegroup here, using the highest format the client
    # advertised support for (no advertisement means legacy format)
    version = None
    cgversions = b2caps.get('b2x:changegroup')
    if cgversions is None:
        cg = changegroup.getchangegroupraw(repo, source, heads=heads,
                                           common=common,
                                           bundlecaps=bundlecaps)
    else:
        cgversions = [v for v in cgversions if v in changegroup.packermap]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
        cg = changegroup.getchangegroupraw(repo, source, heads=heads,
                                           common=common,
                                           bundlecaps=bundlecaps,
                                           version=version)
    if cg:
        part = bundler.newpart('b2x:changegroup', data=cg)
        if version is not None:
            part.addparam('version', version)
1209 1223
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    for namespace in kwargs.get('listkeys', ()):
        part = bundler.newpart('b2x:listkeys')
        part.addparam('namespace', namespace)
        part.data = pushkey.encodekeys(repo.listkeys(namespace).items())
1220 1234
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if not kwargs.get('obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    # markers relevant to every changeset reachable from the served heads
    subset = [ctx.node() for ctx in repo.set('::%ln', heads)]
    buildobsmarkerspart(bundler, repo.obsstore.relevantmarkers(subset))
1231 1245
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling: raises PushRaced when the local heads no
    longer match what the remote saw ('force' bypasses the check).
    """
    heads = repo.heads()
    heads_hash = util.sha1(''.join(sorted(heads))).digest()
    unchanged = (their_heads == ['force']
                 or their_heads == heads
                 or their_heads == ['hashed', heads_hash])
    if not unchanged:
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)
1245 1259
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    tr = None
    lock = repo.lock()
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # bundle2 stream (only bundle2 unbundlers carry 'params')
            try:
                tr = repo.transaction('unbundle')
                tr.hookargs['source'] = source
                tr.hookargs['url'] = url
                tr.hookargs['bundle2-exp'] = '1'
                r = bundle2.processbundle(repo, cg, lambda: tr).reply
                cl = repo.unfiltered().changelog
                # expose pending changelog writes to the pre-close hook
                p = cl.writepending() and repo.root or ""
                repo.hook('b2x-pretransactionclose', throw=True, pending=p,
                          **tr.hookargs)
                tr.close()
                hookargs = dict(tr.hookargs)
                def runhooks():
                    repo.hook('b2x-transactionclose', **hookargs)
                repo._afterlock(runhooks)
            except Exception, exc:
                # tag the error so callers know it happened during a
                # bundle2 unbundle and can report accordingly
                exc.duringunbundle2 = True
                raise
        else:
            # plain changegroup (bundle10) stream
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        if tr is not None:
            tr.release()
        lock.release()
    return r
General Comments 0
You need to be logged in to leave comments. Login now