##// END OF EJS Templates
exchange: catch down to BaseException when handling bundle2...
Pierre-Yves David -
r25182:ee665d3b default
parent child Browse files
Show More
@@ -1,1334 +1,1334 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex, nullid
10 10 import errno, urllib
11 11 import util, scmutil, changegroup, base85, error
12 12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 13 import lock as lockmod
14 14
def readbundle(ui, fh, fname, vfs=None):
    """Return an unbundler suited to the bundle found on ``fh``.

    ``fname`` is only used for error reporting; it defaults to "stream"
    when empty, and is joined to ``vfs`` when one is provided.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if fname:
        if vfs:
            fname = vfs.join(fname)
    else:
        fname = "stream"
        if header.startswith('\0') and not header.startswith('HG'):
            # headerless stream: fix it up as an uncompressed HG10 bundle
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'

    magic = header[:2]
    version = header[2:4]
    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            # compression marker follows the 4-byte header
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    if version.startswith('2'):
        return bundle2.getunbundler(ui, fh, header=magic + version)
    raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
40 40
def buildobsmarkerspart(bundler, markers):
    """add an obsmarker part to the bundler with <markers>

    Returns the newly created part, or None when <markers> is empty.
    Raises ValueError if the bundler doesn't support any known obsmarker
    format.
    """
    if not markers:
        return None
    supported = bundle2.obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(supported)
    if version is None:
        raise ValueError('bundler do not support common obsmarker format')
    data = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart('obsmarkers', data=data)
55 55
def _canusebundle2(op):
    """return true if a pull/push can use bundle2

    Feel free to nuke this function when we drop the experimental option"""
    if not op.repo.ui.configbool('experimental', 'bundle2-exp', False):
        return False
    return op.remote.capable('bundle2')
62 62
63 63
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(self.outgoing.common)
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads
163 163
# mapping of bookmark push action -> (success, failure) message templates,
# used when reporting the outcome of pushing a bookmark
bookmsgmap = {'update': (_("updating bookmark %s\n"),
                         _('updating bookmark %s failed!\n')),
              'export': (_("exporting bookmark %s\n"),
                         _('exporting bookmark %s failed!\n')),
              'delete': (_("deleting remote bookmark %s\n"),
                         _('deleting remote bookmark %s failed!\n')),
              }
172 172
173 173
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return the pushoperation object; its ``cgresult``
    attribute is an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
    if pushop.remote.local():
        # pushing to a local repo: make sure it can read everything we have
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    localwlock = locallock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if _canusebundle2(pushop) and maypushback:
            localwlock = pushop.repo.wlock()
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        if pushop.locallocked:
            # a transaction is only needed if we may receive reply data
            pushop.trmanager = transactionmanager(repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # old-style push: we must lock the remote repository ourselves
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if _canusebundle2(pushop):
                _pushbundle2(pushop)
            # the steps below are no-ops when bundle2 already handled them
            # (they check pushop.stepsdone)
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
            if pushop.trmanager:
                pushop.trmanager.close()
    finally:
        # release in reverse order of acquisition
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()
        if localwlock is not None:
            localwlock.release()

    return pushop
254 254
# ordered list of step names; discovery steps run in this order before a push
pushdiscoveryorder = []

# Mapping between step name and the function implementing the step
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}
262 262
def pushdiscovery(stepname):
    """decorator registering a function as a discovery step before push

    The function is recorded in the step -> function mapping and its name
    appended to the list of steps. Beware that decorated functions are
    registered in definition order (this may matter).

    Only use this decorator for a new step; to wrap a step from an
    extension, change the pushdiscovery dictionary directly."""
    def register(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return register
278 278
def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order"""
    for stepname in pushdiscoveryorder:
        pushdiscoverymapping[stepname](pushop)
284 284
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
297 297
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    # pheads: remote public heads, droots: remote draft roots
    # (per phases.analyzeremotephases)
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    extracond = ''
    if not publishing:
        # non-publishing remote: only locally-public changesets are outdated
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    # phase updates to send if the changeset push succeeds / fails
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
332 332
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """discover which obsolescence markers are relevant to the push"""
    # nothing to do unless marker exchange is enabled, the local store has
    # markers, and the remote advertises the 'obsolete' pushkey namespace
    if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
        return
    if not pushop.repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    repo = pushop.repo
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
343 343
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """figure out which bookmarks to add, move, overwrite or delete remotely"""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # restrict bookmark moves to the pushed subset
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    explicit = set(pushop.bookmarks)

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    # bookmarks the local side moved forward
    for b, scid, dcid in advsrc:
        explicit.discard(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # bookmarks that only exist locally
    for b, scid, dcid in addsrc:
        explicit.discard(b)
        pushop.outbookmarks.append((b, '', scid))
    # bookmarks that will be overwritten on the remote
    for b, scid, dcid in advdst + diverge + differ:
        explicit.discard(b)
        pushop.outbookmarks.append((b, dcid, scid))
    # bookmarks only present remotely: treat as "deleted locally"
    for b, scid, dcid in adddst:
        explicit.discard(b)
        pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        explicit.discard(b)

    if explicit:
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % sorted(explicit)[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
394 394
def _pushcheckoutgoing(pushop):
    """check that the outgoing changesets may be pushed

    Returns False when there is nothing to push; raises util.Abort when the
    push must not proceed (troubled changesets, head checks)."""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # skip the whole scan when the repo has no obsstore at all
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            obsoletemsg = _("push includes obsolete changeset: %s!")
            troublemsgs = {
                "unstable": _("push includes unstable changeset: %s!"),
                "bumped": _("push includes bumped changeset: %s!"),
                "divergent": _("push includes divergent changeset: %s!"),
            }
            # If there is at least one obsolete or unstable changeset in
            # missing, at least one of the missingheads will be obsolete or
            # unstable. So checking heads only is ok.
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(obsoletemsg % ctx)
                if ctx.troubled():
                    raise util.Abort(troublemsgs[ctx.troubles()[0]] % ctx)
        newbookmarks = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads, pushop.newbranch,
                             bool(pushop.incoming), newbookmarks)
    return True
429 429
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and the function generating the corresponding part
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}
437 437
def b2partsgenerator(stepname, idx=None):
    """decorator registering a function as a bundle2 part generator

    The function is recorded in the step -> function mapping and its name
    appended to the list of steps (or inserted at ``idx`` when given).
    Beware that decorated functions are registered in definition order
    (this may matter).

    Only use this decorator for new steps; to wrap a step from an
    extension, attack the b2partsgenmapping dictionary directly."""
    def register(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return register
456 456
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    if not pushop.force:
        # let the server abort if its heads changed since discovery
        bundler.newpart('check:heads', data=iter(pushop.remoteheads))
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = None
    # negotiate the changegroup wire format version with the remote
    cgversions = b2caps.get('changegroup')
    if not cgversions:  # 3.1 and 3.2 ship with an empty value
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing)
    else:
        cgversions = [v for v in cgversions if v in changegroup.packermap]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing,
                                                version=version)
    cgpart = bundler.newpart('changegroup', data=cg)
    if version is not None:
        cgpart.addparam('version', version)
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
497 497
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    Adds one pushkey part per outdated remote head that must be turned
    public. The returned handler reads the server replies and warns about
    any head the server ignored or refused to update.
    """
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    # use the idiomatic membership test, matching _pushb2bookmarks
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('phases')
    part2node = []
    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
529 529
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add an obsmarkers part when the remote supports a common format"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        # no obsmarker wire format in common with the remote
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        buildobsmarkerspart(bundler, sorted(pushop.outobsmarkers))
541 541
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    # remember (part id, bookmark, action) so the reply handler can report
    part2book = []
    enc = pushkey.encode
    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))


    def handlereply(op):
        """report per-bookmark results from the server reply"""
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply
584 584
585 585
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    # let each registered generator add its parts; generators may return a
    # callable that processes the server's reply
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push (only the replycaps part is present)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    try:
        trgetter = None
        if pushback:
            trgetter = pushop.trmanager.transaction
        # process the reply bundle (may write bookmarks/phases locally)
        op = bundle2.processbundle(pushop.repo, reply, trgetter)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    for rephand in replyhandlers:
        rephand(op)
622 622
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        # already handled (e.g. through bundle2)
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
671 671
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            # publishing remote: everything common becomes public locally
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
727 727
728 728 def _localphasemove(pushop, nodes, phase=phases.public):
729 729 """move <nodes> to <phase> in the local source repo"""
730 730 if pushop.trmanager:
731 731 phases.advanceboundary(pushop.repo,
732 732 pushop.trmanager.transaction(),
733 733 phase,
734 734 nodes)
735 735 else:
736 736 # repo is not locked, do not change any phases!
737 737 # Informs the user that phases should have been moved when
738 738 # applicable.
739 739 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
740 740 phasestr = phases.phasenames[phase]
741 741 if actualmoves:
742 742 pushop.ui.status(_('cannot lock source repo, skipping '
743 743 'local %s phase update\n') % phasestr)
744 744
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
    # reverse sort to ensure we end with dump0
    results = [remote.pushkey('obsolete', key, '', remotedata[key])
               for key in sorted(remotedata, reverse=True)]
    if not all(results):
        repo.ui.warn(_('failed to push some obsolete markers!\n'))
763 763
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        # changegroup push failed, or bundle2 already handled bookmarks
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        else:
            action = 'update'
        okmsg, failmsg = bookmsgmap[action]
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(okmsg % b)
        else:
            ui.warn(failmsg % b)
    # discovery can have set the value from an invalid entry
    if pushop.bkresult is not None:
        pushop.bkresult = 1
785 785
class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull related state and very common operations.

    A new pulloperation should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=()):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = bookmarks
        # do we force pull?
        self.force = force
        # transaction manager
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
842 842
class transactionmanager(object):
    """Manage the life cycle of a pull/push transaction.

    The transaction is created lazily on first use and the appropriate
    hooks run when it is closed."""
    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return the open transaction, constructing it on first call."""
        if not self._tr:
            name = '%s\n%s' % (self.source, util.hidepassword(self.url))
            tr = self.repo.transaction(name)
            tr.hookargs['source'] = self.source
            tr.hookargs['url'] = self.url
            self._tr = tr
        return self._tr

    def close(self):
        """Close the transaction if one was created."""
        tr = self._tr
        if tr is not None:
            tr.close()

    def release(self):
        """Release the transaction if one was created."""
        tr = self._tr
        if tr is not None:
            tr.release()
872 872
def pull(repo, remote, heads=None, force=False, bookmarks=()):
    """pull changesets and related data from ``remote`` into ``repo``

    ``heads`` restricts the pull to those revisions (None pulls
    everything), ``force`` is forwarded to discovery, and ``bookmarks``
    lists bookmarks to pull explicitly. Returns the pulloperation
    object describing what happened (its ``cgresult`` holds the
    changegroup outcome).
    """
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks)
    # abort early if the local repo cannot host what the remote requires
    if pullop.remote.local():
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # remote bookmark data is fetched up front; it is consumed later by
    # _pullbookmarks (directly or through the bundle2 path)
    pullop.remotebookmarks = remote.listkeys('bookmarks')
    lock = pullop.repo.lock()
    try:
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        _pulldiscovery(pullop)
        if _canusebundle2(pullop):
            _pullbundle2(pullop)
        # the legacy steps below are no-ops for data already transferred
        # via bundle2 (tracked through pullop.stepsdone)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        pullop.trmanager.release()
        lock.release()

    return pullop
900 900
# Ordered list of discovery step names to perform before a pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
908 908
def pulldiscovery(stepname):
    """decorator registering a discovery step to run before pull

    The decorated function is recorded in the step -> function mapping
    and appended to the ordered step list, so decoration order may
    matter.

    Only use this decorator for brand new steps; to wrap a step defined
    by an extension, change the pulldiscovery dictionary directly."""
    def register(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoveryorder.append(stepname)
        pulldiscoverymapping[stepname] = func
        return func
    return register
924 924
def _pulldiscovery(pullop):
    """run every registered discovery step, in registration order"""
    for name in pulldiscoveryorder:
        pulldiscoverymapping[name](pullop)
930 930
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; it will handle all
    discovery at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, drop it from the unknown
        # remote heads and put it back in common.
        #
        # This is a hackish solution to catch most of the "common but
        # locally hidden" situations. We do not perform discovery on the
        # unfiltered repository because that ends up doing a pathological
        # amount of round trips for a huge amount of changesets we do not
        # care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we'll not be able to
        # detect it.
        scommon = set(common)
        filteredrheads = []
        for n in rheads:
            if n in nm:
                # head is known (thus filtered) locally: treat as common
                if n not in scommon:
                    common.append(n)
            else:
                filteredrheads.append(n)
        if not filteredrheads:
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
968 968
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    remotecaps = bundle2.bundle2caps(pullop.remote)
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}
    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch
    if 'listkeys' in remotecaps:
        kwargs['listkeys'] = ['phase', 'bookmarks']
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    # request obsolescence markers only when both sides share a common
    # on-the-wire marker format
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(remotecaps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError, exc:
        # the server used a mandatory part or parameter we do not know
        raise util.Abort('missing support for %s' % exc)

    if pullop.fetch:
        results = [cg['return'] for cg in op.records['changegroup']]
        pullop.cgresult = changegroup.combineresults(results)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value
            _pullbookmarks(pullop)
1015 1015
1016 1016 def _pullbundle2extraprepare(pullop, kwargs):
1017 1017 """hook function so that extensions can extend the getbundle call"""
1018 1018 pass
1019 1019
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    # pick the best wire protocol the remote offers: getbundle, full
    # changegroup, or changegroupsubset
    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 pullop.remote.url())
1053 1053
1054 1054 def _pullphase(pullop):
1055 1055 # Get remote phases data from remote
1056 1056 if 'phases' in pullop.stepsdone:
1057 1057 return
1058 1058 remotephases = pullop.remote.listkeys('phases')
1059 1059 _pullapplyphases(pullop, remotephases)
1060 1060
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and unpublishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    # bind frequently used lookups to locals for the filters below
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
1095 1095
1096 1096 def _pullbookmarks(pullop):
1097 1097 """process the remote bookmark information to update the local one"""
1098 1098 if 'bookmarks' in pullop.stepsdone:
1099 1099 return
1100 1100 pullop.stepsdone.add('bookmarks')
1101 1101 repo = pullop.repo
1102 1102 remotebookmarks = pullop.remotebookmarks
1103 1103 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1104 1104 pullop.remote.url(),
1105 1105 pullop.gettransaction,
1106 1106 explicit=pullop.explicitbookmarks)
1107 1107
1108 1108 def _pullobsolete(pullop):
1109 1109 """utility function to pull obsolete markers from a remote
1110 1110
1111 1111 The `gettransaction` is function that return the pull transaction, creating
1112 1112 one if necessary. We return the transaction to inform the calling code that
1113 1113 a new transaction have been created (when applicable).
1114 1114
1115 1115 Exists mostly to allow overriding for experimentation purpose"""
1116 1116 if 'obsmarkers' in pullop.stepsdone:
1117 1117 return
1118 1118 pullop.stepsdone.add('obsmarkers')
1119 1119 tr = None
1120 1120 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1121 1121 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1122 1122 remoteobs = pullop.remote.listkeys('obsolete')
1123 1123 if 'dump0' in remoteobs:
1124 1124 tr = pullop.gettransaction()
1125 1125 for key in sorted(remoteobs, reverse=True):
1126 1126 if key.startswith('dump'):
1127 1127 data = base85.b85decode(remoteobs[key])
1128 1128 pullop.repo.obsstore.mergemarkers(tr, data)
1129 1129 pullop.repo.invalidatevolatilesets()
1130 1130 return tr
1131 1131
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    return set(['HG20', 'bundle2=' + urllib.quote(capsblob)])
1138 1138
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}
1146 1146
def getbundle2partsgenerator(stepname, idx=None):
    """decorator registering a bundle2 part generator for getbundle

    The decorated function is recorded in the step -> function mapping
    and inserted into the ordered step list (at position ``idx`` when
    given, appended otherwise), so decoration order may matter.

    Only use this decorator for brand new steps; to wrap a step defined
    by an extension, attack the getbundle2partsmapping dictionary
    directly."""
    def register(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        # inserting at len() is equivalent to appending
        pos = len(getbundle2partsorder) if idx is None else idx
        getbundle2partsorder.insert(pos, stepname)
        return func
    return register
1165 1165
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kind of parts)

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed. For now, the bundle can contain only changegroup, but this
    will change when more part types become available for bundle2.

    This is different from changegroup.getchangegroup that only returns
    an HG10 changegroup bundle. They may eventually get reunited in the
    future when we have a clearer idea of the API we want to use to
    query different data.

    The implementation is at a very early stage and will get massive
    rework when the API of bundle is refined.
    """
    # bundle10 case
    usebundle2 = False
    if bundlecaps is not None:
        usebundle2 = any((cap.startswith('HG2') for cap in bundlecaps))
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        # bundle10 only carries a changegroup: any other argument is
        # unsupported on this code path
        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return changegroup.getchangegroup(repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps)

    # bundle20 case
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    # delegate part generation to the registered step functions, in order
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **kwargs)

    return util.chunkbuffer(bundler.getchunks())
1212 1212
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = None
        cgversions = b2caps.get('changegroup')
        if not cgversions:  # 3.1 and 3.2 ship with an empty value
            cg = changegroup.getchangegroupraw(repo, source, heads=heads,
                                               common=common,
                                               bundlecaps=bundlecaps)
        else:
            # negotiate the highest changegroup version known to both sides
            cgversions = [v for v in cgversions if v in changegroup.packermap]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = max(cgversions)
            cg = changegroup.getchangegroupraw(repo, source, heads=heads,
                                               common=common,
                                               bundlecaps=bundlecaps,
                                               version=version)

    if cg:
        part = bundler.newpart('changegroup', data=cg)
        if version is not None:
            part.addparam('version', version)
1240 1240
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add one 'listkeys' part per requested pushkey namespace"""
    for namespace in kwargs.get('listkeys', ()):
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)
1251 1251
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if not kwargs.get('obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    # restrict to markers relevant to the ancestors of the given heads
    subset = [c.node() for c in repo.set('::%ln', heads)]
    markers = sorted(repo.obsstore.relevantmarkers(subset))
    buildobsmarkerspart(bundler, markers)
1263 1263
def check_heads(repo, their_heads, context):
    """verify that the repo heads the peer saw are still current

    Used by peer for unbundling. Raises PushRaced when somebody else
    committed/pushed/unbundled while the data was being transferred.
    """
    current = repo.heads()
    currenthash = util.sha1(''.join(sorted(current))).digest()
    uptodate = (their_heads == ['force']
                or their_heads == current
                or their_heads == ['hashed', currenthash])
    if not uptodate:
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)
1277 1277
1278 1278 def unbundle(repo, cg, heads, source, url):
1279 1279 """Apply a bundle to a repo.
1280 1280
1281 1281 this function makes sure the repo is locked during the application and have
1282 1282 mechanism to check that no push race occurred between the creation of the
1283 1283 bundle and its application.
1284 1284
1285 1285 If the push was raced as PushRaced exception is raised."""
1286 1286 r = 0
1287 1287 # need a transaction when processing a bundle2 stream
1288 1288 wlock = lock = tr = None
1289 1289 recordout = None
1290 1290 # quick fix for output mismatch with bundle2 in 3.4
1291 1291 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
1292 1292 False)
1293 1293 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1294 1294 captureoutput = True
1295 1295 try:
1296 1296 check_heads(repo, heads, 'uploading changes')
1297 1297 # push can proceed
1298 1298 if util.safehasattr(cg, 'params'):
1299 1299 r = None
1300 1300 try:
1301 1301 wlock = repo.wlock()
1302 1302 lock = repo.lock()
1303 1303 tr = repo.transaction(source)
1304 1304 tr.hookargs['source'] = source
1305 1305 tr.hookargs['url'] = url
1306 1306 tr.hookargs['bundle2'] = '1'
1307 1307 op = bundle2.bundleoperation(repo, lambda: tr,
1308 1308 captureoutput=captureoutput)
1309 1309 try:
1310 1310 r = bundle2.processbundle(repo, cg, op=op)
1311 1311 finally:
1312 1312 r = op.reply
1313 1313 if captureoutput and r is not None:
1314 1314 repo.ui.pushbuffer(error=True, subproc=True)
1315 1315 def recordout(output):
1316 1316 r.newpart('output', data=output, mandatory=False)
1317 1317 tr.close()
1318 except Exception, exc:
1318 except BaseException, exc:
1319 1319 exc.duringunbundle2 = True
1320 1320 if captureoutput and r is not None:
1321 1321 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1322 1322 def recordout(output):
1323 1323 part = bundle2.bundlepart('output', data=output,
1324 1324 mandatory=False)
1325 1325 parts.append(part)
1326 1326 raise
1327 1327 else:
1328 1328 lock = repo.lock()
1329 1329 r = changegroup.addchangegroup(repo, cg, source, url)
1330 1330 finally:
1331 1331 lockmod.release(tr, lock, wlock)
1332 1332 if recordout is not None:
1333 1333 recordout(repo.ui.popbuffer())
1334 1334 return r
General Comments 0
You need to be logged in to leave comments. Login now