##// END OF EJS Templates
exchange: remove check for 'format' key...
Martin von Zweigbergk -
r24638:13a19717 default
parent child Browse files
Show More
@@ -1,1305 +1,1303 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex, nullid
10 10 import errno, urllib
11 11 import util, scmutil, changegroup, base85, error
12 12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 13
def readbundle(ui, fh, fname, vfs=None):
    """Return an unpacker object for the bundle stream ``fh``.

    The first four bytes of the stream select the format: 'HG10' yields a
    cg1unpacker (reading the two-byte compression code if it was not already
    implied), 'HG2Y' yields a bundle2 unbundler. A stream with no 'HG' magic
    but starting with '\\0' is assumed to be a raw headerless changegroup and
    is wrapped accordingly. ``vfs`` is only used to expand ``fname`` for
    error messages. Raises util.Abort on unrecognized data.
    """
    hdr = changegroup.readexactly(fh, 4)

    compression = None
    if fname:
        if vfs:
            fname = vfs.join(fname)
    else:
        fname = "stream"
        # headerless stream starting with NUL: treat as an uncompressed
        # changegroup missing its 'HG10UN' prelude
        if not hdr.startswith('HG') and hdr.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, hdr)
            hdr = "HG10"
            compression = 'UN'

    magic = hdr[0:2]
    version = hdr[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if compression is None:
            compression = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, compression)
    if version == '2Y':
        return bundle2.unbundle20(ui, fh, header=magic + version)
    raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39 39
def buildobsmarkerspart(bundler, markers):
    """add an obsmarker part to the bundler with <markers>

    No part is created if markers is empty.
    Raises ValueError if the bundler doesn't support any known obsmarker
    format.
    """
    if not markers:
        return None
    # pick the highest marker format version both sides understand
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(remoteversions)
    if version is None:
        raise ValueError('bundler do not support common obsmarker format')
    stream = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart('b2x:obsmarkers', data=stream)
54 54
class pushoperation(object):
    """A object that represent a single push operation

    It purpose is to carry push related state and very common operation.

    A new should be created at the beginning of each push and discarded
    afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # not target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(self.outgoing.common)
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        # on success everything we tried to push is common; on failure we
        # fall back to what was already common before the attempt
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads
154 154
# mapping of message used when pushing bookmark
# each action maps to a (success, failure) pair of message templates,
# both taking the bookmark name as their single % argument
bookmsgmap = {'update': (_("updating bookmark %s\n"),
                         _('updating bookmark %s failed!\n')),
              'export': (_("exporting bookmark %s\n"),
                         _('exporting bookmark %s failed!\n')),
              'delete': (_("deleting remote bookmark %s\n"),
                         _('deleting remote bookmark %s failed!\n')),
              }
163 163
164 164
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
    # refuse to push to a local repo missing features we rely on
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    locallock = None
    try:
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        # a transaction manager is only useful if we could take the lock;
        # without it _localphasemove degrades to a user notice
        if pushop.locallocked:
            pushop.trmanager = transactionmanager(repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # old-style push: we must lock the remote ourselves
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
                                          False)
                and pushop.remote.capable('bundle2-exp')):
                _pushbundle2(pushop)
            # the remaining steps record what bundle2 already handled in
            # pushop.stepsdone and skip themselves accordingly
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
        if pushop.trmanager:
            pushop.trmanager.close()
    finally:
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()

    return pushop
240 240
# list of steps to perform discovery before push
# (populated in registration order by the @pushdiscovery decorator)
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}
248 248
def pushdiscovery(stepname):
    """decorator for function performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated function will be added in order (this
    may matter).

    You can only use this decorator for a new step, if you want to wrap a step
    from an extension, change the pushdiscovery dictionary directly."""
    def register(func):
        # a step name may only be registered once
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return register
264 264
def _pushdiscovery(pushop):
    """Run all discovery steps, in registration order"""
    for name in pushdiscoveryorder:
        pushdiscoverymapping[name](pushop)
270 270
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changeset that need to be pushed"""
    # first find what the remote already has in common with us...
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    # ...then compute what we must send, reusing that discovery result
    pushop.outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
283 283
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    # on a non-publishing remote, only public changesets need a phase update
    extracond = ''
    if not publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                       outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    # heads to move to public if the changeset push succeeds / fails
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
318 318
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """collect the obsolescence markers relevant to the outgoing push"""
    repo = pushop.repo
    if not obsolete.isenabled(repo, obsolete.exchangeopt):
        return
    if not repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    relevant = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(relevant)
329 329
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """discover which bookmarks must be updated, added or deleted remotely

    Fills pushop.outbookmarks with (name, old-remote-id, new-remote-id)
    triples and sets pushop.bkresult to 2 when an explicitly requested
    bookmark exists on neither side."""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    # when pushing a subset of revs, only move bookmarks within that subset
    ancestors = ()
    if pushop.revs:
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    # bookmarks the user named on the command line; each match below
    # removes its entry, leftovers are unknown names
    explicit = set(pushop.bookmarks)

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in advdst + diverge + differ:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
380 380
def _pushcheckoutgoing(pushop):
    """validate the outgoing changesets before push

    Returns False when there is nothing to push; raises util.Abort when the
    push would publish obsolete/troubled changesets or create unwanted new
    heads (unless forced)."""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = {"unstable": _("push includes unstable changeset: %s!"),
                   "bumped": _("push includes bumped changeset: %s!"),
                   "divergent": _("push includes divergent changeset: %s!")}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(mst[ctx.troubles()[0]] % ctx)
        # bookmarks configured to be pushed alongside changesets may
        # legitimise an otherwise-new head
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
415 415
# List of names of steps to perform for an outgoing bundle2, order matters.
# (populated in registration order by the @b2partsgenerator decorator)
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}
423 423
def b2partsgenerator(stepname):
    """decorator for function generating bundle2 part

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the b2partsgenmapping dictionary directly."""
    def register(func):
        # each step name may only be registered once
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        b2partsgenorder.append(stepname)
        return func
    return register
439 439
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    if not pushop.force:
        bundler.newpart('b2x:check:heads', data=iter(pushop.remoteheads))
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = None
    cgversions = b2caps.get('b2x:changegroup')
    if not cgversions:  # 3.1 and 3.2 ship with an empty value
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing)
    else:
        # negotiate the highest changegroup version known to both sides
        cgversions = [v for v in cgversions if v in changegroup.packermap]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing,
                                                version=version)
    cgpart = bundler.newpart('b2x:changegroup', data=cg)
    if version is not None:
        cgpart.addparam('version', version)
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
480 480
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    Adds one 'b2x:pushkey' part per head to turn public and returns a reply
    handler that warns about any head the server refused or ignored."""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    # idiomatic negation, consistent with _pushb2bookmarks
    if 'b2x:pushkey' not in b2caps:
        return
    pushop.stepsdone.add('phases')
    part2node = []
    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('b2x:pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
    def handlereply(op):
        """warn about heads whose phase update was refused or ignored"""
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
512 512
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add an obsolescence-markers part to the outgoing bundle2"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    # leave the step to the legacy pushkey path when no common format exists
    supported = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(supported) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        buildobsmarkerspart(bundler, pushop.outobsmarkers)
523 523
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    Adds one 'b2x:pushkey' part per outgoing bookmark and returns a reply
    handler that reports each update's outcome."""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'b2x:pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode
    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('b2x:pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # empty old/new encode creation/deletion (see bookmsgmap)
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))


    def handlereply(op):
        """report per-bookmark results from the server reply"""
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply
566 566
567 567
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # pushback requires a transaction to apply server-sent data locally
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('b2x:replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    # (the replycaps part alone accounts for one part)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    try:
        trgetter = None
        if pushback:
            trgetter = pushop.trmanager.transaction
        op = bundle2.processbundle(pushop.repo, reply, trgetter)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    # let each part generator consume its part of the server reply
    for rephand in replyhandlers:
        rephand(op)
604 604
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    # skipped when the changegroup was already sent through bundle2
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                            or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
653 653
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            # everything common is public on a publishing server
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
709 709
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo

    Without a transaction manager (i.e. when the local lock could not be
    taken), no phase is changed; the user is merely told which moves were
    skipped."""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)
726 726
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    # skipped when markers were already sent through bundle2
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        rslts = []
        remotedata = obsolete._pushkeyescape(pushop.outobsmarkers)
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)
745 745
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    # skip on push failure, or when bundle2 already handled bookmarks
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        # empty old/new encode creation/deletion (see bookmsgmap)
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value from invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
767 767
class pulloperation(object):
    """A object that represent a single pull operation

    It purpose is to carry pull related state and very common operation.

    A new should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=()):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = bookmarks
        # do we force pull?
        self.force = force
        # transaction manager
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
824 824
class transactionmanager(object):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""
    def __init__(self, repo, source, url):
        # repo the transaction belongs to
        self.repo = repo
        # operation name ('pull', 'push-response', ...) used in hooks
        self.source = source
        # peer url, recorded in hook arguments
        self.url = url
        # lazily created transaction (None until first use)
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            repo = self.repo
            # pre-close hook may abort; pending data is made visible to it
            p = lambda: self._tr.writepending() and repo.root or ""
            repo.hook('b2x-pretransactionclose', throw=True, pending=p,
                      **self._tr.hookargs)
            # snapshot hookargs now; post-close runs after the lock is gone
            hookargs = dict(self._tr.hookargs)
            def runhooks():
                repo.hook('b2x-transactionclose', **hookargs)
            self._tr.addpostclose('b2x-hook-transactionclose',
                                  lambda tr: repo._afterlock(runhooks))
            self._tr.close()

    def release(self):
        """release transaction if created"""
        # rolls back if close() was never reached; no-op otherwise
        if self._tr is not None:
            self._tr.release()
863 863
def pull(repo, remote, heads=None, force=False, bookmarks=()):
    """Pull changesets, phases, bookmarks and obsmarkers from *remote*.

    Returns the pulloperation object describing what was done; its
    ``cgresult`` attribute carries the changegroup application result.
    Raises util.Abort when the remote requires features *repo* lacks."""
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks)
    if pullop.remote.local():
        unsupported = set(pullop.remote.requirements) - pullop.repo.supported
        if unsupported:
            raise util.Abort(_("required features are not"
                               " supported in the destination:"
                               " %s") % ', '.join(sorted(unsupported)))

    pullop.remotebookmarks = remote.listkeys('bookmarks')
    lock = pullop.repo.lock()
    try:
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        _pulldiscovery(pullop)
        # bundle2 is experimental: both sides must opt in
        usebundle2 = (pullop.repo.ui.configbool('experimental',
                                                'bundle2-exp', False)
                      and pullop.remote.capable('bundle2-exp'))
        if usebundle2:
            _pullbundle2(pullop)
        # each step no-ops if bundle2 already marked it done in stepsdone
        for step in (_pullchangeset, _pullphase, _pullbookmarks,
                     _pullobsolete):
            step(pullop)
        pullop.trmanager.close()
    finally:
        pullop.trmanager.release()
        lock.release()

    return pullop
892 892
893 893 # list of steps to perform discovery before pull
894 894 pulldiscoveryorder = []
895 895
896 896 # Mapping between step name and function
897 897 #
898 898 # This exists to help extensions wrap steps if necessary
899 899 pulldiscoverymapping = {}
900 900
901 901 def pulldiscovery(stepname):
902 902 """decorator for function performing discovery before pull
903 903
904 904 The function is added to the step -> function mapping and appended to the
905 905 list of steps. Beware that decorated function will be added in order (this
906 906 may matter).
907 907
908 908 You can only use this decorator for a new step, if you want to wrap a step
909 909 from an extension, change the pulldiscovery dictionary directly."""
910 910 def dec(func):
911 911 assert stepname not in pulldiscoverymapping
912 912 pulldiscoverymapping[stepname] = func
913 913 pulldiscoveryorder.append(stepname)
914 914 return func
915 915 return dec
916 916
917 917 def _pulldiscovery(pullop):
918 918 """Run all discovery steps"""
919 919 for stepname in pulldiscoveryorder:
920 920 step = pulldiscoverymapping[stepname]
921 921 step(pullop)
922 922
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will eventually handle all
    discovery at some point.  Fills in pullop.common, pullop.fetch and
    pullop.rheads."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, drop it from the unknown
        # remote heads and put it back into the common set.
        #
        # This is a hackish solution to catch most of the "common but locally
        # hidden" situations.  We do not perform discovery on the unfiltered
        # repository because it would end up doing a pathological amount of
        # round trips for a huge amount of changesets we do not care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but is not including a remote head, we'll not be able to
        # detect it.
        scommon = set(common)
        filteredrheads = []
        for n in rheads:
            if n in nm:
                # known locally (hence filtered): treat as already common
                if n not in scommon:
                    common.append(n)
            else:
                filteredrheads.append(n)
        if not filteredrheads:
            # every "missing" head was actually local: nothing to fetch
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
960 960
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    remotecaps = bundle2.bundle2caps(pullop.remote)
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}
    # pulling changegroup: mark the step done so _pullchangeset skips it
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    # 'cg' tells the server whether a changegroup part is wanted at all
    kwargs['cg'] = pullop.fetch
    if 'b2x:listkeys' in remotecaps:
        # piggy-back phase and bookmark data on the same round trip
        kwargs['listkeys'] = ['phase', 'bookmarks']
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(remotecaps)
        if obsolete.commonversion(remoteversions) is not None:
            # both sides speak a common obsmarker format: fetch them too
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    # let extensions amend kwargs before the actual request is issued
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)

    if pullop.fetch:
        results = [cg['return'] for cg in op.records['changegroup']]
        pullop.cgresult = changegroup.combineresults(results)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value
            _pullbookmarks(pullop)
1009 1007
1010 1008 def _pullbundle2extraprepare(pullop, kwargs):
1011 1009 """hook function so that extensions can extend the getbundle call"""
1012 1010 pass
1013 1011
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay opening the transaction as late as possible so we don't open
    # one for nothing, which would break a future useful rollback call.
    if 'changegroup' in pullop.stepsdone:
        # already fetched (e.g. via bundle2)
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    # pick the richest protocol the remote supports, most capable first
    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        # legacy full pull
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 pullop.remote.url())
1047 1045
def _pullphase(pullop):
    """Fetch phase data from the remote and fold it into the local repo.

    No-op when a previous step (e.g. bundle2) already handled phases."""
    if 'phases' not in pullop.stepsdone:
        _pullapplyphases(pullop, pullop.remote.listkeys('phases'))
1054 1052
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state

    *remotephases* is the raw pushkey 'phases' namespace dict as listed from
    the remote."""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and unpublishing: split the pulled subset into
        # public heads and draft heads according to the remote's data
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing: all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    # bind hot lookups once; phase movement must consider hidden changesets
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
1089 1087
def _pullbookmarks(pullop):
    """Fold the observed remote bookmark state into the local repository.

    No-op when a previous step (e.g. bundle2) already handled bookmarks."""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    bookmod.updatefromremote(pullop.repo.ui, pullop.repo,
                             pullop.remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)
1101 1099
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    `pullop.gettransaction` is a function that returns the pull transaction,
    creating one if necessary. We return the transaction to inform the
    calling code that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            # only open a transaction when there actually are markers
            tr = pullop.gettransaction()
        for key in sorted(remoteobs, reverse=True):
            if key.startswith('dump'):
                # each 'dumpN' key holds a base85-encoded obsstore chunk
                data = base85.b85decode(remoteobs[key])
                pullop.repo.obsstore.mergemarkers(tr, data)
        pullop.repo.invalidatevolatilesets()
    return tr
1125 1123
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    # advertise the bundle2 header plus the url-quoted capability blob
    return set(['HG2Y', 'bundle2=' + urllib.quote(capsblob)])
1132 1130
# Ordered list of step names used to build a bundle2 for getbundle;
# order matters.
getbundle2partsorder = []

# step name -> part-generating function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname):
    """decorator registering a bundle2 part generator for getbundle

    The decorated function is recorded in the step -> function mapping and
    its name appended to the ordered step list, so registration order is
    generation order.

    Only use this decorator for a brand new step; to wrap a step from an
    extension, attack the getbundle2partsmapping dictionary directly."""
    def register(fn):
        assert stepname not in getbundle2partsmapping
        getbundle2partsorder.append(stepname)
        getbundle2partsmapping[stepname] = fn
        return fn
    return register
1156 1154
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kind of parts)

    Could be a bundle HG10 or a bundle HG2Y depending on bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    change when more part types become available for bundle2.

    This is different from changegroup.getchangegroup that only returns an
    HG10 changegroup bundle. They may eventually get reunited in the future
    when we have a clearer idea of the API we want to use to query different
    data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    # bundle10 case: the client did not advertise bundle2 support
    if bundlecaps is None or 'HG2Y' not in bundlecaps:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            # bundle10 cannot carry any of the extra bundle2 arguments
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return changegroup.getchangegroup(repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps)

    # bundle20 case
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            # capability blob is url-quoted after the 'bundle2=' prefix
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    # each registered generator appends its part(s) to the bundler, in order
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **kwargs)

    return util.chunkbuffer(bundler.getchunks())
1200 1198
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = None
        cgversions = b2caps.get('b2x:changegroup')
        if not cgversions:  # 3.1 and 3.2 ship with an empty value
            # old client: fall back to the default changegroup format
            cg = changegroup.getchangegroupraw(repo, source, heads=heads,
                                               common=common,
                                               bundlecaps=bundlecaps)
        else:
            # negotiate: keep only versions both sides understand
            cgversions = [v for v in cgversions if v in changegroup.packermap]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = max(cgversions)
            cg = changegroup.getchangegroupraw(repo, source, heads=heads,
                                               common=common,
                                               bundlecaps=bundlecaps,
                                               version=version)

    if cg:
        part = bundler.newpart('b2x:changegroup', data=cg)
        if version is not None:
            # only advertise the version param when one was negotiated
            part.addparam('version', version)
1228 1226
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """Add one 'b2x:listkeys' part per requested pushkey namespace."""
    for namespace in kwargs.get('listkeys', ()):
        part = bundler.newpart('b2x:listkeys')
        part.addparam('namespace', namespace)
        part.data = pushkey.encodekeys(repo.listkeys(namespace).items())
1239 1237
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if not kwargs.get('obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    # markers relevant to every changeset reachable from the served heads
    subset = [ctx.node() for ctx in repo.set('::%ln', heads)]
    buildobsmarkerspart(bundler, repo.obsstore.relevantmarkers(subset))
1250 1248
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.  *their_heads* is either ['force'], the
    literal head list, or ['hashed', <sha1 of sorted heads>]."""
    heads = repo.heads()
    heads_hash = util.sha1(''.join(sorted(heads))).digest()
    uptodate = (their_heads == ['force']
                or their_heads == heads
                or their_heads == ['hashed', heads_hash])
    if not uptodate:
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)
1264 1262
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    tr = None
    lock = repo.lock()
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # bundle2 stream: apply under one transaction so hooks see a
            # consistent view and any failure rolls everything back
            try:
                tr = repo.transaction('unbundle')
                tr.hookargs['source'] = source
                tr.hookargs['url'] = url
                tr.hookargs['bundle2-exp'] = '1'
                r = bundle2.processbundle(repo, cg, lambda: tr).reply
                # pending callback lets pre-close hooks see uncommitted data
                p = lambda: tr.writepending() and repo.root or ""
                repo.hook('b2x-pretransactionclose', throw=True, pending=p,
                          **tr.hookargs)
                hookargs = dict(tr.hookargs)
                def runhooks():
                    repo.hook('b2x-transactionclose', **hookargs)
                # run the post-close hook once the enclosing lock is dropped
                tr.addpostclose('b2x-hook-transactionclose',
                                lambda tr: repo._afterlock(runhooks))
                tr.close()
            except Exception, exc:
                # tag the exception so the wire protocol layer knows the
                # failure happened while processing a bundle2 payload
                exc.duringunbundle2 = True
                raise
        else:
            # plain changegroup (bundle10) stream
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        if tr is not None:
            tr.release()
        lock.release()
    return r
General Comments 0
You need to be logged in to leave comments. Login now