##// END OF EJS Templates
exchange: swap "push" for "pull" in pulloperation docstring
Mike Edgar -
r23219:61cd79ac default
parent child Browse files
Show More
@@ -1,1298 +1,1298
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex, nullid
10 10 import errno, urllib
11 11 import util, scmutil, changegroup, base85, error
12 12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 13
def readbundle(ui, fh, fname, vfs=None):
    """Identify the bundle type of stream ``fh`` and return an unpacker.

    ``fname`` is used for error reporting; when empty the data is assumed
    to come from a stream (and a headerless stream starting with '\\0' is
    fixed up as uncompressed HG10 data).  When ``vfs`` is provided, the
    file name is joined against it for display purposes.

    Returns a ``changegroup.cg1unpacker`` for HG10 bundles or a
    ``bundle2.unbundle20`` for HG2Y (experimental bundle2) streams.
    Raises util.Abort on unknown magic or version.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'  # headerless data is uncompressed
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            # compression marker follows the 4-byte header in HG10 bundles
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version == '2Y':
        return bundle2.unbundle20(ui, fh, header=magic + version)
    else:
        raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39 39
def buildobsmarkerspart(bundler, markers):
    """append a B2X:OBSMARKERS part carrying <markers> to ``bundler``

    Returns the newly created part, or None when ``markers`` is empty.
    Raises ValueError if the bundler doesn't support any known obsmarker
    format.
    """
    if not markers:
        return None
    supported = bundle2.obsmarkersversion(bundler.capabilities)
    fmtversion = obsolete.commonversion(supported)
    if fmtversion is None:
        raise ValueError('bundler do not support common obsmarker format')
    data = obsolete.encodemarkers(markers, True, version=fmtversion)
    return bundler.newpart('B2X:OBSMARKERS', data=data)
54 54
class pushoperation(object):
    """An object that represents a single push operation

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # step already performed
        # (used to check what steps have been already performed through
        # bundle2, so the legacy code paths can skip them)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(self.outgoing.common)
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

# mapping of message used when pushing bookmark
bookmsgmap = {'update': (_("updating bookmark %s\n"),
                         _('updating bookmark %s failed!\n')),
              'export': (_("exporting bookmark %s\n"),
                         _('exporting bookmark %s failed!\n')),
              'delete': (_("deleting remote bookmark %s\n"),
                         _('deleting remote bookmark %s failed!\n')),
              }
161 161
162 162
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return the pushoperation object used; its
    ``cgresult`` attribute carries an integer:
    - None means nothing to push
    - 0 means HTTP error
    - 1 means we pushed and remote head count is unchanged *or*
      we have outgoing changesets but refused to push
    - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    locallock = None
    try:
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # old-style push needs a lock on the remote side
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
                                          False)
                and pushop.remote.capable('bundle2-exp')):
                _pushbundle2(pushop)
            # the steps below check pushop.stepsdone and skip anything the
            # bundle2 push already performed
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
    finally:
        if locallock is not None:
            locallock.release()

    return pushop
230 230
# list of steps to perform discovery before push, in order
# (populated by the pushdiscovery decorator below)
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}
238 238
def pushdiscovery(stepname):
    """decorator registering a discovery step to run before push

    The decorated function is recorded in the step -> function mapping and
    its name appended to the ordered step list; registration order matters.

    Only use this for brand new steps.  To wrap an existing step from an
    extension, modify the pushdiscoverymapping dictionary directly."""
    def register(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return register
254 254
def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order"""
    for name in pushdiscoveryorder:
        pushdiscoverymapping[name](pushop)
260 260
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    unfi = pushop.repo.unfiltered()
    commoninc = discovery.findcommonincoming(unfi, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(unfi, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
274 274
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    extracond = ''
    if not publishing:
        extracond = ' and public()'
    # note: %% escapes the placeholders so they survive the % below and are
    # filled in by unfi.set() later
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                       outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    # heads to turn public alongside the changesets (success case)
    pushop.outdatedphases = future
    # heads to turn public if the changeset push fails
    pushop.fallbackoutdatedphases = fallback
309 309
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """collect the obsolescence markers relevant to the pushed set"""
    repo = pushop.repo
    if not obsolete.isenabled(repo, obsolete.exchangeopt):
        return
    if not repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (ctx.node() for ctx in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
320 320
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """compare local and remote bookmarks and fill ``pushop.outbookmarks``

    Each entry is an (name, old-remote-value, new-value) tuple.  Bookmarks
    named in ``pushop.bookmarks`` but found nowhere set ``bkresult`` to 2.
    """
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # only move bookmarks pointing into the pushed subset
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    explicit = set(pushop.bookmarks)

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
    # bookmarks that advanced locally: push the new position
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in advdst + diverge + differ:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
371 371
def _pushcheckoutgoing(pushop):
    """validate the outgoing set before pushing it

    Returns False when there is nothing to push.  Unless ``--force`` was
    given, raises util.Abort when the outgoing heads include obsolete or
    troubled changesets, or when head checks fail."""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = {"unstable": _("push includes unstable changeset: %s!"),
                   "bumped": _("push includes bumped changeset: %s!"),
                   "divergent": _("push includes divergent changeset: %s!")}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(mst[ctx.troubles()[0]] % ctx)
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
406 406
# List of names of steps to perform for an outgoing bundle2, order matters.
# (populated by the b2partsgenerator decorator below)
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}
414 414
def b2partsgenerator(stepname):
    """decorator registering a bundle2 part generator for push

    The decorated function is recorded in the step -> function mapping and
    its name appended to the ordered step list; registration order matters.

    Only use this for brand new steps.  To wrap an existing step from an
    extension, modify the b2partsgenmapping dictionary directly."""
    def register(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        b2partsgenorder.append(stepname)
        return func
    return register
430 430
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    if not pushop.force:
        bundler.newpart('B2X:CHECK:HEADS', data=iter(pushop.remoteheads))
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = None
    cgversions = b2caps.get('b2x:changegroup')
    if not cgversions:  # 3.1 and 3.2 ship with an empty value
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing)
    else:
        # negotiate the highest changegroup version known to both sides
        cgversions = [v for v in cgversions if v in changegroup.packermap]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing,
                                                version=version)
    cgpart = bundler.newpart('B2X:CHANGEGROUP', data=cg)
    if version is not None:
        cgpart.addparam('version', version)
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
471 471
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    Adds one b2x:pushkey part per outdated head to turn public and returns
    a reply handler that checks the server results.
    """
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    # use the idiomatic 'not in' (also matches _pushb2bookmarks)
    if 'b2x:pushkey' not in b2caps:
        # server cannot do pushkey through bundle2; leave the step for the
        # standalone pushkey fallback in _pushsyncphase
        return
    pushop.stepsdone.add('phases')
    part2node = []
    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('b2x:pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
    def handlereply(op):
        """warn about updates the server ignored or rejected"""
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
503 503
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add an obsolescence markers part to the bundle2 push, if supported"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    supported = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(supported) is None:
        # no common obsmarker format; step stays undone so the legacy
        # _pushobsolete path can take over
        return
    pushop.stepsdone.add('obsmarkers')
    markers = pushop.outobsmarkers
    if markers:
        buildobsmarkerspart(bundler, markers)
514 514
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'b2x:pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode
    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('b2x:pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # empty old value -> creation; empty new value -> deletion
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))


    def handlereply(op):
        """report per-bookmark results from the server reply"""
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply
557 557
558 558
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    Assembles one part per registered b2partsgen step (changesets, phases,
    obsmarkers, bookmarks) and dispatches server replies to the handlers
    the generators returned."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo))
    bundler.newpart('b2x:replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            # generators may return a handler to process server replies
            replyhandlers.append(ret)
    # do not push if nothing to push: the only part is the replycaps one
    # we added ourselves
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    try:
        op = bundle2.processbundle(pushop.repo, reply)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    for rephand in replyhandlers:
        rephand(op)
588 588
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    No-op when the changesets step was already done through bundle2.
    Stores the result in ``pushop.cgresult``."""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
637 637
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        b2caps = bundle2.bundle2caps(pushop.remote)
        if 'b2x:pushkey' in b2caps:
            # server supports bundle2, let's do a batched push through it
            #
            # This will eventually be unified with the changesets bundle2 push
            bundler = bundle2.bundle20(pushop.ui, b2caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo))
            bundler.newpart('b2x:replycaps', data=capsblob)
            part2node = []
            enc = pushkey.encode
            for newremotehead in outdated:
                part = bundler.newpart('b2x:pushkey')
                part.addparam('namespace', enc('phases'))
                part.addparam('key', enc(newremotehead.hex()))
                part.addparam('old', enc(str(phases.draft)))
                part.addparam('new', enc(str(phases.public)))
                part2node.append((part.id, newremotehead))
            stream = util.chunkbuffer(bundler.getchunks())
            try:
                reply = pushop.remote.unbundle(stream, ['force'], 'push')
                op = bundle2.processbundle(pushop.repo, reply)
            except error.BundleValueError, exc:
                raise util.Abort('missing support for %s' % exc)
            # check the per-part replies and warn on ignored/failed updates
            for partid, node in part2node:
                partrep = op.records.getreplies(partid)
                results = partrep['pushkey']
                assert len(results) <= 1
                msg = None
                if not results:
                    msg = _('server ignored update of %s to public!\n') % node
                elif not int(results[0]['return']):
                    msg = _('updating %s to public failed!\n') % node
                if msg is not None:
                    pushop.ui.warn(msg)

        else:
            # fallback to independent pushkey command
            for newremotehead in outdated:
                r = pushop.remote.pushkey('phases',
                                          newremotehead.hex(),
                                          str(phases.draft),
                                          str(phases.public))
                if not r:
                    pushop.ui.warn(_('updating %s to public failed!\n')
                                   % newremotehead)
729 729
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo

    When the local repo could not be locked, no phase is changed; the user
    is merely informed when a move would have applied."""
    if pushop.locallocked:
        tr = pushop.repo.transaction('push-phase-sync')
        try:
            phases.advanceboundary(pushop.repo, tr, phase, nodes)
            tr.close()
        finally:
            tr.release()
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)
748 748
def _pushobsolete(pushop):
    """push obsolescence markers to the remote through pushkey

    No-op when the obsmarkers step was already done through bundle2."""
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    remotedata = obsolete._pushkeyescape(pushop.outobsmarkers)
    # reverse sort to ensure we end with dump0
    results = [remote.pushkey('obsolete', key, '', remotedata[key])
               for key in sorted(remotedata, reverse=True)]
    if not all(results):
        repo.ui.warn(_('failed to push some obsolete markers!\n'))
767 767
def _pushbookmark(pushop):
    """Update bookmark position on remote

    Skipped entirely when the changegroup push failed (cgresult == 0) or
    when the bookmarks step was already done through bundle2."""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        # empty old value -> creation; empty new value -> deletion
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value from invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
789 789
class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull related state and very common operations.

    A new pulloperation should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=()):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = bookmarks
        # do we force pull?
        self.force = force
        # the name the pull transaction
        self._trname = 'pull\n' + util.hidepassword(remote.url())
        # hold the transaction once created
        self._tr = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        """get appropriate pull transaction, creating it if needed"""
        if self._tr is None:
            self._tr = self.repo.transaction(self._trname)
            self._tr.hookargs['source'] = 'pull'
            self._tr.hookargs['url'] = self.remote.url()
        return self._tr

    def closetransaction(self):
        """close transaction if created"""
        if self._tr is not None:
            repo = self.repo
            # the pending callback lets pre-close hooks see the data that
            # is not committed yet
            p = lambda: self._tr.writepending() and repo.root or ""
            repo.hook('b2x-pretransactionclose', throw=True, pending=p,
                      **self._tr.hookargs)
            self._tr.close()
            hookargs = dict(self._tr.hookargs)
            def runhooks():
                repo.hook('b2x-transactionclose', **hookargs)
            repo._afterlock(runhooks)

    def releasetransaction(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
870 870
def pull(repo, remote, heads=None, force=False, bookmarks=()):
    """pull changesets (and phases, bookmarks, obsmarkers) from a remote repo

    Builds a pulloperation and runs discovery plus each pull step under the
    repo lock.  When both sides enable the experimental bundle2 exchange,
    most steps are fetched in a single bundle2 round trip; the individual
    _pull* steps below then no-op via pullop.stepsdone.

    Returns the pulloperation (its cgresult carries the changegroup
    return code).
    """
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks)
    if pullop.remote.local():
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    pullop.remotebookmarks = remote.listkeys('bookmarks')
    lock = pullop.repo.lock()
    try:
        _pulldiscovery(pullop)
        if (pullop.repo.ui.configbool('experimental', 'bundle2-exp', False)
            and pullop.remote.capable('bundle2-exp')):
            _pullbundle2(pullop)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.closetransaction()
    finally:
        # release before unlocking: transaction cleanup needs the lock
        pullop.releasetransaction()
        lock.release()

    return pullop
898 898
# Ordered list of step names to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
906 906
def pulldiscovery(stepname):
    """decorator registering a discovery step to run before pull

    The decorated function is recorded in the step -> function mapping and
    its name appended to the ordered step list.  Note that registration
    happens in decoration order (this may matter).

    Only use this decorator for a brand new step; to wrap a step from an
    extension, change the pulldiscoverymapping dictionary directly."""
    def register(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoveryorder.append(stepname)
        pulldiscoverymapping[stepname] = func
        return func
    return register
922 922
def _pulldiscovery(pullop):
    """Run every registered discovery step, in registration order"""
    for name in pulldiscoveryorder:
        pulldiscoverymapping[name](pullop)
928 928
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will change to handle all
    discovery at some point.

    Fills in pullop.common, pullop.fetch and pullop.rheads from the
    common-changeset negotiation with the remote.
    """
    tmp = discovery.findcommonincoming(pullop.repo.unfiltered(),
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    pullop.common, pullop.fetch, pullop.rheads = tmp
940 940
941 941 def _pullbundle2(pullop):
942 942 """pull data using bundle2
943 943
944 944 For now, the only supported data are changegroup."""
945 945 remotecaps = bundle2.bundle2caps(pullop.remote)
946 946 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
947 947 # pulling changegroup
948 948 pullop.stepsdone.add('changegroup')
949 949
950 950 kwargs['common'] = pullop.common
951 951 kwargs['heads'] = pullop.heads or pullop.rheads
952 952 kwargs['cg'] = pullop.fetch
953 953 if 'b2x:listkeys' in remotecaps:
954 954 kwargs['listkeys'] = ['phase', 'bookmarks']
955 955 if not pullop.fetch:
956 956 pullop.repo.ui.status(_("no changes found\n"))
957 957 pullop.cgresult = 0
958 958 else:
959 959 if pullop.heads is None and list(pullop.common) == [nullid]:
960 960 pullop.repo.ui.status(_("requesting all changes\n"))
961 961 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
962 962 remoteversions = bundle2.obsmarkersversion(remotecaps)
963 963 if obsolete.commonversion(remoteversions) is not None:
964 964 kwargs['obsmarkers'] = True
965 965 pullop.stepsdone.add('obsmarkers')
966 966 _pullbundle2extraprepare(pullop, kwargs)
967 967 if kwargs.keys() == ['format']:
968 968 return # nothing to pull
969 969 bundle = pullop.remote.getbundle('pull', **kwargs)
970 970 try:
971 971 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
972 972 except error.BundleValueError, exc:
973 973 raise util.Abort('missing support for %s' % exc)
974 974
975 975 if pullop.fetch:
976 976 changedheads = 0
977 977 pullop.cgresult = 1
978 978 for cg in op.records['changegroup']:
979 979 ret = cg['return']
980 980 # If any changegroup result is 0, return 0
981 981 if ret == 0:
982 982 pullop.cgresult = 0
983 983 break
984 984 if ret < -1:
985 985 changedheads += ret + 1
986 986 elif ret > 1:
987 987 changedheads += ret - 1
988 988 if changedheads > 0:
989 989 pullop.cgresult = 1 + changedheads
990 990 elif changedheads < 0:
991 991 pullop.cgresult = -1 + changedheads
992 992
993 993 # processing phases change
994 994 for namespace, value in op.records['listkeys']:
995 995 if namespace == 'phases':
996 996 _pullapplyphases(pullop, value)
997 997
998 998 # processing bookmark update
999 999 for namespace, value in op.records['listkeys']:
1000 1000 if namespace == 'bookmarks':
1001 1001 pullop.remotebookmarks = value
1002 1002 _pullbookmarks(pullop)
1003 1003
1004 1004 def _pullbundle2extraprepare(pullop, kwargs):
1005 1005 """hook function so that extensions can extend the getbundle call"""
1006 1006 pass
1007 1007
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo

    Picks the richest protocol the remote supports (getbundle,
    changegroupsubset, plain changegroup) and applies the resulting
    changegroup, storing the return code in pullop.cgresult.
    No-op when a bundle2 pull already fetched the changegroup.
    """
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        # old remote, full pull only
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 pullop.remote.url())
1041 1041
1042 1042 def _pullphase(pullop):
1043 1043 # Get remote phases data from remote
1044 1044 if 'phases' in pullop.stepsdone:
1045 1045 return
1046 1046 remotephases = pullop.remote.listkeys('phases')
1047 1047 _pullapplyphases(pullop, remotephases)
1048 1048
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state

    remotephases is the raw listkeys 'phases' mapping from the remote.
    Publishing (or pre-phase) remotes make every pulled changeset public;
    non-publishing remotes are analyzed to split public vs draft heads.
    Phases only move forward here (advanceboundary), never backward.
    """
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and unpublishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        # transaction is created lazily: only open one if there is work
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
1083 1083
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one

    Uses pullop.remotebookmarks (captured earlier, or refreshed by a
    bundle2 listkeys reply) and honors explicitly requested bookmarks.
    """
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)
1095 1095
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    `pullop.gettransaction` is a function that returns the pull
    transaction, creating one if necessary. We return the transaction to
    inform the calling code that a new transaction has been created (when
    applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            # merge highest-numbered dumps first (reverse sorted keys)
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = base85.b85decode(remoteobs[key])
                    pullop.repo.obsstore.mergemarkers(tr, data)
            pullop.repo.invalidatevolatilesets()
    return tr
1119 1119
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    return set(['HG2Y', 'bundle2=' + urllib.quote(capsblob)])
1126 1126
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}
1134 1134
def getbundle2partsgenerator(stepname):
    """decorator for function generating bundle2 part for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, change the getbundle2partsmapping dictionary directly."""
    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        getbundle2partsorder.append(stepname)
        return func
    return dec
1150 1150
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kind of parts)

    Could be a bundle HG10 or a bundle HG2Y depending on bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    change when more part types become available for bundle2.

    This is different from changegroup.getchangegroup that only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when we
    have a clearer idea of the API we want to use to query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    # bundle10 case
    if bundlecaps is None or 'HG2Y' not in bundlecaps:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        # bundle10 cannot carry anything but a changegroup
        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return changegroup.getchangegroup(repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps)

    # bundle20 case
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    # each registered part generator decides itself whether to emit a part
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **kwargs)

    return util.chunkbuffer(bundler.getchunks())
1194 1194
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle

    Negotiates the changegroup version with the client's advertised
    b2x:changegroup capability; emits nothing when 'cg' was requested False.
    """
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = None
        cgversions = b2caps.get('b2x:changegroup')
        if not cgversions: # 3.1 and 3.2 ship with an empty value
            cg = changegroup.getchangegroupraw(repo, source, heads=heads,
                                               common=common,
                                               bundlecaps=bundlecaps)
        else:
            # pick the highest version supported by both sides
            cgversions = [v for v in cgversions if v in changegroup.packermap]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = max(cgversions)
            cg = changegroup.getchangegroupraw(repo, source, heads=heads,
                                               common=common,
                                               bundlecaps=bundlecaps,
                                               version=version)

    if cg:
        part = bundler.newpart('b2x:changegroup', data=cg)
        if version is not None:
            part.addparam('version', version)
1222 1222
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle

    One b2x:listkeys part is emitted per namespace named in the 'listkeys'
    getbundle argument (e.g. 'phases', 'bookmarks').
    """
    listkeys = kwargs.get('listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('b2x:listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)
1233 1233
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle

    Only markers relevant to the ancestors of the requested heads are sent.
    """
    if kwargs.get('obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        buildobsmarkerspart(bundler, markers)
1244 1244
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = util.sha1(''.join(sorted(heads))).digest()
    unchanged = (their_heads == ['force']
                 or their_heads == heads
                 or their_heads == ['hashed', heads_hash])
    if not unchanged:
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)
1258 1258
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    tr = None
    lock = repo.lock()
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # presence of 'params' marks a bundle2 stream
            try:
                tr = repo.transaction('unbundle')
                tr.hookargs['source'] = source
                tr.hookargs['url'] = url
                tr.hookargs['bundle2-exp'] = '1'
                r = bundle2.processbundle(repo, cg, lambda: tr).reply
                p = lambda: tr.writepending() and repo.root or ""
                repo.hook('b2x-pretransactionclose', throw=True, pending=p,
                          **tr.hookargs)
                tr.close()
                hookargs = dict(tr.hookargs)
                def runhooks():
                    repo.hook('b2x-transactionclose', **hookargs)
                repo._afterlock(runhooks)
            except Exception, exc:
                # tag the exception so callers can report bundle2 context
                exc.duringunbundle2 = True
                raise
        else:
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        if tr is not None:
            tr.release()
        lock.release()
    return r
General Comments 0
You need to be logged in to leave comments. Login now