##// END OF EJS Templates
push: acquire local 'wlock' if "pushback" is expected (BC) (issue4596)...
Pierre-Yves David -
r24754:5dc5cd7a default
parent child Browse files
Show More
@@ -1,1301 +1,1308 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex, nullid
10 10 import errno, urllib
11 11 import util, scmutil, changegroup, base85, error
12 12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 13 import lock as lockmod
14 14
def readbundle(ui, fh, fname, vfs=None):
    """Return the appropriate unbundler for the bundle open at ``fh``.

    The first four bytes identify the format. A stream beginning with
    '\\0' instead of 'HG' is wrapped and treated as an uncompressed HG10
    bundle. ``fname`` is used for error reporting only; it defaults to
    "stream" and is joined against ``vfs`` when one is provided.
    """
    hdr = changegroup.readexactly(fh, 4)

    comp = None  # compression algorithm, read lazily for HG10
    if not fname:
        fname = "stream"
    headerless = not hdr.startswith('HG') and hdr.startswith('\0')
    if headerless:
        # push the consumed bytes back and pretend this is HG10UN
        fh = changegroup.headerlessfixup(fh, hdr)
        hdr = "HG10"
        comp = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic = hdr[0:2]
    version = hdr[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if comp is None:
            comp = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, comp)
    if version.startswith('2'):
        return bundle2.getunbundler(ui, fh, header=magic + version)
    raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
def buildobsmarkerspart(bundler, markers):
    """append an 'obsmarkers' part carrying <markers> to ``bundler``

    Nothing is added (and None is returned) when ``markers`` is empty.
    Raises ValueError when no obsmarker format is common to both sides.
    """
    if not markers:
        return None
    supported = bundle2.obsmarkersversion(bundler.capabilities)
    fmt = obsolete.commonversion(supported)
    if fmt is None:
        raise ValueError('bundler do not support common obsmarker format')
    data = obsolete.encodemarkers(markers, True, version=fmt)
    return bundler.newpart('obsmarkers', data=data)
55 55
56 56 def _canusebundle2(op):
57 57 """return true if a pull/push can use bundle2
58 58
59 59 Feel free to nuke this function when we drop the experimental option"""
60 60 return (op.repo.ui.configbool('experimental', 'bundle2-exp', False)
61 61 and op.remote.capable('bundle2'))
62 62
63 63
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push related state and very common operations.

    A new one should be created at the beginning of each push and discarded
    afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager (only set once a local lock has been acquired)
        self.trmanager = None

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # not target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(self.outgoing.common)
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

    # mapping of message used when pushing bookmark
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }
172 172
173 173
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
    if pushop.remote.local():
        # pushing over the filesystem: the destination must understand all
        # of our repository requirements
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    localwlock = locallock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        # (wlock must always be acquired before lock)
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if _canusebundle2(pushop) and maypushback:
            localwlock = pushop.repo.wlock()
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        if pushop.locallocked:
            # a transaction manager is only useful if we can write locally
            pushop.trmanager = transactionmanager(repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # old-style push: we must lock the remote repository ourselves
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if _canusebundle2(pushop):
                _pushbundle2(pushop)
            # the remaining steps are no-ops when bundle2 already did the work
            # (they check pushop.stepsdone)
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
            if pushop.trmanager:
                pushop.trmanager.close()
    finally:
        # release in the reverse order of acquisition
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()
        if localwlock is not None:
            localwlock.release()

    return pushop
247 254
# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator registering a discovery step to run before push

    The decorated function is stored in ``pushdiscoverymapping`` and its
    name appended to ``pushdiscoveryorder``; steps therefore execute in
    registration (definition) order.

    Only register brand new steps this way. An extension wishing to wrap
    an existing step should modify ``pushdiscoverymapping`` directly."""
    def register(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return register
271 278
def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order"""
    for name in pushdiscoveryorder:
        pushdiscoverymapping[name](pushop)
277 284
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changeset that need to be pushed"""
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    # reuse the incoming data so outgoing discovery does not redo the work
    pushop.outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
290 297
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    # pheads/droots come from phases.analyzeremotephases — presumably the
    # remote's public heads and draft roots; verify against that helper
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    extracond = ''
    if not publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    # heads to turn public if the changeset push succeeds...
    pushop.outdatedphases = future
    # ...and if it fails
    pushop.fallbackoutdatedphases = fallback
325 332
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """select obsolescence markers relevant to the pushed changesets"""
    repo = pushop.repo
    # only exchange markers when enabled locally, present, and when the
    # remote advertises the 'obsolete' pushkey namespace
    if not obsolete.isenabled(repo, obsolete.exchangeopt):
        return
    if not repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
336 343
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """compare local and remote bookmarks and fill ``pushop.outbookmarks``

    Each entry is a (bookmark, old-remote-value, new-remote-value) triple;
    an empty old value marks an addition on the remote, an empty new value
    a deletion. Explicitly pushed bookmarks unknown on both sides trigger a
    warning and set ``bkresult`` to 2."""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # restrict bookmark updates to the pushed subset
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    explicit = set(pushop.bookmarks)

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
    # fast-forward updates are pushed even without being explicit
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in advdst + diverge + differ:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
387 394
def _pushcheckoutgoing(pushop):
    """validate that the computed outgoing set may be pushed

    Returns False when there is nothing to push. Unless --force, aborts
    when an outgoing head is obsolete or troubled and runs the new-head
    checks from discovery.checkheads. Returns True when the push may
    proceed."""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = {"unstable": _("push includes unstable changeset: %s!"),
                   "bumped": _("push includes bumped changeset: %s!"),
                   "divergent": _("push includes divergent changeset: %s!")}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(mst[ctx.troubles()[0]] % ctx)
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
422 429
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator registering a bundle2 part generator

    The decorated function is stored in ``b2partsgenmapping`` and its name
    appended to ``b2partsgenorder`` (or inserted at position ``idx`` when
    given); parts are therefore generated in registration order.

    Only register brand new steps this way. An extension wishing to wrap
    an existing step should modify ``b2partsgenmapping`` directly."""
    def register(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return register
449 456
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    if not pushop.force:
        # server will abort if its heads changed since discovery
        bundler.newpart('check:heads', data=iter(pushop.remoteheads))
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = None
    cgversions = b2caps.get('changegroup')
    if not cgversions:  # 3.1 and 3.2 ship with an empty value
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing)
    else:
        # negotiate the highest changegroup version known to both sides
        cgversions = [v for v in cgversions if v in changegroup.packermap]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing,
                                                version=version)
    cgpart = bundler.newpart('changegroup', data=cg)
    if version is not None:
        cgpart.addparam('version', version)
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
490 497
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    Adds one 'pushkey' part per outdated remote head, turning it from
    draft to public, and returns a reply handler warning about heads the
    server ignored or failed to update."""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    # phase exchange over bundle2 rides on pushkey parts
    # (idiom fix: was "not 'pushkey' in b2caps"; now consistent with
    # _pushb2bookmarks)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('phases')
    part2node = []
    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
    def handlereply(op):
        """report per-head pushkey results from the server reply"""
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
522 529
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add an obsmarkers part when both sides share a marker format"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    supported = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(supported) is None:
        # no common format: leave the step for the legacy pushkey path
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        buildobsmarkerspart(bundler, pushop.outobsmarkers)
533 540
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    One 'pushkey' part is emitted per outgoing bookmark; the reply handler
    reports each update/export/delete result using ``bookmsgmap``."""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode
    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # empty 'old' means the bookmark is new on the remote,
        # empty 'new' means it is being deleted there
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))


    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1
    return handlereply
576 583
577 584
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # a reply bundle is only requested when a transaction manager exists to
    # receive it and the user enabled experimental.bundle2.pushback
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push (only the replycaps part was added)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    try:
        trgetter = None
        if pushback:
            trgetter = pushop.trmanager.transaction
        op = bundle2.processbundle(pushop.repo, reply, trgetter)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    # let each part generator process the server's answers
    for rephand in replyhandlers:
        rephand(op)
614 621
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    Skipped entirely when bundle2 already performed the 'changesets'
    step. Stores the result in ``pushop.cgresult``."""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
663 670
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely

    Applies the remote's phase data to the local repo, then advances
    outdated remote heads to public (via pushkey) unless bundle2 already
    performed the 'phases' step."""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
719 726
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo

    Without a transaction manager the repo is not locked, so nothing is
    changed; the user is merely told which phase updates were skipped."""
    if not pushop.trmanager:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        skipped = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if skipped:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)
        return
    phases.advanceboundary(pushop.repo,
                           pushop.trmanager.transaction(),
                           phase,
                           nodes)
736 743
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote

    Legacy (non-bundle2) path: markers are sent through the 'obsolete'
    pushkey namespace, chunked by obsolete._pushkeyescape(). A warning is
    printed when the remote refuses any chunk."""
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        rslts = []
        remotedata = obsolete._pushkeyescape(pushop.outobsmarkers)
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        # idiom fix: any falsy result means at least one chunk was refused
        # (was: "if [r for r in rslts if not r]:" — built a throwaway list)
        if not all(rslts):
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)
755 762
def _pushbookmark(pushop):
    """Update bookmark position on remote

    Legacy (non-bundle2) path, skipped when the changeset push failed
    (cgresult == 0) or bundle2 already performed the 'bookmarks' step."""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        # empty 'old' means addition on the remote, empty 'new' deletion
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
    # discovery can have set the value from an invalid entry
    if pushop.bkresult is not None:
        pushop.bkresult = 1
777 784
class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=()):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = bookmarks
        # do we force pull?
        self.force = force
        # transaction manager
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
834 841
class transactionmanager(object):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            # the transaction name records the operation and the
            # password-stripped peer url, for hooks and debugging
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            tr = self.repo.transaction(trname)
            tr.hookargs['source'] = self.source
            tr.hookargs['url'] = self.url
            self._tr = tr
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is None:
            return
        self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is None:
            return
        self._tr.release()
864 871
def pull(repo, remote, heads=None, force=False, bookmarks=()):
    """Pull changesets, phases, bookmarks and obsmarkers from ``remote``.

    Returns the ``pulloperation`` object carrying the outcome of the
    individual steps (e.g. ``cgresult`` for the changegroup application).
    """
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks)
    if pullop.remote.local():
        # refuse to pull from a local repo that uses features we cannot read
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # remote bookmarks are fetched before taking the local lock so the
    # network round trip happens outside the locked section
    pullop.remotebookmarks = remote.listkeys('bookmarks')
    lock = pullop.repo.lock()
    try:
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        _pulldiscovery(pullop)
        if _canusebundle2(pullop):
            _pullbundle2(pullop)
        # each step below is a no-op if bundle2 already recorded it in
        # pullop.stepsdone
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        # release the transaction before the lock that protects it
        pullop.trmanager.release()
        lock.release()

    return pullop
892 899
# ordered list of step names: discovery steps run in this order before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
900 907
def pulldiscovery(stepname):
    """decorator for function performing discovery before pull

    The decorated function is added to the step -> function mapping and
    appended to the list of steps. Beware that decorated functions will be
    added in definition order (this may matter).

    You can only use this decorator for a new step; to wrap a step from an
    extension, change the pulldiscovery dictionary directly."""
    def register(func):
        # refuse to silently overwrite an existing step
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return register
916 923
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    for stepname in pulldiscoveryorder:
        # dispatch through the mapping so extensions can wrap steps
        pulldiscoverymapping[stepname](pullop)
922 929
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; it may eventually handle all
    discovery at some point.

    Fills ``pullop.common``, ``pullop.fetch`` and ``pullop.rheads``.
    """
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    # unfiltered nodemap: lets us recognize remote heads that exist locally
    # but are hidden by the current filter
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, drop it from the unknown
        # remote heads and put it back in common.
        #
        # This is a hackish solution to catch most of the "common but locally
        # hidden" situations. We do not perform discovery on the unfiltered
        # repository because it would end up doing a pathological amount of
        # round trips for a huge amount of changesets we do not care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but is not included in a remote head, we'll not be able to
        # detect it,
        scommon = set(common)
        filteredrheads = []
        for n in rheads:
            if n in nm:
                # known locally: treat as common even though filtered
                if n not in scommon:
                    common.append(n)
            else:
                filteredrheads.append(n)
        if not filteredrheads:
            # every remote head was known locally: nothing to fetch
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
960 967
def _pullbundle2(pullop):
    """pull data using bundle2

    Requests changegroup, listkeys (phases and bookmarks) and obsmarkers in a
    single bundle2 exchange, then marks the corresponding steps as done so
    the legacy per-item pull functions skip them."""
    remotecaps = bundle2.bundle2caps(pullop.remote)
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}
    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch
    if 'listkeys' in remotecaps:
        kwargs['listkeys'] = ['phase', 'bookmarks']
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        # only request obsmarkers if both sides share a marker format
        remoteversions = bundle2.obsmarkersversion(remotecaps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    # let extensions tweak the getbundle arguments
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)

    if pullop.fetch:
        results = [cg['return'] for cg in op.records['changegroup']]
        pullop.cgresult = changegroup.combineresults(results)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value
            _pullbookmarks(pullop)
1007 1014
1008 1015 def _pullbundle2extraprepare(pullop, kwargs):
1009 1016 """hook function so that extensions can extend the getbundle call"""
1010 1017 pass
1011 1018
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo

    No-op when a bundle2 pull already fetched the changegroup. Picks the
    richest protocol the remote supports (getbundle, changegroupsubset, or
    plain changegroup) and records the application result in
    ``pullop.cgresult``."""
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        # old protocol, full pull only
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 pullop.remote.url())
1045 1052
def _pullphase(pullop):
    """fetch remote phase data and apply it locally

    Skipped entirely when a bundle2 pull already handled phases."""
    if 'phases' in pullop.stepsdone:
        return
    _pullapplyphases(pullop, pullop.remote.listkeys('phases'))
1052 1059
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state

    ``remotephases`` is the raw pushkey mapping from the remote. Phases only
    ever advance toward public here (via ``phases.advanceboundary``)."""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and unpublishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    # bind hot lookups to locals for the comprehensions below
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
1087 1094
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    # delegate the actual comparison/update to the bookmarks module
    bookmod.updatefromremote(pullop.repo.ui, pullop.repo,
                             pullop.remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)
1099 1106
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    Returns the pull transaction when one was created (or already open), so
    the calling code knows a transaction exists; returns None otherwise.

    Exists mostly to allow overriding for experimentation purposes."""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    if not obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        return None
    pullop.repo.ui.debug('fetching remote obsolete markers\n')
    remoteobs = pullop.remote.listkeys('obsolete')
    if 'dump0' not in remoteobs:
        # remote advertises no marker dumps: nothing to merge
        return None
    tr = pullop.gettransaction()
    # merge the dumps newest-key-first, mirroring the push ordering
    for key in sorted(remoteobs, reverse=True):
        if key.startswith('dump'):
            pullop.repo.obsstore.mergemarkers(tr,
                                              base85.b85decode(remoteobs[key]))
    pullop.repo.invalidatevolatilesets()
    return tr
1123 1130
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    # advertise the local bundle2 capabilities, percent-encoded for transport
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    return set(['HG20', 'bundle2=' + urllib.quote(capsblob)])
1130 1137
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}
1138 1145
def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The decorated function is added to the step -> function mapping and
    appended to the list of steps (or inserted at ``idx`` when given).
    Beware that decorated functions will be added in definition order (this
    may matter).

    You can only use this decorator for new steps; to wrap a step from an
    extension, update the getbundle2partsmapping dictionary directly."""
    def register(func):
        # refuse to silently overwrite an existing step
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return register
1157 1164
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kinds of parts)

    Could be a bundle HG10 or a bundle HG20 depending on the bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    change when more part types become available for bundle2.

    This is different from changegroup.getchangegroup, which only returns an
    HG10 changegroup bundle. They may eventually get reunited in the future
    when we have a clearer idea of the API we want to use to query different
    data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    # bundle10 case
    usebundle2 = False
    if bundlecaps is not None:
        # the client opts into bundle2 by advertising an HG2x capability
        usebundle2 = util.any((cap.startswith('HG2') for cap in bundlecaps))
    if not usebundle2:
        # legacy clients cannot express "no changegroup" or extra parts
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return changegroup.getchangegroup(repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps)

    # bundle20 case
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            # decode the percent-encoded capability blob from the client
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    # each registered part generator adds its part(s) to the bundler
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **kwargs)

    return util.chunkbuffer(bundler.getchunks())
1204 1211
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle

    Negotiates the changegroup version against the client's advertised
    'changegroup' bundle2 capability; no part is added when the client asked
    for ``cg=False``."""
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = None
        cgversions = b2caps.get('changegroup')
        if not cgversions: # 3.1 and 3.2 ship with an empty value
            cg = changegroup.getchangegroupraw(repo, source, heads=heads,
                                               common=common,
                                               bundlecaps=bundlecaps)
        else:
            # keep only versions both sides understand, pick the newest
            cgversions = [v for v in cgversions if v in changegroup.packermap]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = max(cgversions)
            cg = changegroup.getchangegroupraw(repo, source, heads=heads,
                                               common=common,
                                               bundlecaps=bundlecaps,
                                               version=version)

    if cg:
        part = bundler.newpart('changegroup', data=cg)
        if version is not None:
            part.addparam('version', version)
1232 1239
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    # one 'listkeys' part per namespace the client requested
    for namespace in kwargs.get('listkeys', ()):
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        part.data = pushkey.encodekeys(repo.listkeys(namespace).items())
1243 1250
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if not kwargs.get('obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    # only ship markers relevant to the changesets being bundled
    subset = [c.node() for c in repo.set('::%ln', heads)]
    buildobsmarkerspart(bundler, repo.obsstore.relevantmarkers(subset))
1254 1261
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = util.sha1(''.join(sorted(heads))).digest()
    # the client may send the literal 'force', the exact head list, or a
    # hash of the heads it based the bundle on
    if (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        return
    # someone else committed/pushed/unbundled while we
    # were transferring data
    raise error.PushRaced('repository changed while %s - '
                          'please try again' % context)
1268 1275
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    wlock = lock = tr = None
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # bundle2 stream: a reply (e.g. pushback bookmarks) may touch the
            # working copy, so take wlock before lock (issue4596)
            try:
                wlock = repo.wlock()
                lock = repo.lock()
                tr = repo.transaction(source)
                tr.hookargs['source'] = source
                tr.hookargs['url'] = url
                tr.hookargs['bundle2'] = '1'
                r = bundle2.processbundle(repo, cg, lambda: tr).reply
                tr.close()
            except Exception, exc:
                # tag the error so callers know it happened during bundle2
                exc.duringunbundle2 = True
                raise
        else:
            # legacy (bundle10) stream
            lock = repo.lock()
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        # release in reverse acquisition order: tr, then lock, then wlock
        lockmod.release(tr, lock, wlock)
    return r
@@ -1,112 +1,111 b''
1 1 $ cat > bundle2.py << EOF
2 2 > """A small extension to test bundle2 pushback parts.
3 3 > Current bundle2 implementation doesn't provide a way to generate those
4 4 > parts, so they must be created by extensions.
5 5 > """
6 6 > from mercurial import bundle2, pushkey, exchange, util
7 7 > def _newhandlechangegroup(op, inpart):
8 8 > """This function wraps the changegroup part handler for getbundle.
9 9 > It issues an additional pushkey part to send a new
10 10 > bookmark back to the client"""
11 11 > result = bundle2.handlechangegroup(op, inpart)
12 12 > if 'pushback' in op.reply.capabilities:
13 13 > params = {'namespace': 'bookmarks',
14 14 > 'key': 'new-server-mark',
15 15 > 'old': '',
16 16 > 'new': 'tip'}
17 17 > encodedparams = [(k, pushkey.encode(v)) for (k,v) in params.items()]
18 18 > op.reply.newpart('pushkey', mandatoryparams=encodedparams)
19 19 > else:
20 20 > op.reply.newpart('output', data='pushback not enabled')
21 21 > return result
22 22 > _newhandlechangegroup.params = bundle2.handlechangegroup.params
23 23 > bundle2.parthandlermapping['changegroup'] = _newhandlechangegroup
24 24 > EOF
25 25
26 26 $ cat >> $HGRCPATH <<EOF
27 27 > [ui]
28 28 > ssh = python "$TESTDIR/dummyssh"
29 29 > username = nobody <no.reply@example.com>
30 30 >
31 31 > [alias]
32 32 > tglog = log -G -T "{desc} [{phase}:{node|short}]"
33 33 > EOF
34 34
35 35 Set up server repository
36 36
37 37 $ hg init server
38 38 $ cd server
39 39 $ echo c0 > f0
40 40 $ hg commit -Am 0
41 41 adding f0
42 42
43 43 Set up client repository
44 44
45 45 $ cd ..
46 46 $ hg clone ssh://user@dummy/server client -q
47 47 $ cd client
48 48
49 49 Enable extension
50 50 $ cat >> $HGRCPATH <<EOF
51 51 > [extensions]
52 52 > bundle2=$TESTTMP/bundle2.py
53 53 > [experimental]
54 54 > bundle2-exp = True
55 55 > EOF
56 56
57 57 Without config
58 58
59 59 $ cd ../client
60 60 $ echo c1 > f1
61 61 $ hg commit -Am 1
62 62 adding f1
63 63 $ hg push
64 64 pushing to ssh://user@dummy/server
65 65 searching for changes
66 66 remote: pushback not enabled
67 67 remote: adding changesets
68 68 remote: adding manifests
69 69 remote: adding file changes
70 70 remote: added 1 changesets with 1 changes to 1 files
71 71 $ hg bookmark
72 72 no bookmarks set
73 73
74 74 $ cd ../server
75 75 $ hg tglog
76 76 o 1 [public:2b9c7234e035]
77 77 |
78 78 @ 0 [public:6cee5c8f3e5b]
79 79
80 80
81 81
82 82
83 83 With config
84 84
85 85 $ cd ../client
86 86 $ echo '[experimental]' >> .hg/hgrc
87 87 $ echo 'bundle2.pushback = True' >> .hg/hgrc
88 88 $ echo c2 > f2
89 89 $ hg commit -Am 2
90 90 adding f2
91 91 $ hg push
92 92 pushing to ssh://user@dummy/server
93 93 searching for changes
94 "wlock" acquired after "lock" at: */mercurial/bookmarks.py:259 (pushbookmark) (glob)
95 94 remote: adding changesets
96 95 remote: adding manifests
97 96 remote: adding file changes
98 97 remote: added 1 changesets with 1 changes to 1 files
99 98 $ hg bookmark
100 99 new-server-mark 2:0a76dfb2e179
101 100
102 101 $ cd ../server
103 102 $ hg tglog
104 103 o 2 [public:0a76dfb2e179]
105 104 |
106 105 o 1 [public:2b9c7234e035]
107 106 |
108 107 @ 0 [public:6cee5c8f3e5b]
109 108
110 109
111 110
112 111
General Comments 0
You need to be logged in to leave comments. Login now