##// END OF EJS Templates
push: prepare the issue of multiple kinds of messages...
Pierre-Yves David -
r22650:36952c91 default
parent child Browse files
Show More
@@ -1,1143 +1,1170 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex, nullid
10 10 import errno, urllib
11 11 import util, scmutil, changegroup, base85, error
12 12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 13
def readbundle(ui, fh, fname, vfs=None):
    """Return an unbundler object for the bundle read from ``fh``.

    ``fname`` is used in error messages only; ``vfs``, when given, is used
    to resolve ``fname`` to a full path. Raises util.Abort on a stream that
    is not a recognized Mercurial bundle.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        # a headerless stream starting with NUL is assumed to be an
        # uncompressed HG10 changegroup
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic = header[0:2]
    version = header[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '2X':
        return bundle2.unbundle20(ui, fh, header=magic + version)
    if version == '10':
        if alg is None:
            # compression algorithm follows the header for HG10 bundles
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39 39
def buildobsmarkerspart(bundler, markers):
    """add an obsmarker part to the bundler with <markers>

    No part is created if markers is empty.
    Raises ValueError if the bundler doesn't support any known obsmarker format.
    """
    if not markers:
        return None
    # pick the highest obsmarker format version shared with the remote
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(remoteversions)
    if version is None:
        raise ValueError('bundler do not support common obsmarker format')
    stream = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart('B2X:OBSMARKERS', data=stream)
54 54
class pushoperation(object):
    """An object that represents a single push operation

    Its purpose is to carry push related state and very common operations.

    A new one should be created at the beginning of each push and discarded
    afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(self.outgoing.common)
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads
152 152
# mapping of message used when pushing bookmark
# action name -> (success message, failure message); both messages take the
# bookmark name as their single %s argument
bookmsgmap = {'update': (_("updating bookmark %s\n"),
                         _('updating bookmark %s failed!\n')),
              'export': (_("exporting bookmark %s\n"),
                         _('exporting bookmark %s failed!\n')),
              'delete': (_("deleting remote bookmark %s\n"),
                         _('deleting remote bookmark %s failed!\n')),
              }
161
162
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return the pushoperation object; its ``cgresult``
    attribute holds an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    locallock = None
    try:
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # old-style push requires locking the remote repo
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
                                          False)
                and pushop.remote.capable('bundle2-exp')):
                _pushbundle2(pushop)
            # each of these steps is a no-op if already done through bundle2
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
    finally:
        if locallock is not None:
            locallock.release()

    if pushop.bookmarks:
        pushop.bkresult = bookmod.pushtoremote(repo.ui, repo, remote,
                                               pushop.bookmarks)

    return pushop
224 234
# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator for a function performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step, if you want to wrap a step
    from an extension, change the pushdiscovery dictionary directly."""
    def dec(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return dec
248 258
def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order."""
    for name in pushdiscoveryorder:
        pushdiscoverymapping[name](pushop)
254 264
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changeset that need to be pushed"""
    unfi = pushop.repo.unfiltered()
    commoninc = discovery.findcommonincoming(unfi, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(unfi, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
268 278
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    extracond = ''
    if not publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    # heads to push to public on success / on changeset-push failure
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
303 313
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """collect the obsolescence markers relevant to the pushed set"""
    if not obsolete._enabled:
        return
    repo = pushop.repo
    if not repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
314 324
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """discover which local bookmarks can be advanced on the remote

    Fills ``pushop.outbookmarks`` with (name, remote node, local node)
    tuples."""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # restrict to bookmarks that point into the pushed subset
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid = comp
    # only bookmarks the local side can fast-forward on the remote
    for b, scid, dcid in advsrc:
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
332 342
def _pushcheckoutgoing(pushop):
    """validate the set of outgoing changesets before pushing

    Returns False when there is nothing to push; aborts on troubled
    (obsolete/unstable/bumped/divergent) changesets or bad head changes
    unless --force was given."""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = {"unstable": _("push includes unstable changeset: %s!"),
                   "bumped": _("push includes bumped changeset: %s!"),
                   "divergent": _("push includes divergent changeset: %s!")}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(mst[ctx.troubles()[0]] % ctx)
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
367 377
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname):
    """decorator for a function generating a bundle2 part

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, change the b2partsgenmapping dictionary directly."""
    def dec(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        b2partsgenorder.append(stepname)
        return func
    return dec
391 401
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    if not pushop.force:
        bundler.newpart('B2X:CHECK:HEADS', data=iter(pushop.remoteheads))
    cg = changegroup.getlocalchangegroup(pushop.repo, 'push', pushop.outgoing)
    cgpart = bundler.newpart('B2X:CHANGEGROUP', data=cg.getchunks())
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
417 427
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if not 'b2x:pushkey' in b2caps:
        return
    pushop.stepsdone.add('phases')
    part2node = []
    enc = pushkey.encode
    # one pushkey part per head to turn public
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('b2x:pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
    def handlereply(op):
        # check the server reply for each pushkey part we sent
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
449 459
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add an obsmarkers part to the bundle when supported by the remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    supported = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(supported) is None:
        # no obsmarker format in common with the remote
        return
    pushop.stepsdone.add('obsmarkers')
    markers = pushop.outobsmarkers
    if markers:
        buildobsmarkerspart(bundler, markers)
460 470
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'b2x:pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode
    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('b2x:pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # remember which kind of change this part requests so the reply
        # handler can pick the matching message from bookmsgmap
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))


    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply
495 513
496 514
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo))
    bundler.newpart('b2x:replycaps', data=capsblob)
    replyhandlers = []
    # let every registered generator add its part(s); each may return a
    # callable to process the server reply
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push (only the replycaps part is present)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    try:
        op = bundle2.processbundle(pushop.repo, reply)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    for rephand in replyhandlers:
        rephand(op)
526 544
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                            or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
575 593
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed through bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        b2caps = bundle2.bundle2caps(pushop.remote)
        if 'b2x:pushkey' in b2caps:
            # server supports bundle2, let's do a batched push through it
            #
            # This will eventually be unified with the changesets bundle2 push
            bundler = bundle2.bundle20(pushop.ui, b2caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo))
            bundler.newpart('b2x:replycaps', data=capsblob)
            part2node = []
            enc = pushkey.encode
            for newremotehead in outdated:
                part = bundler.newpart('b2x:pushkey')
                part.addparam('namespace', enc('phases'))
                part.addparam('key', enc(newremotehead.hex()))
                part.addparam('old', enc(str(phases.draft)))
                part.addparam('new', enc(str(phases.public)))
                part2node.append((part.id, newremotehead))
            stream = util.chunkbuffer(bundler.getchunks())
            try:
                reply = pushop.remote.unbundle(stream, ['force'], 'push')
                op = bundle2.processbundle(pushop.repo, reply)
            except error.BundleValueError, exc:
                raise util.Abort('missing support for %s' % exc)
            for partid, node in part2node:
                partrep = op.records.getreplies(partid)
                results = partrep['pushkey']
                assert len(results) <= 1
                msg = None
                if not results:
                    msg = _('server ignored update of %s to public!\n') % node
                elif not int(results[0]['return']):
                    msg = _('updating %s to public failed!\n') % node
                if msg is not None:
                    pushop.ui.warn(msg)

        else:
            # fallback to independent pushkey command
            for newremotehead in outdated:
                r = pushop.remote.pushkey('phases',
                                          newremotehead.hex(),
                                          str(phases.draft),
                                          str(phases.public))
                if not r:
                    pushop.ui.warn(_('updating %s to public failed!\n')
                                   % newremotehead)
667 685
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.locallocked:
        tr = pushop.repo.transaction('push-phase-sync')
        try:
            phases.advanceboundary(pushop.repo, tr, phase, nodes)
            tr.close()
        finally:
            tr.release()
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)
686 704
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        rslts = []
        # markers are sent through pushkey, split into several keys if needed
        remotedata = obsolete._pushkeyescape(pushop.outobsmarkers)
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)
705 723
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        # classify the change to pick the right message from bookmsgmap
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value from an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
718 745
class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # do we force pull?
        self.force = force
        # the name the pull transaction
        self._trname = 'pull\n' + util.hidepassword(remote.url())
        # hold the transaction once created
        self._tr = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step remaining todo (related to future bundle2 usage)
        self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        """get appropriate pull transaction, creating it if needed"""
        if self._tr is None:
            self._tr = self.repo.transaction(self._trname)
        return self._tr

    def closetransaction(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def releasetransaction(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
785 812
def pull(repo, remote, heads=None, force=False, bookmarks=()):
    """pull changesets (plus phase/obsmarker/bookmark data) from remote

    Returns the changegroup pull result code (``pulloperation.cgresult``)."""
    pullop = pulloperation(repo, remote, heads, force)
    if pullop.remote.local():
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    remotebookmarks = remote.listkeys('bookmarks')
    lock = pullop.repo.lock()
    try:
        _pulldiscovery(pullop)
        if (pullop.repo.ui.configbool('experimental', 'bundle2-exp', False)
                and pullop.remote.capable('bundle2-exp')):
            _pullbundle2(pullop)
        # each step is skipped when already handled through bundle2
        if 'changegroup' in pullop.todosteps:
            _pullchangeset(pullop)
        if 'phases' in pullop.todosteps:
            _pullphase(pullop)
        if 'obsmarkers' in pullop.todosteps:
            _pullobsolete(pullop)
        pullop.closetransaction()
    finally:
        pullop.releasetransaction()
        lock.release()
    bookmod.updatefromremote(repo.ui, repo, remotebookmarks, remote.url())
    # update specified bookmarks
    if bookmarks:
        marks = repo._bookmarks
        writer = repo.ui.status
        if repo.ui.configbool('ui', 'quietbookmarkmove', False):
            writer = repo.ui.debug
        for b in bookmarks:
            # explicit pull overrides local bookmark if any
            writer(_("importing bookmark %s\n") % b)
            marks[b] = repo[remotebookmarks[b]].node()
        marks.write()

    return pullop.cgresult
827 854
def _pulldiscovery(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will grow to handle all
    discovery at some point."""
    common, fetch, rheads = discovery.findcommonincoming(
        pullop.repo.unfiltered(), pullop.remote,
        heads=pullop.heads, force=pullop.force)
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
838 865
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup.  Steps completed
    here are removed from ``pullop.todosteps`` so the legacy code paths
    in ``pull`` skip them."""
    remotecaps = bundle2.bundle2caps(pullop.remote)
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}
    # pulling changegroup
    pullop.todosteps.remove('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch
    if 'b2x:listkeys' in remotecaps:
        # piggyback phase retrieval on the same round trip
        kwargs['listkeys'] = ['phase']
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete._enabled:
        remoteversions = bundle2.obsmarkersversion(remotecaps)
        # only request markers if we share a marker format with the remote
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.todosteps.remove('obsmarkers')
    # let extensions add/alter arguments before the request goes out
    _pullbundle2extraprepare(pullop, kwargs)
    if kwargs.keys() == ['format']:
        return # nothing to pull
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)

    if pullop.fetch:
        # exactly one changegroup part was requested, so exactly one record
        assert len(op.records['changegroup']) == 1
        pullop.cgresult = op.records['changegroup'][0]['return']

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)
881 908
882 909 def _pullbundle2extraprepare(pullop, kwargs):
883 910 """hook function so that extensions can extend the getbundle call"""
884 911 pass
885 912
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    pullop.todosteps.remove('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    # capability ladder: prefer 'getbundle', then full 'changegroup',
    # then 'changegroupsubset' for partial pulls
    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 pullop.remote.url())
917 944
def _pullphase(pullop):
    """fetch the remote 'phases' pushkey namespace and apply it locally"""
    remotephases = pullop.remote.listkeys('phases')
    _pullapplyphases(pullop, remotephases)
922 949
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state

    ``remotephases`` is a pushkey-style mapping as returned by
    ``listkeys('phases')``."""
    pullop.todosteps.remove('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and unpublishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    # bind hot lookups to locals for the filtering passes below
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        # only open a transaction when there is an actual boundary move
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
955 982
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is function that return the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code that
    a new transaction have been created (when applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    pullop.todosteps.remove('obsmarkers')
    tr = None
    if obsolete._enabled:
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        # 'dump0' is always present when the remote has any markers, so
        # only open a transaction when there is data to merge
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = base85.b85decode(remoteobs[key])
                    pullop.repo.obsstore.mergemarkers(tr, data)
            pullop.repo.invalidatevolatilesets()
    return tr
977 1004
def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    return set(['HG2X', 'bundle2=' + urllib.quote(capsblob)])
984 1011
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}
992 1019
def getbundle2partsgenerator(stepname):
    """decorator for function generating bundle2 part for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the getbundle2partsmapping dictionary directly."""
    def register(func):
        # a step name may only ever be registered once
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        getbundle2partsorder.append(stepname)
        return func
    return register
1008 1035
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kind of parts)

    Could be a bundle HG10 or a bundle HG2X depending on bundlecaps
    passed. For now, the bundle can contain only changegroup, but this will
    changes when more part type will be available for bundle2.

    This is different from changegroup.getchangegroup that only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when we
    have a clearer idea of the API we what to query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    # bundle10 case
    if bundlecaps is None or 'HG2X' not in bundlecaps:
        # the legacy format can only carry a changegroup; any other
        # requested part is an error
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return changegroup.getchangegroup(repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps)

    # bundle20 case
    # decode the client's advertised bundle2 capabilities from bundlecaps
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    # run every registered part generator, in registration order
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        kwargs['heads'] = heads
        kwargs['common'] = common
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **kwargs)

    return util.chunkbuffer(bundler.getchunks())
1051 1078
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    if not kwargs.get('cg', True):
        # caller explicitly opted out of a changegroup
        return
    # build changegroup bundle here.
    cg = changegroup.getchangegroup(repo, source, heads=heads,
                                    common=common, bundlecaps=bundlecaps)
    if cg:
        bundler.newpart('b2x:changegroup', data=cg.getchunks())
1064 1091
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    # one 'b2x:listkeys' part per requested namespace
    for namespace in kwargs.get('listkeys', ()):
        part = bundler.newpart('b2x:listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)
1075 1102
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if not kwargs.get('obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    # markers relevant to every ancestor of the requested heads
    subset = [ctx.node() for ctx in repo.set('::%ln', heads)]
    markers = repo.obsstore.relevantmarkers(subset)
    buildobsmarkerspart(bundler, markers)
1086 1113
@getbundle2partsgenerator('extra')
def _getbundleextrapart(bundler, repo, source, bundlecaps=None,
                        b2caps=None, **kwargs):
    """hook function to let extensions add parts to the requested bundle"""
1092 1119
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    currentheads = repo.heads()
    digest = util.sha1(''.join(sorted(currentheads))).digest()
    unchanged = (their_heads == ['force']
                 or their_heads == currentheads
                 or their_heads == ['hashed', digest])
    if not unchanged:
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)
1106 1133
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced as PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    tr = None
    lock = repo.lock()
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # only bundle2 streams carry a 'params' attribute
            try:
                tr = repo.transaction('unbundle')
                tr.hookargs['bundle2-exp'] = '1'
                r = bundle2.processbundle(repo, cg, lambda: tr).reply
                cl = repo.unfiltered().changelog
                # expose pending (not yet committed) changes to the
                # pre-close hook via the 'pending' argument
                p = cl.writepending() and repo.root or ""
                repo.hook('b2x-pretransactionclose', throw=True, source=source,
                          url=url, pending=p, **tr.hookargs)
                tr.close()
                repo.hook('b2x-transactionclose', source=source, url=url,
                          **tr.hookargs)
            except Exception, exc:
                # tag the exception so callers can tell the failure
                # happened while processing a bundle2 stream
                exc.duringunbundle2 = True
                raise
        else:
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        if tr is not None:
            # no-op if tr.close() already ran; rolls back otherwise
            tr.release()
        lock.release()
    return r
General Comments 0
You need to be logged in to leave comments. Login now