##// END OF EJS Templates
exchange: fix pyflakes import complaint
Matt Mackall -
r20974:ef377f2e default
parent child Browse files
Show More
@@ -1,648 +1,647 b''
# exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 import sys
9 8 from i18n import _
10 9 from node import hex, nullid
11 10 import cStringIO
12 11 import errno
13 12 import util, scmutil, changegroup, base85
14 13 import discovery, phases, obsolete, bookmarks, bundle2
15 14
16 15
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push-related state and very common operations.

    A new instance should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired? (None until the attempt is made)
        self.locallocked = None
        # Integer version of the push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.ret = None
        # discovery.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # set of all heads common after changeset bundle push
        self.commonheads = None
def push(repo, remote, force=False, revs=None, newbranch=False):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch)
    if pushop.remote.local():
        # pushing to a local peer: refuse early if the destination cannot
        # represent this repository's requirements
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    locallock = None
    try:
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # the peer cannot apply a bundle on its own; we must hold its
            # lock for the whole addchangegroup-style push
            lock = pushop.remote.lock()
        try:
            # push steps, in order: discovery, changegroup transfer,
            # common-head computation, then phase/obsmarker sync
            _pushdiscovery(pushop)
            if _pushcheckoutgoing(pushop):
                _pushchangeset(pushop)
            _pushcomputecommonheads(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
        finally:
            if lock is not None:
                lock.release()
    finally:
        if locallock is not None:
            locallock.release()

    # bookmarks are updated outside the locks
    _pushbookmark(pushop)
    return pushop.ret
121 120
def _pushdiscovery(pushop):
    """Run changeset discovery against the remote and record the results.

    Fills in ``pushop.outgoing``, ``pushop.remoteheads`` and
    ``pushop.incoming``.
    """
    unfiltered = pushop.repo.unfiltered()
    commoninc = discovery.findcommonincoming(unfiltered, pushop.remote,
                                             force=pushop.force)
    _common, incoming, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(unfiltered, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = incoming
def _pushcheckoutgoing(pushop):
    """Validate the outgoing changesets before pushing them.

    Returns False when there is nothing to push, True otherwise.
    Raises util.Abort when a non-forced push would publish obsolete or
    troubled changesets, or would create unwanted remote heads.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # messages are pre-built here to stay within the 80-char limit
            mso = _("push includes obsolete changeset: %s!")
            mst = "push includes %s changeset: %s!"
            # plain versions for the i18n tool to detect them (the actual
            # message is built dynamically from ctx.troubles() below)
            _("push includes unstable changeset: %s!")
            _("push includes bumped changeset: %s!")
            _("push includes divergent changeset: %s!")
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(_(mst)
                                     % (ctx.troubles()[0],
                                        ctx))
        # refuse pushes that would create new remote heads, unless a
        # bookmark from the 'bookmarks.pushing' config explains them
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
173 172
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    Builds a changegroup from the outgoing changesets and applies it on
    the remote, storing the remote's result code on ``pushop.ret``.
    """
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                            or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.bundle10(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
                                        bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.ret = pushop.remote.unbundle(cg, remoteheads,
                                            'push')
    else:
        # we return an integer indicating remote head count
        # change
        pushop.ret = pushop.remote.addchangegroup(cg, 'push', pushop.repo.url())
213 212
def _pushcomputecommonheads(pushop):
    """Compute the heads common to both sides after the push attempt.

    The result is stored on ``pushop.commonheads`` and is later used for
    phase synchronisation.
    """
    unfi = pushop.repo.unfiltered()
    if pushop.ret:
        # push succeeded: everything we targeted is now shared
        cheads = pushop.outgoing.missingheads
    elif pushop.revs is None:
        # a full push failed: only what was already common stays common
        cheads = pushop.outgoing.commonheads
    else:
        # Partial push. We want:
        #   cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changesets filtered out)
        #
        # This can be expressed as:
        #   cheads = ( (missingheads and ::commonheads)
        #            + (commonheads and ::missingheads))
        #
        # While trying to push we already computed the following:
        #   common = (::commonheads)
        #   missing = ((commonheads::missingheads) - commonheads)
        #
        # So we can pick:
        # * the part of missingheads that is in common (::commonheads)
        commonset = set(pushop.outgoing.common)
        nodemap = pushop.repo.changelog.nodemap
        cheads = [node for node in pushop.revs if nodemap[node] in commonset]
        # and
        # * the commonheads that are parents of missing changesets
        rset = unfi.set('%ln and parents(roots(%ln))',
                        pushop.outgoing.commonheads,
                        pushop.outgoing.missing)
        cheads.extend(ctx.node() for ctx in rset)
    pushop.commonheads = cheads
247 246
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely

    Relies on ``pushop.commonheads``, which ``push()`` fills in by
    running _pushcomputecommonheads right before this step.  (The
    previous version recomputed that whole set here, verbatim, and
    overwrote the value it had just read - pure duplication, removed.)
    """
    unfi = pushop.repo.unfiltered()
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.ret is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only repo
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        # Get the list of all revs draft on remote by public here.
        # XXX Beware that revset break if droots is not strictly
        # XXX root we may want to ensure it is but it is costly
        outdated = unfi.set('heads((%ln::%ln) and public())',
                            droots, cheads)
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
327 326
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.locallocked:
        phases.advanceboundary(pushop.repo, phase, nodes)
        return
    # repo is not locked, do not change any phases!
    # Inform the user that phases should have been moved when applicable.
    repo = pushop.repo
    wouldmove = [n for n in nodes if phase < repo[n].phase()]
    if wouldmove:
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n')
                         % phases.phasenames[phase])
341 340
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    if not (obsolete._enabled and repo.obsstore and
            'obsolete' in remote.listkeys('namespaces')):
        return
    localdata = repo.listkeys('obsolete')
    # reverse sort to ensure we end with dump0
    results = [remote.pushkey('obsolete', key, '', localdata[key])
               for key in sorted(localdata, reverse=True)]
    if not all(results):
        repo.ui.warn(_('failed to push some obsolete markers!\n'))
358 357
def _pushbookmark(pushop):
    """Update bookmark position on remote

    Advances remote bookmarks that moved forward locally (advsrc), but
    only when the pushed revisions actually contain the new target.
    """
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    revnums = map(repo.changelog.rev, pushop.revs or [])
    # materialize the ancestor set once; list() replaces the identity
    # comprehension [a for a in ...] of the previous version
    ancestors = list(repo.changelog.ancestors(revnums, inclusive=True))
    (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
     ) = bookmarks.compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
                           srchex=hex)

    for b, scid, dcid in advsrc:
        # when pushing a subset of revisions, skip bookmarks whose new
        # target was not part of the push
        if ancestors and repo[scid].rev() not in ancestors:
            continue
        if remote.pushkey('bookmarks', b, dcid, scid):
            ui.status(_("updating bookmark %s\n") % b)
        else:
            ui.warn(_('updating bookmark %s failed!\n') % b)
378 377
class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull-related state and very common operations.

    A new instance should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # do we force pull?
        self.force = force
        # the name of the pull transaction
        self._trname = 'pull\n' + util.hidepassword(remote.url())
        # hold the transaction once created (lazily, see gettransaction)
        self._tr = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps remaining todo (related to future bundle2 usage)
        self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled everything possible:
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset:
            # sync on this subset
            return self.heads

    def gettransaction(self):
        """get appropriate pull transaction, creating it if needed"""
        if self._tr is None:
            self._tr = self.repo.transaction(self._trname)
        return self._tr

    def closetransaction(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def releasetransaction(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
445 444
def pull(repo, remote, heads=None, force=False):
    """Pull changesets (limited by heads) from remote into repo.

    Returns the changegroup result code (see addchangegroup), or None
    when no changegroup was pulled.
    """
    pullop = pulloperation(repo, remote, heads, force)
    if pullop.remote.local():
        # pulling from a local peer: refuse early if this repository
        # cannot represent the source's requirements
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    lock = pullop.repo.lock()
    try:
        _pulldiscovery(pullop)
        # bundle2 may handle several steps at once; steps it did not
        # handle remain in pullop.todosteps for the legacy path below
        if pullop.remote.capable('bundle2'):
            _pullbundle2(pullop)
        if 'changegroup' in pullop.todosteps:
            _pullchangeset(pullop)
        if 'phases' in pullop.todosteps:
            _pullphase(pullop)
        if 'obsmarkers' in pullop.todosteps:
            _pullobsolete(pullop)
        pullop.closetransaction()
    finally:
        pullop.releasetransaction()
        lock.release()

    return pullop.cgresult
473 472
def _pulldiscovery(pullop):
    """discovery phase for the pull

    Only changeset discovery is handled for now; this will eventually
    cover all discovery."""
    common, fetch, rheads = discovery.findcommonincoming(
        pullop.repo.unfiltered(),
        pullop.remote,
        heads=pullop.heads,
        force=pullop.force)
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
484 483
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    kwargs = {'bundlecaps': set(['HG20'])}
    # pulling changegroup
    pullop.todosteps.remove('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        kwargs['common'] = pullop.common
        kwargs['heads'] = pullop.heads or pullop.rheads
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    # NOTE(review): kwargs always contains 'bundlecaps', so this equality
    # can seemingly never hold - looks like dead code; confirm before
    # removing.
    if kwargs.keys() == ['format']:
        return # nothing to pull
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except KeyError, exc:
        raise util.Abort('missing support for %s' % exc)
    # exactly one changegroup part is expected for now
    assert len(op.records['changegroup']) == 1
    pullop.cgresult = op.records['changegroup'][0]['return']
509 508
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    pullop.todosteps.remove('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    # pick the transfer method from the remote's capabilities, newest
    # first: getbundle > plain changegroup > changegroupsubset
    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 pullop.remote.url())
541 540
def _pullphase(pullop):
    """synchronise local phases with the phase data advertised by remote"""
    pullop.todosteps.remove('phases')
    remotephases = pullop.remote.listkeys('phases')
    if remotephases and not remotephases.get('publishing', False):
        # remote is new and non-publishing: only the heads it reports as
        # public become public locally; the rest of the pulled subset
        # advances to at most draft
        subset = pullop.pulledsubset
        pheads, _droots = phases.analyzeremotephases(pullop.repo, subset,
                                                     remotephases)
        phases.advanceboundary(pullop.repo, phases.public, pheads)
        phases.advanceboundary(pullop.repo, phases.draft, subset)
    else:
        # remote is old or publishing: all common changesets are public
        phases.advanceboundary(pullop.repo, phases.public,
                               pullop.pulledsubset)
560 559
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    A pull transaction is created lazily (through pullop.gettransaction)
    only when there is marker data to store; the transaction, or None,
    is returned so callers know whether one was opened.

    Exists mostly to allow overriding for experimentation purpose"""
    pullop.todosteps.remove('obsmarkers')
    tr = None
    if not obsolete._enabled:
        return tr
    pullop.repo.ui.debug('fetching remote obsolete markers\n')
    remoteobs = pullop.remote.listkeys('obsolete')
    if 'dump0' in remoteobs:
        tr = pullop.gettransaction()
        # reverse sort so that we end with dump0
        for key in sorted(remoteobs, reverse=True):
            if key.startswith('dump'):
                raw = base85.b85decode(remoteobs[key])
                pullop.repo.obsstore.mergemarkers(tr, raw)
        pullop.repo.invalidatevolatilesets()
    return tr
582 581
def getbundle(repo, source, heads=None, common=None, bundlecaps=None):
    """return a full bundle (with potentially multiple kind of parts)

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed. For now, the bundle can contain only changegroup, but this
    will change when more part types become available for bundle2.

    This is different from changegroup.getbundle that only returns an HG10
    changegroup bundle. They may eventually get reunited in the future when
    we have a clearer idea of the API we want to query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    # build bundle here.
    cg = changegroup.getbundle(repo, source, heads=heads,
                               common=common, bundlecaps=bundlecaps)
    if bundlecaps is None or 'HG20' not in bundlecaps:
        return cg
    # very crude first implementation,
    # the bundle API will change and the generation will be done lazily.
    bundler = bundle2.bundle20(repo.ui)
    tempname = changegroup.writebundle(cg, None, 'HG10UN')
    # read the bundle back in binary mode, closing the handle instead of
    # leaking it (previous code left the file object unclosed)
    f = open(tempname, 'rb')
    try:
        data = f.read()
    finally:
        f.close()
    part = bundle2.part('changegroup', data=data)
    bundler.addpart(part)
    temp = cStringIO.StringIO()
    for c in bundler.getchunks():
        temp.write(c)
    temp.seek(0)
    return bundle2.unbundle20(repo.ui, temp)
614 613
class PushRaced(RuntimeError):
    """An exception raised during unbundling that indicates a push race
    (the repository changed while data was in flight)."""
617 616
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.  Raises PushRaced when the heads seen
    by the client no longer match the repository's current heads.
    """
    heads = repo.heads()
    heads_hash = util.sha1(''.join(sorted(heads))).digest()
    if (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        return
    # someone else committed/pushed/unbundled while we
    # were transferring data
    raise PushRaced('repository changed while %s - '
                    'please try again' % context)
631 630
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application
    and has a mechanism to check that no push race occurred between the
    creation of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    lock = repo.lock()
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        return changegroup.addchangegroup(repo, cg, source, url)
    finally:
        lock.release()
General Comments 0
You need to be logged in to leave comments. Login now