##// END OF EJS Templates
push: restore contents of HG_URL for hooks (issue4268)
Matt Mackall -
r21761:b2dc026a stable
parent child Browse files
Show More
@@ -1,765 +1,765
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex, nullid
10 10 import errno, urllib
11 11 import util, scmutil, changegroup, base85, error
12 12 import discovery, phases, obsolete, bookmarks, bundle2
13 13
def readbundle(ui, fh, fname, vfs=None):
    """Identify the bundle type of stream `fh` and return an unbundler for it.

    The first four bytes of the stream select the format. A headerless
    stream (first byte is NUL) is re-wrapped and treated as an
    uncompressed HG10 bundle. `fname` is used for error messages only
    ("stream" when empty); when `vfs` is given, `fname` is expanded to a
    full path through it.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            # headerless bundle: push the 4 bytes we already consumed
            # back onto the stream and assume uncompressed HG10
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            # HG10 carries a 2-byte compression code after the header
            alg = changegroup.readexactly(fh, 2)
        return changegroup.unbundle10(fh, alg)
    elif version == '2X':
        return bundle2.unbundle20(ui, fh, header=magic + version)
    else:
        raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39 39
40 40
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push-related state and very common operations.

    A new instance should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # Integer version of the push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.ret = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # set of all heads common after changeset bundle push
        self.commonheads = None
79 79
def push(repo, remote, force=False, revs=None, newbranch=False):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch)
    if pushop.remote.local():
        # local-to-local push: refuse if destination lacks required features
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    locallock = None
    try:
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # old-style push: we must lock the remote ourselves
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if _pushcheckoutgoing(pushop):
                pushop.repo.prepushoutgoinghooks(pushop.repo,
                                                 pushop.remote,
                                                 pushop.outgoing)
                if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
                                              False)
                    and pushop.remote.capable('bundle2-exp')):
                    _pushbundle2(pushop)
                else:
                    _pushchangeset(pushop)
            _pushcomputecommonheads(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
        finally:
            if lock is not None:
                lock.release()
    finally:
        if locallock is not None:
            locallock.release()

    # bookmarks are pushed outside the locks: they are best-effort
    _pushbookmark(pushop)
    return pushop.ret
153 153
def _pushdiscovery(pushop):
    """Run changeset discovery against the remote.

    Fills in pushop.outgoing (common/missing data), pushop.remoteheads
    and pushop.incoming for later push steps.
    """
    # discovery
    unfi = pushop.repo.unfiltered()
    fci = discovery.findcommonincoming
    commoninc = fci(unfi, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
166 166
def _pushcheckoutgoing(pushop):
    """Validate the outgoing changesets before pushing.

    Aborts when an outgoing head is obsolete or troubled, and delegates
    new-remote-head checking to discovery.checkheads (unless --force).
    Returns False when there is nothing to push, True otherwise.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are split out for 80-char-limit reasons
            mso = _("push includes obsolete changeset: %s!")
            mst = "push includes %s changeset: %s!"
            # plain versions for the i18n tool to detect them
            _("push includes unstable changeset: %s!")
            _("push includes bumped changeset: %s!")
            _("push includes divergent changeset: %s!")
            # If there is at least one obsolete or unstable
            # changeset in missing, at least one of the
            # missingheads will be obsolete or unstable.
            # So checking heads only is ok.
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(_(mst)
                                     % (ctx.troubles()[0],
                                        ctx))
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
205 205
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    # Send known heads to the server for race detection.
    capsblob = urllib.unquote(pushop.remote.capable('bundle2-exp'))
    caps = bundle2.decodecaps(capsblob)
    bundler = bundle2.bundle20(pushop.ui, caps)
    # create reply capability
    capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
    bundler.addpart(bundle2.bundlepart('b2x:replycaps', data=capsblob))
    if not pushop.force:
        # server aborts if its heads changed since discovery
        part = bundle2.bundlepart('B2X:CHECK:HEADS',
                                  data=iter(pushop.remoteheads))
        bundler.addpart(part)
    extrainfo = _pushbundle2extraparts(pushop, bundler)
    # add the changegroup bundle
    cg = changegroup.getlocalbundle(pushop.repo, 'push', pushop.outgoing)
    cgpart = bundle2.bundlepart('B2X:CHANGEGROUP', data=cg.getchunks())
    bundler.addpart(cgpart)
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
    except bundle2.UnknownPartError, exc:
        raise util.Abort('missing support for %s' % exc)
    try:
        # process the server's reply bundle locally
        op = bundle2.processbundle(pushop.repo, reply)
    except bundle2.UnknownPartError, exc:
        raise util.Abort('missing support for %s' % exc)
    cgreplies = op.records.getreplies(cgpart.id)
    assert len(cgreplies['changegroup']) == 1
    pushop.ret = cgreplies['changegroup'][0]['return']
    _pushbundle2extrareply(pushop, op, extrainfo)
240 240
def _pushbundle2extraparts(pushop, bundler):
    """hook function to let extensions add parts

    Return a dict to let extensions pass data to the reply processing.
    """
    return {}
247 247
def _pushbundle2extrareply(pushop, op, extrainfo):
    """hook function to let extensions react to part replies

    The dict from _pushbundle2extraparts is fed to this function.
    """
    pass
254 254
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo.

    Sets pushop.ret to the remote's addchangegroup() return value
    (or 0 on HTTP error when using unbundle).
    """
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.bundle10(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing,
                                        bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        # Pass the source repo url (not the literal 'push') so remote
        # hooks receive the proper HG_URL value (issue4268).
        pushop.ret = pushop.remote.unbundle(cg, remoteheads,
                                            pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.ret = pushop.remote.addchangegroup(cg, 'push', pushop.repo.url())
294 294
def _pushcomputecommonheads(pushop):
    """Compute pushop.commonheads: heads common to both sides after the push."""
    unfi = pushop.repo.unfiltered()
    if pushop.ret:
        # push succeeded, synchronize target of the push
        cheads = pushop.outgoing.missingheads
    elif pushop.revs is None:
        # All-out push failed. synchronize all common
        cheads = pushop.outgoing.commonheads
    else:
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(pushop.outgoing.common)
        nm = pushop.repo.changelog.nodemap
        cheads = [node for node in pushop.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          pushop.outgoing.commonheads,
                          pushop.outgoing.missing)
        cheads.extend(c.node() for c in revset)
    pushop.commonheads = cheads
328 328
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    unfi = pushop.repo.unfiltered()
    cheads = pushop.commonheads
    # NOTE(review): the block below recomputes cheads with the same
    # logic as _pushcomputecommonheads, overwriting the value read
    # just above in every branch — looks like transitional duplication.
    if pushop.ret:
        # push succeeded, synchronize target of the push
        cheads = pushop.outgoing.missingheads
    elif pushop.revs is None:
        # All-out push failed. synchronize all common
        cheads = pushop.outgoing.commonheads
    else:
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(pushop.outgoing.common)
        nm = pushop.repo.changelog.nodemap
        cheads = [node for node in pushop.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          pushop.outgoing.commonheads,
                          pushop.outgoing.missing)
        cheads.extend(c.node() for c in revset)
    pushop.commonheads = cheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.ret is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        # Get the list of all revs draft on remote by public here.
        # XXX Beware that revset break if droots is not strictly
        # XXX root we may want to ensure it is but it is costly
        outdated = unfi.set('heads((%ln::%ln) and public())',
                            droots, cheads)
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
408 408
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo

    When the source repo could not be locked, no phase is changed;
    a status message is emitted instead if any move was applicable.
    """
    if pushop.locallocked:
        phases.advanceboundary(pushop.repo, phase, nodes)
    else:
        # repo is not locked, do not change any phases!
        # Inform the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)
422 422
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    if (obsolete._enabled and repo.obsstore and
        'obsolete' in remote.listkeys('namespaces')):
        rslts = []
        remotedata = repo.listkeys('obsolete')
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            # best-effort: warn but do not abort the push
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)
439 439
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    revnums = map(repo.changelog.rev, pushop.revs or [])
    ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)]
    (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
     ) = bookmarks.compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
                           srchex=hex)

    for b, scid, dcid in advsrc:
        # when pushing a subset of revs, only advance bookmarks that
        # point into the pushed set
        if ancestors and repo[scid].rev() not in ancestors:
            continue
        if remote.pushkey('bookmarks', b, dcid, scid):
            ui.status(_("updating bookmark %s\n") % b)
        else:
            ui.warn(_('updating bookmark %s failed!\n') % b)
459 459
class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull-related state and very common operations.

    A new instance should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # do we force pull?
        self.force = force
        # the name of the pull transaction
        self._trname = 'pull\n' + util.hidepassword(remote.url())
        # hold the transaction once created
        self._tr = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps remaining todo (related to future bundle2 usage)
        self.todosteps = set(['changegroup', 'phases', 'obsmarkers'])

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        """get appropriate pull transaction, creating it if needed"""
        if self._tr is None:
            self._tr = self.repo.transaction(self._trname)
        return self._tr

    def closetransaction(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def releasetransaction(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
526 526
def pull(repo, remote, heads=None, force=False):
    """Pull changesets (limited by heads) from remote into repo.

    Returns the changegroup result code (see addchangegroup); falls
    back from bundle2 to the legacy per-step pull when the remote does
    not advertise bundle2-exp.
    """
    pullop = pulloperation(repo, remote, heads, force)
    if pullop.remote.local():
        # local-to-local pull: refuse if source requires unsupported features
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    lock = pullop.repo.lock()
    try:
        _pulldiscovery(pullop)
        if (pullop.repo.ui.configbool('experimental', 'bundle2-exp', False)
            and pullop.remote.capable('bundle2-exp')):
            _pullbundle2(pullop)
        # steps already handled by bundle2 are removed from todosteps
        if 'changegroup' in pullop.todosteps:
            _pullchangeset(pullop)
        if 'phases' in pullop.todosteps:
            _pullphase(pullop)
        if 'obsmarkers' in pullop.todosteps:
            _pullobsolete(pullop)
        pullop.closetransaction()
    finally:
        pullop.releasetransaction()
        lock.release()

    return pullop.cgresult
555 555
def _pulldiscovery(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will change to handle
    all discovery at some point.
    """
    tmp = discovery.findcommonincoming(pullop.repo.unfiltered(),
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    pullop.common, pullop.fetch, pullop.rheads = tmp
566 566
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    kwargs = {'bundlecaps': set(['HG2X'])}
    # advertise our own bundle2 capabilities to the server
    capsblob = bundle2.encodecaps(pullop.repo.bundle2caps)
    kwargs['bundlecaps'].add('bundle2=' + urllib.quote(capsblob))
    # pulling changegroup
    pullop.todosteps.remove('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    _pullbundle2extraprepare(pullop, kwargs)
    if kwargs.keys() == ['format']:
        return # nothing to pull
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except bundle2.UnknownPartError, exc:
        raise util.Abort('missing support for %s' % exc)

    if pullop.fetch:
        assert len(op.records['changegroup']) == 1
        pullop.cgresult = op.records['changegroup'][0]['return']
597 597
def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""
    pass
601 601
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    pullop.todosteps.remove('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise util.Abort(_("partial pull cannot be done because "
                           "other repository doesn't support "
                           "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
                                                 pullop.remote.url())
633 633
def _pullphase(pullop):
    """Synchronise phase information pulled from the remote."""
    # Get remote phases data from remote
    pullop.todosteps.remove('phases')
    remotephases = pullop.remote.listkeys('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        phases.advanceboundary(pullop.repo, phases.public, pheads)
        phases.advanceboundary(pullop.repo, phases.draft,
                               pullop.pulledsubset)
    else:
        # Remote is old or publishing: all common changesets
        # should be seen as public
        phases.advanceboundary(pullop.repo, phases.public,
                               pullop.pulledsubset)
652 652
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    A transaction is created on demand via pullop.gettransaction; it is
    returned so the calling code knows whether a new transaction has
    been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes.
    """
    pullop.todosteps.remove('obsmarkers')
    tr = None
    if obsolete._enabled:
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = base85.b85decode(remoteobs[key])
                    pullop.repo.obsstore.mergemarkers(tr, data)
            pullop.repo.invalidatevolatilesets()
    return tr
674 674
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kinds of parts)

    Could be a bundle HG10 or a bundle HG2X depending on the bundlecaps
    passed. For now, the bundle can contain only changegroup, but this
    will change when more part types become available for bundle2.

    This is different from changegroup.getbundle, which only returns an
    HG10 changegroup bundle. They may eventually get reunited in the
    future when we have a clearer idea of the API we want to use to
    query different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    # build changegroup bundle here.
    cg = changegroup.getbundle(repo, source, heads=heads,
                               common=common, bundlecaps=bundlecaps)
    if bundlecaps is None or 'HG2X' not in bundlecaps:
        # plain HG10 requested: return the changegroup directly
        return cg
    # very crude first implementation,
    # the bundle API will change and the generation will be done lazily.
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)
    if cg:
        part = bundle2.bundlepart('b2x:changegroup', data=cg.getchunks())
        bundler.addpart(part)
    _getbundleextrapart(bundler, repo, source, heads=heads, common=common,
                        bundlecaps=bundlecaps, **kwargs)
    return util.chunkbuffer(bundler.getchunks())
709 709
def _getbundleextrapart(bundler, repo, source, heads=None, common=None,
                        bundlecaps=None, **kwargs):
    """hook function to let extensions add parts to the requested bundle"""
    pass
714 714
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling. `their_heads` may be the literal
    ['force'], the raw head list, or ['hashed', <sha1 of sorted heads>].
    Raises error.PushRaced on mismatch.
    """
    heads = repo.heads()
    heads_hash = util.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)
728 728
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application
    and has a mechanism to check that no push race occurred between the
    creation of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    tr = None
    lock = repo.lock()
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            # bundle2 stream (only bundle2 objects have 'params')
            try:
                tr = repo.transaction('unbundle')
                tr.hookargs['bundle2-exp'] = '1'
                r = bundle2.processbundle(repo, cg, lambda: tr).reply
                cl = repo.unfiltered().changelog
                p = cl.writepending() and repo.root or ""
                repo.hook('b2x-pretransactionclose', throw=True, source=source,
                          url=url, pending=p, **tr.hookargs)
                tr.close()
                repo.hook('b2x-transactionclose', source=source, url=url,
                          **tr.hookargs)
            except Exception, exc:
                # tag the exception so callers know the failure happened
                # during bundle2 processing
                exc.duringunbundle2 = True
                raise
        else:
            r = changegroup.addchangegroup(repo, cg, source, url)
    finally:
        if tr is not None:
            tr.release()
        lock.release()
    return r
General Comments 0
You need to be logged in to leave comments. Login now