##// END OF EJS Templates
streamclone: refactor maybeperformstreamclone to take a pullop...
Gregory Szorc -
r26458:36279329 default
parent child Browse files
Show More
@@ -1,1481 +1,1479 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex, nullid
10 10 import errno, urllib
11 11 import util, scmutil, changegroup, base85, error
12 12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 13 import lock as lockmod
14 14 import streamclone
15 15 import tags
16 16
def readbundle(ui, fh, fname, vfs=None):
    """Sniff a bundle stream's 4-byte magic and return a matching unbundler.

    ``fname`` is only used for error reporting; when empty it defaults to
    "stream" and a headerless (bare changegroup) stream is tolerated.
    ``vfs``, when given, is used to resolve ``fname`` to a full path for
    error messages.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        # A stream that does not open with 'HG' but begins with NUL bytes is
        # a bare, uncompressed changegroup: wrap it so it parses as HG10.
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic = header[0:2]
    version = header[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        # HG10 carries a two-byte compression marker after the magic, unless
        # we already forced 'UN' for a headerless stream above.
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    if version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
42 42
def buildobsmarkerspart(bundler, markers):
    """Add an 'obsmarkers' part carrying <markers> to ``bundler``.

    Returns the new part, or None when ``markers`` is empty (no part is
    created in that case).  Raises ValueError if the bundler supports no
    obsmarker format we know how to encode.
    """
    if not markers:
        return None
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(remoteversions)
    if version is None:
        raise ValueError('bundler do not support common obsmarker format')
    stream = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart('obsmarkers', data=stream)
57 57
58 58 def _canusebundle2(op):
59 59 """return true if a pull/push can use bundle2
60 60
61 61 Feel free to nuke this function when we drop the experimental option"""
62 62 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
63 63 and op.remote.capable('bundle2'))
64 64
65 65
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push-related state and very common operations.

    A new instance should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired? (None until an attempt was made)
        self.locallocked = None
        # steps already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common heads are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads
168 168
# mapping of bookmark push action -> (success message, failure message);
# shared by the bundle2 path (_pushb2bookmarks) and the pushkey fallback
# (_pushbookmark)
bookmsgmap = {'update': (_("updating bookmark %s\n"),
                         _('updating bookmark %s failed!\n')),
              'export': (_("exporting bookmark %s\n"),
                         _('exporting bookmark %s failed!\n')),
              'delete': (_("deleting remote bookmark %s\n"),
                         _('deleting remote bookmark %s failed!\n')),
              }
177 177
178 178
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote.

    Returns the ``pushoperation`` object used for the push; callers read
    the outcome from its attributes:
      - pushop.cgresult: changegroup push result
        - None means nothing to push
        - 0 means HTTP error
        - 1 means we pushed and remote head count is unchanged *or*
          we have outgoing changesets but refused to push
        - other values as described by addchangegroup()
      - pushop.bkresult: bookmark push result
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
    # refuse to push to a local repo missing features we require
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    localwlock = locallock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if _canusebundle2(pushop) and maypushback:
            localwlock = pushop.repo.wlock()
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError as err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        if pushop.locallocked:
            # a transaction is only created when locked, so bundle2 replies
            # can be applied locally
            pushop.trmanager = transactionmanager(repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # old-style push requires locking the remote repository
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if _canusebundle2(pushop):
                _pushbundle2(pushop)
            # the remaining steps are no-ops when already handled by bundle2
            # (they check pushop.stepsdone)
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
            if pushop.trmanager:
                pushop.trmanager.close()
    finally:
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()
        if localwlock is not None:
            localwlock.release()

    return pushop
259 259
# list of names of steps to perform discovery before push, in execution order
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}
267 267
def pushdiscovery(stepname):
    """Decorator registering a pre-push discovery step under ``stepname``.

    The decorated function is recorded in the step -> function mapping and
    appended to the list of steps, so registration order is execution order
    (this may matter).

    Only use this decorator for a new step; to wrap an existing step from
    an extension, modify the pushdiscoverymapping dictionary directly.
    """
    def register(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return register
283 283
def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order."""
    for name in pushdiscoveryorder:
        pushdiscoverymapping[name](pushop)
289 289
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """Discover which changesets need to be pushed.

    Fills in pushop.outgoing, pushop.remoteheads and pushop.incoming.
    """
    commoninc = discovery.findcommonincoming(pushop.repo, pushop.remote,
                                             force=pushop.force)
    common, inc, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(pushop.repo, pushop.remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
302 302
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both success and failure case for changesets push)

    Fills in pushop.outdatedphases (heads to turn public when the
    changeset push succeeds) and pushop.fallbackoutdatedphases (heads
    to turn public if it fails)."""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and publishing):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    # on a non-publishing server only changesets that are public on the
    # remote matter, hence the extra revset condition
    extracond = ''
    if not publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote but public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                       outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
351 351
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """Collect the obsolescence markers relevant to the pushed changesets."""
    repo = pushop.repo
    if not obsolete.isenabled(repo, obsolete.exchangeopt):
        return
    if not repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
362 362
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """discover which bookmarks need to be pushed

    Fills pushop.outbookmarks with (name, old-remote-id, new-remote-id)
    tuples, and sets pushop.bkresult to 2 when an explicitly requested
    bookmark exists neither locally nor remotely."""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # when pushing a subset, only advance bookmarks pointing into it
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    # bookmarks the user asked for by name; entries are removed as each one
    # is accounted for, so whatever remains at the end is unknown
    explicit = set(pushop.bookmarks)

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
    # bookmarks that advanced locally: push them (subject to the pushed set)
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark (only pushed when explicitly requested)
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark (only when explicitly requested)
    for b, scid, dcid in advdst + diverge + differ:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete (only when explicitly requested)
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
413 413
def _pushcheckoutgoing(pushop):
    """Validate the outgoing changeset set before pushing.

    Returns False when there is nothing to push (after telling the user).
    Aborts when the push would publish obsolete/troubled changesets or
    would create new remote heads without --force.  Returns True when the
    push may proceed."""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = {"unstable": _("push includes unstable changeset: %s!"),
                   "bumped": _("push includes bumped changeset: %s!"),
                   "divergent": _("push includes divergent changeset: %s!")}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(mst[ctx.troubles()[0]] % ctx)

    # internal config: bookmarks.pushing
    newbm = pushop.ui.configlist('bookmarks', 'pushing')
    discovery.checkheads(unfi, pushop.remote, outgoing,
                         pushop.remoteheads,
                         pushop.newbranch,
                         bool(pushop.incoming),
                         newbm)
    return True
450 450
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}
458 458
def b2partsgenerator(stepname, idx=None):
    """Decorator registering a bundle2 part generator under ``stepname``.

    The decorated function is recorded in the step -> function mapping and,
    unless ``idx`` is given, appended at the end of the step list, so
    registration order is generation order (this may matter).  Pass ``idx``
    to splice the step at a specific position instead.

    Only use this decorator for new steps; to wrap an existing step from an
    extension, modify the b2partsgenmapping dictionary directly.
    """
    def register(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is not None:
            b2partsgenorder.insert(idx, stepname)
        else:
            b2partsgenorder.append(stepname)
        return func
    return register
477 477
478 478 def _pushb2ctxcheckheads(pushop, bundler):
479 479 """Generate race condition checking parts
480 480
481 481 Exists as an indepedent function to aid extensions
482 482 """
483 483 if not pushop.force:
484 484 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
485 485
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    Returns a reply handler extracting that result from the server reply,
    or None when there is nothing to push.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)

    _pushb2ctxcheckheads(pushop, bundler)

    # negotiate the changegroup version with the remote
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = None
    cgversions = b2caps.get('changegroup')
    if not cgversions:  # 3.1 and 3.2 ship with an empty value
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing)
    else:
        cgversions = [v for v in cgversions if v in changegroup.packermap]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing,
                                                version=version)
    cgpart = bundler.newpart('changegroup', data=cg)
    if version is not None:
        cgpart.addparam('version', version)
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
527 527
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    One 'pushkey' part is emitted per head to turn public.  A failure
    callback is registered for each part (mandatory pushkey parts abort
    the push), and the returned reply handler reports per-head results.
    Returns None when the step was already done or the remote lacks
    pushkey support.
    """
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    # idiomatic 'not in' (was 'not X in Y'), matching _pushb2bookmarks
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('phases')
    part2node = []

    def handlefailure(pushop, exc):
        # map the failing part id back to the head it was advancing
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        """warn about heads the server refused or ignored"""
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
568 568
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """Add an obsolescence-markers part to the outgoing bundle2 bundle."""
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        # no obsmarker format in common with the remote: leave the step to
        # the legacy pushkey path
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        buildobsmarkerspart(bundler, sorted(pushop.outobsmarkers))
580 580
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    One 'pushkey' part is emitted per outgoing bookmark, with a failure
    callback registered for each.  Returns a reply handler reporting
    per-bookmark results, or None when the step was already done or the
    remote lacks pushkey support."""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        # map the failing part id back to the bookmark it carried
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for part we did not generated
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # empty old -> creation; empty new -> deletion; otherwise move
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        """report the per-bookmark pushkey results to the user"""
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply
632 632
633 633
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # pushback is only possible when we hold a transaction to apply the
    # server's reply bundle into
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push (only the replycaps part was added)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(stream, ['force'], 'push')
        except error.BundleValueError as exc:
            # the remote rejected a mandatory part/parameter
            raise util.Abort('missing support for %s' % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            # the reply bundle contains something we cannot process
            raise util.Abort('missing support for %s' % exc)
    except error.PushkeyFailed as exc:
        # dispatch mandatory pushkey failures to the callback registered
        # by the part generator; unknown part ids propagate
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
676 676
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    No-op when the changesets were already pushed through bundle2.
    Stores the result in pushop.cgresult."""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                            bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
725 725
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely

    Moves local phases according to the remote's view, then (unless the
    bundle2 path already did it) advances the pushed heads to public on
    the remote through individual pushkey calls."""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
781 781
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
        return
    # The repo is not locked, so we must not change any phases!
    # Just inform the user that phases should have been moved, when
    # applicable.
    wouldmove = [n for n in nodes if phase < pushop.repo[n].phase()]
    if wouldmove:
        phasestr = phases.phasenames[phase]
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n') % phasestr)
798 798
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote

    No-op when the markers were already pushed through bundle2."""
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.ui.debug('try to push obsolete markers to remote\n')
    remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
    results = []
    # reverse sort to ensure we end with dump0
    for key in sorted(remotedata, reverse=True):
        results.append(remote.pushkey('obsolete', key, '', remotedata[key]))
    if not all(results):
        repo.ui.warn(_('failed to push some obsolete markers!\n'))
817 817
def _pushbookmark(pushop):
    """Update bookmark position on remote

    No-op when the changegroup push failed or when bookmarks were already
    pushed through bundle2."""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        # empty old -> creation; empty new -> deletion; otherwise move
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        else:
            action = 'update'
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value form invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
839 839
840 840 class pulloperation(object):
841 841 """A object that represent a single pull operation
842 842
843 843 It purpose is to carry pull related state and very common operation.
844 844
845 845 A new should be created at the beginning of each pull and discarded
846 846 afterward.
847 847 """
848 848
849 849 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
850 850 remotebookmarks=None, streamclonerequested=None):
851 851 # repo we pull into
852 852 self.repo = repo
853 853 # repo we pull from
854 854 self.remote = remote
855 855 # revision we try to pull (None is "all")
856 856 self.heads = heads
857 857 # bookmark pulled explicitly
858 858 self.explicitbookmarks = bookmarks
859 859 # do we force pull?
860 860 self.force = force
861 861 # whether a streaming clone was requested
862 862 self.streamclonerequested = streamclonerequested
863 863 # transaction manager
864 864 self.trmanager = None
865 865 # set of common changeset between local and remote before pull
866 866 self.common = None
867 867 # set of pulled head
868 868 self.rheads = None
869 869 # list of missing changeset to fetch remotely
870 870 self.fetch = None
871 871 # remote bookmarks data
872 872 self.remotebookmarks = remotebookmarks
873 873 # result of changegroup pulling (used as return code by pull)
874 874 self.cgresult = None
875 875 # list of step already done
876 876 self.stepsdone = set()
877 877
878 878 @util.propertycache
879 879 def pulledsubset(self):
880 880 """heads of the set of changeset target by the pull"""
881 881 # compute target subset
882 882 if self.heads is None:
883 883 # We pulled every thing possible
884 884 # sync on everything common
885 885 c = set(self.common)
886 886 ret = list(self.common)
887 887 for n in self.rheads:
888 888 if n not in c:
889 889 ret.append(n)
890 890 return ret
891 891 else:
892 892 # We pulled a specific subset
893 893 # sync on this subset
894 894 return self.heads
895 895
896 896 def gettransaction(self):
897 897 # deprecated; talk to trmanager directly
898 898 return self.trmanager.transaction()
899 899
900 900 class transactionmanager(object):
901 901 """An object to manage the life cycle of a transaction
902 902
903 903 It creates the transaction on demand and calls the appropriate hooks when
904 904 closing the transaction."""
905 905 def __init__(self, repo, source, url):
906 906 self.repo = repo
907 907 self.source = source
908 908 self.url = url
909 909 self._tr = None
910 910
911 911 def transaction(self):
912 912 """Return an open transaction object, constructing if necessary"""
913 913 if not self._tr:
914 914 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
915 915 self._tr = self.repo.transaction(trname)
916 916 self._tr.hookargs['source'] = self.source
917 917 self._tr.hookargs['url'] = self.url
918 918 return self._tr
919 919
920 920 def close(self):
921 921 """close transaction if created"""
922 922 if self._tr is not None:
923 923 self._tr.close()
924 924
925 925 def release(self):
926 926 """release transaction if created"""
927 927 if self._tr is not None:
928 928 self._tr.release()
929 929
930 930 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
931 931 streamclonerequested=None):
932 932 """Fetch repository data from a remote.
933 933
934 934 This is the main function used to retrieve data from a remote repository.
935 935
936 936 ``repo`` is the local repository to clone into.
937 937 ``remote`` is a peer instance.
938 938 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
939 939 default) means to pull everything from the remote.
940 940 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
941 941 default, all remote bookmarks are pulled.
942 942 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
943 943 initialization.
944 944 ``streamclonerequested`` is a boolean indicating whether a "streaming
945 945 clone" is requested. A "streaming clone" is essentially a raw file copy
946 946 of revlogs from the server. This only works when the local repository is
947 947 empty. The default value of ``None`` means to respect the server
948 948 configuration for preferring stream clones.
949 949
950 950 Returns the ``pulloperation`` created for this pull.
951 951 """
952 952 if opargs is None:
953 953 opargs = {}
954 954 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
955 955 streamclonerequested=streamclonerequested, **opargs)
956 956 if pullop.remote.local():
957 957 missing = set(pullop.remote.requirements) - pullop.repo.supported
958 958 if missing:
959 959 msg = _("required features are not"
960 960 " supported in the destination:"
961 961 " %s") % (', '.join(sorted(missing)))
962 962 raise util.Abort(msg)
963 963
964 964 lock = pullop.repo.lock()
965 965 try:
966 966 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
967 streamclone.maybeperformstreamclone(pullop.repo, pullop.remote,
968 pullop.heads,
969 pullop.streamclonerequested)
967 streamclone.maybeperformstreamclone(pullop)
970 968 _pulldiscovery(pullop)
971 969 if _canusebundle2(pullop):
972 970 _pullbundle2(pullop)
973 971 _pullchangeset(pullop)
974 972 _pullphase(pullop)
975 973 _pullbookmarks(pullop)
976 974 _pullobsolete(pullop)
977 975 pullop.trmanager.close()
978 976 finally:
979 977 pullop.trmanager.release()
980 978 lock.release()
981 979
982 980 return pullop
983 981
984 982 # list of steps to perform discovery before pull
985 983 pulldiscoveryorder = []
986 984
987 985 # Mapping between step name and function
988 986 #
989 987 # This exists to help extensions wrap steps if necessary
990 988 pulldiscoverymapping = {}
991 989
992 990 def pulldiscovery(stepname):
993 991 """decorator for function performing discovery before pull
994 992
995 993 The function is added to the step -> function mapping and appended to the
996 994 list of steps. Beware that decorated function will be added in order (this
997 995 may matter).
998 996
999 997 You can only use this decorator for a new step, if you want to wrap a step
1000 998 from an extension, change the pulldiscovery dictionary directly."""
1001 999 def dec(func):
1002 1000 assert stepname not in pulldiscoverymapping
1003 1001 pulldiscoverymapping[stepname] = func
1004 1002 pulldiscoveryorder.append(stepname)
1005 1003 return func
1006 1004 return dec
1007 1005
1008 1006 def _pulldiscovery(pullop):
1009 1007 """Run all discovery steps"""
1010 1008 for stepname in pulldiscoveryorder:
1011 1009 step = pulldiscoverymapping[stepname]
1012 1010 step(pullop)
1013 1011
1014 1012 @pulldiscovery('b1:bookmarks')
1015 1013 def _pullbookmarkbundle1(pullop):
1016 1014 """fetch bookmark data in bundle1 case
1017 1015
1018 1016 If not using bundle2, we have to fetch bookmarks before changeset
1019 1017 discovery to reduce the chance and impact of race conditions."""
1020 1018 if pullop.remotebookmarks is not None:
1021 1019 return
1022 1020 if (_canusebundle2(pullop)
1023 1021 and 'listkeys' in bundle2.bundle2caps(pullop.remote)):
1024 1022 # all known bundle2 servers now support listkeys, but lets be nice with
1025 1023 # new implementation.
1026 1024 return
1027 1025 pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1028 1026
1029 1027
1030 1028 @pulldiscovery('changegroup')
1031 1029 def _pulldiscoverychangegroup(pullop):
1032 1030 """discovery phase for the pull
1033 1031
1034 1032 Current handle changeset discovery only, will change handle all discovery
1035 1033 at some point."""
1036 1034 tmp = discovery.findcommonincoming(pullop.repo,
1037 1035 pullop.remote,
1038 1036 heads=pullop.heads,
1039 1037 force=pullop.force)
1040 1038 common, fetch, rheads = tmp
1041 1039 nm = pullop.repo.unfiltered().changelog.nodemap
1042 1040 if fetch and rheads:
1043 1041 # If a remote heads in filtered locally, lets drop it from the unknown
1044 1042 # remote heads and put in back in common.
1045 1043 #
1046 1044 # This is a hackish solution to catch most of "common but locally
1047 1045 # hidden situation". We do not performs discovery on unfiltered
1048 1046 # repository because it end up doing a pathological amount of round
1049 1047 # trip for w huge amount of changeset we do not care about.
1050 1048 #
1051 1049 # If a set of such "common but filtered" changeset exist on the server
1052 1050 # but are not including a remote heads, we'll not be able to detect it,
1053 1051 scommon = set(common)
1054 1052 filteredrheads = []
1055 1053 for n in rheads:
1056 1054 if n in nm:
1057 1055 if n not in scommon:
1058 1056 common.append(n)
1059 1057 else:
1060 1058 filteredrheads.append(n)
1061 1059 if not filteredrheads:
1062 1060 fetch = []
1063 1061 rheads = filteredrheads
1064 1062 pullop.common = common
1065 1063 pullop.fetch = fetch
1066 1064 pullop.rheads = rheads
1067 1065
1068 1066 def _pullbundle2(pullop):
1069 1067 """pull data using bundle2
1070 1068
1071 1069 For now, the only supported data are changegroup."""
1072 1070 remotecaps = bundle2.bundle2caps(pullop.remote)
1073 1071 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1074 1072 # pulling changegroup
1075 1073 pullop.stepsdone.add('changegroup')
1076 1074
1077 1075 kwargs['common'] = pullop.common
1078 1076 kwargs['heads'] = pullop.heads or pullop.rheads
1079 1077 kwargs['cg'] = pullop.fetch
1080 1078 if 'listkeys' in remotecaps:
1081 1079 kwargs['listkeys'] = ['phase']
1082 1080 if pullop.remotebookmarks is None:
1083 1081 # make sure to always includes bookmark data when migrating
1084 1082 # `hg incoming --bundle` to using this function.
1085 1083 kwargs['listkeys'].append('bookmarks')
1086 1084 if not pullop.fetch:
1087 1085 pullop.repo.ui.status(_("no changes found\n"))
1088 1086 pullop.cgresult = 0
1089 1087 else:
1090 1088 if pullop.heads is None and list(pullop.common) == [nullid]:
1091 1089 pullop.repo.ui.status(_("requesting all changes\n"))
1092 1090 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1093 1091 remoteversions = bundle2.obsmarkersversion(remotecaps)
1094 1092 if obsolete.commonversion(remoteversions) is not None:
1095 1093 kwargs['obsmarkers'] = True
1096 1094 pullop.stepsdone.add('obsmarkers')
1097 1095 _pullbundle2extraprepare(pullop, kwargs)
1098 1096 bundle = pullop.remote.getbundle('pull', **kwargs)
1099 1097 try:
1100 1098 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1101 1099 except error.BundleValueError as exc:
1102 1100 raise util.Abort('missing support for %s' % exc)
1103 1101
1104 1102 if pullop.fetch:
1105 1103 results = [cg['return'] for cg in op.records['changegroup']]
1106 1104 pullop.cgresult = changegroup.combineresults(results)
1107 1105
1108 1106 # processing phases change
1109 1107 for namespace, value in op.records['listkeys']:
1110 1108 if namespace == 'phases':
1111 1109 _pullapplyphases(pullop, value)
1112 1110
1113 1111 # processing bookmark update
1114 1112 for namespace, value in op.records['listkeys']:
1115 1113 if namespace == 'bookmarks':
1116 1114 pullop.remotebookmarks = value
1117 1115
1118 1116 # bookmark data were either already there or pulled in the bundle
1119 1117 if pullop.remotebookmarks is not None:
1120 1118 _pullbookmarks(pullop)
1121 1119
1122 1120 def _pullbundle2extraprepare(pullop, kwargs):
1123 1121 """hook function so that extensions can extend the getbundle call"""
1124 1122 pass
1125 1123
1126 1124 def _pullchangeset(pullop):
1127 1125 """pull changeset from unbundle into the local repo"""
1128 1126 # We delay the open of the transaction as late as possible so we
1129 1127 # don't open transaction for nothing or you break future useful
1130 1128 # rollback call
1131 1129 if 'changegroup' in pullop.stepsdone:
1132 1130 return
1133 1131 pullop.stepsdone.add('changegroup')
1134 1132 if not pullop.fetch:
1135 1133 pullop.repo.ui.status(_("no changes found\n"))
1136 1134 pullop.cgresult = 0
1137 1135 return
1138 1136 pullop.gettransaction()
1139 1137 if pullop.heads is None and list(pullop.common) == [nullid]:
1140 1138 pullop.repo.ui.status(_("requesting all changes\n"))
1141 1139 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1142 1140 # issue1320, avoid a race if remote changed after discovery
1143 1141 pullop.heads = pullop.rheads
1144 1142
1145 1143 if pullop.remote.capable('getbundle'):
1146 1144 # TODO: get bundlecaps from remote
1147 1145 cg = pullop.remote.getbundle('pull', common=pullop.common,
1148 1146 heads=pullop.heads or pullop.rheads)
1149 1147 elif pullop.heads is None:
1150 1148 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1151 1149 elif not pullop.remote.capable('changegroupsubset'):
1152 1150 raise util.Abort(_("partial pull cannot be done because "
1153 1151 "other repository doesn't support "
1154 1152 "changegroupsubset."))
1155 1153 else:
1156 1154 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1157 1155 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
1158 1156 pullop.remote.url())
1159 1157
1160 1158 def _pullphase(pullop):
1161 1159 # Get remote phases data from remote
1162 1160 if 'phases' in pullop.stepsdone:
1163 1161 return
1164 1162 remotephases = pullop.remote.listkeys('phases')
1165 1163 _pullapplyphases(pullop, remotephases)
1166 1164
1167 1165 def _pullapplyphases(pullop, remotephases):
1168 1166 """apply phase movement from observed remote state"""
1169 1167 if 'phases' in pullop.stepsdone:
1170 1168 return
1171 1169 pullop.stepsdone.add('phases')
1172 1170 publishing = bool(remotephases.get('publishing', False))
1173 1171 if remotephases and not publishing:
1174 1172 # remote is new and unpublishing
1175 1173 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1176 1174 pullop.pulledsubset,
1177 1175 remotephases)
1178 1176 dheads = pullop.pulledsubset
1179 1177 else:
1180 1178 # Remote is old or publishing all common changesets
1181 1179 # should be seen as public
1182 1180 pheads = pullop.pulledsubset
1183 1181 dheads = []
1184 1182 unfi = pullop.repo.unfiltered()
1185 1183 phase = unfi._phasecache.phase
1186 1184 rev = unfi.changelog.nodemap.get
1187 1185 public = phases.public
1188 1186 draft = phases.draft
1189 1187
1190 1188 # exclude changesets already public locally and update the others
1191 1189 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1192 1190 if pheads:
1193 1191 tr = pullop.gettransaction()
1194 1192 phases.advanceboundary(pullop.repo, tr, public, pheads)
1195 1193
1196 1194 # exclude changesets already draft locally and update the others
1197 1195 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1198 1196 if dheads:
1199 1197 tr = pullop.gettransaction()
1200 1198 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1201 1199
1202 1200 def _pullbookmarks(pullop):
1203 1201 """process the remote bookmark information to update the local one"""
1204 1202 if 'bookmarks' in pullop.stepsdone:
1205 1203 return
1206 1204 pullop.stepsdone.add('bookmarks')
1207 1205 repo = pullop.repo
1208 1206 remotebookmarks = pullop.remotebookmarks
1209 1207 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1210 1208 pullop.remote.url(),
1211 1209 pullop.gettransaction,
1212 1210 explicit=pullop.explicitbookmarks)
1213 1211
1214 1212 def _pullobsolete(pullop):
1215 1213 """utility function to pull obsolete markers from a remote
1216 1214
1217 1215 The `gettransaction` is function that return the pull transaction, creating
1218 1216 one if necessary. We return the transaction to inform the calling code that
1219 1217 a new transaction have been created (when applicable).
1220 1218
1221 1219 Exists mostly to allow overriding for experimentation purpose"""
1222 1220 if 'obsmarkers' in pullop.stepsdone:
1223 1221 return
1224 1222 pullop.stepsdone.add('obsmarkers')
1225 1223 tr = None
1226 1224 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1227 1225 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1228 1226 remoteobs = pullop.remote.listkeys('obsolete')
1229 1227 if 'dump0' in remoteobs:
1230 1228 tr = pullop.gettransaction()
1231 1229 for key in sorted(remoteobs, reverse=True):
1232 1230 if key.startswith('dump'):
1233 1231 data = base85.b85decode(remoteobs[key])
1234 1232 pullop.repo.obsstore.mergemarkers(tr, data)
1235 1233 pullop.repo.invalidatevolatilesets()
1236 1234 return tr
1237 1235
1238 1236 def caps20to10(repo):
1239 1237 """return a set with appropriate options to use bundle20 during getbundle"""
1240 1238 caps = set(['HG20'])
1241 1239 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1242 1240 caps.add('bundle2=' + urllib.quote(capsblob))
1243 1241 return caps
1244 1242
1245 1243 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1246 1244 getbundle2partsorder = []
1247 1245
1248 1246 # Mapping between step name and function
1249 1247 #
1250 1248 # This exists to help extensions wrap steps if necessary
1251 1249 getbundle2partsmapping = {}
1252 1250
1253 1251 def getbundle2partsgenerator(stepname, idx=None):
1254 1252 """decorator for function generating bundle2 part for getbundle
1255 1253
1256 1254 The function is added to the step -> function mapping and appended to the
1257 1255 list of steps. Beware that decorated functions will be added in order
1258 1256 (this may matter).
1259 1257
1260 1258 You can only use this decorator for new steps, if you want to wrap a step
1261 1259 from an extension, attack the getbundle2partsmapping dictionary directly."""
1262 1260 def dec(func):
1263 1261 assert stepname not in getbundle2partsmapping
1264 1262 getbundle2partsmapping[stepname] = func
1265 1263 if idx is None:
1266 1264 getbundle2partsorder.append(stepname)
1267 1265 else:
1268 1266 getbundle2partsorder.insert(idx, stepname)
1269 1267 return func
1270 1268 return dec
1271 1269
1272 1270 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1273 1271 **kwargs):
1274 1272 """return a full bundle (with potentially multiple kind of parts)
1275 1273
1276 1274 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1277 1275 passed. For now, the bundle can contain only changegroup, but this will
1278 1276 changes when more part type will be available for bundle2.
1279 1277
1280 1278 This is different from changegroup.getchangegroup that only returns an HG10
1281 1279 changegroup bundle. They may eventually get reunited in the future when we
1282 1280 have a clearer idea of the API we what to query different data.
1283 1281
1284 1282 The implementation is at a very early stage and will get massive rework
1285 1283 when the API of bundle is refined.
1286 1284 """
1287 1285 # bundle10 case
1288 1286 usebundle2 = False
1289 1287 if bundlecaps is not None:
1290 1288 usebundle2 = any((cap.startswith('HG2') for cap in bundlecaps))
1291 1289 if not usebundle2:
1292 1290 if bundlecaps and not kwargs.get('cg', True):
1293 1291 raise ValueError(_('request for bundle10 must include changegroup'))
1294 1292
1295 1293 if kwargs:
1296 1294 raise ValueError(_('unsupported getbundle arguments: %s')
1297 1295 % ', '.join(sorted(kwargs.keys())))
1298 1296 return changegroup.getchangegroup(repo, source, heads=heads,
1299 1297 common=common, bundlecaps=bundlecaps)
1300 1298
1301 1299 # bundle20 case
1302 1300 b2caps = {}
1303 1301 for bcaps in bundlecaps:
1304 1302 if bcaps.startswith('bundle2='):
1305 1303 blob = urllib.unquote(bcaps[len('bundle2='):])
1306 1304 b2caps.update(bundle2.decodecaps(blob))
1307 1305 bundler = bundle2.bundle20(repo.ui, b2caps)
1308 1306
1309 1307 kwargs['heads'] = heads
1310 1308 kwargs['common'] = common
1311 1309
1312 1310 for name in getbundle2partsorder:
1313 1311 func = getbundle2partsmapping[name]
1314 1312 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1315 1313 **kwargs)
1316 1314
1317 1315 return util.chunkbuffer(bundler.getchunks())
1318 1316
1319 1317 @getbundle2partsgenerator('changegroup')
1320 1318 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1321 1319 b2caps=None, heads=None, common=None, **kwargs):
1322 1320 """add a changegroup part to the requested bundle"""
1323 1321 cg = None
1324 1322 if kwargs.get('cg', True):
1325 1323 # build changegroup bundle here.
1326 1324 version = None
1327 1325 cgversions = b2caps.get('changegroup')
1328 1326 getcgkwargs = {}
1329 1327 if cgversions: # 3.1 and 3.2 ship with an empty value
1330 1328 cgversions = [v for v in cgversions if v in changegroup.packermap]
1331 1329 if not cgversions:
1332 1330 raise ValueError(_('no common changegroup version'))
1333 1331 version = getcgkwargs['version'] = max(cgversions)
1334 1332 outgoing = changegroup.computeoutgoing(repo, heads, common)
1335 1333 cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
1336 1334 bundlecaps=bundlecaps,
1337 1335 **getcgkwargs)
1338 1336
1339 1337 if cg:
1340 1338 part = bundler.newpart('changegroup', data=cg)
1341 1339 if version is not None:
1342 1340 part.addparam('version', version)
1343 1341 part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
1344 1342
1345 1343 @getbundle2partsgenerator('listkeys')
1346 1344 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1347 1345 b2caps=None, **kwargs):
1348 1346 """add parts containing listkeys namespaces to the requested bundle"""
1349 1347 listkeys = kwargs.get('listkeys', ())
1350 1348 for namespace in listkeys:
1351 1349 part = bundler.newpart('listkeys')
1352 1350 part.addparam('namespace', namespace)
1353 1351 keys = repo.listkeys(namespace).items()
1354 1352 part.data = pushkey.encodekeys(keys)
1355 1353
1356 1354 @getbundle2partsgenerator('obsmarkers')
1357 1355 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1358 1356 b2caps=None, heads=None, **kwargs):
1359 1357 """add an obsolescence markers part to the requested bundle"""
1360 1358 if kwargs.get('obsmarkers', False):
1361 1359 if heads is None:
1362 1360 heads = repo.heads()
1363 1361 subset = [c.node() for c in repo.set('::%ln', heads)]
1364 1362 markers = repo.obsstore.relevantmarkers(subset)
1365 1363 markers = sorted(markers)
1366 1364 buildobsmarkerspart(bundler, markers)
1367 1365
1368 1366 @getbundle2partsgenerator('hgtagsfnodes')
1369 1367 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1370 1368 b2caps=None, heads=None, common=None,
1371 1369 **kwargs):
1372 1370 """Transfer the .hgtags filenodes mapping.
1373 1371
1374 1372 Only values for heads in this bundle will be transferred.
1375 1373
1376 1374 The part data consists of pairs of 20 byte changeset node and .hgtags
1377 1375 filenodes raw values.
1378 1376 """
1379 1377 # Don't send unless:
1380 1378 # - changeset are being exchanged,
1381 1379 # - the client supports it.
1382 1380 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1383 1381 return
1384 1382
1385 1383 outgoing = changegroup.computeoutgoing(repo, heads, common)
1386 1384
1387 1385 if not outgoing.missingheads:
1388 1386 return
1389 1387
1390 1388 cache = tags.hgtagsfnodescache(repo.unfiltered())
1391 1389 chunks = []
1392 1390
1393 1391 # .hgtags fnodes are only relevant for head changesets. While we could
1394 1392 # transfer values for all known nodes, there will likely be little to
1395 1393 # no benefit.
1396 1394 #
1397 1395 # We don't bother using a generator to produce output data because
1398 1396 # a) we only have 40 bytes per head and even esoteric numbers of heads
1399 1397 # consume little memory (1M heads is 40MB) b) we don't want to send the
1400 1398 # part if we don't have entries and knowing if we have entries requires
1401 1399 # cache lookups.
1402 1400 for node in outgoing.missingheads:
1403 1401 # Don't compute missing, as this may slow down serving.
1404 1402 fnode = cache.getfnode(node, computemissing=False)
1405 1403 if fnode is not None:
1406 1404 chunks.extend([node, fnode])
1407 1405
1408 1406 if chunks:
1409 1407 bundler.newpart('hgtagsfnodes', data=''.join(chunks))
1410 1408
1411 1409 def check_heads(repo, their_heads, context):
1412 1410 """check if the heads of a repo have been modified
1413 1411
1414 1412 Used by peer for unbundling.
1415 1413 """
1416 1414 heads = repo.heads()
1417 1415 heads_hash = util.sha1(''.join(sorted(heads))).digest()
1418 1416 if not (their_heads == ['force'] or their_heads == heads or
1419 1417 their_heads == ['hashed', heads_hash]):
1420 1418 # someone else committed/pushed/unbundled while we
1421 1419 # were transferring data
1422 1420 raise error.PushRaced('repository changed while %s - '
1423 1421 'please try again' % context)
1424 1422
1425 1423 def unbundle(repo, cg, heads, source, url):
1426 1424 """Apply a bundle to a repo.
1427 1425
1428 1426 this function makes sure the repo is locked during the application and have
1429 1427 mechanism to check that no push race occurred between the creation of the
1430 1428 bundle and its application.
1431 1429
1432 1430 If the push was raced as PushRaced exception is raised."""
1433 1431 r = 0
1434 1432 # need a transaction when processing a bundle2 stream
1435 1433 wlock = lock = tr = None
1436 1434 recordout = None
1437 1435 # quick fix for output mismatch with bundle2 in 3.4
1438 1436 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
1439 1437 False)
1440 1438 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1441 1439 captureoutput = True
1442 1440 try:
1443 1441 check_heads(repo, heads, 'uploading changes')
1444 1442 # push can proceed
1445 1443 if util.safehasattr(cg, 'params'):
1446 1444 r = None
1447 1445 try:
1448 1446 wlock = repo.wlock()
1449 1447 lock = repo.lock()
1450 1448 tr = repo.transaction(source)
1451 1449 tr.hookargs['source'] = source
1452 1450 tr.hookargs['url'] = url
1453 1451 tr.hookargs['bundle2'] = '1'
1454 1452 op = bundle2.bundleoperation(repo, lambda: tr,
1455 1453 captureoutput=captureoutput)
1456 1454 try:
1457 1455 op = bundle2.processbundle(repo, cg, op=op)
1458 1456 finally:
1459 1457 r = op.reply
1460 1458 if captureoutput and r is not None:
1461 1459 repo.ui.pushbuffer(error=True, subproc=True)
1462 1460 def recordout(output):
1463 1461 r.newpart('output', data=output, mandatory=False)
1464 1462 tr.close()
1465 1463 except BaseException as exc:
1466 1464 exc.duringunbundle2 = True
1467 1465 if captureoutput and r is not None:
1468 1466 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1469 1467 def recordout(output):
1470 1468 part = bundle2.bundlepart('output', data=output,
1471 1469 mandatory=False)
1472 1470 parts.append(part)
1473 1471 raise
1474 1472 else:
1475 1473 lock = repo.lock()
1476 1474 r = changegroup.addchangegroup(repo, cg, source, url)
1477 1475 finally:
1478 1476 lockmod.release(tr, lock, wlock)
1479 1477 if recordout is not None:
1480 1478 recordout(repo.ui.popbuffer())
1481 1479 return r
@@ -1,282 +1,287 b''
1 1 # streamclone.py - producing and consuming streaming repository data
2 2 #
3 3 # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import time
11 11
12 12 from .i18n import _
13 13 from . import (
14 14 branchmap,
15 15 error,
16 16 store,
17 17 util,
18 18 )
19 19
def canperformstreamclone(repo, remote, heads, streamrequested=None):
    """Whether it is possible to perform a streaming clone as part of pull.

    Returns a ``(supported, requirements)`` tuple. ``supported`` is True
    when a streaming clone can be performed. ``requirements`` is the set
    of repo requirements advertised by the remote, or ``None`` when
    streaming clone is not supported.
    """
    # Streaming clone copies raw store files, so it is only valid when
    # pulling everything into a completely empty repository.
    if len(repo) or heads:
        return False, None

    # Without an explicit client preference, honor the server's advertised
    # preference. This likely only comes into play in LANs.
    if streamrequested is None:
        streamrequested = remote.capable('stream-preferred')

    if not streamrequested:
        return False, None

    # The server advertises its store requirements either via the bare
    # "stream" capability (advertised if and only if the sole requirement
    # is "revlogv1") or via "streamreqs", a comma-delimited requirements
    # list. The client must support everything the server requires.
    if remote.capable('stream'):
        return True, set(['revlogv1'])

    streamreqs = remote.capable('streamreqs')
    # This is weird and shouldn't happen with modern servers.
    if not streamreqs:
        return False, None

    streamreqs = set(streamreqs.split(','))
    # Server requires something we don't support. Bail.
    if streamreqs - repo.supportedformats:
        return False, None

    return True, streamreqs
68 68
def maybeperformstreamclone(pullop):
    """Run a streaming clone for ``pullop`` if one is possible."""
    supported, requirements = canperformstreamclone(
        pullop.repo, pullop.remote, pullop.heads,
        streamrequested=pullop.streamclonerequested)

    # Fall through to the regular pull path when streaming isn't viable.
    if not supported:
        return

    streamin(pullop.repo, pullop.remote, requirements)
76 81
def allowservergeneration(ui):
    """Whether this server allows clients to perform streaming clones."""
    # NOTE(review): untrusted=True consults the untrusted config as well —
    # confirm this matches the intended trust model for server settings.
    allowed = ui.configbool('server', 'uncompressed', True, untrusted=True)
    return allowed
80 85
# This is its own function so extensions can override it.
def _walkstreamfiles(repo):
    """Walk the store, yielding (name, encoded name, size) tuples.

    The tuple shape is what generatev1() unpacks from this walk.
    """
    return repo.store.walk()
84 89
def generatev1(repo):
    """Emit content for version 1 of a streaming clone.

    This is a generator of raw chunks that constitute a streaming clone.

    The stream begins with a line of 2 space-delimited integers containing the
    number of entries and total bytes size.

    Next, are N entries for each file being transferred. Each file entry starts
    as a line with the file name and integer size delimited by a null byte.
    The raw file data follows. Following the raw file data is the next file
    entry, or EOF.

    When used on the wire protocol, an additional line indicating protocol
    success will be prepended to the stream. This function is not responsible
    for adding it.

    This function will obtain a repository lock to ensure a consistent view of
    the store is captured. It therefore may raise LockError.
    """
    entries = []
    total_bytes = 0
    # Get consistent snapshot of repo, lock during scan.
    lock = repo.lock()
    try:
        repo.ui.debug('scanning\n')
        for name, ename, size in _walkstreamfiles(repo):
            # Zero-length files are omitted from the manifest entirely.
            if size:
                entries.append((name, size))
                total_bytes += size
    finally:
        lock.release()

    repo.ui.debug('%d files, %d bytes to transfer\n' %
                  (len(entries), total_bytes))
    # Header line: "<file count> <total bytes>".
    yield '%d %d\n' % (len(entries), total_bytes)

    svfs = repo.svfs
    oldaudit = svfs.mustaudit
    debugflag = repo.ui.debugflag
    # Disable path auditing while streaming; the names come from our own
    # store walk above, not from an external source.
    svfs.mustaudit = False

    try:
        for name, size in entries:
            if debugflag:
                repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
            # partially encode name over the wire for backwards compat
            yield '%s\0%d\n' % (store.encodedir(name), size)
            # Small files are read in one gulp; larger ones are chunked.
            if size <= 65536:
                fp = svfs(name)
                try:
                    data = fp.read(size)
                finally:
                    fp.close()
                yield data
            else:
                for chunk in util.filechunkiter(svfs(name), limit=size):
                    yield chunk
    finally:
        # Restore auditing even if the consumer aborts mid-stream.
        svfs.mustaudit = oldaudit
145 150
def consumev1(repo, fp):
    """Apply the contents from version 1 of a streaming clone file handle.

    This takes the output from generatev1() ("streamout" in older versions)
    and applies it to the specified repository.

    Like the producer, the status line added by the wire protocol is not
    handled by this function.
    """
    lock = repo.lock()
    try:
        repo.ui.status(_('streaming all changes\n'))
        # First line is "<file count> <total bytes>" (see generatev1).
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('unexpected response from remote server:'), l)
        repo.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        handled_bytes = 0
        repo.ui.progress(_('clone'), 0, total=total_bytes)
        start = time.time()

        tr = repo.transaction(_('clone'))
        try:
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                # Each entry starts with "<name>\0<size>\n".
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if repo.ui.debugflag:
                    repo.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = repo.svfs(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    repo.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            tr.close()
        finally:
            tr.release()

        # Writing straight to files circumvented the inmemory caches
        repo.invalidate()

        elapsed = time.time() - start
        # Guard against a zero/negative interval (clock precision) before
        # dividing for the transfer-rate report below.
        if elapsed <= 0:
            elapsed = 0.001
        repo.ui.progress(_('clone'), None)
        repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
    finally:
        lock.release()
208 213
def streamin(repo, remote, remotereqs):
    """Perform a streaming clone from ``remote`` into ``repo``.

    ``remotereqs`` is the set of requirements advertised by the remote.
    Returns the number of heads in the repository plus one.
    """
    # Save remote branchmap up front; applyremotedata uses it later to
    # speed up branchcache creation.
    rbranchmap = None
    if remote.capable("branchmap"):
        rbranchmap = remote.branchmap()

    fp = remote.stream_out()
    # The first line of the stream is an integer status code.
    status = fp.readline()
    try:
        resp = int(status)
    except ValueError:
        raise error.ResponseError(
            _('unexpected response from remote server:'), status)

    if resp == 1:
        raise util.Abort(_('operation forbidden by server'))
    if resp == 2:
        raise util.Abort(_('locking the remote repository failed'))
    if resp:
        raise util.Abort(_('the server sent an unknown error code'))

    applyremotedata(repo, remotereqs, rbranchmap, fp)
    return len(repo.heads()) + 1
232 237
def applyremotedata(repo, remotereqs, remotebranchmap, fp):
    """Apply stream clone data to a repository.

    "remotereqs" is a set of requirements to handle the incoming data.
    "remotebranchmap" is the result of a branchmap lookup on the remote. It
    can be None.
    "fp" is a file object containing the raw stream data, suitable for
    feeding into consumev1().
    """
    lock = repo.lock()
    try:
        consumev1(repo, fp)

        # new requirements = old non-format requirements +
        #                    new format-related remote requirements
        # requirements from the streamed-in repository
        repo.requirements = remotereqs | (
                repo.requirements - repo.supportedformats)
        repo._applyopenerreqs()
        repo._writerequirements()

        if remotebranchmap:
            # Collect every remote head, remembering which ones close
            # their branch; both lists feed the branchcache below.
            rbheads = []
            closed = []
            for bheads in remotebranchmap.itervalues():
                rbheads.extend(bheads)
                for h in bheads:
                    r = repo.changelog.rev(h)
                    b, c = repo.changelog.branchinfo(r)
                    if c:
                        closed.append(h)

            if rbheads:
                rtiprev = max((int(repo.changelog.rev(node))
                               for node in rbheads))
                cache = branchmap.branchcache(remotebranchmap,
                                              repo[rtiprev].node(),
                                              rtiprev,
                                              closednodes=closed)
                # Try to stick it as low as possible
                # filter above served are unlikely to be fetch from a clone
                for candidate in ('base', 'immutable', 'served'):
                    rview = repo.filtered(candidate)
                    if cache.validfor(rview):
                        repo._branchcaches[candidate] = cache
                        cache.write(rview)
                        break
        # Data was written directly to files; drop in-memory caches.
        repo.invalidate()
    finally:
        lock.release()
General Comments 0
You need to be logged in to leave comments. Login now