exchange: fix docs for pulloperation...
Siddharth Agarwal
r20596:004a1744 default
@@ -1,530 +1,530 @@
1 1 # exchange.py - utility to exchange data between repositories.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex, nullid
10 10 import errno
11 11 import util, scmutil, changegroup, base85
12 12 import discovery, phases, obsolete, bookmarks
13 13
14 14
15 15 class pushoperation(object):
16 16 """A object that represent a single push operation
17 17
18 18 It purpose is to carry push related state and very common operation.
19 19
20 20 A new should be created at the begining of each push and discarded
21 21 afterward.
22 22 """
23 23
24 24 def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
25 25 # repo we push from
26 26 self.repo = repo
27 27 self.ui = repo.ui
28 28 # repo we push to
29 29 self.remote = remote
30 30 # force option provided
31 31 self.force = force
32 32 # revs to be pushed (None is "all")
33 33 self.revs = revs
34 34 # allow push of new branch
35 35 self.newbranch = newbranch
36 36 # did a local lock get acquired?
37 37 self.locallocked = None
38 38 # Integer version of the push result
39 39 # - None means nothing to push
40 40 # - 0 means HTTP error
41 41 # - 1 means we pushed and remote head count is unchanged *or*
42 42 # we have outgoing changesets but refused to push
43 43 # - other values as described by addchangegroup()
44 44 self.ret = None
45 45 # discovery.outgoing object (contains common and outgoing data)
46 46 self.outgoing = None
47 47 # all remote heads before the push
48 48 self.remoteheads = None
49 49 # testable as a boolean indicating if any nodes are missing locally.
50 50 self.incoming = None
51 51 # set of all heads common after changeset bundle push
52 52 self.commonheads = None
53 53
54 54 def push(repo, remote, force=False, revs=None, newbranch=False):
55 55 '''Push outgoing changesets (limited by revs) from a local
56 56 repository to remote. Return an integer:
57 57 - None means nothing to push
58 58 - 0 means HTTP error
59 59 - 1 means we pushed and remote head count is unchanged *or*
60 60 we have outgoing changesets but refused to push
61 61 - other values as described by addchangegroup()
62 62 '''
63 63 pushop = pushoperation(repo, remote, force, revs, newbranch)
64 64 if pushop.remote.local():
65 65 missing = (set(pushop.repo.requirements)
66 66 - pushop.remote.local().supported)
67 67 if missing:
68 68 msg = _("required features are not"
69 69 " supported in the destination:"
70 70 " %s") % (', '.join(sorted(missing)))
71 71 raise util.Abort(msg)
72 72
73 73 # there are two ways to push to remote repo:
74 74 #
75 75 # addchangegroup assumes local user can lock remote
76 76 # repo (local filesystem, old ssh servers).
77 77 #
78 78 # unbundle assumes local user cannot lock remote repo (new ssh
79 79 # servers, http servers).
80 80
81 81 if not pushop.remote.canpush():
82 82 raise util.Abort(_("destination does not support push"))
83 83 # get local lock as we might write phase data
84 84 locallock = None
85 85 try:
86 86 locallock = pushop.repo.lock()
87 87 pushop.locallocked = True
88 88 except IOError, err:
89 89 pushop.locallocked = False
90 90 if err.errno != errno.EACCES:
91 91 raise
92 92 # source repo cannot be locked.
93 93 # We do not abort the push, but just disable the local phase
94 94 # synchronisation.
95 95 msg = 'cannot lock source repository: %s\n' % err
96 96 pushop.ui.debug(msg)
97 97 try:
98 98 pushop.repo.checkpush(pushop.force, pushop.revs)
99 99 lock = None
100 100 unbundle = pushop.remote.capable('unbundle')
101 101 if not unbundle:
102 102 lock = pushop.remote.lock()
103 103 try:
104 104 _pushdiscovery(pushop)
105 105 if _pushcheckoutgoing(pushop):
106 106 _pushchangeset(pushop)
107 107 _pushcomputecommonheads(pushop)
108 108 _pushsyncphase(pushop)
109 109 _pushobsolete(pushop)
110 110 finally:
111 111 if lock is not None:
112 112 lock.release()
113 113 finally:
114 114 if locallock is not None:
115 115 locallock.release()
116 116
117 117 _pushbookmark(pushop)
118 118 return pushop.ret
119 119
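A minimal usage sketch of this entry point and the return-value contract documented above; the repository path and peer URL are placeholders and error handling is elided:

from mercurial import hg, ui, exchange

u = ui.ui()
repo = hg.repository(u, '/path/to/local/repo')          # hypothetical path
remote = hg.peer(repo, {}, 'ssh://example.com/repo')    # hypothetical URL

ret = exchange.push(repo, remote, force=False, revs=None, newbranch=False)
if ret is None:
    u.status('nothing to push\n')
elif ret == 0:
    u.warn('push failed (HTTP error)\n')
elif ret == 1:
    u.status('pushed, remote head count unchanged (or push was refused)\n')
else:
    u.status('pushed, addchangegroup() reported %d\n' % ret)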
120 120 def _pushdiscovery(pushop):
121 121 # discovery
122 122 unfi = pushop.repo.unfiltered()
123 123 fci = discovery.findcommonincoming
124 124 commoninc = fci(unfi, pushop.remote, force=pushop.force)
125 125 common, inc, remoteheads = commoninc
126 126 fco = discovery.findcommonoutgoing
127 127 outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
128 128 commoninc=commoninc, force=pushop.force)
129 129 pushop.outgoing = outgoing
130 130 pushop.remoteheads = remoteheads
131 131 pushop.incoming = inc
132 132
133 133 def _pushcheckoutgoing(pushop):
134 134 outgoing = pushop.outgoing
135 135 unfi = pushop.repo.unfiltered()
136 136 if not outgoing.missing:
137 137 # nothing to push
138 138 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
139 139 return False
140 140 # something to push
141 141 if not pushop.force:
142 142 # if repo.obsstore is empty --> no obsolete markers,
143 143 # so we can skip the iteration
144 144 if unfi.obsstore:
145 145 # these messages are defined here to stay within the 80-char limit
146 146 mso = _("push includes obsolete changeset: %s!")
147 147 mst = "push includes %s changeset: %s!"
148 148 # plain versions for i18n tool to detect them
149 149 _("push includes unstable changeset: %s!")
150 150 _("push includes bumped changeset: %s!")
151 151 _("push includes divergent changeset: %s!")
152 152 # If we are about to push and there is at least one
153 153 # obsolete or unstable changeset in missing, then at
154 154 # least one of the missing heads will be obsolete or
155 155 # unstable. So checking heads only is ok
156 156 for node in outgoing.missingheads:
157 157 ctx = unfi[node]
158 158 if ctx.obsolete():
159 159 raise util.Abort(mso % ctx)
160 160 elif ctx.troubled():
161 161 raise util.Abort(_(mst)
162 162 % (ctx.troubles()[0],
163 163 ctx))
164 164 newbm = pushop.ui.configlist('bookmarks', 'pushing')
165 165 discovery.checkheads(unfi, pushop.remote, outgoing,
166 166 pushop.remoteheads,
167 167 pushop.newbranch,
168 168 bool(pushop.incoming),
169 169 newbm)
170 170 return True
171 171
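A hedged side note: the heads-only reasoning above can be packaged as a standalone helper. The sketch below assumes an unfiltered repo and the missingheads list produced by discovery; it is illustrative, not part of the module:

def findtroubledheads(unfi, missingheads):
    """Return (node, reason) pairs for outgoing heads that would abort a push."""
    problems = []
    for node in missingheads:
        ctx = unfi[node]
        if ctx.obsolete():
            problems.append((node, 'obsolete'))
        elif ctx.troubled():
            # troubles() lists 'unstable', 'bumped' or 'divergent'
            problems.append((node, ctx.troubles()[0]))
    return problems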
172 172 def _pushchangeset(pushop):
173 173 """Make the actual push of changeset bundle to remote repo"""
174 174 outgoing = pushop.outgoing
175 175 unbundle = pushop.remote.capable('unbundle')
176 176 # TODO: get bundlecaps from remote
177 177 bundlecaps = None
178 178 # create a changegroup from local
179 179 if pushop.revs is None and not (outgoing.excluded
180 180 or pushop.repo.changelog.filteredrevs):
181 181 # push everything,
182 182 # use the fast path, no race possible on push
183 183 bundler = changegroup.bundle10(pushop.repo, bundlecaps)
184 184 cg = pushop.repo._changegroupsubset(outgoing,
185 185 bundler,
186 186 'push',
187 187 fastpath=True)
188 188 else:
189 189 cg = pushop.repo.getlocalbundle('push', outgoing, bundlecaps)
190 190
191 191 # apply changegroup to remote
192 192 if unbundle:
193 193 # local repo finds heads on server, finds out what
194 194 # revs it must push. once revs transferred, if server
195 195 # finds it has different heads (someone else won
196 196 # commit/push race), server aborts.
197 197 if pushop.force:
198 198 remoteheads = ['force']
199 199 else:
200 200 remoteheads = pushop.remoteheads
201 201 # ssh: return remote's addchangegroup()
202 202 # http: return remote's addchangegroup() or 0 for error
203 203 pushop.ret = pushop.remote.unbundle(cg, remoteheads,
204 204 'push')
205 205 else:
206 206 # we return an integer indicating remote head count
207 207 # change
208 208 pushop.ret = pushop.remote.addchangegroup(cg, 'push',
209 209 pushop.repo.url())
210 210
211 211 def _pushcomputecommonheads(pushop):
212 212 unfi = pushop.repo.unfiltered()
213 213 if pushop.ret:
214 214 # push succeeded, synchronize the target of the push
215 215 cheads = pushop.outgoing.missingheads
216 216 elif pushop.revs is None:
217 217 # All-out push failed. Synchronize all common heads.
218 218 cheads = pushop.outgoing.commonheads
219 219 else:
220 220 # I want cheads = heads(::missingheads and ::commonheads)
221 221 # (missingheads is revs with secret changeset filtered out)
222 222 #
223 223 # This can be expressed as:
224 224 # cheads = ( (missingheads and ::commonheads)
225 225 # + (commonheads and ::missingheads))
226 226 # )
227 227 #
228 228 # while trying to push we already computed the following:
229 229 # common = (::commonheads)
230 230 # missing = ((commonheads::missingheads) - commonheads)
231 231 #
232 232 # We can pick:
233 233 # * missingheads part of common (::commonheads)
234 234 common = set(pushop.outgoing.common)
235 235 nm = pushop.repo.changelog.nodemap
236 236 cheads = [node for node in pushop.revs if nm[node] in common]
237 237 # and
238 238 # * commonheads parents on missing
239 239 revset = unfi.set('%ln and parents(roots(%ln))',
240 240 pushop.outgoing.commonheads,
241 241 pushop.outgoing.missing)
242 242 cheads.extend(c.node() for c in revset)
243 243 pushop.commonheads = cheads
244 244
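The comment block above states the intent as cheads = heads(::missingheads and ::commonheads). A hedged restatement as a single revset call, shown only to illustrate that intent (the real function reuses sets already computed during discovery instead of issuing a new revset):

def naivecommonheads(unfi, outgoing):
    # illustrative only: evaluate heads(::missingheads and ::commonheads)
    revset = unfi.set('heads((::%ln) and (::%ln))',
                      outgoing.missingheads,
                      outgoing.commonheads)
    return [c.node() for c in revset]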
245 245 def _pushsyncphase(pushop):
246 246 """synchronise phase information locally and remotly"""
247 247 unfi = pushop.repo.unfiltered()
248 248 cheads = pushop.commonheads
249 249 if pushop.ret:
250 250 # push succeeded, synchronize the target of the push
251 251 cheads = pushop.outgoing.missingheads
252 252 elif pushop.revs is None:
253 253 # All-out push failed. Synchronize all common heads.
254 254 cheads = pushop.outgoing.commonheads
255 255 else:
256 256 # I want cheads = heads(::missingheads and ::commonheads)
257 257 # (missingheads is revs with secret changeset filtered out)
258 258 #
259 259 # This can be expressed as:
260 260 # cheads = ( (missingheads and ::commonheads)
261 261 # + (commonheads and ::missingheads))
262 262 # )
263 263 #
264 264 # while trying to push we already computed the following:
265 265 # common = (::commonheads)
266 266 # missing = ((commonheads::missingheads) - commonheads)
267 267 #
268 268 # We can pick:
269 269 # * missingheads part of common (::commonheads)
270 270 common = set(pushop.outgoing.common)
271 271 nm = pushop.repo.changelog.nodemap
272 272 cheads = [node for node in pushop.revs if nm[node] in common]
273 273 # and
274 274 # * commonheads parents on missing
275 275 revset = unfi.set('%ln and parents(roots(%ln))',
276 276 pushop.outgoing.commonheads,
277 277 pushop.outgoing.missing)
278 278 cheads.extend(c.node() for c in revset)
279 279 pushop.commonheads = cheads
280 280 # even when we don't push, exchanging phase data is useful
281 281 remotephases = pushop.remote.listkeys('phases')
282 282 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
283 283 and remotephases # server supports phases
284 284 and pushop.ret is None # nothing was pushed
285 285 and remotephases.get('publishing', False)):
286 286 # When:
287 287 # - this is a subrepo push
288 288 # - and remote supports phases
289 289 # - and no changeset was pushed
290 290 # - and remote is publishing
291 291 # We may be in issue 3871 case!
292 292 # We drop the possible phase synchronisation done by
293 293 # courtesy to publish changesets possibly draft on the
294 294 # remote.
295 295 remotephases = {'publishing': 'True'}
296 296 if not remotephases: # old server or public only repo
297 297 _localphasemove(pushop, cheads)
298 298 # don't push any phase data as there is nothing to push
299 299 else:
300 300 ana = phases.analyzeremotephases(pushop.repo, cheads,
301 301 remotephases)
302 302 pheads, droots = ana
303 303 ### Apply remote phase on local
304 304 if remotephases.get('publishing', False):
305 305 _localphasemove(pushop, cheads)
306 306 else: # publish = False
307 307 _localphasemove(pushop, pheads)
308 308 _localphasemove(pushop, cheads, phases.draft)
309 309 ### Apply local phase on remote
310 310
311 311 # Get the list of all revs that are draft on remote but public here.
312 312 # XXX Beware that the revset breaks if droots is not strictly
313 313 # XXX roots; we may want to ensure it is, but that is costly
314 314 outdated = unfi.set('heads((%ln::%ln) and public())',
315 315 droots, cheads)
316 316 for newremotehead in outdated:
317 317 r = pushop.remote.pushkey('phases',
318 318 newremotehead.hex(),
319 319 str(phases.draft),
320 320 str(phases.public))
321 321 if not r:
322 322 pushop.ui.warn(_('updating %s to public failed!\n')
323 323 % newremotehead)
324 324
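For context on the branches above: the 'phases' pushkey namespace is a plain string-to-string mapping. A hedged sketch of what listkeys('phases') is assumed to return from a non-publishing server (the hash is made up):

remotephases = {
    # one entry per remote draft root, hex node -> phase number ('1' == draft)
    'aa1b2c3d4e5f6071829304a5b6c7d8e9f0011223': '1',
}
# A publishing server is assumed to advertise itself with a
# 'publishing': 'True' entry instead, which is what the code above keys on
# before calling phases.analyzeremotephases().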
325 325 def _localphasemove(pushop, nodes, phase=phases.public):
326 326 """move <nodes> to <phase> in the local source repo"""
327 327 if pushop.locallocked:
328 328 phases.advanceboundary(pushop.repo, phase, nodes)
329 329 else:
330 330 # repo is not locked, do not change any phases!
331 331 # Informs the user that phases should have been moved when
332 332 # applicable.
333 333 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
334 334 phasestr = phases.phasenames[phase]
335 335 if actualmoves:
336 336 pushop.ui.status(_('cannot lock source repo, skipping '
337 337 'local %s phase update\n') % phasestr)
338 338
339 339 def _pushobsolete(pushop):
340 340 """utility function to push obsolete markers to a remote"""
341 341 pushop.ui.debug('try to push obsolete markers to remote\n')
342 342 repo = pushop.repo
343 343 remote = pushop.remote
344 344 if (obsolete._enabled and repo.obsstore and
345 345 'obsolete' in remote.listkeys('namespaces')):
346 346 rslts = []
347 347 remotedata = repo.listkeys('obsolete')
348 348 for key in sorted(remotedata, reverse=True):
349 349 # reverse sort to ensure we end with dump0
350 350 data = remotedata[key]
351 351 rslts.append(remote.pushkey('obsolete', key, '', data))
352 352 if [r for r in rslts if not r]:
353 353 msg = _('failed to push some obsolete markers!\n')
354 354 repo.ui.warn(msg)
355 355
356 356 def _pushbookmark(pushop):
357 357 """Update bookmark position on remote"""
358 358 ui = pushop.ui
359 359 repo = pushop.repo.unfiltered()
360 360 remote = pushop.remote
361 361 ui.debug("checking for updated bookmarks\n")
362 362 revnums = map(repo.changelog.rev, pushop.revs or [])
363 363 ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)]
364 364 (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
365 365 ) = bookmarks.compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
366 366 srchex=hex)
367 367
368 368 for b, scid, dcid in advsrc:
369 369 if ancestors and repo[scid].rev() not in ancestors:
370 370 continue
371 371 if remote.pushkey('bookmarks', b, dcid, scid):
372 372 ui.status(_("updating bookmark %s\n") % b)
373 373 else:
374 374 ui.warn(_('updating bookmark %s failed!\n') % b)
375 375
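A note on the pushkey call above: a bookmark update through the 'bookmarks' namespace is a compare-and-swap on hex node strings, so a concurrent move on the server makes it fail cleanly rather than overwrite. A hedged sketch with a placeholder name and made-up hashes:

# dcid is the value the remote is expected to hold, scid the new target;
# the call returns a true value only if the swap was accepted.
ok = remote.pushkey('bookmarks', 'feature-x',
                    '0123456789abcdef0123456789abcdef01234567',
                    '76543210fedcba9876543210fedcba9876543210')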
376 376 class pulloperation(object):
377 377 """A object that represent a single pull operation
378 378
379 379 It purpose is to carry push related state and very common operation.
380 380
381 A new should be created at the begining of each push and discarded
381 A new should be created at the begining of each pull and discarded
382 382 afterward.
383 383 """
384 384
385 385 def __init__(self, repo, remote, heads=None, force=False):
386 # repo we pull from
386 # repo we pull into
387 387 self.repo = repo
388 # repo we pull to
388 # repo we pull from
389 389 self.remote = remote
390 390 # revision we try to pull (None is "all")
391 391 self.heads = heads
392 392 # do we force pull?
393 393 self.force = force
394 394 # the name of the pull transaction
395 395 self._trname = 'pull\n' + util.hidepassword(remote.url())
396 396 # hold the transaction once created
397 397 self._tr = None
398 398 # set of common changesets between local and remote before pull
399 399 self.common = None
400 400 # set of pulled heads
401 401 self.rheads = None
402 402 # list of missing changesets to fetch remotely
403 403 self.fetch = None
404 404
405 405 @util.propertycache
406 406 def pulledsubset(self):
407 407 """heads of the set of changeset target by the pull"""
408 408 # compute target subset
409 409 if self.heads is None:
410 410 # We pulled everything possible
411 411 # sync on everything common
412 412 return self.common + self.rheads
413 413 else:
414 414 # We pulled a specific subset
415 415 # sync on this subset
416 416 return self.heads
417 417
418 418 def gettransaction(self):
419 419 """get appropriate pull transaction, creating it if needed"""
420 420 if self._tr is None:
421 421 self._tr = self.repo.transaction(self._trname)
422 422 return self._tr
423 423
424 424 def closetransaction(self):
425 425 """close transaction if created"""
426 426 if self._tr is not None:
427 427 self._tr.close()
428 428
429 429 def releasetransaction(self):
430 430 """release transaction if created"""
431 431 if self._tr is not None:
432 432 self._tr.release()
433 433
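The three helpers above implement a lazy transaction: nothing is opened until a step actually has data to write. A minimal sketch of the calling pattern they are designed for, mirroring pull() below (dosomething is a hypothetical step; repo and remote are assumed to exist):

pullop = pulloperation(repo, remote)
try:
    dosomething(pullop)          # a step calls pullop.gettransaction() only
                                 # when it really needs to write
    pullop.closetransaction()    # commit the transaction, if one was opened
finally:
    pullop.releasetransaction()  # roll back when close was never reached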
434 434 def pull(repo, remote, heads=None, force=False):
435 435 pullop = pulloperation(repo, remote, heads, force)
436 436 if pullop.remote.local():
437 437 missing = set(pullop.remote.requirements) - pullop.repo.supported
438 438 if missing:
439 439 msg = _("required features are not"
440 440 " supported in the destination:"
441 441 " %s") % (', '.join(sorted(missing)))
442 442 raise util.Abort(msg)
443 443
444 444 lock = pullop.repo.lock()
445 445 try:
446 446 tmp = discovery.findcommonincoming(pullop.repo.unfiltered(),
447 447 pullop.remote,
448 448 heads=pullop.heads,
449 449 force=force)
450 450 pullop.common, pullop.fetch, pullop.rheads = tmp
451 451 if not pullop.fetch:
452 452 pullop.repo.ui.status(_("no changes found\n"))
453 453 result = 0
454 454 else:
455 455 result = _pullchangeset(pullop)
456 456
457 457 _pullphase(pullop)
458 458 _pullobsolete(pullop)
459 459 pullop.closetransaction()
460 460 finally:
461 461 pullop.releasetransaction()
462 462 lock.release()
463 463
464 464 return result
465 465
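A hedged usage sketch for this pull entry point, analogous to the push sketch earlier; path and URL are placeholders, and the return value is 0 when nothing was fetched, otherwise whatever addchangegroup() reported:

from mercurial import hg, ui, exchange

u = ui.ui()
repo = hg.repository(u, '/path/to/local/repo')            # hypothetical path
remote = hg.peer(repo, {}, 'https://example.com/repo')    # hypothetical URL

result = exchange.pull(repo, remote, heads=None, force=False)
if result == 0:
    u.status('nothing new was pulled\n')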
466 466 def _pullchangeset(pullop):
467 467 """pull changeset from unbundle into the local repo"""
468 468 # We delay the open of the transaction as late as possible so we
469 469 # don't open transaction for nothing or you break future useful
470 470 # rollback call
471 471 pullop.gettransaction()
472 472 if pullop.heads is None and list(pullop.common) == [nullid]:
473 473 pullop.repo.ui.status(_("requesting all changes\n"))
474 474 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
475 475 # issue1320, avoid a race if remote changed after discovery
476 476 pullop.heads = pullop.rheads
477 477
478 478 if pullop.remote.capable('getbundle'):
479 479 # TODO: get bundlecaps from remote
480 480 cg = pullop.remote.getbundle('pull', common=pullop.common,
481 481 heads=pullop.heads or pullop.rheads)
482 482 elif pullop.heads is None:
483 483 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
484 484 elif not pullop.remote.capable('changegroupsubset'):
485 485 raise util.Abort(_("partial pull cannot be done because "
486 486 "other repository doesn't support "
487 487 "changegroupsubset."))
488 488 else:
489 489 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
490 490 return pullop.repo.addchangegroup(cg, 'pull', pullop.remote.url())
491 491
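The fallback chain above is driven entirely by wire capabilities. A sketch that names which protocol call each branch ends up using, in the same order as the code:

def pullstrategy(remote, heads):
    """Return the protocol call _pullchangeset would use for this peer."""
    if remote.capable('getbundle'):
        return 'getbundle'
    if heads is None:
        return 'changegroup'
    if remote.capable('changegroupsubset'):
        return 'changegroupsubset'
    return 'abort: partial pull not supported by the remote'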
492 492 def _pullphase(pullop):
493 493 # Get remote phases data from remote
494 494 remotephases = pullop.remote.listkeys('phases')
495 495 publishing = bool(remotephases.get('publishing', False))
496 496 if remotephases and not publishing:
497 497 # remote is new and non-publishing
498 498 pheads, _dr = phases.analyzeremotephases(pullop.repo,
499 499 pullop.pulledsubset,
500 500 remotephases)
501 501 phases.advanceboundary(pullop.repo, phases.public, pheads)
502 502 phases.advanceboundary(pullop.repo, phases.draft,
503 503 pullop.pulledsubset)
504 504 else:
505 505 # Remote is old or publishing; all common changesets
506 506 # should be seen as public
507 507 phases.advanceboundary(pullop.repo, phases.public,
508 508 pullop.pulledsubset)
509 509
510 510 def _pullobsolete(pullop):
511 511 """utility function to pull obsolete markers from a remote
512 512
513 513 The `gettransaction` is a function that returns the pull transaction, creating
514 514 one if necessary. We return the transaction to inform the calling code that
515 515 a new transaction has been created (when applicable).
516 516
517 517 Exists mostly to allow overriding for experimentation purposes"""
518 518 tr = None
519 519 if obsolete._enabled:
520 520 pullop.repo.ui.debug('fetching remote obsolete markers\n')
521 521 remoteobs = pullop.remote.listkeys('obsolete')
522 522 if 'dump0' in remoteobs:
523 523 tr = pullop.gettransaction()
524 524 for key in sorted(remoteobs, reverse=True):
525 525 if key.startswith('dump'):
526 526 data = base85.b85decode(remoteobs[key])
527 527 pullop.repo.obsstore.mergemarkers(tr, data)
528 528 pullop.repo.invalidatevolatilesets()
529 529 return tr
530 530
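For context on the 'dump' keys above: obsolescence markers travel through the 'obsolete' pushkey namespace as base85-encoded binary blobs split across keys named dump0, dump1, and so on. A hedged sketch of decoding such a reply by hand, mirroring the loop above:

from mercurial import base85

def decodemarkers(remoteobs):
    """Yield raw marker blobs from a listkeys('obsolete')-style mapping."""
    for key in sorted(remoteobs, reverse=True):   # reverse sort ends on dump0
        if key.startswith('dump'):
            yield base85.b85decode(remoteobs[key])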