pull: move return code in the pull operation object...
Pierre-Yves David
r20898:f295b2ac default
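In short: pull() used to keep the changegroup result in a local variable and return it directly; after this change the result is stored on the pulloperation object as cgresult and returned from there, mirroring pushoperation.ret on the push side. Below is a minimal sketch of the new flow, not the full exchange.py code: discovery, locking, phase and obsolescence handling are elided, and the helpers referenced (_ from i18n, _pullchangeset) are assumed from the surrounding module.

# Simplified sketch only; assumes _ and _pullchangeset from exchange.py.
class pulloperation(object):
    def __init__(self, repo, remote, heads=None, force=False):
        self.repo = repo          # repo we pull into
        self.remote = remote      # repo we pull from
        self.heads = heads        # revisions to pull (None means "all")
        self.force = force        # do we force pull?
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None

def pull(repo, remote, heads=None, force=False):
    pullop = pulloperation(repo, remote, heads, force)
    # ... discovery and locking elided ...
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        # _pullchangeset() returns addchangegroup()'s integer result
        pullop.cgresult = _pullchangeset(pullop)
    # ... phase and obsolescence marker synchronisation elided ...
    return pullop.cgresult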
@@ -1,535 +1,537 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex, nullid
10 10 import errno
11 11 import util, scmutil, changegroup, base85
12 12 import discovery, phases, obsolete, bookmarks
13 13
14 14
15 15 class pushoperation(object):
16 16 """A object that represent a single push operation
17 17
18 18 It purpose is to carry push related state and very common operation.
19 19
20 20 A new should be created at the begining of each push and discarded
21 21 afterward.
22 22 """
23 23
24 24 def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
25 25 # repo we push from
26 26 self.repo = repo
27 27 self.ui = repo.ui
28 28 # repo we push to
29 29 self.remote = remote
30 30 # force option provided
31 31 self.force = force
32 32 # revs to be pushed (None is "all")
33 33 self.revs = revs
34 34 # allow push of new branch
35 35 self.newbranch = newbranch
36 36 # did a local lock get acquired?
37 37 self.locallocked = None
38 38 # Integer version of the push result
39 39 # - None means nothing to push
40 40 # - 0 means HTTP error
41 41 # - 1 means we pushed and remote head count is unchanged *or*
42 42 # we have outgoing changesets but refused to push
43 43 # - other values as described by addchangegroup()
44 44 self.ret = None
45 45 # discovery.outgoing object (contains common and outgoing data)
46 46 self.outgoing = None
47 47 # all remote heads before the push
48 48 self.remoteheads = None
49 49 # testable as a boolean indicating if any nodes are missing locally.
50 50 self.incoming = None
51 51 # set of all heads common after changeset bundle push
52 52 self.commonheads = None
53 53
54 54 def push(repo, remote, force=False, revs=None, newbranch=False):
55 55 '''Push outgoing changesets (limited by revs) from a local
56 56 repository to remote. Return an integer:
57 57 - None means nothing to push
58 58 - 0 means HTTP error
59 59 - 1 means we pushed and remote head count is unchanged *or*
60 60 we have outgoing changesets but refused to push
61 61 - other values as described by addchangegroup()
62 62 '''
63 63 pushop = pushoperation(repo, remote, force, revs, newbranch)
64 64 if pushop.remote.local():
65 65 missing = (set(pushop.repo.requirements)
66 66 - pushop.remote.local().supported)
67 67 if missing:
68 68 msg = _("required features are not"
69 69 " supported in the destination:"
70 70 " %s") % (', '.join(sorted(missing)))
71 71 raise util.Abort(msg)
72 72
73 73 # there are two ways to push to remote repo:
74 74 #
75 75 # addchangegroup assumes local user can lock remote
76 76 # repo (local filesystem, old ssh servers).
77 77 #
78 78 # unbundle assumes local user cannot lock remote repo (new ssh
79 79 # servers, http servers).
80 80
81 81 if not pushop.remote.canpush():
82 82 raise util.Abort(_("destination does not support push"))
83 83 # get local lock as we might write phase data
84 84 locallock = None
85 85 try:
86 86 locallock = pushop.repo.lock()
87 87 pushop.locallocked = True
88 88 except IOError, err:
89 89 pushop.locallocked = False
90 90 if err.errno != errno.EACCES:
91 91 raise
92 92 # source repo cannot be locked.
93 93 # We do not abort the push, but just disable the local phase
94 94 # synchronisation.
95 95 msg = 'cannot lock source repository: %s\n' % err
96 96 pushop.ui.debug(msg)
97 97 try:
98 98 pushop.repo.checkpush(pushop.force, pushop.revs)
99 99 lock = None
100 100 unbundle = pushop.remote.capable('unbundle')
101 101 if not unbundle:
102 102 lock = pushop.remote.lock()
103 103 try:
104 104 _pushdiscovery(pushop)
105 105 if _pushcheckoutgoing(pushop):
106 106 _pushchangeset(pushop)
107 107 _pushcomputecommonheads(pushop)
108 108 _pushsyncphase(pushop)
109 109 _pushobsolete(pushop)
110 110 finally:
111 111 if lock is not None:
112 112 lock.release()
113 113 finally:
114 114 if locallock is not None:
115 115 locallock.release()
116 116
117 117 _pushbookmark(pushop)
118 118 return pushop.ret
119 119
120 120 def _pushdiscovery(pushop):
121 121 # discovery
122 122 unfi = pushop.repo.unfiltered()
123 123 fci = discovery.findcommonincoming
124 124 commoninc = fci(unfi, pushop.remote, force=pushop.force)
125 125 common, inc, remoteheads = commoninc
126 126 fco = discovery.findcommonoutgoing
127 127 outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
128 128 commoninc=commoninc, force=pushop.force)
129 129 pushop.outgoing = outgoing
130 130 pushop.remoteheads = remoteheads
131 131 pushop.incoming = inc
132 132
133 133 def _pushcheckoutgoing(pushop):
134 134 outgoing = pushop.outgoing
135 135 unfi = pushop.repo.unfiltered()
136 136 if not outgoing.missing:
137 137 # nothing to push
138 138 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
139 139 return False
140 140 # something to push
141 141 if not pushop.force:
142 142 # if repo.obsstore is empty --> no obsolete markers,
143 143 # so we can skip the iteration
144 144 if unfi.obsstore:
145 145 # these messages are defined here to respect the 80 char limit
146 146 mso = _("push includes obsolete changeset: %s!")
147 147 mst = "push includes %s changeset: %s!"
148 148 # plain versions for i18n tool to detect them
149 149 _("push includes unstable changeset: %s!")
150 150 _("push includes bumped changeset: %s!")
151 151 _("push includes divergent changeset: %s!")
152 152 # If there is at least one obsolete or unstable
153 153 # changeset in missing, at least one of the
154 154 # missingheads will be obsolete or unstable.
155 155 # So checking heads only is ok
156 156 for node in outgoing.missingheads:
157 157 ctx = unfi[node]
158 158 if ctx.obsolete():
159 159 raise util.Abort(mso % ctx)
160 160 elif ctx.troubled():
161 161 raise util.Abort(_(mst)
162 162 % (ctx.troubles()[0],
163 163 ctx))
164 164 newbm = pushop.ui.configlist('bookmarks', 'pushing')
165 165 discovery.checkheads(unfi, pushop.remote, outgoing,
166 166 pushop.remoteheads,
167 167 pushop.newbranch,
168 168 bool(pushop.incoming),
169 169 newbm)
170 170 return True
171 171
172 172 def _pushchangeset(pushop):
173 173 """Make the actual push of changeset bundle to remote repo"""
174 174 outgoing = pushop.outgoing
175 175 unbundle = pushop.remote.capable('unbundle')
176 176 # TODO: get bundlecaps from remote
177 177 bundlecaps = None
178 178 # create a changegroup from local
179 179 if pushop.revs is None and not (outgoing.excluded
180 180 or pushop.repo.changelog.filteredrevs):
181 181 # push everything,
182 182 # use the fast path, no race possible on push
183 183 bundler = changegroup.bundle10(pushop.repo, bundlecaps)
184 184 cg = pushop.repo._changegroupsubset(outgoing,
185 185 bundler,
186 186 'push',
187 187 fastpath=True)
188 188 else:
189 189 cg = pushop.repo.getlocalbundle('push', outgoing, bundlecaps)
190 190
191 191 # apply changegroup to remote
192 192 if unbundle:
193 193 # local repo finds heads on server, finds out what
194 194 # revs it must push. once revs transferred, if server
195 195 # finds it has different heads (someone else won
196 196 # commit/push race), server aborts.
197 197 if pushop.force:
198 198 remoteheads = ['force']
199 199 else:
200 200 remoteheads = pushop.remoteheads
201 201 # ssh: return remote's addchangegroup()
202 202 # http: return remote's addchangegroup() or 0 for error
203 203 pushop.ret = pushop.remote.unbundle(cg, remoteheads,
204 204 'push')
205 205 else:
206 206 # we return an integer indicating remote head count
207 207 # change
208 208 pushop.ret = pushop.remote.addchangegroup(cg, 'push',
209 209 pushop.repo.url())
210 210
211 211 def _pushcomputecommonheads(pushop):
212 212 unfi = pushop.repo.unfiltered()
213 213 if pushop.ret:
214 214 # push succeeded, synchronize the target of the push
215 215 cheads = pushop.outgoing.missingheads
216 216 elif pushop.revs is None:
217 217 # All-out push failed; synchronize all common
218 218 cheads = pushop.outgoing.commonheads
219 219 else:
220 220 # I want cheads = heads(::missingheads and ::commonheads)
221 221 # (missingheads is revs with secret changeset filtered out)
222 222 #
223 223 # This can be expressed as:
224 224 # cheads = ( (missingheads and ::commonheads)
225 225 # + (commonheads and ::missingheads))
226 226 # )
227 227 #
228 228 # while trying to push we already computed the following:
229 229 # common = (::commonheads)
230 230 # missing = ((commonheads::missingheads) - commonheads)
231 231 #
232 232 # We can pick:
233 233 # * missingheads part of common (::commonheads)
234 234 common = set(pushop.outgoing.common)
235 235 nm = pushop.repo.changelog.nodemap
236 236 cheads = [node for node in pushop.revs if nm[node] in common]
237 237 # and
238 238 # * commonheads parents on missing
239 239 revset = unfi.set('%ln and parents(roots(%ln))',
240 240 pushop.outgoing.commonheads,
241 241 pushop.outgoing.missing)
242 242 cheads.extend(c.node() for c in revset)
243 243 pushop.commonheads = cheads
244 244
245 245 def _pushsyncphase(pushop):
246 246 """synchronise phase information locally and remotly"""
247 247 unfi = pushop.repo.unfiltered()
248 248 cheads = pushop.commonheads
249 249 if pushop.ret:
250 250 # push succeeded, synchronize the target of the push
251 251 cheads = pushop.outgoing.missingheads
252 252 elif pushop.revs is None:
253 253 # All-out push failed; synchronize all common
254 254 cheads = pushop.outgoing.commonheads
255 255 else:
256 256 # I want cheads = heads(::missingheads and ::commonheads)
257 257 # (missingheads is revs with secret changeset filtered out)
258 258 #
259 259 # This can be expressed as:
260 260 # cheads = ( (missingheads and ::commonheads)
261 261 # + (commonheads and ::missingheads))
262 262 # )
263 263 #
264 264 # while trying to push we already computed the following:
265 265 # common = (::commonheads)
266 266 # missing = ((commonheads::missingheads) - commonheads)
267 267 #
268 268 # We can pick:
269 269 # * missingheads part of common (::commonheads)
270 270 common = set(pushop.outgoing.common)
271 271 nm = pushop.repo.changelog.nodemap
272 272 cheads = [node for node in pushop.revs if nm[node] in common]
273 273 # and
274 274 # * commonheads parents on missing
275 275 revset = unfi.set('%ln and parents(roots(%ln))',
276 276 pushop.outgoing.commonheads,
277 277 pushop.outgoing.missing)
278 278 cheads.extend(c.node() for c in revset)
279 279 pushop.commonheads = cheads
280 280 # even when we don't push, exchanging phase data is useful
281 281 remotephases = pushop.remote.listkeys('phases')
282 282 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
283 283 and remotephases # server supports phases
284 284 and pushop.ret is None # nothing was pushed
285 285 and remotephases.get('publishing', False)):
286 286 # When:
287 287 # - this is a subrepo push
288 288 # - and remote supports phases
289 289 # - and no changeset was pushed
290 290 # - and remote is publishing
291 291 # We may be in the issue 3871 case!
292 292 # We drop the possible phase synchronisation done by
293 293 # courtesy to publish changesets possibly locally draft
294 294 # on the remote.
295 295 remotephases = {'publishing': 'True'}
296 296 if not remotephases: # old server or public only repo
297 297 _localphasemove(pushop, cheads)
298 298 # don't push any phase data as there is nothing to push
299 299 else:
300 300 ana = phases.analyzeremotephases(pushop.repo, cheads,
301 301 remotephases)
302 302 pheads, droots = ana
303 303 ### Apply remote phase on local
304 304 if remotephases.get('publishing', False):
305 305 _localphasemove(pushop, cheads)
306 306 else: # publish = False
307 307 _localphasemove(pushop, pheads)
308 308 _localphasemove(pushop, cheads, phases.draft)
309 309 ### Apply local phase on remote
310 310
311 311 # Get the list of all revs that are draft on the remote but public here.
312 312 # XXX Beware that the revset breaks if droots is not strictly
313 313 # XXX roots; we may want to ensure it is, but that is costly
314 314 outdated = unfi.set('heads((%ln::%ln) and public())',
315 315 droots, cheads)
316 316 for newremotehead in outdated:
317 317 r = pushop.remote.pushkey('phases',
318 318 newremotehead.hex(),
319 319 str(phases.draft),
320 320 str(phases.public))
321 321 if not r:
322 322 pushop.ui.warn(_('updating %s to public failed!\n')
323 323 % newremotehead)
324 324
325 325 def _localphasemove(pushop, nodes, phase=phases.public):
326 326 """move <nodes> to <phase> in the local source repo"""
327 327 if pushop.locallocked:
328 328 phases.advanceboundary(pushop.repo, phase, nodes)
329 329 else:
330 330 # repo is not locked, do not change any phases!
331 331 # Informs the user that phases should have been moved when
332 332 # applicable.
333 333 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
334 334 phasestr = phases.phasenames[phase]
335 335 if actualmoves:
336 336 pushop.ui.status(_('cannot lock source repo, skipping '
337 337 'local %s phase update\n') % phasestr)
338 338
339 339 def _pushobsolete(pushop):
340 340 """utility function to push obsolete markers to a remote"""
341 341 pushop.ui.debug('try to push obsolete markers to remote\n')
342 342 repo = pushop.repo
343 343 remote = pushop.remote
344 344 if (obsolete._enabled and repo.obsstore and
345 345 'obsolete' in remote.listkeys('namespaces')):
346 346 rslts = []
347 347 remotedata = repo.listkeys('obsolete')
348 348 for key in sorted(remotedata, reverse=True):
349 349 # reverse sort to ensure we end with dump0
350 350 data = remotedata[key]
351 351 rslts.append(remote.pushkey('obsolete', key, '', data))
352 352 if [r for r in rslts if not r]:
353 353 msg = _('failed to push some obsolete markers!\n')
354 354 repo.ui.warn(msg)
355 355
356 356 def _pushbookmark(pushop):
357 357 """Update bookmark position on remote"""
358 358 ui = pushop.ui
359 359 repo = pushop.repo.unfiltered()
360 360 remote = pushop.remote
361 361 ui.debug("checking for updated bookmarks\n")
362 362 revnums = map(repo.changelog.rev, pushop.revs or [])
363 363 ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)]
364 364 (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
365 365 ) = bookmarks.compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
366 366 srchex=hex)
367 367
368 368 for b, scid, dcid in advsrc:
369 369 if ancestors and repo[scid].rev() not in ancestors:
370 370 continue
371 371 if remote.pushkey('bookmarks', b, dcid, scid):
372 372 ui.status(_("updating bookmark %s\n") % b)
373 373 else:
374 374 ui.warn(_('updating bookmark %s failed!\n') % b)
375 375
376 376 class pulloperation(object):
377 377 """A object that represent a single pull operation
378 378
379 379 It purpose is to carry push related state and very common operation.
380 380
381 381 A new should be created at the begining of each pull and discarded
382 382 afterward.
383 383 """
384 384
385 385 def __init__(self, repo, remote, heads=None, force=False):
386 386 # repo we pull into
387 387 self.repo = repo
388 388 # repo we pull from
389 389 self.remote = remote
390 390 # revision we try to pull (None is "all")
391 391 self.heads = heads
392 392 # do we force pull?
393 393 self.force = force
394 394 # the name of the pull transaction
395 395 self._trname = 'pull\n' + util.hidepassword(remote.url())
396 396 # hold the transaction once created
397 397 self._tr = None
398 398 # set of common changesets between local and remote before pull
399 399 self.common = None
400 400 # set of pulled heads
401 401 self.rheads = None
402 402 # list of missing changesets to fetch remotely
403 403 self.fetch = None
404 # result of changegroup pulling (used as return code by pull)
405 self.cgresult = None
404 406
405 407 @util.propertycache
406 408 def pulledsubset(self):
407 409 """heads of the set of changeset target by the pull"""
408 410 # compute target subset
409 411 if self.heads is None:
410 412 # We pulled everything possible
411 413 # sync on everything common
412 414 c = set(self.common)
413 415 ret = list(self.common)
414 416 for n in self.rheads:
415 417 if n not in c:
416 418 ret.append(n)
417 419 return ret
418 420 else:
419 421 # We pulled a specific subset
420 422 # sync on this subset
421 423 return self.heads
422 424
423 425 def gettransaction(self):
424 426 """get appropriate pull transaction, creating it if needed"""
425 427 if self._tr is None:
426 428 self._tr = self.repo.transaction(self._trname)
427 429 return self._tr
428 430
429 431 def closetransaction(self):
430 432 """close transaction if created"""
431 433 if self._tr is not None:
432 434 self._tr.close()
433 435
434 436 def releasetransaction(self):
435 437 """release transaction if created"""
436 438 if self._tr is not None:
437 439 self._tr.release()
438 440
439 441 def pull(repo, remote, heads=None, force=False):
440 442 pullop = pulloperation(repo, remote, heads, force)
441 443 if pullop.remote.local():
442 444 missing = set(pullop.remote.requirements) - pullop.repo.supported
443 445 if missing:
444 446 msg = _("required features are not"
445 447 " supported in the destination:"
446 448 " %s") % (', '.join(sorted(missing)))
447 449 raise util.Abort(msg)
448 450
449 451 lock = pullop.repo.lock()
450 452 try:
451 453 tmp = discovery.findcommonincoming(pullop.repo.unfiltered(),
452 454 pullop.remote,
453 455 heads=pullop.heads,
454 456 force=force)
455 457 pullop.common, pullop.fetch, pullop.rheads = tmp
456 458 if not pullop.fetch:
457 459 pullop.repo.ui.status(_("no changes found\n"))
458 result = 0
460 pullop.cgresult = 0
459 461 else:
460 result = _pullchangeset(pullop)
462 pullop.cgresult = _pullchangeset(pullop)
461 463
462 464 _pullphase(pullop)
463 465 _pullobsolete(pullop)
464 466 pullop.closetransaction()
465 467 finally:
466 468 pullop.releasetransaction()
467 469 lock.release()
468 470
469 return result
471 return pullop.cgresult
470 472
471 473 def _pullchangeset(pullop):
472 474 """pull changeset from unbundle into the local repo"""
473 475 # We delay opening the transaction as late as possible so we
474 476 # don't open a transaction for nothing, and so we don't break a
475 477 # future useful rollback call
476 478 pullop.gettransaction()
477 479 if pullop.heads is None and list(pullop.common) == [nullid]:
478 480 pullop.repo.ui.status(_("requesting all changes\n"))
479 481 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
480 482 # issue1320, avoid a race if remote changed after discovery
481 483 pullop.heads = pullop.rheads
482 484
483 485 if pullop.remote.capable('getbundle'):
484 486 # TODO: get bundlecaps from remote
485 487 cg = pullop.remote.getbundle('pull', common=pullop.common,
486 488 heads=pullop.heads or pullop.rheads)
487 489 elif pullop.heads is None:
488 490 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
489 491 elif not pullop.remote.capable('changegroupsubset'):
490 492 raise util.Abort(_("partial pull cannot be done because "
491 493 "other repository doesn't support "
492 494 "changegroupsubset."))
493 495 else:
494 496 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
495 497 return pullop.repo.addchangegroup(cg, 'pull', pullop.remote.url())
496 498
497 499 def _pullphase(pullop):
498 500 # Get remote phases data from remote
499 501 remotephases = pullop.remote.listkeys('phases')
500 502 publishing = bool(remotephases.get('publishing', False))
501 503 if remotephases and not publishing:
502 504 # remote is new and unpublishing
503 505 pheads, _dr = phases.analyzeremotephases(pullop.repo,
504 506 pullop.pulledsubset,
505 507 remotephases)
506 508 phases.advanceboundary(pullop.repo, phases.public, pheads)
507 509 phases.advanceboundary(pullop.repo, phases.draft,
508 510 pullop.pulledsubset)
509 511 else:
510 512 # Remote is old or publishing; all common changesets
511 513 # should be seen as public
512 514 phases.advanceboundary(pullop.repo, phases.public,
513 515 pullop.pulledsubset)
514 516
515 517 def _pullobsolete(pullop):
516 518 """utility function to pull obsolete markers from a remote
517 519
518 520 The `gettransaction` is function that return the pull transaction, creating
519 521 one if necessary. We return the transaction to inform the calling code that
520 522 a new transaction have been created (when applicable).
521 523
522 524 Exists mostly to allow overriding for experimentation purpose"""
523 525 tr = None
524 526 if obsolete._enabled:
525 527 pullop.repo.ui.debug('fetching remote obsolete markers\n')
526 528 remoteobs = pullop.remote.listkeys('obsolete')
527 529 if 'dump0' in remoteobs:
528 530 tr = pullop.gettransaction()
529 531 for key in sorted(remoteobs, reverse=True):
530 532 if key.startswith('dump'):
531 533 data = base85.b85decode(remoteobs[key])
532 534 pullop.repo.obsstore.mergemarkers(tr, data)
533 535 pullop.repo.invalidatevolatilesets()
534 536 return tr
535 537
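For context, a hedged sketch of how a caller could consume the new return value; the caller name and message below are illustrative only, not part of this changeset. The integer semantics follow the diff above: pullop.cgresult is 0 when there was nothing to fetch, and otherwise the value returned by repo.addchangegroup(cg, 'pull', url) via _pullchangeset().

# Hypothetical caller (names are illustrative, not from exchange.py).
def pullandreport(ui, repo, remote):
    ret = pull(repo, remote, heads=None, force=False)
    # ret is pullop.cgresult: 0 means no changes were added
    if ret == 0:
        ui.status('no changes added\n')
    return ret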