##// END OF EJS Templates
discovery: properly exclude locally known but filtered heads...
Pierre-Yves David -
r23975:3b7088a5 stable
parent child Browse files
Show More
@@ -1,1304 +1,1305 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex, nullid
10 10 import errno, urllib
11 11 import util, scmutil, changegroup, base85, error
12 12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 13
def readbundle(ui, fh, fname, vfs=None):
    """Return an unpacker object for the bundle data starting at ``fh``.

    ``fname`` is used for error reporting only ("stream" when empty);
    ``vfs``, when provided, expands it to a full path. Aborts on data that
    is not a Mercurial bundle or uses an unknown bundle version.
    """
    header = changegroup.readexactly(fh, 4)

    compression = None
    if fname:
        if vfs:
            fname = vfs.join(fname)
    else:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            # raw, headerless changegroup stream: re-attach the consumed
            # bytes and treat it as an uncompressed HG10 bundle
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            compression = 'UN'

    magic = header[0:2]
    version = header[2:4]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if compression is None:
            # compression marker follows the 4-byte header
            compression = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, compression)
    if version == '2Y':
        return bundle2.unbundle20(ui, fh, header=magic + version)
    raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39 39
def buildobsmarkerspart(bundler, markers):
    """Add a ``b2x:obsmarkers`` part carrying ``markers`` to ``bundler``.

    Returns the newly created part, or None when ``markers`` is empty.
    Raises ValueError when no obsmarker format is understood by both sides.
    """
    if not markers:
        return None
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(remoteversions)
    if version is None:
        raise ValueError('bundler do not support common obsmarker format')
    stream = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart('b2x:obsmarkers', data=stream)
54 54
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmarks explicitly pushed (by name)
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # steps already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = set(self.outgoing.common)
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads
154 154
# mapping of messages used when pushing bookmarks:
# {action: (success message, failure message)}
# actions match the ones computed by the bookmark push code
bookmsgmap = {'update': (_("updating bookmark %s\n"),
                         _('updating bookmark %s failed!\n')),
              'export': (_("exporting bookmark %s\n"),
                         _('exporting bookmark %s failed!\n')),
              'delete': (_("deleting remote bookmark %s\n"),
                         _('deleting remote bookmark %s failed!\n')),
              }
163 163
164 164
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return the pushoperation object; its ``cgresult``
    attribute carries an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
    if pushop.remote.local():
        # pushing to a local repo: the destination must support every
        # requirement of the source repository
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise util.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise util.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    locallock = None
    try:
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError, err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        if pushop.locallocked:
            # a transaction manager is only usable if we hold the local lock
            pushop.trmanager = transactionmanager(repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            # legacy push path: we must lock the remote repository ourselves
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if (pushop.repo.ui.configbool('experimental', 'bundle2-exp',
                                          False)
                and pushop.remote.capable('bundle2-exp')):
                _pushbundle2(pushop)
            # each step below skips itself when bundle2 already performed it
            # (tracked through pushop.stepsdone)
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
            if pushop.trmanager:
                pushop.trmanager.close()
    finally:
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()

    return pushop
240 240
# list of steps to perform discovery before push, in execution order
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
# (steps are registered through the ``pushdiscovery`` decorator)
pushdiscoverymapping = {}
248 248
def pushdiscovery(stepname):
    """decorator registering a push-discovery step

    The decorated function is stored in ``pushdiscoverymapping`` and its
    name appended to ``pushdiscoveryorder``, so steps run in decoration
    order (which may matter).

    Only use this for brand new steps; to wrap an existing step from an
    extension, modify ``pushdiscoverymapping`` directly."""
    def register(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return register
264 264
def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order"""
    for stepname in pushdiscoveryorder:
        pushdiscoverymapping[stepname](pushop)
270 270
@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    repo = pushop.repo
    remote = pushop.remote
    commoninc = discovery.findcommonincoming(repo, remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(repo, remote,
                                                   onlyheads=pushop.revs,
                                                   commoninc=commoninc,
                                                   force=pushop.force)
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
283 283
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    # whether the remote advertises itself as a publishing server
    publishing = remotephases.get('publishing', False)
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    extracond = ''
    if not publishing:
        # on a non-publishing server, only public changesets are outdated
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote but public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changesets we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                       outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
318 318
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """compute the set of obsolescence markers relevant to the push"""
    repo = pushop.repo
    if not obsolete.isenabled(repo, obsolete.exchangeopt):
        return
    if not repo.obsstore:
        return
    if 'obsolete' not in pushop.remote.listkeys('namespaces'):
        return
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
329 329
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """compare local and remote bookmarks and fill ``pushop.outbookmarks``

    Each entry is a ``(name, remotenode, localnode)`` triple; an empty
    string stands for the missing side (new bookmark to export, or remote
    bookmark to delete)."""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # restrict bookmark updates to the pushed subset
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    # bookmarks explicitly requested on the command line
    explicit = set(pushop.bookmarks)

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
    # bookmarks that simply advanced locally are always pushed
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark (only pushed when explicitly requested)
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark (only when explicitly requested)
    for b, scid, dcid in advdst + diverge + differ:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete (only when explicitly requested)
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        # some requested bookmarks were not matched by any category above
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
380 380
def _pushcheckoutgoing(pushop):
    """check that the outgoing changesets are safe to push

    Returns False when there is nothing to push (and reports it). Unless
    the push is forced, aborts when outgoing heads are obsolete/troubled
    and delegates remote-head checking to ``discovery.checkheads``."""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are here for 80 char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = {"unstable": _("push includes unstable changeset: %s!"),
                   "bumped": _("push includes bumped changeset: %s!"),
                   "divergent": _("push includes divergent changeset: %s!")}
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise util.Abort(mso % ctx)
                elif ctx.troubled():
                    raise util.Abort(mst[ctx.troubles()[0]] % ctx)
        newbm = pushop.ui.configlist('bookmarks', 'pushing')
        discovery.checkheads(unfi, pushop.remote, outgoing,
                             pushop.remoteheads,
                             pushop.newbranch,
                             bool(pushop.incoming),
                             newbm)
    return True
415 415
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
# (steps are registered through the ``b2partsgenerator`` decorator)
b2partsgenmapping = {}
423 423
def b2partsgenerator(stepname):
    """decorator registering a bundle2 part generator

    The decorated function is stored in ``b2partsgenmapping`` and its name
    appended to ``b2partsgenorder``, so generators run in decoration order
    (which may matter).

    Only use this for brand new steps; to wrap a step from an extension,
    modify ``b2partsgenmapping`` directly."""
    def register(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        b2partsgenorder.append(stepname)
        return func
    return register
439 439
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    if not pushop.force:
        bundler.newpart('b2x:check:heads', data=iter(pushop.remoteheads))
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = None
    cgversions = b2caps.get('b2x:changegroup')
    if not cgversions: # 3.1 and 3.2 ship with an empty value
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing)
    else:
        # negotiate the highest changegroup version supported by both sides
        cgversions = [v for v in cgversions if v in changegroup.packermap]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing,
                                                version=version)
    cgpart = bundler.newpart('b2x:changegroup', data=cg)
    if version is not None:
        cgpart.addparam('version', version)
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
480 480
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    Adds one ``b2x:pushkey`` part per head that must be turned public on
    the remote, and returns a reply handler that warns about any update
    the server ignored or refused.
    """
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    # idiomatic membership test (was ``not 'b2x:pushkey' in b2caps``),
    # matching the form already used by _pushb2bookmarks
    if 'b2x:pushkey' not in b2caps:
        return
    pushop.stepsdone.add('phases')
    part2node = []
    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('b2x:pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
    def handlereply(op):
        """check the server reply for each phase-update part"""
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
512 512
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add outgoing obsolescence markers to the bundle, when supported"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    supported = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(supported) is None:
        # no obsmarker format understood by both ends
        return
    pushop.stepsdone.add('obsmarkers')
    markers = pushop.outobsmarkers
    if markers:
        buildobsmarkerspart(bundler, markers)
523 523
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    Adds one ``b2x:pushkey`` part per outgoing bookmark and returns a
    reply handler reporting per-bookmark success or failure."""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'b2x:pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode
    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('b2x:pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # empty old/new mark a bookmark creation/deletion respectively
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))


    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply
566 566
567 567
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The parts to send are produced by the functions registered through
    ``b2partsgenerator``; each generator may return a callable that will
    later be fed the processed server reply."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('b2x:replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        # only the replycaps part was added
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    try:
        trgetter = None
        if pushback:
            # let the server reply open a local transaction if needed
            trgetter = pushop.trmanager.transaction
        op = bundle2.processbundle(pushop.repo, reply, trgetter)
    except error.BundleValueError, exc:
        raise util.Abort('missing support for %s' % exc)
    for rephand in replyhandlers:
        rephand(op)
604 604
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        # already pushed (e.g. through bundle2)
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())
653 653
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely

    First applies the remote's phase information to the local repo, then
    pushes outdated phase updates to the remote (falling back to the plain
    pushkey command when bundle2 did not already handle phases)."""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
709 709
def _localphasemove(pushop, nodes, phase=phases.public):
    """Advance ``nodes`` to ``phase`` in the local (source) repository.

    Without a transaction manager the repo is not locked, so no phase is
    actually changed; the user is merely told that updates were skipped.
    """
    if pushop.trmanager:
        tr = pushop.trmanager.transaction()
        phases.advanceboundary(pushop.repo, tr, phase, nodes)
        return
    # repo is not locked, do not change any phases!
    # Inform the user that phases should have been moved when applicable.
    repo = pushop.repo
    actualmoves = [n for n in nodes if phase < repo[n].phase()]
    if actualmoves:
        phasestr = phases.phasenames[phase]
        pushop.ui.status(_('cannot lock source repo, skipping '
                           'local %s phase update\n') % phasestr)
726 726
def _pushobsolete(pushop):
    """Push outgoing obsolescence markers through the pushkey protocol."""
    if 'obsmarkers' in pushop.stepsdone:
        return
    pushop.ui.debug('try to push obsolete markers to remote\n')
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if not pushop.outobsmarkers:
        return
    remotedata = obsolete._pushkeyescape(pushop.outobsmarkers)
    results = []
    # reverse sort to ensure we end with dump0
    for key in sorted(remotedata, reverse=True):
        results.append(remote.pushkey('obsolete', key, '', remotedata[key]))
    if not all(results):
        repo.ui.warn(_('failed to push some obsolete markers!\n'))
745 745
def _pushbookmark(pushop):
    """Synchronise bookmark positions on the remote via pushkey."""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        # changegroup push failed, or bookmarks already handled (bundle2)
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for book, old, new in pushop.outbookmarks:
        # empty old/new mark a bookmark creation/deletion respectively
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        else:
            action = 'update'
        if remote.pushkey('bookmarks', book, old, new):
            ui.status(bookmsgmap[action][0] % book)
        else:
            ui.warn(bookmsgmap[action][1] % book)
            # discovery can have set the value from an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
767 767
class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull related state and very common operations.

    A new pulloperation should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=()):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = bookmarks
        # do we force pull?
        self.force = force
        # transaction manager
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = None
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps already done
        self.stepsdone = set()

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
824 824
class transactionmanager(object):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""
    def __init__(self, repo, source, url):
        # repo the transaction operates on
        self.repo = repo
        # operation name (e.g. 'push-response', 'pull'), exposed to hooks
        self.source = source
        # remote url, exposed to hooks and embedded in the transaction name
        self.url = url
        # lazily-created transaction (see transaction())
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            repo = self.repo
            # only expose pending data to the hook when something was written
            p = lambda: self._tr.writepending() and repo.root or ""
            repo.hook('b2x-pretransactionclose', throw=True, pending=p,
                      **self._tr.hookargs)
            # snapshot hookargs now; the transaction may be gone when the
            # post-close callback fires
            hookargs = dict(self._tr.hookargs)
            def runhooks():
                repo.hook('b2x-transactionclose', **hookargs)
            # NOTE(review): runhooks is deferred through repo._afterlock —
            # presumably it runs once the repo lock is released; confirm
            # against localrepo._afterlock
            self._tr.addpostclose('b2x-hook-transactionclose',
                                  lambda tr: repo._afterlock(runhooks))
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
863 863
864 864 def pull(repo, remote, heads=None, force=False, bookmarks=()):
865 865 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks)
866 866 if pullop.remote.local():
867 867 missing = set(pullop.remote.requirements) - pullop.repo.supported
868 868 if missing:
869 869 msg = _("required features are not"
870 870 " supported in the destination:"
871 871 " %s") % (', '.join(sorted(missing)))
872 872 raise util.Abort(msg)
873 873
874 874 pullop.remotebookmarks = remote.listkeys('bookmarks')
875 875 lock = pullop.repo.lock()
876 876 try:
877 877 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
878 878 _pulldiscovery(pullop)
879 879 if (pullop.repo.ui.configbool('experimental', 'bundle2-exp', False)
880 880 and pullop.remote.capable('bundle2-exp')):
881 881 _pullbundle2(pullop)
882 882 _pullchangeset(pullop)
883 883 _pullphase(pullop)
884 884 _pullbookmarks(pullop)
885 885 _pullobsolete(pullop)
886 886 pullop.trmanager.close()
887 887 finally:
888 888 pullop.trmanager.release()
889 889 lock.release()
890 890
891 891 return pullop
892 892
893 893 # list of steps to perform discovery before pull
894 894 pulldiscoveryorder = []
895 895
896 896 # Mapping between step name and function
897 897 #
898 898 # This exists to help extensions wrap steps if necessary
899 899 pulldiscoverymapping = {}
900 900
901 901 def pulldiscovery(stepname):
902 902 """decorator for function performing discovery before pull
903 903
904 904 The function is added to the step -> function mapping and appended to the
905 905 list of steps. Beware that decorated functions will be added in order (this
906 906 may matter).
907 907
908 908 You can only use this decorator for a new step, if you want to wrap a step
909 909 from an extension, change the pulldiscovery dictionary directly."""
910 910 def dec(func):
911 911 assert stepname not in pulldiscoverymapping
912 912 pulldiscoverymapping[stepname] = func
913 913 pulldiscoveryorder.append(stepname)
914 914 return func
915 915 return dec
916 916
917 917 def _pulldiscovery(pullop):
918 918 """Run all discovery steps"""
919 919 for stepname in pulldiscoveryorder:
920 920 step = pulldiscoverymapping[stepname]
921 921 step(pullop)
922 922
923 923 @pulldiscovery('changegroup')
924 924 def _pulldiscoverychangegroup(pullop):
925 925 """discovery phase for the pull
926 926
927 927 Currently handles changeset discovery only; will change to handle all discovery
928 928 at some point."""
929 929 tmp = discovery.findcommonincoming(pullop.repo,
930 930 pullop.remote,
931 931 heads=pullop.heads,
932 932 force=pullop.force)
933 933 common, fetch, rheads = tmp
934 934 nm = pullop.repo.unfiltered().changelog.nodemap
935 935 if fetch and rheads:
936 936 # If a remote head is filtered locally, let's drop it from the unknown
937 937 # remote heads and put it back in common.
938 938 #
939 939 # This is a hackish solution to catch most of the "common but locally
940 940 # hidden" situations. We do not perform discovery on the unfiltered
941 941 # repository because it ends up doing a pathological amount of round
942 942 # trips for a huge amount of changesets we do not care about.
943 943 #
944 944 # If a set of such "common but filtered" changesets exists on the server
945 945 # but does not include a remote head, we'll not be able to detect it,
946 946 scommon = set(common)
947 947 filteredrheads = []
948 948 for n in rheads:
949 if n in nm and n not in scommon:
950 common.append(n)
949 if n in nm:
950 if n not in scommon:
951 common.append(n)
951 952 else:
952 953 filteredrheads.append(n)
953 954 if not filteredrheads:
954 955 fetch = []
955 956 rheads = filteredrheads
956 957 pullop.common = common
957 958 pullop.fetch = fetch
958 959 pullop.rheads = rheads
959 960
960 961 def _pullbundle2(pullop):
961 962 """pull data using bundle2
962 963
963 964 For now, the only supported data are changegroup."""
964 965 remotecaps = bundle2.bundle2caps(pullop.remote)
965 966 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
966 967 # pulling changegroup
967 968 pullop.stepsdone.add('changegroup')
968 969
969 970 kwargs['common'] = pullop.common
970 971 kwargs['heads'] = pullop.heads or pullop.rheads
971 972 kwargs['cg'] = pullop.fetch
972 973 if 'b2x:listkeys' in remotecaps:
973 974 kwargs['listkeys'] = ['phase', 'bookmarks']
974 975 if not pullop.fetch:
975 976 pullop.repo.ui.status(_("no changes found\n"))
976 977 pullop.cgresult = 0
977 978 else:
978 979 if pullop.heads is None and list(pullop.common) == [nullid]:
979 980 pullop.repo.ui.status(_("requesting all changes\n"))
980 981 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
981 982 remoteversions = bundle2.obsmarkersversion(remotecaps)
982 983 if obsolete.commonversion(remoteversions) is not None:
983 984 kwargs['obsmarkers'] = True
984 985 pullop.stepsdone.add('obsmarkers')
985 986 _pullbundle2extraprepare(pullop, kwargs)
986 987 if kwargs.keys() == ['format']:
987 988 return # nothing to pull
988 989 bundle = pullop.remote.getbundle('pull', **kwargs)
989 990 try:
990 991 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
991 992 except error.BundleValueError, exc:
992 993 raise util.Abort('missing support for %s' % exc)
993 994
994 995 if pullop.fetch:
995 996 results = [cg['return'] for cg in op.records['changegroup']]
996 997 pullop.cgresult = changegroup.combineresults(results)
997 998
998 999 # processing phases change
999 1000 for namespace, value in op.records['listkeys']:
1000 1001 if namespace == 'phases':
1001 1002 _pullapplyphases(pullop, value)
1002 1003
1003 1004 # processing bookmark update
1004 1005 for namespace, value in op.records['listkeys']:
1005 1006 if namespace == 'bookmarks':
1006 1007 pullop.remotebookmarks = value
1007 1008 _pullbookmarks(pullop)
1008 1009
1009 1010 def _pullbundle2extraprepare(pullop, kwargs):
1010 1011 """hook function so that extensions can extend the getbundle call"""
1011 1012 pass
1012 1013
1013 1014 def _pullchangeset(pullop):
1014 1015 """pull changeset from unbundle into the local repo"""
1015 1016 # We delay the open of the transaction as late as possible so we
1016 1017 # don't open transaction for nothing or you break future useful
1017 1018 # rollback call
1018 1019 if 'changegroup' in pullop.stepsdone:
1019 1020 return
1020 1021 pullop.stepsdone.add('changegroup')
1021 1022 if not pullop.fetch:
1022 1023 pullop.repo.ui.status(_("no changes found\n"))
1023 1024 pullop.cgresult = 0
1024 1025 return
1025 1026 pullop.gettransaction()
1026 1027 if pullop.heads is None and list(pullop.common) == [nullid]:
1027 1028 pullop.repo.ui.status(_("requesting all changes\n"))
1028 1029 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1029 1030 # issue1320, avoid a race if remote changed after discovery
1030 1031 pullop.heads = pullop.rheads
1031 1032
1032 1033 if pullop.remote.capable('getbundle'):
1033 1034 # TODO: get bundlecaps from remote
1034 1035 cg = pullop.remote.getbundle('pull', common=pullop.common,
1035 1036 heads=pullop.heads or pullop.rheads)
1036 1037 elif pullop.heads is None:
1037 1038 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1038 1039 elif not pullop.remote.capable('changegroupsubset'):
1039 1040 raise util.Abort(_("partial pull cannot be done because "
1040 1041 "other repository doesn't support "
1041 1042 "changegroupsubset."))
1042 1043 else:
1043 1044 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1044 1045 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
1045 1046 pullop.remote.url())
1046 1047
1047 1048 def _pullphase(pullop):
1048 1049 # Get remote phases data from remote
1049 1050 if 'phases' in pullop.stepsdone:
1050 1051 return
1051 1052 remotephases = pullop.remote.listkeys('phases')
1052 1053 _pullapplyphases(pullop, remotephases)
1053 1054
1054 1055 def _pullapplyphases(pullop, remotephases):
1055 1056 """apply phase movement from observed remote state"""
1056 1057 if 'phases' in pullop.stepsdone:
1057 1058 return
1058 1059 pullop.stepsdone.add('phases')
1059 1060 publishing = bool(remotephases.get('publishing', False))
1060 1061 if remotephases and not publishing:
1061 1062 # remote is new and unpublishing
1062 1063 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1063 1064 pullop.pulledsubset,
1064 1065 remotephases)
1065 1066 dheads = pullop.pulledsubset
1066 1067 else:
1067 1068 # Remote is old or publishing all common changesets
1068 1069 # should be seen as public
1069 1070 pheads = pullop.pulledsubset
1070 1071 dheads = []
1071 1072 unfi = pullop.repo.unfiltered()
1072 1073 phase = unfi._phasecache.phase
1073 1074 rev = unfi.changelog.nodemap.get
1074 1075 public = phases.public
1075 1076 draft = phases.draft
1076 1077
1077 1078 # exclude changesets already public locally and update the others
1078 1079 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1079 1080 if pheads:
1080 1081 tr = pullop.gettransaction()
1081 1082 phases.advanceboundary(pullop.repo, tr, public, pheads)
1082 1083
1083 1084 # exclude changesets already draft locally and update the others
1084 1085 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1085 1086 if dheads:
1086 1087 tr = pullop.gettransaction()
1087 1088 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1088 1089
1089 1090 def _pullbookmarks(pullop):
1090 1091 """process the remote bookmark information to update the local one"""
1091 1092 if 'bookmarks' in pullop.stepsdone:
1092 1093 return
1093 1094 pullop.stepsdone.add('bookmarks')
1094 1095 repo = pullop.repo
1095 1096 remotebookmarks = pullop.remotebookmarks
1096 1097 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1097 1098 pullop.remote.url(),
1098 1099 pullop.gettransaction,
1099 1100 explicit=pullop.explicitbookmarks)
1100 1101
1101 1102 def _pullobsolete(pullop):
1102 1103 """utility function to pull obsolete markers from a remote
1103 1104
1104 1105 The `gettransaction` is a function that returns the pull transaction, creating
1105 1106 one if necessary. We return the transaction to inform the calling code that
1106 1107 a new transaction has been created (when applicable).
1107 1108
1108 1109 Exists mostly to allow overriding for experimentation purpose"""
1109 1110 if 'obsmarkers' in pullop.stepsdone:
1110 1111 return
1111 1112 pullop.stepsdone.add('obsmarkers')
1112 1113 tr = None
1113 1114 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1114 1115 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1115 1116 remoteobs = pullop.remote.listkeys('obsolete')
1116 1117 if 'dump0' in remoteobs:
1117 1118 tr = pullop.gettransaction()
1118 1119 for key in sorted(remoteobs, reverse=True):
1119 1120 if key.startswith('dump'):
1120 1121 data = base85.b85decode(remoteobs[key])
1121 1122 pullop.repo.obsstore.mergemarkers(tr, data)
1122 1123 pullop.repo.invalidatevolatilesets()
1123 1124 return tr
1124 1125
1125 1126 def caps20to10(repo):
1126 1127 """return a set with appropriate options to use bundle20 during getbundle"""
1127 1128 caps = set(['HG2Y'])
1128 1129 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1129 1130 caps.add('bundle2=' + urllib.quote(capsblob))
1130 1131 return caps
1131 1132
1132 1133 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1133 1134 getbundle2partsorder = []
1134 1135
1135 1136 # Mapping between step name and function
1136 1137 #
1137 1138 # This exists to help extensions wrap steps if necessary
1138 1139 getbundle2partsmapping = {}
1139 1140
1140 1141 def getbundle2partsgenerator(stepname):
1141 1142 """decorator for function generating bundle2 part for getbundle
1142 1143
1143 1144 The function is added to the step -> function mapping and appended to the
1144 1145 list of steps. Beware that decorated functions will be added in order
1145 1146 (this may matter).
1146 1147
1147 1148 You can only use this decorator for new steps, if you want to wrap a step
1148 1149 from an extension, change the getbundle2partsmapping dictionary directly."""
1149 1150 def dec(func):
1150 1151 assert stepname not in getbundle2partsmapping
1151 1152 getbundle2partsmapping[stepname] = func
1152 1153 getbundle2partsorder.append(stepname)
1153 1154 return func
1154 1155 return dec
1155 1156
1156 1157 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1157 1158 **kwargs):
1158 1159 """return a full bundle (with potentially multiple kind of parts)
1159 1160
1160 1161 Could be a bundle HG10 or a bundle HG2Y depending on bundlecaps
1161 1162 passed. For now, the bundle can contain only changegroup, but this will
1162 1163 change when more part types are available for bundle2.
1163 1164
1164 1165 This is different from changegroup.getchangegroup that only returns an HG10
1165 1166 changegroup bundle. They may eventually get reunited in the future when we
1166 1167 have a clearer idea of the API we want to use to query different data.
1167 1168
1168 1169 The implementation is at a very early stage and will get massive rework
1169 1170 when the API of bundle is refined.
1170 1171 """
1171 1172 # bundle10 case
1172 1173 if bundlecaps is None or 'HG2Y' not in bundlecaps:
1173 1174 if bundlecaps and not kwargs.get('cg', True):
1174 1175 raise ValueError(_('request for bundle10 must include changegroup'))
1175 1176
1176 1177 if kwargs:
1177 1178 raise ValueError(_('unsupported getbundle arguments: %s')
1178 1179 % ', '.join(sorted(kwargs.keys())))
1179 1180 return changegroup.getchangegroup(repo, source, heads=heads,
1180 1181 common=common, bundlecaps=bundlecaps)
1181 1182
1182 1183 # bundle20 case
1183 1184 b2caps = {}
1184 1185 for bcaps in bundlecaps:
1185 1186 if bcaps.startswith('bundle2='):
1186 1187 blob = urllib.unquote(bcaps[len('bundle2='):])
1187 1188 b2caps.update(bundle2.decodecaps(blob))
1188 1189 bundler = bundle2.bundle20(repo.ui, b2caps)
1189 1190
1190 1191 kwargs['heads'] = heads
1191 1192 kwargs['common'] = common
1192 1193
1193 1194 for name in getbundle2partsorder:
1194 1195 func = getbundle2partsmapping[name]
1195 1196 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1196 1197 **kwargs)
1197 1198
1198 1199 return util.chunkbuffer(bundler.getchunks())
1199 1200
1200 1201 @getbundle2partsgenerator('changegroup')
1201 1202 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1202 1203 b2caps=None, heads=None, common=None, **kwargs):
1203 1204 """add a changegroup part to the requested bundle"""
1204 1205 cg = None
1205 1206 if kwargs.get('cg', True):
1206 1207 # build changegroup bundle here.
1207 1208 version = None
1208 1209 cgversions = b2caps.get('b2x:changegroup')
1209 1210 if not cgversions: # 3.1 and 3.2 ship with an empty value
1210 1211 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1211 1212 common=common,
1212 1213 bundlecaps=bundlecaps)
1213 1214 else:
1214 1215 cgversions = [v for v in cgversions if v in changegroup.packermap]
1215 1216 if not cgversions:
1216 1217 raise ValueError(_('no common changegroup version'))
1217 1218 version = max(cgversions)
1218 1219 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1219 1220 common=common,
1220 1221 bundlecaps=bundlecaps,
1221 1222 version=version)
1222 1223
1223 1224 if cg:
1224 1225 part = bundler.newpart('b2x:changegroup', data=cg)
1225 1226 if version is not None:
1226 1227 part.addparam('version', version)
1227 1228
1228 1229 @getbundle2partsgenerator('listkeys')
1229 1230 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1230 1231 b2caps=None, **kwargs):
1231 1232 """add parts containing listkeys namespaces to the requested bundle"""
1232 1233 listkeys = kwargs.get('listkeys', ())
1233 1234 for namespace in listkeys:
1234 1235 part = bundler.newpart('b2x:listkeys')
1235 1236 part.addparam('namespace', namespace)
1236 1237 keys = repo.listkeys(namespace).items()
1237 1238 part.data = pushkey.encodekeys(keys)
1238 1239
1239 1240 @getbundle2partsgenerator('obsmarkers')
1240 1241 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1241 1242 b2caps=None, heads=None, **kwargs):
1242 1243 """add an obsolescence markers part to the requested bundle"""
1243 1244 if kwargs.get('obsmarkers', False):
1244 1245 if heads is None:
1245 1246 heads = repo.heads()
1246 1247 subset = [c.node() for c in repo.set('::%ln', heads)]
1247 1248 markers = repo.obsstore.relevantmarkers(subset)
1248 1249 buildobsmarkerspart(bundler, markers)
1249 1250
1250 1251 def check_heads(repo, their_heads, context):
1251 1252 """check if the heads of a repo have been modified
1252 1253
1253 1254 Used by peer for unbundling.
1254 1255 """
1255 1256 heads = repo.heads()
1256 1257 heads_hash = util.sha1(''.join(sorted(heads))).digest()
1257 1258 if not (their_heads == ['force'] or their_heads == heads or
1258 1259 their_heads == ['hashed', heads_hash]):
1259 1260 # someone else committed/pushed/unbundled while we
1260 1261 # were transferring data
1261 1262 raise error.PushRaced('repository changed while %s - '
1262 1263 'please try again' % context)
1263 1264
1264 1265 def unbundle(repo, cg, heads, source, url):
1265 1266 """Apply a bundle to a repo.
1266 1267
1267 1268 this function makes sure the repo is locked during the application and has a
1268 1269 mechanism to check that no push race occurred between the creation of the
1269 1270 bundle and its application.
1270 1271
1271 1272 If the push was raced, a PushRaced exception is raised."""
1272 1273 r = 0
1273 1274 # need a transaction when processing a bundle2 stream
1274 1275 tr = None
1275 1276 lock = repo.lock()
1276 1277 try:
1277 1278 check_heads(repo, heads, 'uploading changes')
1278 1279 # push can proceed
1279 1280 if util.safehasattr(cg, 'params'):
1280 1281 try:
1281 1282 tr = repo.transaction('unbundle')
1282 1283 tr.hookargs['source'] = source
1283 1284 tr.hookargs['url'] = url
1284 1285 tr.hookargs['bundle2-exp'] = '1'
1285 1286 r = bundle2.processbundle(repo, cg, lambda: tr).reply
1286 1287 p = lambda: tr.writepending() and repo.root or ""
1287 1288 repo.hook('b2x-pretransactionclose', throw=True, pending=p,
1288 1289 **tr.hookargs)
1289 1290 hookargs = dict(tr.hookargs)
1290 1291 def runhooks():
1291 1292 repo.hook('b2x-transactionclose', **hookargs)
1292 1293 tr.addpostclose('b2x-hook-transactionclose',
1293 1294 lambda tr: repo._afterlock(runhooks))
1294 1295 tr.close()
1295 1296 except Exception, exc:
1296 1297 exc.duringunbundle2 = True
1297 1298 raise
1298 1299 else:
1299 1300 r = changegroup.addchangegroup(repo, cg, source, url)
1300 1301 finally:
1301 1302 if tr is not None:
1302 1303 tr.release()
1303 1304 lock.release()
1304 1305 return r
@@ -1,537 +1,537 b''
1 1 #require killdaemons
2 2
3 3 Tests discovery against servers without getbundle support:
4 4
5 5 $ CAP=getbundle
6 6 $ . "$TESTDIR/notcapable"
7 7 $ cat >> $HGRCPATH <<EOF
8 8 > [ui]
9 9 > logtemplate="{rev} {node|short}: {desc} {branches}\n"
10 10 > EOF
11 11
12 12 Setup HTTP server control:
13 13
14 14 $ remote=http://localhost:$HGPORT/
15 15 $ export remote
16 16 $ tstart() {
17 17 > echo '[web]' > $1/.hg/hgrc
18 18 > echo 'push_ssl = false' >> $1/.hg/hgrc
19 19 > echo 'allow_push = *' >> $1/.hg/hgrc
20 20 > hg serve -R $1 -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
21 21 > cat hg.pid >> $DAEMON_PIDS
22 22 > }
23 23 $ tstop() {
24 24 > "$TESTDIR/killdaemons.py" $DAEMON_PIDS
25 25 > [ "$1" ] && cut -d' ' -f6- access.log && cat errors.log
26 26 > rm access.log errors.log
27 27 > }
28 28
29 29 Both are empty:
30 30
31 31 $ hg init empty1
32 32 $ hg init empty2
33 33 $ tstart empty2
34 34 $ hg incoming -R empty1 $remote
35 35 comparing with http://localhost:$HGPORT/
36 36 no changes found
37 37 [1]
38 38 $ hg outgoing -R empty1 $remote
39 39 comparing with http://localhost:$HGPORT/
40 40 no changes found
41 41 [1]
42 42 $ hg pull -R empty1 $remote
43 43 pulling from http://localhost:$HGPORT/
44 44 no changes found
45 45 $ hg push -R empty1 $remote
46 46 pushing to http://localhost:$HGPORT/
47 47 no changes found
48 48 [1]
49 49 $ tstop
50 50
51 51 Base repo:
52 52
53 53 $ hg init main
54 54 $ cd main
55 55 $ hg debugbuilddag -mo '+2:tbase @name1 +3:thead1 <tbase @name2 +4:thead2 @both /thead1 +2:tmaintip'
56 56 $ hg log -G
57 57 o 11 a19bfa7e7328: r11 both
58 58 |
59 59 o 10 8b6bad1512e1: r10 both
60 60 |
61 61 o 9 025829e08038: r9 both
62 62 |\
63 63 | o 8 d8f638ac69e9: r8 name2
64 64 | |
65 65 | o 7 b6b4d315a2ac: r7 name2
66 66 | |
67 67 | o 6 6c6f5d5f3c11: r6 name2
68 68 | |
69 69 | o 5 70314b29987d: r5 name2
70 70 | |
71 71 o | 4 e71dbbc70e03: r4 name1
72 72 | |
73 73 o | 3 2c8d5d5ec612: r3 name1
74 74 | |
75 75 o | 2 a7892891da29: r2 name1
76 76 |/
77 77 o 1 0019a3b924fd: r1
78 78 |
79 79 o 0 d57206cc072a: r0
80 80
81 81 $ cd ..
82 82 $ tstart main
83 83
84 84 Full clone:
85 85
86 86 $ hg clone main full
87 87 updating to branch default
88 88 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
89 89 $ cd full
90 90 $ hg incoming $remote
91 91 comparing with http://localhost:$HGPORT/
92 92 searching for changes
93 93 no changes found
94 94 [1]
95 95 $ hg outgoing $remote
96 96 comparing with http://localhost:$HGPORT/
97 97 searching for changes
98 98 no changes found
99 99 [1]
100 100 $ hg pull $remote
101 101 pulling from http://localhost:$HGPORT/
102 102 searching for changes
103 103 no changes found
104 104 $ hg push $remote
105 105 pushing to http://localhost:$HGPORT/
106 106 searching for changes
107 107 no changes found
108 108 [1]
109 109 $ cd ..
110 110
111 111 Local is empty:
112 112
113 113 $ cd empty1
114 114 $ hg incoming $remote
115 115 comparing with http://localhost:$HGPORT/
116 116 0 d57206cc072a: r0
117 117 1 0019a3b924fd: r1
118 118 2 a7892891da29: r2 name1
119 119 3 2c8d5d5ec612: r3 name1
120 120 4 e71dbbc70e03: r4 name1
121 121 5 70314b29987d: r5 name2
122 122 6 6c6f5d5f3c11: r6 name2
123 123 7 b6b4d315a2ac: r7 name2
124 124 8 d8f638ac69e9: r8 name2
125 125 9 025829e08038: r9 both
126 126 10 8b6bad1512e1: r10 both
127 127 11 a19bfa7e7328: r11 both
128 128 $ hg outgoing $remote
129 129 comparing with http://localhost:$HGPORT/
130 130 no changes found
131 131 [1]
132 132 $ hg push $remote
133 133 pushing to http://localhost:$HGPORT/
134 134 no changes found
135 135 [1]
136 136 $ hg pull $remote
137 137 pulling from http://localhost:$HGPORT/
138 138 requesting all changes
139 139 adding changesets
140 140 adding manifests
141 141 adding file changes
142 142 added 12 changesets with 24 changes to 2 files
143 143 (run 'hg update' to get a working copy)
144 144 $ hg incoming $remote
145 145 comparing with http://localhost:$HGPORT/
146 146 searching for changes
147 147 no changes found
148 148 [1]
149 149 $ cd ..
150 150
151 151 Local is subset:
152 152
153 153 $ hg clone main subset --rev name2 ; cd subset
154 154 adding changesets
155 155 adding manifests
156 156 adding file changes
157 157 added 6 changesets with 12 changes to 2 files
158 158 updating to branch name2
159 159 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
160 160 $ hg incoming $remote
161 161 comparing with http://localhost:$HGPORT/
162 162 searching for changes
163 163 6 a7892891da29: r2 name1
164 164 7 2c8d5d5ec612: r3 name1
165 165 8 e71dbbc70e03: r4 name1
166 166 9 025829e08038: r9 both
167 167 10 8b6bad1512e1: r10 both
168 168 11 a19bfa7e7328: r11 both
169 169 $ hg outgoing $remote
170 170 comparing with http://localhost:$HGPORT/
171 171 searching for changes
172 172 no changes found
173 173 [1]
174 174 $ hg push $remote
175 175 pushing to http://localhost:$HGPORT/
176 176 searching for changes
177 177 no changes found
178 178 [1]
179 179 $ hg pull $remote
180 180 pulling from http://localhost:$HGPORT/
181 181 searching for changes
182 182 adding changesets
183 183 adding manifests
184 184 adding file changes
185 185 added 6 changesets with 12 changes to 2 files
186 186 (run 'hg update' to get a working copy)
187 187 $ hg incoming $remote
188 188 comparing with http://localhost:$HGPORT/
189 189 searching for changes
190 190 no changes found
191 191 [1]
192 192 $ cd ..
193 193 $ tstop
194 194
195 195 Remote is empty:
196 196
197 197 $ tstart empty2
198 198 $ cd main
199 199 $ hg incoming $remote
200 200 comparing with http://localhost:$HGPORT/
201 201 searching for changes
202 202 no changes found
203 203 [1]
204 204 $ hg outgoing $remote
205 205 comparing with http://localhost:$HGPORT/
206 206 searching for changes
207 207 0 d57206cc072a: r0
208 208 1 0019a3b924fd: r1
209 209 2 a7892891da29: r2 name1
210 210 3 2c8d5d5ec612: r3 name1
211 211 4 e71dbbc70e03: r4 name1
212 212 5 70314b29987d: r5 name2
213 213 6 6c6f5d5f3c11: r6 name2
214 214 7 b6b4d315a2ac: r7 name2
215 215 8 d8f638ac69e9: r8 name2
216 216 9 025829e08038: r9 both
217 217 10 8b6bad1512e1: r10 both
218 218 11 a19bfa7e7328: r11 both
219 219 $ hg pull $remote
220 220 pulling from http://localhost:$HGPORT/
221 221 searching for changes
222 222 no changes found
223 223 $ hg push $remote
224 224 pushing to http://localhost:$HGPORT/
225 225 searching for changes
226 226 remote: adding changesets
227 227 remote: adding manifests
228 228 remote: adding file changes
229 229 remote: added 12 changesets with 24 changes to 2 files
230 230 $ hg outgoing $remote
231 231 comparing with http://localhost:$HGPORT/
232 232 searching for changes
233 233 no changes found
234 234 [1]
235 235 $ cd ..
236 236 $ tstop
237 237
238 238 Local is superset:
239 239
240 240 $ hg clone main subset2 --rev name2
241 241 adding changesets
242 242 adding manifests
243 243 adding file changes
244 244 added 6 changesets with 12 changes to 2 files
245 245 updating to branch name2
246 246 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
247 247 $ tstart subset2
248 248 $ cd main
249 249 $ hg incoming $remote
250 250 comparing with http://localhost:$HGPORT/
251 251 searching for changes
252 252 no changes found
253 253 [1]
254 254 $ hg outgoing $remote
255 255 comparing with http://localhost:$HGPORT/
256 256 searching for changes
257 257 2 a7892891da29: r2 name1
258 258 3 2c8d5d5ec612: r3 name1
259 259 4 e71dbbc70e03: r4 name1
260 260 9 025829e08038: r9 both
261 261 10 8b6bad1512e1: r10 both
262 262 11 a19bfa7e7328: r11 both
263 263 $ hg pull $remote
264 264 pulling from http://localhost:$HGPORT/
265 265 searching for changes
266 266 no changes found
267 267 $ hg push $remote
268 268 pushing to http://localhost:$HGPORT/
269 269 searching for changes
270 270 abort: push creates new remote branches: both, name1!
271 271 (use 'hg push --new-branch' to create new remote branches)
272 272 [255]
273 273 $ hg push $remote --new-branch
274 274 pushing to http://localhost:$HGPORT/
275 275 searching for changes
276 276 remote: adding changesets
277 277 remote: adding manifests
278 278 remote: adding file changes
279 279 remote: added 6 changesets with 12 changes to 2 files
280 280 $ hg outgoing $remote
281 281 comparing with http://localhost:$HGPORT/
282 282 searching for changes
283 283 no changes found
284 284 [1]
285 285 $ cd ..
286 286 $ tstop
287 287
288 288 Partial pull:
289 289
290 290 $ tstart main
291 291 $ hg clone $remote partial --rev name2
292 292 adding changesets
293 293 adding manifests
294 294 adding file changes
295 295 added 6 changesets with 12 changes to 2 files
296 296 updating to branch name2
297 297 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
298 298 $ cd partial
299 299 $ hg incoming $remote
300 300 comparing with http://localhost:$HGPORT/
301 301 searching for changes
302 302 6 a7892891da29: r2 name1
303 303 7 2c8d5d5ec612: r3 name1
304 304 8 e71dbbc70e03: r4 name1
305 305 9 025829e08038: r9 both
306 306 10 8b6bad1512e1: r10 both
307 307 11 a19bfa7e7328: r11 both
308 308 $ hg incoming $remote --rev name1
309 309 comparing with http://localhost:$HGPORT/
310 310 searching for changes
311 311 6 a7892891da29: r2 name1
312 312 7 2c8d5d5ec612: r3 name1
313 313 8 e71dbbc70e03: r4 name1
314 314 $ hg pull $remote --rev name1
315 315 pulling from http://localhost:$HGPORT/
316 316 searching for changes
317 317 adding changesets
318 318 adding manifests
319 319 adding file changes
320 320 added 3 changesets with 6 changes to 2 files (+1 heads)
321 321 (run 'hg heads' to see heads)
322 322 $ hg incoming $remote
323 323 comparing with http://localhost:$HGPORT/
324 324 searching for changes
325 325 9 025829e08038: r9 both
326 326 10 8b6bad1512e1: r10 both
327 327 11 a19bfa7e7328: r11 both
328 328 $ cd ..
329 329 $ tstop
330 330
331 331 Both have new stuff in new named branches:
332 332
333 333 $ hg clone main repo1a --rev name1 -q
334 334 $ hg clone repo1a repo1b -q
335 335 $ hg clone main repo2a --rev name2 -q
336 336 $ hg clone repo2a repo2b -q
337 337 $ tstart repo1a
338 338
339 339 $ cd repo2a
340 340 $ hg incoming $remote
341 341 comparing with http://localhost:$HGPORT/
342 342 searching for changes
343 343 6 a7892891da29: r2 name1
344 344 7 2c8d5d5ec612: r3 name1
345 345 8 e71dbbc70e03: r4 name1
346 346 $ hg outgoing $remote
347 347 comparing with http://localhost:$HGPORT/
348 348 searching for changes
349 349 2 70314b29987d: r5 name2
350 350 3 6c6f5d5f3c11: r6 name2
351 351 4 b6b4d315a2ac: r7 name2
352 352 5 d8f638ac69e9: r8 name2
353 353 $ hg push $remote --new-branch
354 354 pushing to http://localhost:$HGPORT/
355 355 searching for changes
356 356 remote: adding changesets
357 357 remote: adding manifests
358 358 remote: adding file changes
359 359 remote: added 4 changesets with 8 changes to 2 files (+1 heads)
360 360 $ hg pull $remote
361 361 pulling from http://localhost:$HGPORT/
362 362 searching for changes
363 363 adding changesets
364 364 adding manifests
365 365 adding file changes
366 366 added 3 changesets with 6 changes to 2 files (+1 heads)
367 367 (run 'hg heads' to see heads)
368 368 $ hg incoming $remote
369 369 comparing with http://localhost:$HGPORT/
370 370 searching for changes
371 371 no changes found
372 372 [1]
373 373 $ hg outgoing $remote
374 374 comparing with http://localhost:$HGPORT/
375 375 searching for changes
376 376 no changes found
377 377 [1]
378 378 $ cd ..
379 379 $ tstop
380 380
381 381 $ tstart repo1b
382 382 $ cd repo2b
383 383 $ hg incoming $remote
384 384 comparing with http://localhost:$HGPORT/
385 385 searching for changes
386 386 6 a7892891da29: r2 name1
387 387 7 2c8d5d5ec612: r3 name1
388 388 8 e71dbbc70e03: r4 name1
389 389 $ hg outgoing $remote
390 390 comparing with http://localhost:$HGPORT/
391 391 searching for changes
392 392 2 70314b29987d: r5 name2
393 393 3 6c6f5d5f3c11: r6 name2
394 394 4 b6b4d315a2ac: r7 name2
395 395 5 d8f638ac69e9: r8 name2
396 396 $ hg pull $remote
397 397 pulling from http://localhost:$HGPORT/
398 398 searching for changes
399 399 adding changesets
400 400 adding manifests
401 401 adding file changes
402 402 added 3 changesets with 6 changes to 2 files (+1 heads)
403 403 (run 'hg heads' to see heads)
404 404 $ hg push $remote --new-branch
405 405 pushing to http://localhost:$HGPORT/
406 406 searching for changes
407 407 remote: adding changesets
408 408 remote: adding manifests
409 409 remote: adding file changes
410 410 remote: added 4 changesets with 8 changes to 2 files (+1 heads)
411 411 $ hg incoming $remote
412 412 comparing with http://localhost:$HGPORT/
413 413 searching for changes
414 414 no changes found
415 415 [1]
416 416 $ hg outgoing $remote
417 417 comparing with http://localhost:$HGPORT/
418 418 searching for changes
419 419 no changes found
420 420 [1]
421 421 $ cd ..
422 422 $ tstop
423 423
424 424 Both have new stuff in existing named branches:
425 425
426 426 $ rm -r repo1a repo1b repo2a repo2b
427 427 $ hg clone main repo1a --rev 3 --rev 8 -q
428 428 $ hg clone repo1a repo1b -q
429 429 $ hg clone main repo2a --rev 4 --rev 7 -q
430 430 $ hg clone repo2a repo2b -q
431 431 $ tstart repo1a
432 432
433 433 $ cd repo2a
434 434 $ hg incoming $remote
435 435 comparing with http://localhost:$HGPORT/
436 436 searching for changes
437 437 8 d8f638ac69e9: r8 name2
438 438 $ hg outgoing $remote
439 439 comparing with http://localhost:$HGPORT/
440 440 searching for changes
441 441 4 e71dbbc70e03: r4 name1
442 442 $ hg push $remote --new-branch
443 443 pushing to http://localhost:$HGPORT/
444 444 searching for changes
445 445 remote: adding changesets
446 446 remote: adding manifests
447 447 remote: adding file changes
448 448 remote: added 1 changesets with 2 changes to 2 files
449 449 $ hg pull $remote
450 450 pulling from http://localhost:$HGPORT/
451 451 searching for changes
452 452 adding changesets
453 453 adding manifests
454 454 adding file changes
455 455 added 1 changesets with 2 changes to 2 files
456 456 (run 'hg update' to get a working copy)
457 457 $ hg incoming $remote
458 458 comparing with http://localhost:$HGPORT/
459 459 searching for changes
460 460 no changes found
461 461 [1]
462 462 $ hg outgoing $remote
463 463 comparing with http://localhost:$HGPORT/
464 464 searching for changes
465 465 no changes found
466 466 [1]
467 467 $ cd ..
468 468 $ tstop
469 469
470 470 $ tstart repo1b
471 471 $ cd repo2b
472 472 $ hg incoming $remote
473 473 comparing with http://localhost:$HGPORT/
474 474 searching for changes
475 475 8 d8f638ac69e9: r8 name2
476 476 $ hg outgoing $remote
477 477 comparing with http://localhost:$HGPORT/
478 478 searching for changes
479 479 4 e71dbbc70e03: r4 name1
480 480 $ hg pull $remote
481 481 pulling from http://localhost:$HGPORT/
482 482 searching for changes
483 483 adding changesets
484 484 adding manifests
485 485 adding file changes
486 486 added 1 changesets with 2 changes to 2 files
487 487 (run 'hg update' to get a working copy)
488 488 $ hg push $remote --new-branch
489 489 pushing to http://localhost:$HGPORT/
490 490 searching for changes
491 491 remote: adding changesets
492 492 remote: adding manifests
493 493 remote: adding file changes
494 494 remote: added 1 changesets with 2 changes to 2 files
495 495 $ hg incoming $remote
496 496 comparing with http://localhost:$HGPORT/
497 497 searching for changes
498 498 no changes found
499 499 [1]
500 500 $ hg outgoing $remote
501 501 comparing with http://localhost:$HGPORT/
502 502 searching for changes
503 503 no changes found
504 504 [1]
505 505 $ cd ..
506 506 $ tstop show
507 507 "GET /?cmd=capabilities HTTP/1.1" 200 -
508 508 "GET /?cmd=heads HTTP/1.1" 200 -
509 509 "GET /?cmd=branches HTTP/1.1" 200 - x-hgarg-1:nodes=d8f638ac69e9ae8dea4f09f11d696546a912d961
510 510 "GET /?cmd=between HTTP/1.1" 200 - x-hgarg-1:pairs=d8f638ac69e9ae8dea4f09f11d696546a912d961-d57206cc072a18317c1e381fb60aa31bd3401785
511 511 "GET /?cmd=changegroupsubset HTTP/1.1" 200 - x-hgarg-1:bases=d8f638ac69e9ae8dea4f09f11d696546a912d961&heads=d8f638ac69e9ae8dea4f09f11d696546a912d961
512 512 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases
513 513 "GET /?cmd=capabilities HTTP/1.1" 200 -
514 514 "GET /?cmd=heads HTTP/1.1" 200 -
515 515 "GET /?cmd=branches HTTP/1.1" 200 - x-hgarg-1:nodes=d8f638ac69e9ae8dea4f09f11d696546a912d961
516 516 "GET /?cmd=between HTTP/1.1" 200 - x-hgarg-1:pairs=d8f638ac69e9ae8dea4f09f11d696546a912d961-d57206cc072a18317c1e381fb60aa31bd3401785
517 517 "GET /?cmd=capabilities HTTP/1.1" 200 -
518 518 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
519 519 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
520 520 "GET /?cmd=heads HTTP/1.1" 200 -
521 521 "GET /?cmd=branches HTTP/1.1" 200 - x-hgarg-1:nodes=d8f638ac69e9ae8dea4f09f11d696546a912d961
522 522 "GET /?cmd=between HTTP/1.1" 200 - x-hgarg-1:pairs=d8f638ac69e9ae8dea4f09f11d696546a912d961-d57206cc072a18317c1e381fb60aa31bd3401785
523 "GET /?cmd=changegroupsubset HTTP/1.1" 200 - x-hgarg-1:bases=d8f638ac69e9ae8dea4f09f11d696546a912d961&heads=d8f638ac69e9ae8dea4f09f11d696546a912d961+2c8d5d5ec612be65cdfdeac78b7662ab1696324a
523 "GET /?cmd=changegroupsubset HTTP/1.1" 200 - x-hgarg-1:bases=d8f638ac69e9ae8dea4f09f11d696546a912d961&heads=d8f638ac69e9ae8dea4f09f11d696546a912d961
524 524 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases
525 525 "GET /?cmd=capabilities HTTP/1.1" 200 -
526 526 "GET /?cmd=heads HTTP/1.1" 200 -
527 527 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases
528 528 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
529 529 "GET /?cmd=branchmap HTTP/1.1" 200 -
530 530 "GET /?cmd=branchmap HTTP/1.1" 200 -
531 531 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
532 532 "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=686173686564+1827a5bb63e602382eb89dd58f2ac9f3b007ad91
533 533 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases
534 534 "GET /?cmd=capabilities HTTP/1.1" 200 -
535 535 "GET /?cmd=heads HTTP/1.1" 200 -
536 536 "GET /?cmd=capabilities HTTP/1.1" 200 -
537 537 "GET /?cmd=heads HTTP/1.1" 200 -
General Comments 0
You need to be logged in to leave comments. Login now