changegroup: replace getlocalchangegroupraw with makestream...
Durham Goode
r34100:d8245139 default
@@ -1,1016 +1,1005 b''
1 1 # changegroup.py - Mercurial changegroup manipulation functions
2 2 #
3 3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import os
11 11 import struct
12 12 import tempfile
13 13 import weakref
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 hex,
18 18 nullrev,
19 19 short,
20 20 )
21 21
22 22 from . import (
23 23 dagutil,
24 24 discovery,
25 25 error,
26 26 mdiff,
27 27 phases,
28 28 pycompat,
29 29 util,
30 30 )
31 31
32 32 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
33 33 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
34 34 _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
35 35
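For orientation, a minimal sketch of what these header formats encode, derived only from the struct strings above: cg1 packs four 20-byte nodes (node, p1, p2, linknode), cg2 inserts the delta base node before the linknode, and cg3 appends a big-endian 16-bit flags field.

    # Illustrative only: unpack a synthetic cg1 delta header.
    import struct

    header = b"\x01" * 20 + b"\x02" * 20 + b"\x03" * 20 + b"\x04" * 20
    node, p1, p2, linknode = struct.unpack("20s20s20s20s", header)
    assert struct.calcsize("20s20s20s20s") == 80         # cg1
    assert struct.calcsize("20s20s20s20s20s") == 100     # cg2 adds deltabase
    assert struct.calcsize(">20s20s20s20s20sH") == 102   # cg3 adds flags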
36 36 def readexactly(stream, n):
37 37 '''read n bytes from stream.read and abort if less was available'''
38 38 s = stream.read(n)
39 39 if len(s) < n:
40 40 raise error.Abort(_("stream ended unexpectedly"
41 41 " (got %d bytes, expected %d)")
42 42 % (len(s), n))
43 43 return s
44 44
45 45 def getchunk(stream):
46 46 """return the next chunk from stream as a string"""
47 47 d = readexactly(stream, 4)
48 48 l = struct.unpack(">l", d)[0]
49 49 if l <= 4:
50 50 if l:
51 51 raise error.Abort(_("invalid chunk length %d") % l)
52 52 return ""
53 53 return readexactly(stream, l - 4)
54 54
55 55 def chunkheader(length):
56 56 """return a changegroup chunk header (string)"""
57 57 return struct.pack(">l", length + 4)
58 58
59 59 def closechunk():
60 60 """return a changegroup chunk header (string) for a zero-length chunk"""
61 61 return struct.pack(">l", 0)
62 62
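A self-contained sketch of the framing these helpers implement: every chunk is a big-endian 4-byte length that counts itself, followed by the payload, and a zero length marks the end of a group. This mirrors chunkheader()/getchunk()/closechunk() and assumes nothing beyond them.

    import io
    import struct

    def frame(payload):
        # mirrors chunkheader(): the length prefix counts its own 4 bytes
        return struct.pack(">l", len(payload) + 4) + payload

    stream = io.BytesIO(frame(b"hello") + struct.pack(">l", 0))
    length = struct.unpack(">l", stream.read(4))[0]
    assert stream.read(length - 4) == b"hello"
    assert struct.unpack(">l", stream.read(4))[0] == 0   # closing empty chunk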
63 63 def writechunks(ui, chunks, filename, vfs=None):
64 64 """Write chunks to a file and return its filename.
65 65
66 66 The stream is assumed to be a bundle file.
67 67 Existing files will not be overwritten.
68 68 If no filename is specified, a temporary file is created.
69 69 """
70 70 fh = None
71 71 cleanup = None
72 72 try:
73 73 if filename:
74 74 if vfs:
75 75 fh = vfs.open(filename, "wb")
76 76 else:
77 77 # Increase default buffer size because default is usually
78 78 # small (4k is common on Linux).
79 79 fh = open(filename, "wb", 131072)
80 80 else:
81 81 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
82 82 fh = os.fdopen(fd, pycompat.sysstr("wb"))
83 83 cleanup = filename
84 84 for c in chunks:
85 85 fh.write(c)
86 86 cleanup = None
87 87 return filename
88 88 finally:
89 89 if fh is not None:
90 90 fh.close()
91 91 if cleanup is not None:
92 92 if filename and vfs:
93 93 vfs.unlink(cleanup)
94 94 else:
95 95 os.unlink(cleanup)
96 96
97 97 class cg1unpacker(object):
98 98 """Unpacker for cg1 changegroup streams.
99 99
100 100 A changegroup unpacker handles the framing of the revision data in
101 101 the wire format. Most consumers will want to use the apply()
102 102 method to add the changes from the changegroup to a repository.
103 103
104 104 If you're forwarding a changegroup unmodified to another consumer,
105 105 use getchunks(), which returns an iterator of changegroup
106 106 chunks. This is mostly useful for cases where you need to know the
107 107 data stream has ended by observing the end of the changegroup.
108 108
109 109 deltachunk() is useful only if you're applying delta data. Most
110 110 consumers should prefer apply() instead.
111 111
112 112 A few other public methods exist. Those are used only for
113 113 bundlerepo and some debug commands - their use is discouraged.
114 114 """
115 115 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
116 116 deltaheadersize = struct.calcsize(deltaheader)
117 117 version = '01'
118 118 _grouplistcount = 1 # One list of files after the manifests
119 119
120 120 def __init__(self, fh, alg, extras=None):
121 121 if alg is None:
122 122 alg = 'UN'
123 123 if alg not in util.compengines.supportedbundletypes:
124 124 raise error.Abort(_('unknown stream compression type: %s')
125 125 % alg)
126 126 if alg == 'BZ':
127 127 alg = '_truncatedBZ'
128 128
129 129 compengine = util.compengines.forbundletype(alg)
130 130 self._stream = compengine.decompressorreader(fh)
131 131 self._type = alg
132 132 self.extras = extras or {}
133 133 self.callback = None
134 134
135 135 # These methods (compressed, read, seek, tell) all appear to only
136 136 # be used by bundlerepo, but it's a little hard to tell.
137 137 def compressed(self):
138 138 return self._type is not None and self._type != 'UN'
139 139 def read(self, l):
140 140 return self._stream.read(l)
141 141 def seek(self, pos):
142 142 return self._stream.seek(pos)
143 143 def tell(self):
144 144 return self._stream.tell()
145 145 def close(self):
146 146 return self._stream.close()
147 147
148 148 def _chunklength(self):
149 149 d = readexactly(self._stream, 4)
150 150 l = struct.unpack(">l", d)[0]
151 151 if l <= 4:
152 152 if l:
153 153 raise error.Abort(_("invalid chunk length %d") % l)
154 154 return 0
155 155 if self.callback:
156 156 self.callback()
157 157 return l - 4
158 158
159 159 def changelogheader(self):
160 160 """v10 does not have a changelog header chunk"""
161 161 return {}
162 162
163 163 def manifestheader(self):
164 164 """v10 does not have a manifest header chunk"""
165 165 return {}
166 166
167 167 def filelogheader(self):
168 168 """return the header of the filelogs chunk, v10 only has the filename"""
169 169 l = self._chunklength()
170 170 if not l:
171 171 return {}
172 172 fname = readexactly(self._stream, l)
173 173 return {'filename': fname}
174 174
175 175 def _deltaheader(self, headertuple, prevnode):
176 176 node, p1, p2, cs = headertuple
177 177 if prevnode is None:
178 178 deltabase = p1
179 179 else:
180 180 deltabase = prevnode
181 181 flags = 0
182 182 return node, p1, p2, deltabase, cs, flags
183 183
184 184 def deltachunk(self, prevnode):
185 185 l = self._chunklength()
186 186 if not l:
187 187 return {}
188 188 headerdata = readexactly(self._stream, self.deltaheadersize)
189 189 header = struct.unpack(self.deltaheader, headerdata)
190 190 delta = readexactly(self._stream, l - self.deltaheadersize)
191 191 node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
192 192 return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
193 193 'deltabase': deltabase, 'delta': delta, 'flags': flags}
194 194
195 195 def getchunks(self):
196 196 """returns all the chunks contains in the bundle
197 197
198 198 Used when you need to forward the binary stream to a file or another
199 199 network API. To do so, it parse the changegroup data, otherwise it will
200 200 block in case of sshrepo because it don't know the end of the stream.
201 201 """
202 202 # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
203 203 # and a list of filelogs. For changegroup 3, we expect 4 parts:
204 204 # changelog, manifestlog, a list of tree manifestlogs, and a list of
205 205 # filelogs.
206 206 #
207 207 # Changelog and manifestlog parts are terminated with empty chunks. The
208 208 # tree and file parts are a list of entry sections. Each entry section
209 209 # is a series of chunks terminating in an empty chunk. The list of these
210 210 # entry sections is terminated in yet another empty chunk, so we know
211 211 # we've reached the end of the tree/file list when we reach an empty
212 212 # chunk that was preceded by no non-empty chunks.
213 213
214 214 parts = 0
215 215 while parts < 2 + self._grouplistcount:
216 216 noentries = True
217 217 while True:
218 218 chunk = getchunk(self)
219 219 if not chunk:
220 220 # The first two empty chunks represent the end of the
221 221 # changelog and the manifestlog portions. The remaining
222 222 # empty chunks represent either A) the end of individual
223 223 # tree or file entries in the file list, or B) the end of
224 224 # the entire list. It's the end of the entire list if there
225 225 # were no entries (i.e. noentries is True).
226 226 if parts < 2:
227 227 parts += 1
228 228 elif noentries:
229 229 parts += 1
230 230 break
231 231 noentries = False
232 232 yield chunkheader(len(chunk))
233 233 pos = 0
234 234 while pos < len(chunk):
235 235 next = pos + 2**20
236 236 yield chunk[pos:next]
237 237 pos = next
238 238 yield closechunk()
239 239
240 240 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
241 241 # We know that we'll never have more manifests than we had
242 242 # changesets.
243 243 self.callback = prog(_('manifests'), numchanges)
244 244 # no need to check for empty manifest group here:
245 245 # if the result of the merge of 1 and 2 is the same in 3 and 4,
246 246 # no new manifest will be created and the manifest group will
247 247 # be empty during the pull
248 248 self.manifestheader()
249 249 repo.manifestlog._revlog.addgroup(self, revmap, trp)
250 250 repo.ui.progress(_('manifests'), None)
251 251 self.callback = None
252 252
253 253 def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
254 254 expectedtotal=None):
255 255 """Add the changegroup returned by source.read() to this repo.
256 256 srctype is a string like 'push', 'pull', or 'unbundle'. url is
257 257 the URL of the repo where this changegroup is coming from.
258 258
259 259 Return an integer summarizing the change to this repo:
260 260 - nothing changed or no source: 0
261 261 - more heads than before: 1+added heads (2..n)
262 262 - fewer heads than before: -1-removed heads (-2..-n)
263 263 - number of heads stays the same: 1
264 264 """
265 265 repo = repo.unfiltered()
266 266 def csmap(x):
267 267 repo.ui.debug("add changeset %s\n" % short(x))
268 268 return len(cl)
269 269
270 270 def revmap(x):
271 271 return cl.rev(x)
272 272
273 273 changesets = files = revisions = 0
274 274
275 275 try:
276 276 # The transaction may already carry source information. In this
277 277 # case we use the top level data. We overwrite the argument
278 278 # because we need to use the top level value (if they exist)
279 279 # in this function.
280 280 srctype = tr.hookargs.setdefault('source', srctype)
281 281 url = tr.hookargs.setdefault('url', url)
282 282 repo.hook('prechangegroup',
283 283 throw=True, **pycompat.strkwargs(tr.hookargs))
284 284
285 285 # write changelog data to temp files so concurrent readers
286 286 # will not see an inconsistent view
287 287 cl = repo.changelog
288 288 cl.delayupdate(tr)
289 289 oldheads = set(cl.heads())
290 290
291 291 trp = weakref.proxy(tr)
292 292 # pull off the changeset group
293 293 repo.ui.status(_("adding changesets\n"))
294 294 clstart = len(cl)
295 295 class prog(object):
296 296 def __init__(self, step, total):
297 297 self._step = step
298 298 self._total = total
299 299 self._count = 1
300 300 def __call__(self):
301 301 repo.ui.progress(self._step, self._count, unit=_('chunks'),
302 302 total=self._total)
303 303 self._count += 1
304 304 self.callback = prog(_('changesets'), expectedtotal)
305 305
306 306 efiles = set()
307 307 def onchangelog(cl, node):
308 308 efiles.update(cl.readfiles(node))
309 309
310 310 self.changelogheader()
311 311 cgnodes = cl.addgroup(self, csmap, trp, addrevisioncb=onchangelog)
312 312 efiles = len(efiles)
313 313
314 314 if not cgnodes:
315 315 repo.ui.develwarn('applied empty changegroup',
316 316 config='empty-changegroup')
317 317 clend = len(cl)
318 318 changesets = clend - clstart
319 319 repo.ui.progress(_('changesets'), None)
320 320 self.callback = None
321 321
322 322 # pull off the manifest group
323 323 repo.ui.status(_("adding manifests\n"))
324 324 self._unpackmanifests(repo, revmap, trp, prog, changesets)
325 325
326 326 needfiles = {}
327 327 if repo.ui.configbool('server', 'validate'):
328 328 cl = repo.changelog
329 329 ml = repo.manifestlog
330 330 # validate incoming csets have their manifests
331 331 for cset in xrange(clstart, clend):
332 332 mfnode = cl.changelogrevision(cset).manifest
333 333 mfest = ml[mfnode].readdelta()
334 334 # store file cgnodes we must see
335 335 for f, n in mfest.iteritems():
336 336 needfiles.setdefault(f, set()).add(n)
337 337
338 338 # process the files
339 339 repo.ui.status(_("adding file changes\n"))
340 340 newrevs, newfiles = _addchangegroupfiles(
341 341 repo, self, revmap, trp, efiles, needfiles)
342 342 revisions += newrevs
343 343 files += newfiles
344 344
345 345 deltaheads = 0
346 346 if oldheads:
347 347 heads = cl.heads()
348 348 deltaheads = len(heads) - len(oldheads)
349 349 for h in heads:
350 350 if h not in oldheads and repo[h].closesbranch():
351 351 deltaheads -= 1
352 352 htext = ""
353 353 if deltaheads:
354 354 htext = _(" (%+d heads)") % deltaheads
355 355
356 356 repo.ui.status(_("added %d changesets"
357 357 " with %d changes to %d files%s\n")
358 358 % (changesets, revisions, files, htext))
359 359 repo.invalidatevolatilesets()
360 360
361 361 if changesets > 0:
362 362 if 'node' not in tr.hookargs:
363 363 tr.hookargs['node'] = hex(cl.node(clstart))
364 364 tr.hookargs['node_last'] = hex(cl.node(clend - 1))
365 365 hookargs = dict(tr.hookargs)
366 366 else:
367 367 hookargs = dict(tr.hookargs)
368 368 hookargs['node'] = hex(cl.node(clstart))
369 369 hookargs['node_last'] = hex(cl.node(clend - 1))
370 370 repo.hook('pretxnchangegroup',
371 371 throw=True, **pycompat.strkwargs(hookargs))
372 372
373 373 added = [cl.node(r) for r in xrange(clstart, clend)]
374 374 phaseall = None
375 375 if srctype in ('push', 'serve'):
376 376 # Old servers can not push the boundary themselves.
377 377 # New servers won't push the boundary if changeset already
378 378 # exists locally as secret
379 379 #
380 380 # We should not use added here but the list of all changes in
381 381 # the bundle
382 382 if repo.publishing():
383 383 targetphase = phaseall = phases.public
384 384 else:
385 385 # closer target phase computation
386 386
387 387 # Those changesets have been pushed from the
388 388 # outside, their phases are going to be pushed
389 389 # alongside. Therefore `targetphase` is
390 390 # ignored.
391 391 targetphase = phaseall = phases.draft
392 392 if added:
393 393 phases.registernew(repo, tr, targetphase, added)
394 394 if phaseall is not None:
395 395 phases.advanceboundary(repo, tr, phaseall, cgnodes)
396 396
397 397 if changesets > 0:
398 398
399 399 def runhooks():
400 400 # These hooks run when the lock releases, not when the
401 401 # transaction closes. So it's possible for the changelog
402 402 # to have changed since we last saw it.
403 403 if clstart >= len(repo):
404 404 return
405 405
406 406 repo.hook("changegroup", **pycompat.strkwargs(hookargs))
407 407
408 408 for n in added:
409 409 args = hookargs.copy()
410 410 args['node'] = hex(n)
411 411 del args['node_last']
412 412 repo.hook("incoming", **pycompat.strkwargs(args))
413 413
414 414 newheads = [h for h in repo.heads()
415 415 if h not in oldheads]
416 416 repo.ui.log("incoming",
417 417 "%s incoming changes - new heads: %s\n",
418 418 len(added),
419 419 ', '.join([hex(c[:6]) for c in newheads]))
420 420
421 421 tr.addpostclose('changegroup-runhooks-%020i' % clstart,
422 422 lambda tr: repo._afterlock(runhooks))
423 423 finally:
424 424 repo.ui.flush()
425 425 # never return 0 here:
426 426 if deltaheads < 0:
427 427 ret = deltaheads - 1
428 428 else:
429 429 ret = deltaheads + 1
430 430 return ret
431 431
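A hedged sketch of driving the unpacker by hand, per the class docstring's advice to prefer apply(); it assumes an existing localrepository and an uncompressed cg1 stream, and is not a drop-in recipe (real callers normally go through exchange/bundle2):

    from mercurial import changegroup

    def applyrawcg1(repo, fh):
        cg = changegroup.cg1unpacker(fh, 'UN')   # 'UN': stream not compressed
        with repo.transaction('unbundle') as tr:
            # per apply(): 1 + added heads, -1 - removed heads, 1 if unchanged
            return cg.apply(repo, tr, 'unbundle', 'bundle:stream')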
432 432 class cg2unpacker(cg1unpacker):
433 433 """Unpacker for cg2 streams.
434 434
435 435 cg2 streams add support for generaldelta, so the delta header
436 436 format is slightly different. All other features about the data
437 437 remain the same.
438 438 """
439 439 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
440 440 deltaheadersize = struct.calcsize(deltaheader)
441 441 version = '02'
442 442
443 443 def _deltaheader(self, headertuple, prevnode):
444 444 node, p1, p2, deltabase, cs = headertuple
445 445 flags = 0
446 446 return node, p1, p2, deltabase, cs, flags
447 447
448 448 class cg3unpacker(cg2unpacker):
449 449 """Unpacker for cg3 streams.
450 450
451 451 cg3 streams add support for exchanging treemanifests and revlog
452 452 flags. It adds the revlog flags to the delta header and an empty chunk
453 453 separating manifests and files.
454 454 """
455 455 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
456 456 deltaheadersize = struct.calcsize(deltaheader)
457 457 version = '03'
458 458 _grouplistcount = 2 # One list of manifests and one list of files
459 459
460 460 def _deltaheader(self, headertuple, prevnode):
461 461 node, p1, p2, deltabase, cs, flags = headertuple
462 462 return node, p1, p2, deltabase, cs, flags
463 463
464 464 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
465 465 super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
466 466 numchanges)
467 467 for chunkdata in iter(self.filelogheader, {}):
468 468 # If we get here, there are directory manifests in the changegroup
469 469 d = chunkdata["filename"]
470 470 repo.ui.debug("adding %s revisions\n" % d)
471 471 dirlog = repo.manifestlog._revlog.dirlog(d)
472 472 if not dirlog.addgroup(self, revmap, trp):
473 473 raise error.Abort(_("received dir revlog group is empty"))
474 474
475 475 class headerlessfixup(object):
476 476 def __init__(self, fh, h):
477 477 self._h = h
478 478 self._fh = fh
479 479 def read(self, n):
480 480 if self._h:
481 481 d, self._h = self._h[:n], self._h[n:]
482 482 if len(d) < n:
483 483 d += readexactly(self._fh, n - len(d))
484 484 return d
485 485 return readexactly(self._fh, n)
486 486
487 487 class cg1packer(object):
488 488 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
489 489 version = '01'
490 490 def __init__(self, repo, bundlecaps=None):
491 491 """Given a source repo, construct a bundler.
492 492
493 493 bundlecaps is optional and can be used to specify the set of
494 494 capabilities which can be used to build the bundle. While bundlecaps is
495 495 unused in core Mercurial, extensions rely on this feature to communicate
496 496 capabilities to customize the changegroup packer.
497 497 """
498 498 # Set of capabilities we can use to build the bundle.
499 499 if bundlecaps is None:
500 500 bundlecaps = set()
501 501 self._bundlecaps = bundlecaps
502 502 # experimental config: bundle.reorder
503 503 reorder = repo.ui.config('bundle', 'reorder')
504 504 if reorder == 'auto':
505 505 reorder = None
506 506 else:
507 507 reorder = util.parsebool(reorder)
508 508 self._repo = repo
509 509 self._reorder = reorder
510 510 self._progress = repo.ui.progress
511 511 if self._repo.ui.verbose and not self._repo.ui.debugflag:
512 512 self._verbosenote = self._repo.ui.note
513 513 else:
514 514 self._verbosenote = lambda s: None
515 515
516 516 def close(self):
517 517 return closechunk()
518 518
519 519 def fileheader(self, fname):
520 520 return chunkheader(len(fname)) + fname
521 521
522 522 # Extracted both for clarity and for overriding in extensions.
523 523 def _sortgroup(self, revlog, nodelist, lookup):
524 524 """Sort nodes for change group and turn them into revnums."""
525 525 # for generaldelta revlogs, we linearize the revs; this will both be
526 526 # much quicker and generate a much smaller bundle
527 527 if (revlog._generaldelta and self._reorder is None) or self._reorder:
528 528 dag = dagutil.revlogdag(revlog)
529 529 return dag.linearize(set(revlog.rev(n) for n in nodelist))
530 530 else:
531 531 return sorted([revlog.rev(n) for n in nodelist])
532 532
533 533 def group(self, nodelist, revlog, lookup, units=None):
534 534 """Calculate a delta group, yielding a sequence of changegroup chunks
535 535 (strings).
536 536
537 537 Given a list of changeset revs, return a set of deltas and
538 538 metadata corresponding to nodes. The first delta is
539 539 first parent(nodelist[0]) -> nodelist[0], the receiver is
540 540 guaranteed to have this parent as it has all history before
541 541 these changesets. In the case firstparent is nullrev the
542 542 changegroup starts with a full revision.
543 543
544 544 If units is not None, progress detail will be generated, units specifies
545 545 the type of revlog that is touched (changelog, manifest, etc.).
546 546 """
547 547 # if we don't have any revisions touched by these changesets, bail
548 548 if len(nodelist) == 0:
549 549 yield self.close()
550 550 return
551 551
552 552 revs = self._sortgroup(revlog, nodelist, lookup)
553 553
554 554 # add the parent of the first rev
555 555 p = revlog.parentrevs(revs[0])[0]
556 556 revs.insert(0, p)
557 557
558 558 # build deltas
559 559 total = len(revs) - 1
560 560 msgbundling = _('bundling')
561 561 for r in xrange(len(revs) - 1):
562 562 if units is not None:
563 563 self._progress(msgbundling, r + 1, unit=units, total=total)
564 564 prev, curr = revs[r], revs[r + 1]
565 565 linknode = lookup(revlog.node(curr))
566 566 for c in self.revchunk(revlog, curr, prev, linknode):
567 567 yield c
568 568
569 569 if units is not None:
570 570 self._progress(msgbundling, None)
571 571 yield self.close()
572 572
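Schematically, for sorted revs r1..rk the cg1 generator above emits the following stream (a hedged outline, not code from this file):

    # chunk(delta(parent(r1) -> r1)), chunk(delta(r1 -> r2)), ...,
    # chunk(delta(r(k-1) -> rk)), closechunk()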
573 573 # filter any nodes that claim to be part of the known set
574 574 def prune(self, revlog, missing, commonrevs):
575 575 rr, rl = revlog.rev, revlog.linkrev
576 576 return [n for n in missing if rl(rr(n)) not in commonrevs]
577 577
578 578 def _packmanifests(self, dir, mfnodes, lookuplinknode):
579 579 """Pack flat manifests into a changegroup stream."""
580 580 assert not dir
581 581 for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
582 582 lookuplinknode, units=_('manifests')):
583 583 yield chunk
584 584
585 585 def _manifestsdone(self):
586 586 return ''
587 587
588 588 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
589 589 '''yield a sequence of changegroup chunks (strings)'''
590 590 repo = self._repo
591 591 cl = repo.changelog
592 592
593 593 clrevorder = {}
594 594 mfs = {} # needed manifests
595 595 fnodes = {} # needed file nodes
596 596 changedfiles = set()
597 597
598 598 # Callback for the changelog, used to collect changed files and manifest
599 599 # nodes.
600 600 # Returns the linkrev node (identity in the changelog case).
601 601 def lookupcl(x):
602 602 c = cl.read(x)
603 603 clrevorder[x] = len(clrevorder)
604 604 n = c[0]
605 605 # record the first changeset introducing this manifest version
606 606 mfs.setdefault(n, x)
607 607 # Record a complete list of potentially-changed files in
608 608 # this manifest.
609 609 changedfiles.update(c[3])
610 610 return x
611 611
612 612 self._verbosenote(_('uncompressed size of bundle content:\n'))
613 613 size = 0
614 614 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
615 615 size += len(chunk)
616 616 yield chunk
617 617 self._verbosenote(_('%8.i (changelog)\n') % size)
618 618
619 619 # We need to make sure that the linkrev in the changegroup refers to
620 620 # the first changeset that introduced the manifest or file revision.
621 621 # The fastpath is usually safer than the slowpath, because the filelogs
622 622 # are walked in revlog order.
623 623 #
624 624 # When taking the slowpath with reorder=None and the manifest revlog
625 625 # uses generaldelta, the manifest may be walked in the "wrong" order.
626 626 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
627 627 # cc0ff93d0c0c).
628 628 #
629 629 # When taking the fastpath, we are only vulnerable to reordering
630 630 # of the changelog itself. The changelog never uses generaldelta, so
631 631 # it is only reordered when reorder=True. To handle this case, we
632 632 # simply take the slowpath, which already has the 'clrevorder' logic.
633 633 # This was also fixed in cc0ff93d0c0c.
634 634 fastpathlinkrev = fastpathlinkrev and not self._reorder
635 635 # Treemanifests don't work correctly with fastpathlinkrev
636 636 # either, because we don't discover which directory nodes to
637 637 # send along with files. This could probably be fixed.
638 638 fastpathlinkrev = fastpathlinkrev and (
639 639 'treemanifest' not in repo.requirements)
640 640
641 641 for chunk in self.generatemanifests(commonrevs, clrevorder,
642 642 fastpathlinkrev, mfs, fnodes):
643 643 yield chunk
644 644 mfs.clear()
645 645 clrevs = set(cl.rev(x) for x in clnodes)
646 646
647 647 if not fastpathlinkrev:
648 648 def linknodes(unused, fname):
649 649 return fnodes.get(fname, {})
650 650 else:
651 651 cln = cl.node
652 652 def linknodes(filerevlog, fname):
653 653 llr = filerevlog.linkrev
654 654 fln = filerevlog.node
655 655 revs = ((r, llr(r)) for r in filerevlog)
656 656 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
657 657
658 658 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
659 659 source):
660 660 yield chunk
661 661
662 662 yield self.close()
663 663
664 664 if clnodes:
665 665 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
666 666
667 667 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
668 668 fnodes):
669 669 repo = self._repo
670 670 mfl = repo.manifestlog
671 671 dirlog = mfl._revlog.dirlog
672 672 tmfnodes = {'': mfs}
673 673
674 674 # Callback for the manifest, used to collect linkrevs for filelog
675 675 # revisions.
676 676 # Returns the linkrev node (collected in lookupcl).
677 677 def makelookupmflinknode(dir):
678 678 if fastpathlinkrev:
679 679 assert not dir
680 680 return mfs.__getitem__
681 681
682 682 def lookupmflinknode(x):
683 683 """Callback for looking up the linknode for manifests.
684 684
685 685 Returns the linkrev node for the specified manifest.
686 686
687 687 SIDE EFFECT:
688 688
689 689 1) fclnodes gets populated with the list of relevant
690 690 file nodes if we're not using fastpathlinkrev
691 691 2) When treemanifests are in use, collects treemanifest nodes
692 692 to send
693 693
694 694 Note that this means manifests must be completely sent to
695 695 the client before you can trust the list of files and
696 696 treemanifests to send.
697 697 """
698 698 clnode = tmfnodes[dir][x]
699 699 mdata = mfl.get(dir, x).readfast(shallow=True)
700 700 for p, n, fl in mdata.iterentries():
701 701 if fl == 't': # subdirectory manifest
702 702 subdir = dir + p + '/'
703 703 tmfclnodes = tmfnodes.setdefault(subdir, {})
704 704 tmfclnode = tmfclnodes.setdefault(n, clnode)
705 705 if clrevorder[clnode] < clrevorder[tmfclnode]:
706 706 tmfclnodes[n] = clnode
707 707 else:
708 708 f = dir + p
709 709 fclnodes = fnodes.setdefault(f, {})
710 710 fclnode = fclnodes.setdefault(n, clnode)
711 711 if clrevorder[clnode] < clrevorder[fclnode]:
712 712 fclnodes[n] = clnode
713 713 return clnode
714 714 return lookupmflinknode
715 715
716 716 size = 0
717 717 while tmfnodes:
718 718 dir = min(tmfnodes)
719 719 nodes = tmfnodes[dir]
720 720 prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
721 721 if not dir or prunednodes:
722 722 for x in self._packmanifests(dir, prunednodes,
723 723 makelookupmflinknode(dir)):
724 724 size += len(x)
725 725 yield x
726 726 del tmfnodes[dir]
727 727 self._verbosenote(_('%8.i (manifests)\n') % size)
728 728 yield self._manifestsdone()
729 729
730 730 # The 'source' parameter is useful for extensions
731 731 def generatefiles(self, changedfiles, linknodes, commonrevs, source):
732 732 repo = self._repo
733 733 progress = self._progress
734 734 msgbundling = _('bundling')
735 735
736 736 total = len(changedfiles)
737 737 # for progress output
738 738 msgfiles = _('files')
739 739 for i, fname in enumerate(sorted(changedfiles)):
740 740 filerevlog = repo.file(fname)
741 741 if not filerevlog:
742 742 raise error.Abort(_("empty or missing revlog for %s") % fname)
743 743
744 744 linkrevnodes = linknodes(filerevlog, fname)
745 745 # Lookup for filenodes, we collected the linkrev nodes above in the
746 746 # fastpath case and with lookupmf in the slowpath case.
747 747 def lookupfilelog(x):
748 748 return linkrevnodes[x]
749 749
750 750 filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
751 751 if filenodes:
752 752 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
753 753 total=total)
754 754 h = self.fileheader(fname)
755 755 size = len(h)
756 756 yield h
757 757 for chunk in self.group(filenodes, filerevlog, lookupfilelog):
758 758 size += len(chunk)
759 759 yield chunk
760 760 self._verbosenote(_('%8.i %s\n') % (size, fname))
761 761 progress(msgbundling, None)
762 762
763 763 def deltaparent(self, revlog, rev, p1, p2, prev):
764 764 return prev
765 765
766 766 def revchunk(self, revlog, rev, prev, linknode):
767 767 node = revlog.node(rev)
768 768 p1, p2 = revlog.parentrevs(rev)
769 769 base = self.deltaparent(revlog, rev, p1, p2, prev)
770 770
771 771 prefix = ''
772 772 if revlog.iscensored(base) or revlog.iscensored(rev):
773 773 try:
774 774 delta = revlog.revision(node, raw=True)
775 775 except error.CensoredNodeError as e:
776 776 delta = e.tombstone
777 777 if base == nullrev:
778 778 prefix = mdiff.trivialdiffheader(len(delta))
779 779 else:
780 780 baselen = revlog.rawsize(base)
781 781 prefix = mdiff.replacediffheader(baselen, len(delta))
782 782 elif base == nullrev:
783 783 delta = revlog.revision(node, raw=True)
784 784 prefix = mdiff.trivialdiffheader(len(delta))
785 785 else:
786 786 delta = revlog.revdiff(base, rev)
787 787 p1n, p2n = revlog.parents(node)
788 788 basenode = revlog.node(base)
789 789 flags = revlog.flags(rev)
790 790 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
791 791 meta += prefix
792 792 l = len(meta) + len(delta)
793 793 yield chunkheader(l)
794 794 yield meta
795 795 yield delta
796 796 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
797 797 # do nothing with basenode, it is implicitly the previous one in HG10
798 798 # do nothing with flags, it is implicitly 0 for cg1 and cg2
799 799 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
800 800
801 801 class cg2packer(cg1packer):
802 802 version = '02'
803 803 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
804 804
805 805 def __init__(self, repo, bundlecaps=None):
806 806 super(cg2packer, self).__init__(repo, bundlecaps)
807 807 if self._reorder is None:
808 808 # Since generaldelta is directly supported by cg2, reordering
809 809 # generally doesn't help, so we disable it by default (treating
810 810 # bundle.reorder=auto just like bundle.reorder=False).
811 811 self._reorder = False
812 812
813 813 def deltaparent(self, revlog, rev, p1, p2, prev):
814 814 dp = revlog.deltaparent(rev)
815 815 if dp == nullrev and revlog.storedeltachains:
816 816 # Avoid sending full revisions when delta parent is null. Pick prev
817 817 # in that case. It's tempting to pick p1 in this case, as p1 will
818 818 # be smaller in the common case. However, computing a delta against
819 819 # p1 may require resolving the raw text of p1, which could be
820 820 # expensive. The revlog caches should have prev cached, meaning
821 821 # less CPU for changegroup generation. There is likely room to add
822 822 # a flag and/or config option to control this behavior.
823 823 return prev
824 824 elif dp == nullrev:
825 825 # revlog is configured to use full snapshot for a reason,
826 826 # stick to full snapshot.
827 827 return nullrev
828 828 elif dp not in (p1, p2, prev):
829 829 # Pick prev when we can't be sure remote has the base revision.
830 830 return prev
831 831 else:
832 832 return dp
833 833
834 834 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
835 835 # Do nothing with flags, it is implicitly 0 in cg1 and cg2
836 836 return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
837 837
838 838 class cg3packer(cg2packer):
839 839 version = '03'
840 840 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
841 841
842 842 def _packmanifests(self, dir, mfnodes, lookuplinknode):
843 843 if dir:
844 844 yield self.fileheader(dir)
845 845
846 846 dirlog = self._repo.manifestlog._revlog.dirlog(dir)
847 847 for chunk in self.group(mfnodes, dirlog, lookuplinknode,
848 848 units=_('manifests')):
849 849 yield chunk
850 850
851 851 def _manifestsdone(self):
852 852 return self.close()
853 853
854 854 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
855 855 return struct.pack(
856 856 self.deltaheader, node, p1n, p2n, basenode, linknode, flags)
857 857
858 858 _packermap = {'01': (cg1packer, cg1unpacker),
859 859 # cg2 adds support for exchanging generaldelta
860 860 '02': (cg2packer, cg2unpacker),
861 861 # cg3 adds support for exchanging revlog flags and treemanifests
862 862 '03': (cg3packer, cg3unpacker),
863 863 }
864 864
865 865 def allsupportedversions(repo):
866 866 versions = set(_packermap.keys())
867 867 if not (repo.ui.configbool('experimental', 'changegroup3') or
868 868 repo.ui.configbool('experimental', 'treemanifest') or
869 869 'treemanifest' in repo.requirements):
870 870 versions.discard('03')
871 871 return versions
872 872
873 873 # Changegroup versions that can be applied to the repo
874 874 def supportedincomingversions(repo):
875 875 return allsupportedversions(repo)
876 876
877 877 # Changegroup versions that can be created from the repo
878 878 def supportedoutgoingversions(repo):
879 879 versions = allsupportedversions(repo)
880 880 if 'treemanifest' in repo.requirements:
881 881 # Versions 01 and 02 support only flat manifests and it's just too
882 882 # expensive to convert between the flat manifest and tree manifest on
883 883 # the fly. Since tree manifests are hashed differently, all of history
884 884 # would have to be converted. Instead, we simply don't even pretend to
885 885 # support versions 01 and 02.
886 886 versions.discard('01')
887 887 versions.discard('02')
888 888 return versions
889 889
890 890 def safeversion(repo):
891 891 # Finds the smallest version that it's safe to assume clients of the repo
892 892 # will support. For example, all hg versions that support generaldelta also
893 893 # support changegroup 02.
894 894 versions = supportedoutgoingversions(repo)
895 895 if 'generaldelta' in repo.requirements:
896 896 versions.discard('01')
897 897 assert versions
898 898 return min(versions)
899 899
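A standalone sketch of the negotiation in safeversion() (hypothetical inputs; the real function starts from supportedoutgoingversions(), which also honours the config gates above):

    def safeversion_sketch(requirements, versions=('01', '02')):
        vs = set(versions)
        if 'generaldelta' in requirements:
            vs.discard('01')     # generaldelta clients all speak cg2
        assert vs
        return min(vs)

    assert safeversion_sketch({'revlogv1'}) == '01'
    assert safeversion_sketch({'revlogv1', 'generaldelta'}) == '02'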
900 900 def getbundler(version, repo, bundlecaps=None):
901 901 assert version in supportedoutgoingversions(repo)
902 902 return _packermap[version][0](repo, bundlecaps)
903 903
904 904 def getunbundler(version, fh, alg, extras=None):
905 905 return _packermap[version][1](fh, alg, extras=extras)
906 906
907 907 def _changegroupinfo(repo, nodes, source):
908 908 if repo.ui.verbose or source == 'bundle':
909 909 repo.ui.status(_("%d changesets found\n") % len(nodes))
910 910 if repo.ui.debugflag:
911 911 repo.ui.debug("list of changesets:\n")
912 912 for node in nodes:
913 913 repo.ui.debug("%s\n" % hex(node))
914 914
915 915 def makestream(repo, outgoing, version, source, fastpath=False,
916 916 bundlecaps=None):
917 917 bundler = getbundler(version, repo, bundlecaps=bundlecaps)
918 918 return getsubsetraw(repo, outgoing, bundler, source, fastpath=fastpath)
919 919
920 920 def makechangegroup(repo, outgoing, version, source, fastpath=False,
921 921 bundlecaps=None):
922 922 cgstream = makestream(repo, outgoing, version, source,
923 923 fastpath=fastpath, bundlecaps=bundlecaps)
924 924 return getunbundler(version, util.chunkbuffer(cgstream), None,
925 925 {'clcount': len(outgoing.missing) })
926 926
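This pairing is the point of the change: makestream() yields raw chunks suitable for writing out, and makechangegroup() wraps the same stream in an unpacker ready for apply(). A hedged sketch, assuming a repo and a precomputed discovery.outgoing:

    # stream = makestream(repo, outgoing, '02', 'bundle')  # iterator of chunks
    # writechunks(repo.ui, stream, 'changes.hg')           # persist the raw stream
    # cg = makechangegroup(repo, outgoing, '02', 'pull')   # unpacker over it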
927 927 def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
928 928 repo = repo.unfiltered()
929 929 commonrevs = outgoing.common
930 930 csets = outgoing.missing
931 931 heads = outgoing.missingheads
932 932 # We go through the fast path if we get told to, or if all (unfiltered)
933 933 # heads have been requested (since we then know that all linkrevs will
934 934 # be pulled by the client).
935 935 heads.sort()
936 936 fastpathlinkrev = fastpath or (
937 937 repo.filtername is None and heads == sorted(repo.heads()))
938 938
939 939 repo.hook('preoutgoing', throw=True, source=source)
940 940 _changegroupinfo(repo, csets, source)
941 941 return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
942 942
943 def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
944 version='01'):
945 """Like getbundle, but taking a discovery.outgoing as an argument.
946
947 This is only implemented for local repos and reuses potentially
948 precomputed sets in outgoing. Returns a raw changegroup generator."""
949 if not outgoing.missing:
950 return None
951 bundler = getbundler(version, repo, bundlecaps)
952 return getsubsetraw(repo, outgoing, bundler, source)
953
954 943 def getchangegroup(repo, source, outgoing, bundlecaps=None,
955 944 version='01'):
956 945 """Like getbundle, but taking a discovery.outgoing as an argument.
957 946
958 947 This is only implemented for local repos and reuses potentially
959 948 precomputed sets in outgoing."""
960 949 if not outgoing.missing:
961 950 return None
962 951 return makechangegroup(repo, outgoing, version, source,
963 952 bundlecaps=bundlecaps)
964 953
965 954 def getlocalchangegroup(repo, *args, **kwargs):
966 955 repo.ui.deprecwarn('getlocalchangegroup is deprecated, use getchangegroup',
967 956 '4.3')
968 957 return getchangegroup(repo, *args, **kwargs)
969 958
970 959 def changegroup(repo, basenodes, source):
971 960 # to avoid a race we use changegroupsubset() (issue1320)
972 961 outgoing = discovery.outgoing(repo, missingroots=basenodes,
973 962 missingheads=repo.heads())
974 963 return makechangegroup(repo, outgoing, '01', source)
975 964
976 965 def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
977 966 revisions = 0
978 967 files = 0
979 968 for chunkdata in iter(source.filelogheader, {}):
980 969 files += 1
981 970 f = chunkdata["filename"]
982 971 repo.ui.debug("adding %s revisions\n" % f)
983 972 repo.ui.progress(_('files'), files, unit=_('files'),
984 973 total=expectedfiles)
985 974 fl = repo.file(f)
986 975 o = len(fl)
987 976 try:
988 977 if not fl.addgroup(source, revmap, trp):
989 978 raise error.Abort(_("received file revlog group is empty"))
990 979 except error.CensoredBaseError as e:
991 980 raise error.Abort(_("received delta base is censored: %s") % e)
992 981 revisions += len(fl) - o
993 982 if f in needfiles:
994 983 needs = needfiles[f]
995 984 for new in xrange(o, len(fl)):
996 985 n = fl.node(new)
997 986 if n in needs:
998 987 needs.remove(n)
999 988 else:
1000 989 raise error.Abort(
1001 990 _("received spurious file revlog entry"))
1002 991 if not needs:
1003 992 del needfiles[f]
1004 993 repo.ui.progress(_('files'), None)
1005 994
1006 995 for f, needs in needfiles.iteritems():
1007 996 fl = repo.file(f)
1008 997 for n in needs:
1009 998 try:
1010 999 fl.rev(n)
1011 1000 except error.LookupError:
1012 1001 raise error.Abort(
1013 1002 _('missing file data for %s:%s - run hg verify') %
1014 1003 (f, hex(n)))
1015 1004
1016 1005 return revisions, files
@@ -1,2013 +1,2011 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 hex,
16 16 nullid,
17 17 )
18 18 from . import (
19 19 bookmarks as bookmod,
20 20 bundle2,
21 21 changegroup,
22 22 discovery,
23 23 error,
24 24 lock as lockmod,
25 25 obsolete,
26 26 phases,
27 27 pushkey,
28 28 pycompat,
29 29 scmutil,
30 30 sslutil,
31 31 streamclone,
32 32 url as urlmod,
33 33 util,
34 34 )
35 35
36 36 urlerr = util.urlerr
37 37 urlreq = util.urlreq
38 38
39 39 # Maps bundle version human names to changegroup versions.
40 40 _bundlespeccgversions = {'v1': '01',
41 41 'v2': '02',
42 42 'packed1': 's1',
43 43 'bundle2': '02', #legacy
44 44 }
45 45
46 46 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
47 47 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
48 48
49 49 def parsebundlespec(repo, spec, strict=True, externalnames=False):
50 50 """Parse a bundle string specification into parts.
51 51
52 52 Bundle specifications denote a well-defined bundle/exchange format.
53 53 The content of a given specification should not change over time in
54 54 order to ensure that bundles produced by a newer version of Mercurial are
55 55 readable from an older version.
56 56
57 57 The string currently has the form:
58 58
59 59 <compression>-<type>[;<parameter0>[;<parameter1>]]
60 60
61 61 Where <compression> is one of the supported compression formats
62 62 and <type> is (currently) a version string. A ";" can follow the type and
63 63 all text afterwards is interpreted as URI encoded, ";" delimited key=value
64 64 pairs.
65 65
66 66 If ``strict`` is True (the default) <compression> is required. Otherwise,
67 67 it is optional.
68 68
69 69 If ``externalnames`` is False (the default), the human-centric names will
70 70 be converted to their internal representation.
71 71
72 72 Returns a 3-tuple of (compression, version, parameters). Compression will
73 73 be ``None`` if not in strict mode and a compression isn't defined.
74 74
75 75 An ``InvalidBundleSpecification`` is raised when the specification is
76 76 not syntactically well formed.
77 77
78 78 An ``UnsupportedBundleSpecification`` is raised when the compression or
79 79 bundle type/version is not recognized.
80 80
81 81 Note: this function will likely eventually return a more complex data
82 82 structure, including bundle2 part information.
83 83 """
84 84 def parseparams(s):
85 85 if ';' not in s:
86 86 return s, {}
87 87
88 88 params = {}
89 89 version, paramstr = s.split(';', 1)
90 90
91 91 for p in paramstr.split(';'):
92 92 if '=' not in p:
93 93 raise error.InvalidBundleSpecification(
94 94 _('invalid bundle specification: '
95 95 'missing "=" in parameter: %s') % p)
96 96
97 97 key, value = p.split('=', 1)
98 98 key = urlreq.unquote(key)
99 99 value = urlreq.unquote(value)
100 100 params[key] = value
101 101
102 102 return version, params
103 103
104 104
105 105 if strict and '-' not in spec:
106 106 raise error.InvalidBundleSpecification(
107 107 _('invalid bundle specification; '
108 108 'must be prefixed with compression: %s') % spec)
109 109
110 110 if '-' in spec:
111 111 compression, version = spec.split('-', 1)
112 112
113 113 if compression not in util.compengines.supportedbundlenames:
114 114 raise error.UnsupportedBundleSpecification(
115 115 _('%s compression is not supported') % compression)
116 116
117 117 version, params = parseparams(version)
118 118
119 119 if version not in _bundlespeccgversions:
120 120 raise error.UnsupportedBundleSpecification(
121 121 _('%s is not a recognized bundle version') % version)
122 122 else:
123 123 # Value could be just the compression or just the version, in which
124 124 # case some defaults are assumed (but only when not in strict mode).
125 125 assert not strict
126 126
127 127 spec, params = parseparams(spec)
128 128
129 129 if spec in util.compengines.supportedbundlenames:
130 130 compression = spec
131 131 version = 'v1'
132 132 # Generaldelta repos require v2.
133 133 if 'generaldelta' in repo.requirements:
134 134 version = 'v2'
135 135 # Modern compression engines require v2.
136 136 if compression not in _bundlespecv1compengines:
137 137 version = 'v2'
138 138 elif spec in _bundlespeccgversions:
139 139 if spec == 'packed1':
140 140 compression = 'none'
141 141 else:
142 142 compression = 'bzip2'
143 143 version = spec
144 144 else:
145 145 raise error.UnsupportedBundleSpecification(
146 146 _('%s is not a recognized bundle specification') % spec)
147 147
148 148 # Bundle version 1 only supports a known set of compression engines.
149 149 if version == 'v1' and compression not in _bundlespecv1compengines:
150 150 raise error.UnsupportedBundleSpecification(
151 151 _('compression engine %s is not supported on v1 bundles') %
152 152 compression)
153 153
154 154 # The specification for packed1 can optionally declare the data formats
155 155 # required to apply it. If we see this metadata, compare against what the
156 156 # repo supports and error if the bundle isn't compatible.
157 157 if version == 'packed1' and 'requirements' in params:
158 158 requirements = set(params['requirements'].split(','))
159 159 missingreqs = requirements - repo.supportedformats
160 160 if missingreqs:
161 161 raise error.UnsupportedBundleSpecification(
162 162 _('missing support for repository features: %s') %
163 163 ', '.join(sorted(missingreqs)))
164 164
165 165 if not externalnames:
166 166 engine = util.compengines.forbundlename(compression)
167 167 compression = engine.bundletype()[1]
168 168 version = _bundlespeccgversions[version]
169 169 return compression, version, params
170 170
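Illustrative expectations for parsebundlespec(), following the mappings above (compression names resolve through util.compengines to internal bundle types; hedged, not captured output):

    # parsebundlespec(repo, 'gzip-v2')   -> ('GZ', '02', {})
    # parsebundlespec(repo, 'bzip2-v1')  -> ('BZ', '01', {})
    # parsebundlespec(repo, 'none-packed1;requirements=revlogv1')
    #                                    -> ('UN', 's1', {'requirements': 'revlogv1'})
    # parsebundlespec(repo, 'v2', strict=False)  -> ('BZ', '02', {})  # defaults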
171 171 def readbundle(ui, fh, fname, vfs=None):
172 172 header = changegroup.readexactly(fh, 4)
173 173
174 174 alg = None
175 175 if not fname:
176 176 fname = "stream"
177 177 if not header.startswith('HG') and header.startswith('\0'):
178 178 fh = changegroup.headerlessfixup(fh, header)
179 179 header = "HG10"
180 180 alg = 'UN'
181 181 elif vfs:
182 182 fname = vfs.join(fname)
183 183
184 184 magic, version = header[0:2], header[2:4]
185 185
186 186 if magic != 'HG':
187 187 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
188 188 if version == '10':
189 189 if alg is None:
190 190 alg = changegroup.readexactly(fh, 2)
191 191 return changegroup.cg1unpacker(fh, alg)
192 192 elif version.startswith('2'):
193 193 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
194 194 elif version == 'S1':
195 195 return streamclone.streamcloneapplier(fh)
196 196 else:
197 197 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
198 198
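A standalone sketch of the header dispatch in readbundle(): the first four bytes select the unbundler, and a leading '\0' marks a raw, headerless changegroup that is fixed up as uncompressed HG10:

    def bundlekind(header):
        # header: the first 4 bytes of the stream
        if header.startswith('\0'):
            return 'headerless cg1'
        if not header.startswith('HG'):
            raise ValueError('not a Mercurial bundle')
        version = header[2:4]
        if version == '10':
            return 'cg1'
        if version.startswith('2'):
            return 'bundle2'
        if version == 'S1':
            return 'stream clone'
        raise ValueError('unknown bundle version %s' % version)

    assert bundlekind('HG20') == 'bundle2'
    assert bundlekind('\0\0\0\x1d') == 'headerless cg1'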
199 199 def getbundlespec(ui, fh):
200 200 """Infer the bundlespec from a bundle file handle.
201 201
202 202 The input file handle is seeked and the original seek position is not
203 203 restored.
204 204 """
205 205 def speccompression(alg):
206 206 try:
207 207 return util.compengines.forbundletype(alg).bundletype()[0]
208 208 except KeyError:
209 209 return None
210 210
211 211 b = readbundle(ui, fh, None)
212 212 if isinstance(b, changegroup.cg1unpacker):
213 213 alg = b._type
214 214 if alg == '_truncatedBZ':
215 215 alg = 'BZ'
216 216 comp = speccompression(alg)
217 217 if not comp:
218 218 raise error.Abort(_('unknown compression algorithm: %s') % alg)
219 219 return '%s-v1' % comp
220 220 elif isinstance(b, bundle2.unbundle20):
221 221 if 'Compression' in b.params:
222 222 comp = speccompression(b.params['Compression'])
223 223 if not comp:
224 224 raise error.Abort(_('unknown compression algorithm: %s') % comp)
225 225 else:
226 226 comp = 'none'
227 227
228 228 version = None
229 229 for part in b.iterparts():
230 230 if part.type == 'changegroup':
231 231 version = part.params['version']
232 232 if version in ('01', '02'):
233 233 version = 'v2'
234 234 else:
235 235 raise error.Abort(_('changegroup version %s does not have '
236 236 'a known bundlespec') % version,
237 237 hint=_('try upgrading your Mercurial '
238 238 'client'))
239 239
240 240 if not version:
241 241 raise error.Abort(_('could not identify changegroup version in '
242 242 'bundle'))
243 243
244 244 return '%s-%s' % (comp, version)
245 245 elif isinstance(b, streamclone.streamcloneapplier):
246 246 requirements = streamclone.readbundle1header(fh)[2]
247 247 params = 'requirements=%s' % ','.join(sorted(requirements))
248 248 return 'none-packed1;%s' % urlreq.quote(params)
249 249 else:
250 250 raise error.Abort(_('unknown bundle type: %s') % b)
251 251
252 252 def _computeoutgoing(repo, heads, common):
253 253 """Computes which revs are outgoing given a set of common
254 254 and a set of heads.
255 255
256 256 This is a separate function so extensions can have access to
257 257 the logic.
258 258
259 259 Returns a discovery.outgoing object.
260 260 """
261 261 cl = repo.changelog
262 262 if common:
263 263 hasnode = cl.hasnode
264 264 common = [n for n in common if hasnode(n)]
265 265 else:
266 266 common = [nullid]
267 267 if not heads:
268 268 heads = cl.heads()
269 269 return discovery.outgoing(repo, common, heads)
270 270
271 271 def _forcebundle1(op):
272 272 """return true if a pull/push must use bundle1
273 273
274 274 This function is used to allow testing of the older bundle version"""
275 275 ui = op.repo.ui
276 276 forcebundle1 = False
277 277 # The goal of this config is to allow developers to choose the bundle
278 278 # version used during exchange. This is especially handy during tests.
279 279 # Value is a list of bundle versions to pick from; the highest version
280 280 # should be used.
281 281 #
282 282 # developer config: devel.legacy.exchange
283 283 exchange = ui.configlist('devel', 'legacy.exchange')
284 284 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
285 285 return forcebundle1 or not op.remote.capable('bundle2')
286 286
287 287 class pushoperation(object):
288 288 """A object that represent a single push operation
289 289
290 290 Its purpose is to carry push related state and very common operations.
291 291
292 292 A new pushoperation should be created at the beginning of each push and
293 293 discarded afterward.
294 294 """
295 295
296 296 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
297 297 bookmarks=(), pushvars=None):
298 298 # repo we push from
299 299 self.repo = repo
300 300 self.ui = repo.ui
301 301 # repo we push to
302 302 self.remote = remote
303 303 # force option provided
304 304 self.force = force
305 305 # revs to be pushed (None is "all")
306 306 self.revs = revs
308 308 # bookmarks explicitly pushed
308 308 self.bookmarks = bookmarks
309 309 # allow push of new branch
310 310 self.newbranch = newbranch
311 311 # steps already performed
312 312 # (used to check what steps have already been performed through bundle2)
313 313 self.stepsdone = set()
314 314 # Integer version of the changegroup push result
315 315 # - None means nothing to push
316 316 # - 0 means HTTP error
317 317 # - 1 means we pushed and remote head count is unchanged *or*
318 318 # we have outgoing changesets but refused to push
319 319 # - other values as described by addchangegroup()
320 320 self.cgresult = None
321 321 # Boolean value for the bookmark push
322 322 self.bkresult = None
323 323 # discover.outgoing object (contains common and outgoing data)
324 324 self.outgoing = None
325 325 # all remote topological heads before the push
326 326 self.remoteheads = None
327 327 # Details of the remote branch pre and post push
328 328 #
329 329 # mapping: {'branch': ([remoteheads],
330 330 # [newheads],
331 331 # [unsyncedheads],
332 332 # [discardedheads])}
333 333 # - branch: the branch name
334 334 # - remoteheads: the list of remote heads known locally
335 335 # None if the branch is new
336 336 # - newheads: the new remote heads (known locally) with outgoing pushed
337 337 # - unsyncedheads: the list of remote heads unknown locally.
338 338 # - discardedheads: the list of remote heads made obsolete by the push
339 339 self.pushbranchmap = None
340 340 # testable as a boolean indicating if any nodes are missing locally.
341 341 self.incoming = None
342 342 # phases changes that must be pushed along side the changesets
343 343 self.outdatedphases = None
344 344 # phases changes that must be pushed if changeset push fails
345 345 self.fallbackoutdatedphases = None
346 346 # outgoing obsmarkers
347 347 self.outobsmarkers = set()
348 348 # outgoing bookmarks
349 349 self.outbookmarks = []
350 350 # transaction manager
351 351 self.trmanager = None
352 352 # map { pushkey partid -> callback handling failure}
353 353 # used to handle exception from mandatory pushkey part failure
354 354 self.pkfailcb = {}
355 355 # an iterable of pushvars or None
356 356 self.pushvars = pushvars
357 357
358 358 @util.propertycache
359 359 def futureheads(self):
360 360 """future remote heads if the changeset push succeeds"""
361 361 return self.outgoing.missingheads
362 362
363 363 @util.propertycache
364 364 def fallbackheads(self):
365 365 """future remote heads if the changeset push fails"""
366 366 if self.revs is None:
367 367 # no target to push, all common heads are relevant
368 368 return self.outgoing.commonheads
369 369 unfi = self.repo.unfiltered()
370 370 # I want cheads = heads(::missingheads and ::commonheads)
371 371 # (missingheads is revs with secret changeset filtered out)
372 372 #
373 373 # This can be expressed as:
374 374 # cheads = ( (missingheads and ::commonheads)
375 375 # + (commonheads and ::missingheads))"
376 376 # )
377 377 #
378 378 # while trying to push we already computed the following:
379 379 # common = (::commonheads)
380 380 # missing = ((commonheads::missingheads) - commonheads)
381 381 #
382 382 # We can pick:
383 383 # * missingheads part of common (::commonheads)
384 384 common = self.outgoing.common
385 385 nm = self.repo.changelog.nodemap
386 386 cheads = [node for node in self.revs if nm[node] in common]
387 387 # and
388 388 # * commonheads parents on missing
389 389 revset = unfi.set('%ln and parents(roots(%ln))',
390 390 self.outgoing.commonheads,
391 391 self.outgoing.missing)
392 392 cheads.extend(c.node() for c in revset)
393 393 return cheads
394 394
395 395 @property
396 396 def commonheads(self):
397 397 """set of all common heads after changeset bundle push"""
398 398 if self.cgresult:
399 399 return self.futureheads
400 400 else:
401 401 return self.fallbackheads
402 402
403 403 # mapping of message used when pushing bookmark
404 404 bookmsgmap = {'update': (_("updating bookmark %s\n"),
405 405 _('updating bookmark %s failed!\n')),
406 406 'export': (_("exporting bookmark %s\n"),
407 407 _('exporting bookmark %s failed!\n')),
408 408 'delete': (_("deleting remote bookmark %s\n"),
409 409 _('deleting remote bookmark %s failed!\n')),
410 410 }
411 411
412 412
413 413 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
414 414 opargs=None):
415 415 '''Push outgoing changesets (limited by revs) from a local
416 416 repository to remote. Return an integer:
417 417 - None means nothing to push
418 418 - 0 means HTTP error
419 419 - 1 means we pushed and remote head count is unchanged *or*
420 420 we have outgoing changesets but refused to push
421 421 - other values as described by addchangegroup()
422 422 '''
423 423 if opargs is None:
424 424 opargs = {}
425 425 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
426 426 **opargs)
427 427 if pushop.remote.local():
428 428 missing = (set(pushop.repo.requirements)
429 429 - pushop.remote.local().supported)
430 430 if missing:
431 431 msg = _("required features are not"
432 432 " supported in the destination:"
433 433 " %s") % (', '.join(sorted(missing)))
434 434 raise error.Abort(msg)
435 435
436 436 if not pushop.remote.canpush():
437 437 raise error.Abort(_("destination does not support push"))
438 438
439 439 if not pushop.remote.capable('unbundle'):
440 440 raise error.Abort(_('cannot push: destination does not support the '
441 441 'unbundle wire protocol command'))
442 442
443 443 # get lock as we might write phase data
444 444 wlock = lock = None
445 445 try:
446 446 # bundle2 push may receive a reply bundle touching bookmarks or other
447 447 # things requiring the wlock. Take it now to ensure proper ordering.
448 448 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
449 449 if (not _forcebundle1(pushop)) and maypushback:
450 450 wlock = pushop.repo.wlock()
451 451 lock = pushop.repo.lock()
452 452 pushop.trmanager = transactionmanager(pushop.repo,
453 453 'push-response',
454 454 pushop.remote.url())
455 455 except IOError as err:
456 456 if err.errno != errno.EACCES:
457 457 raise
458 458 # source repo cannot be locked.
459 459 # We do not abort the push, but just disable the local phase
460 460 # synchronisation.
461 461 msg = 'cannot lock source repository: %s\n' % err
462 462 pushop.ui.debug(msg)
463 463
464 464 with wlock or util.nullcontextmanager(), \
465 465 lock or util.nullcontextmanager(), \
466 466 pushop.trmanager or util.nullcontextmanager():
467 467 pushop.repo.checkpush(pushop)
468 468 _pushdiscovery(pushop)
469 469 if not _forcebundle1(pushop):
470 470 _pushbundle2(pushop)
471 471 _pushchangeset(pushop)
472 472 _pushsyncphase(pushop)
473 473 _pushobsolete(pushop)
474 474 _pushbookmark(pushop)
475 475
476 476 return pushop
477 477
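A hedged sketch of calling push() (repo and the peer are assumed; hg.peer is used only for illustration):

    # from mercurial import hg
    # remote = hg.peer(repo.ui, {}, 'ssh://example.com/repo')
    # pushop = push(repo, remote, revs=[repo['tip'].node()])
    # pushop.cgresult then follows the integer convention documented above
    # (None: nothing to push, 0: HTTP error, 1: head count unchanged, ...).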
478 478 # list of steps to perform discovery before push
479 479 pushdiscoveryorder = []
480 480
481 481 # Mapping between step name and function
482 482 #
483 483 # This exists to help extensions wrap steps if necessary
484 484 pushdiscoverymapping = {}
485 485
486 486 def pushdiscovery(stepname):
487 487 """decorator for function performing discovery before push
488 488
489 489 The function is added to the step -> function mapping and appended to the
490 490     list of steps. Beware that decorated functions will be added in order (this
491 491     may matter).
492 492
493 493     You can only use this decorator for a new step; if you want to wrap a step
494 494     from an extension, change the pushdiscoverymapping dictionary directly."""
495 495 def dec(func):
496 496 assert stepname not in pushdiscoverymapping
497 497 pushdiscoverymapping[stepname] = func
498 498 pushdiscoveryorder.append(stepname)
499 499 return func
500 500 return dec
501 501
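# A sketch of registering a new discovery step with the decorator above; the
# step name 'example' and the function body are hypothetical:
#
#   @pushdiscovery('example')
#   def _pushdiscoveryexample(pushop):
#       pushop.ui.debug('running example discovery\n')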
502 502 def _pushdiscovery(pushop):
503 503 """Run all discovery steps"""
504 504 for stepname in pushdiscoveryorder:
505 505 step = pushdiscoverymapping[stepname]
506 506 step(pushop)
507 507
508 508 @pushdiscovery('changeset')
509 509 def _pushdiscoverychangeset(pushop):
510 510     """discover the changesets that need to be pushed"""
511 511 fci = discovery.findcommonincoming
512 512 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
513 513 common, inc, remoteheads = commoninc
514 514 fco = discovery.findcommonoutgoing
515 515 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
516 516 commoninc=commoninc, force=pushop.force)
517 517 pushop.outgoing = outgoing
518 518 pushop.remoteheads = remoteheads
519 519 pushop.incoming = inc
520 520
521 521 @pushdiscovery('phase')
522 522 def _pushdiscoveryphase(pushop):
523 523 """discover the phase that needs to be pushed
524 524
525 525     (computed for both the success and failure cases of the changeset push)"""
526 526 outgoing = pushop.outgoing
527 527 unfi = pushop.repo.unfiltered()
528 528 remotephases = pushop.remote.listkeys('phases')
529 529 publishing = remotephases.get('publishing', False)
530 530 if (pushop.ui.configbool('ui', '_usedassubrepo')
531 531 and remotephases # server supports phases
532 532 and not pushop.outgoing.missing # no changesets to be pushed
533 533 and publishing):
534 534 # When:
535 535 # - this is a subrepo push
536 536         # - and the remote supports phases
537 537         # - and no changesets are to be pushed
538 538         # - and the remote is publishing
539 539         # We may be in the issue 3871 case!
540 540         # We drop the phase synchronisation that would otherwise be
541 541         # done as a courtesy to publish changesets that may still be
542 542         # draft on the remote.
543 543 remotephases = {'publishing': 'True'}
544 544 ana = phases.analyzeremotephases(pushop.repo,
545 545 pushop.fallbackheads,
546 546 remotephases)
547 547 pheads, droots = ana
548 548 extracond = ''
549 549 if not publishing:
550 550 extracond = ' and public()'
551 551 revset = 'heads((%%ln::%%ln) %s)' % extracond
552 552     # Get the list of all revs that are draft on the remote but public here.
553 553     # XXX Beware that the revset breaks if droots is not strictly
554 554     # XXX roots; we may want to ensure it is, but that is costly
555 555 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
556 556 if not outgoing.missing:
557 557 future = fallback
558 558 else:
559 559         # add the changesets we are going to push as draft
560 560         #
561 561         # this should not be necessary for a publishing server, but because of
562 562         # an issue fixed in xxxxx we have to do it anyway.
563 563 fdroots = list(unfi.set('roots(%ln + %ln::)',
564 564 outgoing.missing, droots))
565 565 fdroots = [f.node() for f in fdroots]
566 566 future = list(unfi.set(revset, fdroots, pushop.futureheads))
567 567 pushop.outdatedphases = future
568 568 pushop.fallbackoutdatedphases = fallback
569 569
570 570 @pushdiscovery('obsmarker')
571 571 def _pushdiscoveryobsmarkers(pushop):
572 572 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
573 573 and pushop.repo.obsstore
574 574 and 'obsolete' in pushop.remote.listkeys('namespaces')):
575 575 repo = pushop.repo
576 576         # very naive computation that can be quite expensive on big repos;
577 577         # however, evolution is currently slow on them anyway.
578 578 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
579 579 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
580 580
581 581 @pushdiscovery('bookmarks')
582 582 def _pushdiscoverybookmarks(pushop):
583 583 ui = pushop.ui
584 584 repo = pushop.repo.unfiltered()
585 585 remote = pushop.remote
586 586 ui.debug("checking for updated bookmarks\n")
587 587 ancestors = ()
588 588 if pushop.revs:
589 589 revnums = map(repo.changelog.rev, pushop.revs)
590 590 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
591 591 remotebookmark = remote.listkeys('bookmarks')
592 592
593 593 explicit = set([repo._bookmarks.expandname(bookmark)
594 594 for bookmark in pushop.bookmarks])
595 595
596 596 remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
597 597 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
598 598
599 599 def safehex(x):
600 600 if x is None:
601 601 return x
602 602 return hex(x)
603 603
604 604 def hexifycompbookmarks(bookmarks):
605 605 for b, scid, dcid in bookmarks:
606 606 yield b, safehex(scid), safehex(dcid)
607 607
608 608 comp = [hexifycompbookmarks(marks) for marks in comp]
609 609 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
610 610
611 611 for b, scid, dcid in advsrc:
612 612 if b in explicit:
613 613 explicit.remove(b)
614 614 if not ancestors or repo[scid].rev() in ancestors:
615 615 pushop.outbookmarks.append((b, dcid, scid))
616 616     # search for added bookmarks
617 617 for b, scid, dcid in addsrc:
618 618 if b in explicit:
619 619 explicit.remove(b)
620 620 pushop.outbookmarks.append((b, '', scid))
621 621     # search for overwritten bookmarks
622 622 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
623 623 if b in explicit:
624 624 explicit.remove(b)
625 625 pushop.outbookmarks.append((b, dcid, scid))
626 626     # search for bookmarks to delete
627 627 for b, scid, dcid in adddst:
628 628 if b in explicit:
629 629 explicit.remove(b)
630 630 # treat as "deleted locally"
631 631 pushop.outbookmarks.append((b, dcid, ''))
632 632 # identical bookmarks shouldn't get reported
633 633 for b, scid, dcid in same:
634 634 if b in explicit:
635 635 explicit.remove(b)
636 636
637 637 if explicit:
638 638 explicit = sorted(explicit)
639 639 # we should probably list all of them
640 640 ui.warn(_('bookmark %s does not exist on the local '
641 641 'or remote repository!\n') % explicit[0])
642 642 pushop.bkresult = 2
643 643
644 644 pushop.outbookmarks.sort()
645 645
646 646 def _pushcheckoutgoing(pushop):
647 647 outgoing = pushop.outgoing
648 648 unfi = pushop.repo.unfiltered()
649 649 if not outgoing.missing:
650 650 # nothing to push
651 651 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
652 652 return False
653 653 # something to push
654 654 if not pushop.force:
655 655         # if repo.obsstore is empty --> no obsolete markers;
656 656         # we can then skip the iteration
657 657 if unfi.obsstore:
658 658             # these messages are defined here to stay within the 80-char limit
659 659 mso = _("push includes obsolete changeset: %s!")
660 660 mspd = _("push includes phase-divergent changeset: %s!")
661 661 mscd = _("push includes content-divergent changeset: %s!")
662 662 mst = {"orphan": _("push includes orphan changeset: %s!"),
663 663 "phase-divergent": mspd,
664 664 "content-divergent": mscd}
665 665             # If we are about to push and there is at least one
666 666             # obsolete or unstable changeset in missing, then at
667 667             # least one of the missing heads will be obsolete or
668 668             # unstable. So checking heads only is ok
669 669 for node in outgoing.missingheads:
670 670 ctx = unfi[node]
671 671 if ctx.obsolete():
672 672 raise error.Abort(mso % ctx)
673 673 elif ctx.isunstable():
674 674 # TODO print more than one instability in the abort
675 675 # message
676 676 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
677 677
678 678 discovery.checkheads(pushop)
679 679 return True
680 680
681 681 # List of names of steps to perform for an outgoing bundle2, order matters.
682 682 b2partsgenorder = []
683 683
684 684 # Mapping between step name and function
685 685 #
686 686 # This exists to help extensions wrap steps if necessary
687 687 b2partsgenmapping = {}
688 688
689 689 def b2partsgenerator(stepname, idx=None):
690 690 """decorator for function generating bundle2 part
691 691
692 692 The function is added to the step -> function mapping and appended to the
693 693 list of steps. Beware that decorated functions will be added in order
694 694 (this may matter).
695 695
696 696     You can only use this decorator for new steps; if you want to wrap a step
697 697     from an extension, change the b2partsgenmapping dictionary directly."""
698 698 def dec(func):
699 699 assert stepname not in b2partsgenmapping
700 700 b2partsgenmapping[stepname] = func
701 701 if idx is None:
702 702 b2partsgenorder.append(stepname)
703 703 else:
704 704 b2partsgenorder.insert(idx, stepname)
705 705 return func
706 706 return dec
707 707
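# A sketch of registering a new part generator (the step and part names are
# hypothetical). A generator may return a callable, which _pushbundle2()
# below will invoke with the bundle2 reply:
#
#   @b2partsgenerator('example')
#   def _pushb2example(pushop, bundler):
#       part = bundler.newpart('example', data='payload')
#       def handlereply(op):
#           pass  # inspect op.records for the server's answer
#       return handlereply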
708 708 def _pushb2ctxcheckheads(pushop, bundler):
709 709 """Generate race condition checking parts
710 710
711 711 Exists as an independent function to aid extensions
712 712 """
713 713     # * 'force' does not check for push races,
714 714     # * if we don't push anything, there is nothing to check.
715 715 if not pushop.force and pushop.outgoing.missingheads:
716 716 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
717 717 emptyremote = pushop.pushbranchmap is None
718 718 if not allowunrelated or emptyremote:
719 719 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
720 720 else:
721 721 affected = set()
722 722 for branch, heads in pushop.pushbranchmap.iteritems():
723 723 remoteheads, newheads, unsyncedheads, discardedheads = heads
724 724 if remoteheads is not None:
725 725 remote = set(remoteheads)
726 726 affected |= set(discardedheads) & remote
727 727 affected |= remote - set(newheads)
728 728 if affected:
729 729 data = iter(sorted(affected))
730 730 bundler.newpart('check:updated-heads', data=data)
731 731
732 732 @b2partsgenerator('changeset')
733 733 def _pushb2ctx(pushop, bundler):
734 734 """handle changegroup push through bundle2
735 735
736 736 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
737 737 """
738 738 if 'changesets' in pushop.stepsdone:
739 739 return
740 740 pushop.stepsdone.add('changesets')
741 741 # Send known heads to the server for race detection.
742 742 if not _pushcheckoutgoing(pushop):
743 743 return
744 744 pushop.repo.prepushoutgoinghooks(pushop)
745 745
746 746 _pushb2ctxcheckheads(pushop, bundler)
747 747
748 748 b2caps = bundle2.bundle2caps(pushop.remote)
749 749 version = '01'
750 750 cgversions = b2caps.get('changegroup')
751 751 if cgversions: # 3.1 and 3.2 ship with an empty value
752 752 cgversions = [v for v in cgversions
753 753 if v in changegroup.supportedoutgoingversions(
754 754 pushop.repo)]
755 755 if not cgversions:
756 756 raise ValueError(_('no common changegroup version'))
757 757 version = max(cgversions)
758 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
759 pushop.outgoing,
760 version=version)
761 cgpart = bundler.newpart('changegroup', data=cg)
758 cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
759 'push')
760 cgpart = bundler.newpart('changegroup', data=cgstream)
762 761 if cgversions:
763 762 cgpart.addparam('version', version)
764 763 if 'treemanifest' in pushop.repo.requirements:
765 764 cgpart.addparam('treemanifest', '1')
766 765 def handlereply(op):
767 766 """extract addchangegroup returns from server reply"""
768 767 cgreplies = op.records.getreplies(cgpart.id)
769 768 assert len(cgreplies['changegroup']) == 1
770 769 pushop.cgresult = cgreplies['changegroup'][0]['return']
771 770 return handlereply
772 771
773 772 @b2partsgenerator('phase')
774 773 def _pushb2phases(pushop, bundler):
775 774 """handle phase push through bundle2"""
776 775 if 'phases' in pushop.stepsdone:
777 776 return
778 777 b2caps = bundle2.bundle2caps(pushop.remote)
779 778 if not 'pushkey' in b2caps:
780 779 return
781 780 pushop.stepsdone.add('phases')
782 781 part2node = []
783 782
784 783 def handlefailure(pushop, exc):
785 784 targetid = int(exc.partid)
786 785 for partid, node in part2node:
787 786 if partid == targetid:
788 787 raise error.Abort(_('updating %s to public failed') % node)
789 788
790 789 enc = pushkey.encode
791 790 for newremotehead in pushop.outdatedphases:
792 791 part = bundler.newpart('pushkey')
793 792 part.addparam('namespace', enc('phases'))
794 793 part.addparam('key', enc(newremotehead.hex()))
795 794 part.addparam('old', enc(str(phases.draft)))
796 795 part.addparam('new', enc(str(phases.public)))
797 796 part2node.append((part.id, newremotehead))
798 797 pushop.pkfailcb[part.id] = handlefailure
799 798
800 799 def handlereply(op):
801 800 for partid, node in part2node:
802 801 partrep = op.records.getreplies(partid)
803 802 results = partrep['pushkey']
804 803 assert len(results) <= 1
805 804 msg = None
806 805 if not results:
807 806 msg = _('server ignored update of %s to public!\n') % node
808 807 elif not int(results[0]['return']):
809 808 msg = _('updating %s to public failed!\n') % node
810 809 if msg is not None:
811 810 pushop.ui.warn(msg)
812 811 return handlereply
813 812
814 813 @b2partsgenerator('obsmarkers')
815 814 def _pushb2obsmarkers(pushop, bundler):
816 815 if 'obsmarkers' in pushop.stepsdone:
817 816 return
818 817 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
819 818 if obsolete.commonversion(remoteversions) is None:
820 819 return
821 820 pushop.stepsdone.add('obsmarkers')
822 821 if pushop.outobsmarkers:
823 822 markers = sorted(pushop.outobsmarkers)
824 823 bundle2.buildobsmarkerspart(bundler, markers)
825 824
826 825 @b2partsgenerator('bookmarks')
827 826 def _pushb2bookmarks(pushop, bundler):
828 827 """handle bookmark push through bundle2"""
829 828 if 'bookmarks' in pushop.stepsdone:
830 829 return
831 830 b2caps = bundle2.bundle2caps(pushop.remote)
832 831 if 'pushkey' not in b2caps:
833 832 return
834 833 pushop.stepsdone.add('bookmarks')
835 834 part2book = []
836 835 enc = pushkey.encode
837 836
838 837 def handlefailure(pushop, exc):
839 838 targetid = int(exc.partid)
840 839 for partid, book, action in part2book:
841 840 if partid == targetid:
842 841 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
843 842         # we should not be called for parts we did not generate
844 843 assert False
845 844
846 845 for book, old, new in pushop.outbookmarks:
847 846 part = bundler.newpart('pushkey')
848 847 part.addparam('namespace', enc('bookmarks'))
849 848 part.addparam('key', enc(book))
850 849 part.addparam('old', enc(old))
851 850 part.addparam('new', enc(new))
852 851 action = 'update'
853 852 if not old:
854 853 action = 'export'
855 854 elif not new:
856 855 action = 'delete'
857 856 part2book.append((part.id, book, action))
858 857 pushop.pkfailcb[part.id] = handlefailure
859 858
860 859 def handlereply(op):
861 860 ui = pushop.ui
862 861 for partid, book, action in part2book:
863 862 partrep = op.records.getreplies(partid)
864 863 results = partrep['pushkey']
865 864 assert len(results) <= 1
866 865 if not results:
867 866 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
868 867 else:
869 868 ret = int(results[0]['return'])
870 869 if ret:
871 870 ui.status(bookmsgmap[action][0] % book)
872 871 else:
873 872 ui.warn(bookmsgmap[action][1] % book)
874 873 if pushop.bkresult is not None:
875 874 pushop.bkresult = 1
876 875 return handlereply
877 876
878 877 @b2partsgenerator('pushvars', idx=0)
879 878 def _getbundlesendvars(pushop, bundler):
880 879 '''send shellvars via bundle2'''
881 880 pushvars = pushop.pushvars
882 881 if pushvars:
883 882 shellvars = {}
884 883 for raw in pushvars:
885 884 if '=' not in raw:
886 885 msg = ("unable to parse variable '%s', should follow "
887 886 "'KEY=VALUE' or 'KEY=' format")
888 887 raise error.Abort(msg % raw)
889 888 k, v = raw.split('=', 1)
890 889 shellvars[k] = v
891 890
892 891 part = bundler.newpart('pushvars')
893 892
894 893 for key, value in shellvars.iteritems():
895 894 part.addparam(key, value, mandatory=False)
896 895
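# This is what backs `hg push --pushvars`; a usage sketch (the variable name
# is arbitrary):
#
#   $ hg push --pushvars "DEBUG=1"
#
# Hooks on the receiving side can then read the value from their
# environment (typically as HG_USERVAR_DEBUG).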
897 896 def _pushbundle2(pushop):
898 897 """push data to the remote using bundle2
899 898
900 899 The only currently supported type of data is changegroup but this will
901 900 evolve in the future."""
902 901 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
903 902 pushback = (pushop.trmanager
904 903 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
905 904
906 905 # create reply capability
907 906 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
908 907 allowpushback=pushback))
909 908 bundler.newpart('replycaps', data=capsblob)
910 909 replyhandlers = []
911 910 for partgenname in b2partsgenorder:
912 911 partgen = b2partsgenmapping[partgenname]
913 912 ret = partgen(pushop, bundler)
914 913 if callable(ret):
915 914 replyhandlers.append(ret)
916 915 # do not push if nothing to push
917 916 if bundler.nbparts <= 1:
918 917 return
919 918 stream = util.chunkbuffer(bundler.getchunks())
920 919 try:
921 920 try:
922 921 reply = pushop.remote.unbundle(
923 922 stream, ['force'], pushop.remote.url())
924 923 except error.BundleValueError as exc:
925 924 raise error.Abort(_('missing support for %s') % exc)
926 925 try:
927 926 trgetter = None
928 927 if pushback:
929 928 trgetter = pushop.trmanager.transaction
930 929 op = bundle2.processbundle(pushop.repo, reply, trgetter)
931 930 except error.BundleValueError as exc:
932 931 raise error.Abort(_('missing support for %s') % exc)
933 932 except bundle2.AbortFromPart as exc:
934 933 pushop.ui.status(_('remote: %s\n') % exc)
935 934 if exc.hint is not None:
936 935 pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
937 936 raise error.Abort(_('push failed on remote'))
938 937 except error.PushkeyFailed as exc:
939 938 partid = int(exc.partid)
940 939 if partid not in pushop.pkfailcb:
941 940 raise
942 941 pushop.pkfailcb[partid](pushop, exc)
943 942 for rephand in replyhandlers:
944 943 rephand(op)
945 944
946 945 def _pushchangeset(pushop):
947 946 """Make the actual push of changeset bundle to remote repo"""
948 947 if 'changesets' in pushop.stepsdone:
949 948 return
950 949 pushop.stepsdone.add('changesets')
951 950 if not _pushcheckoutgoing(pushop):
952 951 return
953 952
954 953 # Should have verified this in push().
955 954 assert pushop.remote.capable('unbundle')
956 955
957 956 pushop.repo.prepushoutgoinghooks(pushop)
958 957 outgoing = pushop.outgoing
959 958 # TODO: get bundlecaps from remote
960 959 bundlecaps = None
961 960 # create a changegroup from local
962 961 if pushop.revs is None and not (outgoing.excluded
963 962 or pushop.repo.changelog.filteredrevs):
964 963 # push everything,
965 964 # use the fast path, no race possible on push
966 965 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
967 966 fastpath=True, bundlecaps=bundlecaps)
968 967 else:
969 968 cg = changegroup.getchangegroup(pushop.repo, 'push', outgoing,
970 969 bundlecaps=bundlecaps)
971 970
972 971 # apply changegroup to remote
973 972 # local repo finds heads on server, finds out what
974 973 # revs it must push. once revs transferred, if server
975 974 # finds it has different heads (someone else won
976 975 # commit/push race), server aborts.
977 976 if pushop.force:
978 977 remoteheads = ['force']
979 978 else:
980 979 remoteheads = pushop.remoteheads
981 980 # ssh: return remote's addchangegroup()
982 981 # http: return remote's addchangegroup() or 0 for error
983 982 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
984 983 pushop.repo.url())
985 984
986 985 def _pushsyncphase(pushop):
987 986 """synchronise phase information locally and remotely"""
988 987 cheads = pushop.commonheads
989 988 # even when we don't push, exchanging phase data is useful
990 989 remotephases = pushop.remote.listkeys('phases')
991 990 if (pushop.ui.configbool('ui', '_usedassubrepo')
992 991 and remotephases # server supports phases
993 992 and pushop.cgresult is None # nothing was pushed
994 993 and remotephases.get('publishing', False)):
995 994 # When:
996 995 # - this is a subrepo push
997 996         # - and the remote supports phases
998 997         # - and no changesets were pushed
999 998         # - and the remote is publishing
1000 999         # We may be in the issue 3871 case!
1001 1000         # We drop the phase synchronisation that would otherwise be
1002 1001         # done as a courtesy to publish changesets that may still be
1003 1002         # draft on the remote.
1004 1003 remotephases = {'publishing': 'True'}
1005 1004     if not remotephases: # old server, or public-only reply from non-publishing
1006 1005 _localphasemove(pushop, cheads)
1007 1006 # don't push any phase data as there is nothing to push
1008 1007 else:
1009 1008 ana = phases.analyzeremotephases(pushop.repo, cheads,
1010 1009 remotephases)
1011 1010 pheads, droots = ana
1012 1011 ### Apply remote phase on local
1013 1012 if remotephases.get('publishing', False):
1014 1013 _localphasemove(pushop, cheads)
1015 1014 else: # publish = False
1016 1015 _localphasemove(pushop, pheads)
1017 1016 _localphasemove(pushop, cheads, phases.draft)
1018 1017 ### Apply local phase on remote
1019 1018
1020 1019 if pushop.cgresult:
1021 1020 if 'phases' in pushop.stepsdone:
1022 1021             # phases already pushed through bundle2
1023 1022 return
1024 1023 outdated = pushop.outdatedphases
1025 1024 else:
1026 1025 outdated = pushop.fallbackoutdatedphases
1027 1026
1028 1027 pushop.stepsdone.add('phases')
1029 1028
1030 1029 # filter heads already turned public by the push
1031 1030 outdated = [c for c in outdated if c.node() not in pheads]
1032 1031 # fallback to independent pushkey command
1033 1032 for newremotehead in outdated:
1034 1033 r = pushop.remote.pushkey('phases',
1035 1034 newremotehead.hex(),
1036 1035 str(phases.draft),
1037 1036 str(phases.public))
1038 1037 if not r:
1039 1038 pushop.ui.warn(_('updating %s to public failed!\n')
1040 1039 % newremotehead)
1041 1040
1042 1041 def _localphasemove(pushop, nodes, phase=phases.public):
1043 1042 """move <nodes> to <phase> in the local source repo"""
1044 1043 if pushop.trmanager:
1045 1044 phases.advanceboundary(pushop.repo,
1046 1045 pushop.trmanager.transaction(),
1047 1046 phase,
1048 1047 nodes)
1049 1048 else:
1050 1049 # repo is not locked, do not change any phases!
1051 1050         # Inform the user that phases should have been moved when
1052 1051         # applicable.
1053 1052 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1054 1053 phasestr = phases.phasenames[phase]
1055 1054 if actualmoves:
1056 1055 pushop.ui.status(_('cannot lock source repo, skipping '
1057 1056 'local %s phase update\n') % phasestr)
1058 1057
1059 1058 def _pushobsolete(pushop):
1060 1059 """utility function to push obsolete markers to a remote"""
1061 1060 if 'obsmarkers' in pushop.stepsdone:
1062 1061 return
1063 1062 repo = pushop.repo
1064 1063 remote = pushop.remote
1065 1064 pushop.stepsdone.add('obsmarkers')
1066 1065 if pushop.outobsmarkers:
1067 1066 pushop.ui.debug('try to push obsolete markers to remote\n')
1068 1067 rslts = []
1069 1068 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1070 1069 for key in sorted(remotedata, reverse=True):
1071 1070 # reverse sort to ensure we end with dump0
1072 1071 data = remotedata[key]
1073 1072 rslts.append(remote.pushkey('obsolete', key, '', data))
1074 1073 if [r for r in rslts if not r]:
1075 1074 msg = _('failed to push some obsolete markers!\n')
1076 1075 repo.ui.warn(msg)
1077 1076
1078 1077 def _pushbookmark(pushop):
1079 1078 """Update bookmark position on remote"""
1080 1079 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1081 1080 return
1082 1081 pushop.stepsdone.add('bookmarks')
1083 1082 ui = pushop.ui
1084 1083 remote = pushop.remote
1085 1084
1086 1085 for b, old, new in pushop.outbookmarks:
1087 1086 action = 'update'
1088 1087 if not old:
1089 1088 action = 'export'
1090 1089 elif not new:
1091 1090 action = 'delete'
1092 1091 if remote.pushkey('bookmarks', b, old, new):
1093 1092 ui.status(bookmsgmap[action][0] % b)
1094 1093 else:
1095 1094 ui.warn(bookmsgmap[action][1] % b)
1096 1095     # discovery may have set the value from an invalid entry
1097 1096 if pushop.bkresult is not None:
1098 1097 pushop.bkresult = 1
1099 1098
1100 1099 class pulloperation(object):
1101 1100     """An object that represents a single pull operation
1102 1101
1103 1102     Its purpose is to carry pull-related state and very common operations.
1104 1103
1105 1104     A new one should be created at the beginning of each pull and discarded
1106 1105     afterward.
1107 1106 """
1108 1107
1109 1108 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1110 1109 remotebookmarks=None, streamclonerequested=None):
1111 1110 # repo we pull into
1112 1111 self.repo = repo
1113 1112 # repo we pull from
1114 1113 self.remote = remote
1115 1114 # revision we try to pull (None is "all")
1116 1115 self.heads = heads
1117 1116         # bookmarks pulled explicitly
1118 1117 self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
1119 1118 for bookmark in bookmarks]
1120 1119 # do we force pull?
1121 1120 self.force = force
1122 1121 # whether a streaming clone was requested
1123 1122 self.streamclonerequested = streamclonerequested
1124 1123 # transaction manager
1125 1124 self.trmanager = None
1126 1125         # set of common changesets between local and remote before pull
1127 1126         self.common = None
1128 1127         # set of pulled heads
1129 1128         self.rheads = None
1130 1129         # list of missing changesets to fetch remotely
1131 1130         self.fetch = None
1132 1131         # remote bookmarks data
1133 1132         self.remotebookmarks = remotebookmarks
1134 1133         # result of changegroup pulling (used as return code by pull)
1135 1134         self.cgresult = None
1136 1135         # set of steps already done
1137 1136         self.stepsdone = set()
1138 1137 # Whether we attempted a clone from pre-generated bundles.
1139 1138 self.clonebundleattempted = False
1140 1139
1141 1140 @util.propertycache
1142 1141 def pulledsubset(self):
1143 1142         """heads of the set of changesets targeted by the pull"""
1144 1143 # compute target subset
1145 1144 if self.heads is None:
1146 1145             # We pulled everything possible
1147 1146 # sync on everything common
1148 1147 c = set(self.common)
1149 1148 ret = list(self.common)
1150 1149 for n in self.rheads:
1151 1150 if n not in c:
1152 1151 ret.append(n)
1153 1152 return ret
1154 1153 else:
1155 1154 # We pulled a specific subset
1156 1155 # sync on this subset
1157 1156 return self.heads
1158 1157
1159 1158 @util.propertycache
1160 1159 def canusebundle2(self):
1161 1160 return not _forcebundle1(self)
1162 1161
1163 1162 @util.propertycache
1164 1163 def remotebundle2caps(self):
1165 1164 return bundle2.bundle2caps(self.remote)
1166 1165
1167 1166 def gettransaction(self):
1168 1167 # deprecated; talk to trmanager directly
1169 1168 return self.trmanager.transaction()
1170 1169
1171 1170 class transactionmanager(util.transactional):
1172 1171 """An object to manage the life cycle of a transaction
1173 1172
1174 1173 It creates the transaction on demand and calls the appropriate hooks when
1175 1174 closing the transaction."""
1176 1175 def __init__(self, repo, source, url):
1177 1176 self.repo = repo
1178 1177 self.source = source
1179 1178 self.url = url
1180 1179 self._tr = None
1181 1180
1182 1181 def transaction(self):
1183 1182 """Return an open transaction object, constructing if necessary"""
1184 1183 if not self._tr:
1185 1184 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1186 1185 self._tr = self.repo.transaction(trname)
1187 1186 self._tr.hookargs['source'] = self.source
1188 1187 self._tr.hookargs['url'] = self.url
1189 1188 return self._tr
1190 1189
1191 1190 def close(self):
1192 1191 """close transaction if created"""
1193 1192 if self._tr is not None:
1194 1193 self._tr.close()
1195 1194
1196 1195 def release(self):
1197 1196 """release transaction if created"""
1198 1197 if self._tr is not None:
1199 1198 self._tr.release()
1200 1199
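# A minimal usage sketch (the URL is hypothetical). As a util.transactional,
# the manager can drive a with-statement that closes the transaction on
# success and releases it on error:
#
#   trmanager = transactionmanager(repo, 'pull', 'https://example.com/repo')
#   with trmanager:
#       tr = trmanager.transaction()
#       # ... apply changes under tr ...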
1201 1200 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1202 1201 streamclonerequested=None):
1203 1202 """Fetch repository data from a remote.
1204 1203
1205 1204 This is the main function used to retrieve data from a remote repository.
1206 1205
1207 1206 ``repo`` is the local repository to clone into.
1208 1207 ``remote`` is a peer instance.
1209 1208 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1210 1209 default) means to pull everything from the remote.
1211 1210 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1212 1211 default, all remote bookmarks are pulled.
1213 1212 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1214 1213 initialization.
1215 1214 ``streamclonerequested`` is a boolean indicating whether a "streaming
1216 1215 clone" is requested. A "streaming clone" is essentially a raw file copy
1217 1216 of revlogs from the server. This only works when the local repository is
1218 1217 empty. The default value of ``None`` means to respect the server
1219 1218 configuration for preferring stream clones.
1220 1219
1221 1220 Returns the ``pulloperation`` created for this pull.
1222 1221 """
1223 1222 if opargs is None:
1224 1223 opargs = {}
1225 1224 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1226 1225 streamclonerequested=streamclonerequested, **opargs)
1227 1226
1228 1227 peerlocal = pullop.remote.local()
1229 1228 if peerlocal:
1230 1229 missing = set(peerlocal.requirements) - pullop.repo.supported
1231 1230 if missing:
1232 1231 msg = _("required features are not"
1233 1232 " supported in the destination:"
1234 1233 " %s") % (', '.join(sorted(missing)))
1235 1234 raise error.Abort(msg)
1236 1235
1237 1236 wlock = lock = None
1238 1237 try:
1239 1238 wlock = pullop.repo.wlock()
1240 1239 lock = pullop.repo.lock()
1241 1240 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1242 1241 streamclone.maybeperformlegacystreamclone(pullop)
1243 1242 # This should ideally be in _pullbundle2(). However, it needs to run
1244 1243 # before discovery to avoid extra work.
1245 1244 _maybeapplyclonebundle(pullop)
1246 1245 _pulldiscovery(pullop)
1247 1246 if pullop.canusebundle2:
1248 1247 _pullbundle2(pullop)
1249 1248 _pullchangeset(pullop)
1250 1249 _pullphase(pullop)
1251 1250 _pullbookmarks(pullop)
1252 1251 _pullobsolete(pullop)
1253 1252 pullop.trmanager.close()
1254 1253 finally:
1255 1254 lockmod.release(pullop.trmanager, lock, wlock)
1256 1255
1257 1256 return pullop
1258 1257
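# A minimal calling sketch, assuming `repo` and `remote` exist; `hg pull`
# reduces to roughly this:
#
#   pullop = pull(repo, remote)  # heads=None pulls everything
#   if pullop.cgresult:
#       repo.ui.status('changesets pulled\n')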
1259 1258 # list of steps to perform discovery before pull
1260 1259 pulldiscoveryorder = []
1261 1260
1262 1261 # Mapping between step name and function
1263 1262 #
1264 1263 # This exists to help extensions wrap steps if necessary
1265 1264 pulldiscoverymapping = {}
1266 1265
1267 1266 def pulldiscovery(stepname):
1268 1267 """decorator for function performing discovery before pull
1269 1268
1270 1269 The function is added to the step -> function mapping and appended to the
1271 1270     list of steps. Beware that decorated functions will be added in order (this
1272 1271     may matter).
1273 1272
1274 1273     You can only use this decorator for a new step; if you want to wrap a step
1275 1274     from an extension, change the pulldiscoverymapping dictionary directly."""
1276 1275 def dec(func):
1277 1276 assert stepname not in pulldiscoverymapping
1278 1277 pulldiscoverymapping[stepname] = func
1279 1278 pulldiscoveryorder.append(stepname)
1280 1279 return func
1281 1280 return dec
1282 1281
1283 1282 def _pulldiscovery(pullop):
1284 1283 """Run all discovery steps"""
1285 1284 for stepname in pulldiscoveryorder:
1286 1285 step = pulldiscoverymapping[stepname]
1287 1286 step(pullop)
1288 1287
1289 1288 @pulldiscovery('b1:bookmarks')
1290 1289 def _pullbookmarkbundle1(pullop):
1291 1290 """fetch bookmark data in bundle1 case
1292 1291
1293 1292 If not using bundle2, we have to fetch bookmarks before changeset
1294 1293 discovery to reduce the chance and impact of race conditions."""
1295 1294 if pullop.remotebookmarks is not None:
1296 1295 return
1297 1296 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1298 1297         # all known bundle2 servers now support listkeys, but let's be nice
1299 1298         # with new implementations.
1300 1299 return
1301 1300 pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1302 1301
1303 1302
1304 1303 @pulldiscovery('changegroup')
1305 1304 def _pulldiscoverychangegroup(pullop):
1306 1305 """discovery phase for the pull
1307 1306
1308 1307     Currently handles changeset discovery only; it will change to handle all
1309 1308     discovery at some point."""
1310 1309 tmp = discovery.findcommonincoming(pullop.repo,
1311 1310 pullop.remote,
1312 1311 heads=pullop.heads,
1313 1312 force=pullop.force)
1314 1313 common, fetch, rheads = tmp
1315 1314 nm = pullop.repo.unfiltered().changelog.nodemap
1316 1315 if fetch and rheads:
1317 1316         # If a remote head is filtered locally, let's drop it from the unknown
1318 1317         # remote heads and put it back in common.
1319 1318         #
1320 1319         # This is a hackish solution to catch most of the "common but locally
1321 1320         # hidden" situations. We do not perform discovery on the unfiltered
1322 1321         # repository because it ends up doing a pathological number of round
1323 1322         # trips for a huge number of changesets we do not care about.
1324 1323         #
1325 1324         # If a set of such "common but filtered" changesets exists on the server
1326 1325         # but does not include a remote head, we will not be able to detect it.
1327 1326 scommon = set(common)
1328 1327 filteredrheads = []
1329 1328 for n in rheads:
1330 1329 if n in nm:
1331 1330 if n not in scommon:
1332 1331 common.append(n)
1333 1332 else:
1334 1333 filteredrheads.append(n)
1335 1334 if not filteredrheads:
1336 1335 fetch = []
1337 1336 rheads = filteredrheads
1338 1337 pullop.common = common
1339 1338 pullop.fetch = fetch
1340 1339 pullop.rheads = rheads
1341 1340
1342 1341 def _pullbundle2(pullop):
1343 1342 """pull data using bundle2
1344 1343
1345 1344     For now, the only supported data is the changegroup."""
1346 1345 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1347 1346
1348 1347 # At the moment we don't do stream clones over bundle2. If that is
1349 1348 # implemented then here's where the check for that will go.
1350 1349 streaming = False
1351 1350
1352 1351 # pulling changegroup
1353 1352 pullop.stepsdone.add('changegroup')
1354 1353
1355 1354 kwargs['common'] = pullop.common
1356 1355 kwargs['heads'] = pullop.heads or pullop.rheads
1357 1356 kwargs['cg'] = pullop.fetch
1358 1357 if 'listkeys' in pullop.remotebundle2caps:
1359 1358 kwargs['listkeys'] = ['phases']
1360 1359 if pullop.remotebookmarks is None:
1361 1360             # make sure to always include bookmark data when migrating
1362 1361 # `hg incoming --bundle` to using this function.
1363 1362 kwargs['listkeys'].append('bookmarks')
1364 1363
1365 1364 # If this is a full pull / clone and the server supports the clone bundles
1366 1365 # feature, tell the server whether we attempted a clone bundle. The
1367 1366 # presence of this flag indicates the client supports clone bundles. This
1368 1367 # will enable the server to treat clients that support clone bundles
1369 1368 # differently from those that don't.
1370 1369 if (pullop.remote.capable('clonebundles')
1371 1370 and pullop.heads is None and list(pullop.common) == [nullid]):
1372 1371 kwargs['cbattempted'] = pullop.clonebundleattempted
1373 1372
1374 1373 if streaming:
1375 1374 pullop.repo.ui.status(_('streaming all changes\n'))
1376 1375 elif not pullop.fetch:
1377 1376 pullop.repo.ui.status(_("no changes found\n"))
1378 1377 pullop.cgresult = 0
1379 1378 else:
1380 1379 if pullop.heads is None and list(pullop.common) == [nullid]:
1381 1380 pullop.repo.ui.status(_("requesting all changes\n"))
1382 1381 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1383 1382 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1384 1383 if obsolete.commonversion(remoteversions) is not None:
1385 1384 kwargs['obsmarkers'] = True
1386 1385 pullop.stepsdone.add('obsmarkers')
1387 1386 _pullbundle2extraprepare(pullop, kwargs)
1388 1387 bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
1389 1388 try:
1390 1389 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1391 1390 except bundle2.AbortFromPart as exc:
1392 1391 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1393 1392 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1394 1393 except error.BundleValueError as exc:
1395 1394 raise error.Abort(_('missing support for %s') % exc)
1396 1395
1397 1396 if pullop.fetch:
1398 1397 pullop.cgresult = bundle2.combinechangegroupresults(op)
1399 1398
1400 1399 # If the bundle had a phase-heads part, then phase exchange is already done
1401 1400 if op.records['phase-heads']:
1402 1401 pullop.stepsdone.add('phases')
1403 1402
1404 1403 # processing phases change
1405 1404 for namespace, value in op.records['listkeys']:
1406 1405 if namespace == 'phases':
1407 1406 _pullapplyphases(pullop, value)
1408 1407
1409 1408 # processing bookmark update
1410 1409 for namespace, value in op.records['listkeys']:
1411 1410 if namespace == 'bookmarks':
1412 1411 pullop.remotebookmarks = value
1413 1412
1414 1413 # bookmark data were either already there or pulled in the bundle
1415 1414 if pullop.remotebookmarks is not None:
1416 1415 _pullbookmarks(pullop)
1417 1416
1418 1417 def _pullbundle2extraprepare(pullop, kwargs):
1419 1418 """hook function so that extensions can extend the getbundle call"""
1420 1419 pass
1421 1420
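# A sketch of how an extension could extend the getbundle call through this
# hook using the standard wrapping mechanism (the 'myarg' key is
# hypothetical; the extension would import exchange and extensions from
# mercurial):
#
#   def extraprepare(orig, pullop, kwargs):
#       kwargs['myarg'] = 'value'
#       return orig(pullop, kwargs)
#   extensions.wrapfunction(exchange, '_pullbundle2extraprepare', extraprepare)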
1422 1421 def _pullchangeset(pullop):
1423 1422 """pull changeset from unbundle into the local repo"""
1424 1423     # We delay opening the transaction as long as possible so we
1425 1424     # don't open a transaction for nothing, which would break future
1426 1425     # useful rollback calls
1427 1426 if 'changegroup' in pullop.stepsdone:
1428 1427 return
1429 1428 pullop.stepsdone.add('changegroup')
1430 1429 if not pullop.fetch:
1431 1430 pullop.repo.ui.status(_("no changes found\n"))
1432 1431 pullop.cgresult = 0
1433 1432 return
1434 1433 tr = pullop.gettransaction()
1435 1434 if pullop.heads is None and list(pullop.common) == [nullid]:
1436 1435 pullop.repo.ui.status(_("requesting all changes\n"))
1437 1436 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1438 1437 # issue1320, avoid a race if remote changed after discovery
1439 1438 pullop.heads = pullop.rheads
1440 1439
1441 1440 if pullop.remote.capable('getbundle'):
1442 1441 # TODO: get bundlecaps from remote
1443 1442 cg = pullop.remote.getbundle('pull', common=pullop.common,
1444 1443 heads=pullop.heads or pullop.rheads)
1445 1444 elif pullop.heads is None:
1446 1445 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1447 1446 elif not pullop.remote.capable('changegroupsubset'):
1448 1447 raise error.Abort(_("partial pull cannot be done because "
1449 1448 "other repository doesn't support "
1450 1449 "changegroupsubset."))
1451 1450 else:
1452 1451 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1453 1452 bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
1454 1453 pullop.remote.url())
1455 1454 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1456 1455
1457 1456 def _pullphase(pullop):
1458 1457 # Get remote phases data from remote
1459 1458 if 'phases' in pullop.stepsdone:
1460 1459 return
1461 1460 remotephases = pullop.remote.listkeys('phases')
1462 1461 _pullapplyphases(pullop, remotephases)
1463 1462
1464 1463 def _pullapplyphases(pullop, remotephases):
1465 1464 """apply phase movement from observed remote state"""
1466 1465 if 'phases' in pullop.stepsdone:
1467 1466 return
1468 1467 pullop.stepsdone.add('phases')
1469 1468 publishing = bool(remotephases.get('publishing', False))
1470 1469 if remotephases and not publishing:
1471 1470 # remote is new and non-publishing
1472 1471 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1473 1472 pullop.pulledsubset,
1474 1473 remotephases)
1475 1474 dheads = pullop.pulledsubset
1476 1475 else:
1477 1476         # Remote is old or publishing; all common changesets
1478 1477         # should be seen as public
1479 1478 pheads = pullop.pulledsubset
1480 1479 dheads = []
1481 1480 unfi = pullop.repo.unfiltered()
1482 1481 phase = unfi._phasecache.phase
1483 1482 rev = unfi.changelog.nodemap.get
1484 1483 public = phases.public
1485 1484 draft = phases.draft
1486 1485
1487 1486 # exclude changesets already public locally and update the others
1488 1487 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1489 1488 if pheads:
1490 1489 tr = pullop.gettransaction()
1491 1490 phases.advanceboundary(pullop.repo, tr, public, pheads)
1492 1491
1493 1492 # exclude changesets already draft locally and update the others
1494 1493 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1495 1494 if dheads:
1496 1495 tr = pullop.gettransaction()
1497 1496 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1498 1497
1499 1498 def _pullbookmarks(pullop):
1500 1499 """process the remote bookmark information to update the local one"""
1501 1500 if 'bookmarks' in pullop.stepsdone:
1502 1501 return
1503 1502 pullop.stepsdone.add('bookmarks')
1504 1503 repo = pullop.repo
1505 1504 remotebookmarks = pullop.remotebookmarks
1506 1505 remotebookmarks = bookmod.unhexlifybookmarks(remotebookmarks)
1507 1506 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1508 1507 pullop.remote.url(),
1509 1508 pullop.gettransaction,
1510 1509 explicit=pullop.explicitbookmarks)
1511 1510
1512 1511 def _pullobsolete(pullop):
1513 1512 """utility function to pull obsolete markers from a remote
1514 1513
1515 1514     `gettransaction` is a function that returns the pull transaction, creating
1516 1515     one if necessary. We return the transaction to inform the calling code that
1517 1516     a new transaction has been created (when applicable).
1518 1517
1519 1518     Exists mostly to allow overriding for experimentation purposes"""
1520 1519 if 'obsmarkers' in pullop.stepsdone:
1521 1520 return
1522 1521 pullop.stepsdone.add('obsmarkers')
1523 1522 tr = None
1524 1523 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1525 1524 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1526 1525 remoteobs = pullop.remote.listkeys('obsolete')
1527 1526 if 'dump0' in remoteobs:
1528 1527 tr = pullop.gettransaction()
1529 1528 markers = []
1530 1529 for key in sorted(remoteobs, reverse=True):
1531 1530 if key.startswith('dump'):
1532 1531 data = util.b85decode(remoteobs[key])
1533 1532 version, newmarks = obsolete._readmarkers(data)
1534 1533 markers += newmarks
1535 1534 if markers:
1536 1535 pullop.repo.obsstore.add(tr, markers)
1537 1536 pullop.repo.invalidatevolatilesets()
1538 1537 return tr
1539 1538
1540 1539 def caps20to10(repo):
1541 1540 """return a set with appropriate options to use bundle20 during getbundle"""
1542 1541 caps = {'HG20'}
1543 1542 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1544 1543 caps.add('bundle2=' + urlreq.quote(capsblob))
1545 1544 return caps
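# The result looks roughly like the following, with the quoted blob
# depending on the repo's actual bundle2 capabilities:
#
#   {'HG20', 'bundle2=HG20%0Achangegroup%3D01%2C02'}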
1546 1545
1547 1546 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1548 1547 getbundle2partsorder = []
1549 1548
1550 1549 # Mapping between step name and function
1551 1550 #
1552 1551 # This exists to help extensions wrap steps if necessary
1553 1552 getbundle2partsmapping = {}
1554 1553
1555 1554 def getbundle2partsgenerator(stepname, idx=None):
1556 1555 """decorator for function generating bundle2 part for getbundle
1557 1556
1558 1557 The function is added to the step -> function mapping and appended to the
1559 1558 list of steps. Beware that decorated functions will be added in order
1560 1559 (this may matter).
1561 1560
1562 1561     You can only use this decorator for new steps; if you want to wrap a step
1563 1562     from an extension, change the getbundle2partsmapping dictionary directly."""
1564 1563 def dec(func):
1565 1564 assert stepname not in getbundle2partsmapping
1566 1565 getbundle2partsmapping[stepname] = func
1567 1566 if idx is None:
1568 1567 getbundle2partsorder.append(stepname)
1569 1568 else:
1570 1569 getbundle2partsorder.insert(idx, stepname)
1571 1570 return func
1572 1571 return dec
1573 1572
1574 1573 def bundle2requested(bundlecaps):
1575 1574 if bundlecaps is not None:
1576 1575 return any(cap.startswith('HG2') for cap in bundlecaps)
1577 1576 return False
1578 1577
1579 1578 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
1580 1579 **kwargs):
1581 1580 """Return chunks constituting a bundle's raw data.
1582 1581
1583 1582 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1584 1583 passed.
1585 1584
1586 1585 Returns an iterator over raw chunks (of varying sizes).
1587 1586 """
1588 1587 kwargs = pycompat.byteskwargs(kwargs)
1589 1588 usebundle2 = bundle2requested(bundlecaps)
1590 1589 # bundle10 case
1591 1590 if not usebundle2:
1592 1591 if bundlecaps and not kwargs.get('cg', True):
1593 1592 raise ValueError(_('request for bundle10 must include changegroup'))
1594 1593
1595 1594 if kwargs:
1596 1595 raise ValueError(_('unsupported getbundle arguments: %s')
1597 1596 % ', '.join(sorted(kwargs.keys())))
1598 1597 outgoing = _computeoutgoing(repo, heads, common)
1599 1598 bundler = changegroup.getbundler('01', repo, bundlecaps)
1600 1599 return changegroup.getsubsetraw(repo, outgoing, bundler, source)
1601 1600
1602 1601 # bundle20 case
1603 1602 b2caps = {}
1604 1603 for bcaps in bundlecaps:
1605 1604 if bcaps.startswith('bundle2='):
1606 1605 blob = urlreq.unquote(bcaps[len('bundle2='):])
1607 1606 b2caps.update(bundle2.decodecaps(blob))
1608 1607 bundler = bundle2.bundle20(repo.ui, b2caps)
1609 1608
1610 1609 kwargs['heads'] = heads
1611 1610 kwargs['common'] = common
1612 1611
1613 1612 for name in getbundle2partsorder:
1614 1613 func = getbundle2partsmapping[name]
1615 1614 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1616 1615 **pycompat.strkwargs(kwargs))
1617 1616
1618 1617 return bundler.getchunks()
1619 1618
1620 1619 @getbundle2partsgenerator('changegroup')
1621 1620 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1622 1621 b2caps=None, heads=None, common=None, **kwargs):
1623 1622 """add a changegroup part to the requested bundle"""
1624 cg = None
1623 cgstream = None
1625 1624 if kwargs.get('cg', True):
1626 1625 # build changegroup bundle here.
1627 1626 version = '01'
1628 1627 cgversions = b2caps.get('changegroup')
1629 1628 if cgversions: # 3.1 and 3.2 ship with an empty value
1630 1629 cgversions = [v for v in cgversions
1631 1630 if v in changegroup.supportedoutgoingversions(repo)]
1632 1631 if not cgversions:
1633 1632 raise ValueError(_('no common changegroup version'))
1634 1633 version = max(cgversions)
1635 1634 outgoing = _computeoutgoing(repo, heads, common)
1636 cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
1637 bundlecaps=bundlecaps,
1638 version=version)
1635 cgstream = changegroup.makestream(repo, outgoing, version, source,
1636 bundlecaps=bundlecaps)
1639 1637
1640 if cg:
1641 part = bundler.newpart('changegroup', data=cg)
1638 if cgstream:
1639 part = bundler.newpart('changegroup', data=cgstream)
1642 1640 if cgversions:
1643 1641 part.addparam('version', version)
1644 1642 part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
1645 1643 if 'treemanifest' in repo.requirements:
1646 1644 part.addparam('treemanifest', '1')
1647 1645
1648 1646 @getbundle2partsgenerator('listkeys')
1649 1647 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1650 1648 b2caps=None, **kwargs):
1651 1649 """add parts containing listkeys namespaces to the requested bundle"""
1652 1650 listkeys = kwargs.get('listkeys', ())
1653 1651 for namespace in listkeys:
1654 1652 part = bundler.newpart('listkeys')
1655 1653 part.addparam('namespace', namespace)
1656 1654 keys = repo.listkeys(namespace).items()
1657 1655 part.data = pushkey.encodekeys(keys)
1658 1656
1659 1657 @getbundle2partsgenerator('obsmarkers')
1660 1658 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1661 1659 b2caps=None, heads=None, **kwargs):
1662 1660 """add an obsolescence markers part to the requested bundle"""
1663 1661 if kwargs.get('obsmarkers', False):
1664 1662 if heads is None:
1665 1663 heads = repo.heads()
1666 1664 subset = [c.node() for c in repo.set('::%ln', heads)]
1667 1665 markers = repo.obsstore.relevantmarkers(subset)
1668 1666 markers = sorted(markers)
1669 1667 bundle2.buildobsmarkerspart(bundler, markers)
1670 1668
1671 1669 @getbundle2partsgenerator('hgtagsfnodes')
1672 1670 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1673 1671 b2caps=None, heads=None, common=None,
1674 1672 **kwargs):
1675 1673 """Transfer the .hgtags filenodes mapping.
1676 1674
1677 1675 Only values for heads in this bundle will be transferred.
1678 1676
1679 1677 The part data consists of pairs of 20 byte changeset node and .hgtags
1680 1678 filenodes raw values.
1681 1679 """
1682 1680 # Don't send unless:
1683 1681     # - changesets are being exchanged,
1684 1682 # - the client supports it.
1685 1683 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1686 1684 return
1687 1685
1688 1686 outgoing = _computeoutgoing(repo, heads, common)
1689 1687 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
1690 1688
1691 1689 def _getbookmarks(repo, **kwargs):
1692 1690 """Returns bookmark to node mapping.
1693 1691
1694 1692 This function is primarily used to generate `bookmarks` bundle2 part.
1695 1693 It is a separate function in order to make it easy to wrap it
1696 1694 in extensions. Passing `kwargs` to the function makes it easy to
1697 1695 add new parameters in extensions.
1698 1696 """
1699 1697
1700 1698 return dict(bookmod.listbinbookmarks(repo))
1701 1699
1702 1700 def check_heads(repo, their_heads, context):
1703 1701 """check if the heads of a repo have been modified
1704 1702
1705 1703 Used by peer for unbundling.
1706 1704 """
1707 1705 heads = repo.heads()
1708 1706 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
1709 1707 if not (their_heads == ['force'] or their_heads == heads or
1710 1708 their_heads == ['hashed', heads_hash]):
1711 1709 # someone else committed/pushed/unbundled while we
1712 1710 # were transferring data
1713 1711 raise error.PushRaced('repository changed while %s - '
1714 1712 'please try again' % context)
1715 1713
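# The ['hashed', ...] form is the compact representation a wire peer sends
# when the server advertises the 'unbundlehash' capability; a sketch of the
# client-side computation, mirroring the digest above (names hypothetical):
#
#   heads_hash = hashlib.sha1(''.join(sorted(observedheads))).digest()
#   remote.unbundle(cg, ['hashed', heads_hash], url)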
1716 1714 def unbundle(repo, cg, heads, source, url):
1717 1715 """Apply a bundle to a repo.
1718 1716
1719 1717     This function makes sure the repo is locked during the application and has a
1720 1718     mechanism to check that no push race occurred between the creation of the
1721 1719     bundle and its application.
1722 1720
1723 1721     If the push was raced, a PushRaced exception is raised."""
1724 1722 r = 0
1725 1723 # need a transaction when processing a bundle2 stream
1726 1724 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
1727 1725 lockandtr = [None, None, None]
1728 1726 recordout = None
1729 1727 # quick fix for output mismatch with bundle2 in 3.4
1730 1728 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
1731 1729 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1732 1730 captureoutput = True
1733 1731 try:
1734 1732 # note: outside bundle1, 'heads' is expected to be empty and this
1735 1733         # 'check_heads' call will be a no-op
1736 1734 check_heads(repo, heads, 'uploading changes')
1737 1735 # push can proceed
1738 1736 if not isinstance(cg, bundle2.unbundle20):
1739 1737 # legacy case: bundle1 (changegroup 01)
1740 1738 txnname = "\n".join([source, util.hidepassword(url)])
1741 1739 with repo.lock(), repo.transaction(txnname) as tr:
1742 1740 op = bundle2.applybundle(repo, cg, tr, source, url)
1743 1741 r = bundle2.combinechangegroupresults(op)
1744 1742 else:
1745 1743 r = None
1746 1744 try:
1747 1745 def gettransaction():
1748 1746 if not lockandtr[2]:
1749 1747 lockandtr[0] = repo.wlock()
1750 1748 lockandtr[1] = repo.lock()
1751 1749 lockandtr[2] = repo.transaction(source)
1752 1750 lockandtr[2].hookargs['source'] = source
1753 1751 lockandtr[2].hookargs['url'] = url
1754 1752 lockandtr[2].hookargs['bundle2'] = '1'
1755 1753 return lockandtr[2]
1756 1754
1757 1755 # Do greedy locking by default until we're satisfied with lazy
1758 1756 # locking.
1759 1757 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
1760 1758 gettransaction()
1761 1759
1762 1760 op = bundle2.bundleoperation(repo, gettransaction,
1763 1761 captureoutput=captureoutput)
1764 1762 try:
1765 1763 op = bundle2.processbundle(repo, cg, op=op)
1766 1764 finally:
1767 1765 r = op.reply
1768 1766 if captureoutput and r is not None:
1769 1767 repo.ui.pushbuffer(error=True, subproc=True)
1770 1768 def recordout(output):
1771 1769 r.newpart('output', data=output, mandatory=False)
1772 1770 if lockandtr[2] is not None:
1773 1771 lockandtr[2].close()
1774 1772 except BaseException as exc:
1775 1773 exc.duringunbundle2 = True
1776 1774 if captureoutput and r is not None:
1777 1775 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1778 1776 def recordout(output):
1779 1777 part = bundle2.bundlepart('output', data=output,
1780 1778 mandatory=False)
1781 1779 parts.append(part)
1782 1780 raise
1783 1781 finally:
1784 1782 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
1785 1783 if recordout is not None:
1786 1784 recordout(repo.ui.popbuffer())
1787 1785 return r
1788 1786
1789 1787 def _maybeapplyclonebundle(pullop):
1790 1788 """Apply a clone bundle from a remote, if possible."""
1791 1789
1792 1790 repo = pullop.repo
1793 1791 remote = pullop.remote
1794 1792
1795 1793 if not repo.ui.configbool('ui', 'clonebundles'):
1796 1794 return
1797 1795
1798 1796 # Only run if local repo is empty.
1799 1797 if len(repo):
1800 1798 return
1801 1799
1802 1800 if pullop.heads:
1803 1801 return
1804 1802
1805 1803 if not remote.capable('clonebundles'):
1806 1804 return
1807 1805
1808 1806 res = remote._call('clonebundles')
1809 1807
1810 1808 # If we call the wire protocol command, that's good enough to record the
1811 1809 # attempt.
1812 1810 pullop.clonebundleattempted = True
1813 1811
1814 1812 entries = parseclonebundlesmanifest(repo, res)
1815 1813 if not entries:
1816 1814 repo.ui.note(_('no clone bundles available on remote; '
1817 1815 'falling back to regular clone\n'))
1818 1816 return
1819 1817
1820 1818 entries = filterclonebundleentries(repo, entries)
1821 1819 if not entries:
1822 1820 # There is a thundering herd concern here. However, if a server
1823 1821 # operator doesn't advertise bundles appropriate for its clients,
1824 1822 # they deserve what's coming. Furthermore, from a client's
1825 1823 # perspective, no automatic fallback would mean not being able to
1826 1824 # clone!
1827 1825 repo.ui.warn(_('no compatible clone bundles available on server; '
1828 1826 'falling back to regular clone\n'))
1829 1827 repo.ui.warn(_('(you may want to report this to the server '
1830 1828 'operator)\n'))
1831 1829 return
1832 1830
1833 1831 entries = sortclonebundleentries(repo.ui, entries)
1834 1832
1835 1833 url = entries[0]['URL']
1836 1834 repo.ui.status(_('applying clone bundle from %s\n') % url)
1837 1835 if trypullbundlefromurl(repo.ui, repo, url):
1838 1836 repo.ui.status(_('finished applying clone bundle\n'))
1839 1837 # Bundle failed.
1840 1838 #
1841 1839 # We abort by default to avoid the thundering herd of
1842 1840 # clients flooding a server that was expecting expensive
1843 1841 # clone load to be offloaded.
1844 1842 elif repo.ui.configbool('ui', 'clonebundlefallback'):
1845 1843 repo.ui.warn(_('falling back to normal clone\n'))
1846 1844 else:
1847 1845 raise error.Abort(_('error applying bundle'),
1848 1846 hint=_('if this error persists, consider contacting '
1849 1847 'the server operator or disable clone '
1850 1848 'bundles via '
1851 1849 '"--config ui.clonebundles=false"'))
1852 1850
def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. Each dict has a ``URL`` key corresponding
    to the entry's URL; the remaining keys are the entry's attributes.
    """
    m = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify, since a preference can target
            # a single component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    comp, version, params = parsebundlespec(repo, value,
                                                            externalnames=True)
                    attrs['COMPRESSION'] = comp
                    attrs['VERSION'] = version
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        m.append(attrs)

    return m

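# A hypothetical manifest, as returned by the 'clonebundles' wire
# command (URLs invented), and the entries the parser above would
# produce from it. Note how a BUNDLESPEC attribute is additionally
# split into COMPRESSION/VERSION components:
#
#   https://cdn.example.com/full.hg BUNDLESPEC=gzip-v2
#   https://cdn.example.com/packed.hg BUNDLESPEC=none-packed1 REQUIRESNI=true
#
# -> [{'URL': 'https://cdn.example.com/full.hg',
#      'BUNDLESPEC': 'gzip-v2', 'COMPRESSION': 'gzip', 'VERSION': 'v2'},
#     {'URL': 'https://cdn.example.com/packed.hg',
#      'BUNDLESPEC': 'none-packed1', 'COMPRESSION': 'none',
#      'VERSION': 'packed1', 'REQUIRESNI': 'true'}]
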
def filterclonebundleentries(repo, entries):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                parsebundlespec(repo, spec, strict=True)
            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (entry['URL'], str(e)))
                continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries

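# Sketch of the filtering effect on a hypothetical entry list (URLs and
# the 'lzma-v9' spec are invented): an entry whose BUNDLESPEC fails
# strict parsing, or one requiring SNI on a client without it, is
# dropped with a debug message rather than aborting the clone:
#
#   [{'URL': 'https://a.example/full.hg', 'BUNDLESPEC': 'gzip-v2'},
#    {'URL': 'https://a.example/exotic.hg', 'BUNDLESPEC': 'lzma-v9'},
#    {'URL': 'https://a.example/sni.hg', 'REQUIRESNI': 'true'}]
#
# On a client that doesn't know 'lzma-v9' and lacks SNI support, only
# the first entry survives.
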
class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting, since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        for prefkey, prefvalue in self.prefers:
            avalue = self.value.get(prefkey)
            bvalue = other.value.get(prefkey)

            # Special case: b is missing the attribute and a matches
            # the preference exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case: a is missing the attribute and b matches
            # the preference exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless the attribute is present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values fall through to the next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to the next attribute.
            continue

        # If we get here, we couldn't sort by attributes and prefers.
        # Fall back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0

def sortclonebundleentries(ui, entries):
    prefers = ui.configlist('ui', 'clonebundleprefers')
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    items = sorted(clonebundleentry(v, prefers) for v in entries)
    return [i.value for i in items]

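# Minimal sketch of the preference sort; the helper name, URLs and
# preference values below are made up for illustration.
def _clonebundleprefersexample():
    # Equivalent to ui.clonebundleprefers = COMPRESSION=zstd, VERSION=v2
    prefers = [('COMPRESSION', 'zstd'), ('VERSION', 'v2')]
    entries = [
        {'URL': 'https://a.example/gzip-v2.hg',
         'COMPRESSION': 'gzip', 'VERSION': 'v2'},
        {'URL': 'https://a.example/zstd-v2.hg',
         'COMPRESSION': 'zstd', 'VERSION': 'v2'},
    ]
    items = sorted(clonebundleentry(v, prefers) for v in entries)
    # The exact COMPRESSION match sorts first, so the zstd entry wins;
    # equal attributes fall through to VERSION, then to manifest order.
    return [i.value['URL'] for i in items]
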
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL."""
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, 'stream')

            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') % e.reason)

        return False
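
# Hypothetical call site (URL invented): a True return means the bundle
# was applied under the lock and transaction above; HTTP and URL errors
# are reported as warnings and surface as a False return, letting
# _maybeapplyclonebundle() decide between fallback and abort.
#
#   if not trypullbundlefromurl(repo.ui, repo, 'https://cdn.example/full.hg'):
#       pass  # fall back to a regular clone or abort, per configuration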