changegroup: replace getsubset with makechangegroup...
Durham Goode
r34098:f85dfde1 default
@@ -1,1024 +1,1030 b''
1 1 # changegroup.py - Mercurial changegroup manipulation functions
2 2 #
3 3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import os
11 11 import struct
12 12 import tempfile
13 13 import weakref
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 hex,
18 18 nullrev,
19 19 short,
20 20 )
21 21
22 22 from . import (
23 23 dagutil,
24 24 discovery,
25 25 error,
26 26 mdiff,
27 27 phases,
28 28 pycompat,
29 29 util,
30 30 )
31 31
32 32 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
33 33 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
34 34 _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
35 35
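To make these formats concrete, here is a minimal standalone sketch of how a v1 delta header is packed and unpacked with struct; the node values are made up, not real hashes:

    import struct
    _V1 = "20s20s20s20s"              # node, p1, p2, linknode
    node = p1 = p2 = cs = '\x11' * 20
    header = struct.pack(_V1, node, p1, p2, cs)
    assert len(header) == struct.calcsize(_V1) == 80
    assert struct.unpack(_V1, header) == (node, p1, p2, cs)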
36 36 def readexactly(stream, n):
37 37 '''read n bytes from stream.read and abort if less was available'''
38 38 s = stream.read(n)
39 39 if len(s) < n:
40 40 raise error.Abort(_("stream ended unexpectedly"
41 41 " (got %d bytes, expected %d)")
42 42 % (len(s), n))
43 43 return s
44 44
45 45 def getchunk(stream):
46 46 """return the next chunk from stream as a string"""
47 47 d = readexactly(stream, 4)
48 48 l = struct.unpack(">l", d)[0]
49 49 if l <= 4:
50 50 if l:
51 51 raise error.Abort(_("invalid chunk length %d") % l)
52 52 return ""
53 53 return readexactly(stream, l - 4)
54 54
55 55 def chunkheader(length):
56 56 """return a changegroup chunk header (string)"""
57 57 return struct.pack(">l", length + 4)
58 58
59 59 def closechunk():
60 60 """return a changegroup chunk header (string) for a zero-length chunk"""
61 61 return struct.pack(">l", 0)
62 62
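A hedged sketch of the framing round-trip implemented by the three helpers above, using io.BytesIO as a stand-in for a real stream (assuming readexactly/getchunk/chunkheader/closechunk are importable as defined here):

    import io
    payload = 'hello'
    framed = chunkheader(len(payload)) + payload + closechunk()
    stream = io.BytesIO(framed)
    assert getchunk(stream) == 'hello'  # the 4-byte length prefix counts itself
    assert getchunk(stream) == ""       # zero-length chunk ends the group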
63 63 def writechunks(ui, chunks, filename, vfs=None):
64 64 """Write chunks to a file and return its filename.
65 65
66 66 The stream is assumed to be a bundle file.
67 67 Existing files will not be overwritten.
68 68 If no filename is specified, a temporary file is created.
69 69 """
70 70 fh = None
71 71 cleanup = None
72 72 try:
73 73 if filename:
74 74 if vfs:
75 75 fh = vfs.open(filename, "wb")
76 76 else:
77 77 # Increase default buffer size because default is usually
78 78 # small (4k is common on Linux).
79 79 fh = open(filename, "wb", 131072)
80 80 else:
81 81 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
82 82 fh = os.fdopen(fd, pycompat.sysstr("wb"))
83 83 cleanup = filename
84 84 for c in chunks:
85 85 fh.write(c)
86 86 cleanup = None
87 87 return filename
88 88 finally:
89 89 if fh is not None:
90 90 fh.close()
91 91 if cleanup is not None:
92 92 if filename and vfs:
93 93 vfs.unlink(cleanup)
94 94 else:
95 95 os.unlink(cleanup)
96 96
97 97 class cg1unpacker(object):
98 98 """Unpacker for cg1 changegroup streams.
99 99
100 100 A changegroup unpacker handles the framing of the revision data in
101 101 the wire format. Most consumers will want to use the apply()
102 102 method to add the changes from the changegroup to a repository.
103 103
104 104 If you're forwarding a changegroup unmodified to another consumer,
105 105 use getchunks(), which returns an iterator of changegroup
106 106 chunks. This is mostly useful for cases where you need to know the
107 107 data stream has ended by observing the end of the changegroup.
108 108
109 109 deltachunk() is useful only if you're applying delta data. Most
110 110 consumers should prefer apply() instead.
111 111
112 112 A few other public methods exist. Those are used only for
113 113 bundlerepo and some debug commands - their use is discouraged.
114 114 """
115 115 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
116 116 deltaheadersize = struct.calcsize(deltaheader)
117 117 version = '01'
118 118 _grouplistcount = 1 # One list of files after the manifests
119 119
120 120 def __init__(self, fh, alg, extras=None):
121 121 if alg is None:
122 122 alg = 'UN'
123 123 if alg not in util.compengines.supportedbundletypes:
124 124 raise error.Abort(_('unknown stream compression type: %s')
125 125 % alg)
126 126 if alg == 'BZ':
127 127 alg = '_truncatedBZ'
128 128
129 129 compengine = util.compengines.forbundletype(alg)
130 130 self._stream = compengine.decompressorreader(fh)
131 131 self._type = alg
132 132 self.extras = extras or {}
133 133 self.callback = None
134 134
135 135 # These methods (compressed, read, seek, tell) all appear to only
136 136 # be used by bundlerepo, but it's a little hard to tell.
137 137 def compressed(self):
138 138 return self._type is not None and self._type != 'UN'
139 139 def read(self, l):
140 140 return self._stream.read(l)
141 141 def seek(self, pos):
142 142 return self._stream.seek(pos)
143 143 def tell(self):
144 144 return self._stream.tell()
145 145 def close(self):
146 146 return self._stream.close()
147 147
148 148 def _chunklength(self):
149 149 d = readexactly(self._stream, 4)
150 150 l = struct.unpack(">l", d)[0]
151 151 if l <= 4:
152 152 if l:
153 153 raise error.Abort(_("invalid chunk length %d") % l)
154 154 return 0
155 155 if self.callback:
156 156 self.callback()
157 157 return l - 4
158 158
159 159 def changelogheader(self):
160 160 """v10 does not have a changelog header chunk"""
161 161 return {}
162 162
163 163 def manifestheader(self):
164 164 """v10 does not have a manifest header chunk"""
165 165 return {}
166 166
167 167 def filelogheader(self):
168 168 """return the header of the filelogs chunk, v10 only has the filename"""
169 169 l = self._chunklength()
170 170 if not l:
171 171 return {}
172 172 fname = readexactly(self._stream, l)
173 173 return {'filename': fname}
174 174
175 175 def _deltaheader(self, headertuple, prevnode):
176 176 node, p1, p2, cs = headertuple
177 177 if prevnode is None:
178 178 deltabase = p1
179 179 else:
180 180 deltabase = prevnode
181 181 flags = 0
182 182 return node, p1, p2, deltabase, cs, flags
183 183
184 184 def deltachunk(self, prevnode):
185 185 l = self._chunklength()
186 186 if not l:
187 187 return {}
188 188 headerdata = readexactly(self._stream, self.deltaheadersize)
189 189 header = struct.unpack(self.deltaheader, headerdata)
190 190 delta = readexactly(self._stream, l - self.deltaheadersize)
191 191 node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
192 192 return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
193 193 'deltabase': deltabase, 'delta': delta, 'flags': flags}
194 194
195 195 def getchunks(self):
196 196 """returns all the chunks contains in the bundle
197 197
198 198 Used when you need to forward the binary stream to a file or another
199 199 network API. To do so, it parses the changegroup data; otherwise it
200 200 would block in the sshrepo case because it doesn't know the end of the stream.
201 201 """
202 202 # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
203 203 # and a list of filelogs. For changegroup 3, we expect 4 parts:
204 204 # changelog, manifestlog, a list of tree manifestlogs, and a list of
205 205 # filelogs.
206 206 #
207 207 # Changelog and manifestlog parts are terminated with empty chunks. The
208 208 # tree and file parts are a list of entry sections. Each entry section
209 209 # is a series of chunks terminating in an empty chunk. The list of these
210 210 # entry sections is terminated in yet another empty chunk, so we know
211 211 # we've reached the end of the tree/file list when we reach an empty
212 212 # chunk that was preceded by no non-empty chunks.
213 213
214 214 parts = 0
215 215 while parts < 2 + self._grouplistcount:
216 216 noentries = True
217 217 while True:
218 218 chunk = getchunk(self)
219 219 if not chunk:
220 220 # The first two empty chunks represent the end of the
221 221 # changelog and the manifestlog portions. The remaining
222 222 # empty chunks represent either A) the end of individual
223 223 # tree or file entries in the file list, or B) the end of
224 224 # the entire list. It's the end of the entire list if there
225 225 # were no entries (i.e. noentries is True).
226 226 if parts < 2:
227 227 parts += 1
228 228 elif noentries:
229 229 parts += 1
230 230 break
231 231 noentries = False
232 232 yield chunkheader(len(chunk))
233 233 pos = 0
234 234 while pos < len(chunk):
235 235 next = pos + 2**20
236 236 yield chunk[pos:next]
237 237 pos = next
238 238 yield closechunk()
239 239
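To make the part layout described above concrete, a hedged sketch that assembles a toy v1-style frame from the module's own helpers; the payloads are placeholders, not valid delta chunks:

    empty = closechunk()
    toy = (chunkheader(4) + 'clog' + empty     # changelog group
           + chunkheader(4) + 'mfst' + empty   # manifest group
           + chunkheader(5) + 'fname'          # one filelog entry header
           + chunkheader(5) + 'fdata' + empty  # ...and its delta chunks
           + empty)                            # empty section list: end of parts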
240 240 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
241 241 # We know that we'll never have more manifests than we had
242 242 # changesets.
243 243 self.callback = prog(_('manifests'), numchanges)
244 244 # no need to check for empty manifest group here:
245 245 # if the result of the merge of 1 and 2 is the same in 3 and 4,
246 246 # no new manifest will be created and the manifest group will
247 247 # be empty during the pull
248 248 self.manifestheader()
249 249 repo.manifestlog._revlog.addgroup(self, revmap, trp)
250 250 repo.ui.progress(_('manifests'), None)
251 251 self.callback = None
252 252
253 253 def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
254 254 expectedtotal=None):
255 255 """Add the changegroup returned by source.read() to this repo.
256 256 srctype is a string like 'push', 'pull', or 'unbundle'. url is
257 257 the URL of the repo where this changegroup is coming from.
258 258
259 259 Return an integer summarizing the change to this repo:
260 260 - nothing changed or no source: 0
261 261 - more heads than before: 1+added heads (2..n)
262 262 - fewer heads than before: -1-removed heads (-2..-n)
263 263 - number of heads stays the same: 1
264 264 """
265 265 repo = repo.unfiltered()
266 266 def csmap(x):
267 267 repo.ui.debug("add changeset %s\n" % short(x))
268 268 return len(cl)
269 269
270 270 def revmap(x):
271 271 return cl.rev(x)
272 272
273 273 changesets = files = revisions = 0
274 274
275 275 try:
276 276 # The transaction may already carry source information. In this
277 277 # case we use the top level data. We overwrite the argument
278 278 # because we need to use the top level values (if they exist)
279 279 # in this function.
280 280 srctype = tr.hookargs.setdefault('source', srctype)
281 281 url = tr.hookargs.setdefault('url', url)
282 282 repo.hook('prechangegroup',
283 283 throw=True, **pycompat.strkwargs(tr.hookargs))
284 284
285 285 # write changelog data to temp files so concurrent readers
286 286 # will not see an inconsistent view
287 287 cl = repo.changelog
288 288 cl.delayupdate(tr)
289 289 oldheads = set(cl.heads())
290 290
291 291 trp = weakref.proxy(tr)
292 292 # pull off the changeset group
293 293 repo.ui.status(_("adding changesets\n"))
294 294 clstart = len(cl)
295 295 class prog(object):
296 296 def __init__(self, step, total):
297 297 self._step = step
298 298 self._total = total
299 299 self._count = 1
300 300 def __call__(self):
301 301 repo.ui.progress(self._step, self._count, unit=_('chunks'),
302 302 total=self._total)
303 303 self._count += 1
304 304 self.callback = prog(_('changesets'), expectedtotal)
305 305
306 306 efiles = set()
307 307 def onchangelog(cl, node):
308 308 efiles.update(cl.readfiles(node))
309 309
310 310 self.changelogheader()
311 311 cgnodes = cl.addgroup(self, csmap, trp, addrevisioncb=onchangelog)
312 312 efiles = len(efiles)
313 313
314 314 if not cgnodes:
315 315 repo.ui.develwarn('applied empty changegroup',
316 316 config='empty-changegroup')
317 317 clend = len(cl)
318 318 changesets = clend - clstart
319 319 repo.ui.progress(_('changesets'), None)
320 320 self.callback = None
321 321
322 322 # pull off the manifest group
323 323 repo.ui.status(_("adding manifests\n"))
324 324 self._unpackmanifests(repo, revmap, trp, prog, changesets)
325 325
326 326 needfiles = {}
327 327 if repo.ui.configbool('server', 'validate'):
328 328 cl = repo.changelog
329 329 ml = repo.manifestlog
330 330 # validate incoming csets have their manifests
331 331 for cset in xrange(clstart, clend):
332 332 mfnode = cl.changelogrevision(cset).manifest
333 333 mfest = ml[mfnode].readdelta()
334 334 # store file cgnodes we must see
335 335 for f, n in mfest.iteritems():
336 336 needfiles.setdefault(f, set()).add(n)
337 337
338 338 # process the files
339 339 repo.ui.status(_("adding file changes\n"))
340 340 newrevs, newfiles = _addchangegroupfiles(
341 341 repo, self, revmap, trp, efiles, needfiles)
342 342 revisions += newrevs
343 343 files += newfiles
344 344
345 345 deltaheads = 0
346 346 if oldheads:
347 347 heads = cl.heads()
348 348 deltaheads = len(heads) - len(oldheads)
349 349 for h in heads:
350 350 if h not in oldheads and repo[h].closesbranch():
351 351 deltaheads -= 1
352 352 htext = ""
353 353 if deltaheads:
354 354 htext = _(" (%+d heads)") % deltaheads
355 355
356 356 repo.ui.status(_("added %d changesets"
357 357 " with %d changes to %d files%s\n")
358 358 % (changesets, revisions, files, htext))
359 359 repo.invalidatevolatilesets()
360 360
361 361 if changesets > 0:
362 362 if 'node' not in tr.hookargs:
363 363 tr.hookargs['node'] = hex(cl.node(clstart))
364 364 tr.hookargs['node_last'] = hex(cl.node(clend - 1))
365 365 hookargs = dict(tr.hookargs)
366 366 else:
367 367 hookargs = dict(tr.hookargs)
368 368 hookargs['node'] = hex(cl.node(clstart))
369 369 hookargs['node_last'] = hex(cl.node(clend - 1))
370 370 repo.hook('pretxnchangegroup',
371 371 throw=True, **pycompat.strkwargs(hookargs))
372 372
373 373 added = [cl.node(r) for r in xrange(clstart, clend)]
374 374 phaseall = None
375 375 if srctype in ('push', 'serve'):
376 376 # Old servers can not push the boundary themselves.
377 377 # New servers won't push the boundary if changeset already
378 378 # exists locally as secret
379 379 #
380 380 # We should not use added here but the list of all changes in
381 381 # the bundle
382 382 if repo.publishing():
383 383 targetphase = phaseall = phases.public
384 384 else:
385 385 # closer target phase computation
386 386
387 387 # Those changesets have been pushed from the
388 388 # outside, their phases are going to be pushed
389 389 # alongside. Therefore `targetphase` is
390 390 # ignored.
391 391 targetphase = phaseall = phases.draft
392 392 if added:
393 393 phases.registernew(repo, tr, targetphase, added)
394 394 if phaseall is not None:
395 395 phases.advanceboundary(repo, tr, phaseall, cgnodes)
396 396
397 397 if changesets > 0:
398 398
399 399 def runhooks():
400 400 # These hooks run when the lock releases, not when the
401 401 # transaction closes. So it's possible for the changelog
402 402 # to have changed since we last saw it.
403 403 if clstart >= len(repo):
404 404 return
405 405
406 406 repo.hook("changegroup", **pycompat.strkwargs(hookargs))
407 407
408 408 for n in added:
409 409 args = hookargs.copy()
410 410 args['node'] = hex(n)
411 411 del args['node_last']
412 412 repo.hook("incoming", **pycompat.strkwargs(args))
413 413
414 414 newheads = [h for h in repo.heads()
415 415 if h not in oldheads]
416 416 repo.ui.log("incoming",
417 417 "%s incoming changes - new heads: %s\n",
418 418 len(added),
419 419 ', '.join([hex(c[:6]) for c in newheads]))
420 420
421 421 tr.addpostclose('changegroup-runhooks-%020i' % clstart,
422 422 lambda tr: repo._afterlock(runhooks))
423 423 finally:
424 424 repo.ui.flush()
425 425 # never return 0 here:
426 426 if deltaheads < 0:
427 427 ret = deltaheads - 1
428 428 else:
429 429 ret = deltaheads + 1
430 430 return ret
431 431
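Reading the return computation in the finally block together with the contract documented in the docstring: deltaheads == +2 yields 3, deltaheads == -1 yields -2, and deltaheads == 0 (pushing to an existing head) yields 1.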
432 432 class cg2unpacker(cg1unpacker):
433 433 """Unpacker for cg2 streams.
434 434
435 435 cg2 streams add support for generaldelta, so the delta header
436 436 format is slightly different. All other features about the data
437 437 remain the same.
438 438 """
439 439 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
440 440 deltaheadersize = struct.calcsize(deltaheader)
441 441 version = '02'
442 442
443 443 def _deltaheader(self, headertuple, prevnode):
444 444 node, p1, p2, deltabase, cs = headertuple
445 445 flags = 0
446 446 return node, p1, p2, deltabase, cs, flags
447 447
448 448 class cg3unpacker(cg2unpacker):
449 449 """Unpacker for cg3 streams.
450 450
451 451 cg3 streams add support for exchanging treemanifests and revlog
452 452 flags. It adds the revlog flags to the delta header and an empty chunk
453 453 separating manifests and files.
454 454 """
455 455 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
456 456 deltaheadersize = struct.calcsize(deltaheader)
457 457 version = '03'
458 458 _grouplistcount = 2 # One list of manifests and one list of files
459 459
460 460 def _deltaheader(self, headertuple, prevnode):
461 461 node, p1, p2, deltabase, cs, flags = headertuple
462 462 return node, p1, p2, deltabase, cs, flags
463 463
464 464 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
465 465 super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
466 466 numchanges)
467 467 for chunkdata in iter(self.filelogheader, {}):
468 468 # If we get here, there are directory manifests in the changegroup
469 469 d = chunkdata["filename"]
470 470 repo.ui.debug("adding %s revisions\n" % d)
471 471 dirlog = repo.manifestlog._revlog.dirlog(d)
472 472 if not dirlog.addgroup(self, revmap, trp):
473 473 raise error.Abort(_("received dir revlog group is empty"))
474 474
475 475 class headerlessfixup(object):
476 476 def __init__(self, fh, h):
477 477 self._h = h
478 478 self._fh = fh
479 479 def read(self, n):
480 480 if self._h:
481 481 d, self._h = self._h[:n], self._h[n:]
482 482 if len(d) < n:
483 483 d += readexactly(self._fh, n - len(d))
484 484 return d
485 485 return readexactly(self._fh, n)
486 486
487 487 class cg1packer(object):
488 488 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
489 489 version = '01'
490 490 def __init__(self, repo, bundlecaps=None):
491 491 """Given a source repo, construct a bundler.
492 492
493 493 bundlecaps is optional and can be used to specify the set of
494 494 capabilities which can be used to build the bundle. While bundlecaps is
495 495 unused in core Mercurial, extensions rely on this feature to communicate
496 496 capabilities to customize the changegroup packer.
497 497 """
498 498 # Set of capabilities we can use to build the bundle.
499 499 if bundlecaps is None:
500 500 bundlecaps = set()
501 501 self._bundlecaps = bundlecaps
502 502 # experimental config: bundle.reorder
503 503 reorder = repo.ui.config('bundle', 'reorder')
504 504 if reorder == 'auto':
505 505 reorder = None
506 506 else:
507 507 reorder = util.parsebool(reorder)
508 508 self._repo = repo
509 509 self._reorder = reorder
510 510 self._progress = repo.ui.progress
511 511 if self._repo.ui.verbose and not self._repo.ui.debugflag:
512 512 self._verbosenote = self._repo.ui.note
513 513 else:
514 514 self._verbosenote = lambda s: None
515 515
516 516 def close(self):
517 517 return closechunk()
518 518
519 519 def fileheader(self, fname):
520 520 return chunkheader(len(fname)) + fname
521 521
522 522 # Extracted both for clarity and for overriding in extensions.
523 523 def _sortgroup(self, revlog, nodelist, lookup):
524 524 """Sort nodes for change group and turn them into revnums."""
525 525 # for generaldelta revlogs, we linearize the revs; this will both be
526 526 # much quicker and generate a much smaller bundle
527 527 if (revlog._generaldelta and self._reorder is None) or self._reorder:
528 528 dag = dagutil.revlogdag(revlog)
529 529 return dag.linearize(set(revlog.rev(n) for n in nodelist))
530 530 else:
531 531 return sorted([revlog.rev(n) for n in nodelist])
532 532
533 533 def group(self, nodelist, revlog, lookup, units=None):
534 534 """Calculate a delta group, yielding a sequence of changegroup chunks
535 535 (strings).
536 536
537 537 Given a list of changeset revs, return a set of deltas and
538 538 metadata corresponding to nodes. The first delta is
539 539 first parent(nodelist[0]) -> nodelist[0]; the receiver is
540 540 guaranteed to have this parent as it has all history before
541 541 these changesets. In the case where firstparent is nullrev, the
542 542 changegroup starts with a full revision.
543 543
544 544 If units is not None, progress detail will be generated; units specifies
545 545 the type of revlog that is touched (changelog, manifest, etc.).
546 546 """
547 547 # if we don't have any revisions touched by these changesets, bail
548 548 if len(nodelist) == 0:
549 549 yield self.close()
550 550 return
551 551
552 552 revs = self._sortgroup(revlog, nodelist, lookup)
553 553
554 554 # add the parent of the first rev
555 555 p = revlog.parentrevs(revs[0])[0]
556 556 revs.insert(0, p)
557 557
558 558 # build deltas
559 559 total = len(revs) - 1
560 560 msgbundling = _('bundling')
561 561 for r in xrange(len(revs) - 1):
562 562 if units is not None:
563 563 self._progress(msgbundling, r + 1, unit=units, total=total)
564 564 prev, curr = revs[r], revs[r + 1]
565 565 linknode = lookup(revlog.node(curr))
566 566 for c in self.revchunk(revlog, curr, prev, linknode):
567 567 yield c
568 568
569 569 if units is not None:
570 570 self._progress(msgbundling, None)
571 571 yield self.close()
572 572
573 573 # filter any nodes that claim to be part of the known set
574 574 def prune(self, revlog, missing, commonrevs):
575 575 rr, rl = revlog.rev, revlog.linkrev
576 576 return [n for n in missing if rl(rr(n)) not in commonrevs]
577 577
578 578 def _packmanifests(self, dir, mfnodes, lookuplinknode):
579 579 """Pack flat manifests into a changegroup stream."""
580 580 assert not dir
581 581 for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
582 582 lookuplinknode, units=_('manifests')):
583 583 yield chunk
584 584
585 585 def _manifestsdone(self):
586 586 return ''
587 587
588 588 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
589 589 '''yield a sequence of changegroup chunks (strings)'''
590 590 repo = self._repo
591 591 cl = repo.changelog
592 592
593 593 clrevorder = {}
594 594 mfs = {} # needed manifests
595 595 fnodes = {} # needed file nodes
596 596 changedfiles = set()
597 597
598 598 # Callback for the changelog, used to collect changed files and manifest
599 599 # nodes.
600 600 # Returns the linkrev node (identity in the changelog case).
601 601 def lookupcl(x):
602 602 c = cl.read(x)
603 603 clrevorder[x] = len(clrevorder)
604 604 n = c[0]
605 605 # record the first changeset introducing this manifest version
606 606 mfs.setdefault(n, x)
607 607 # Record a complete list of potentially-changed files in
608 608 # this manifest.
609 609 changedfiles.update(c[3])
610 610 return x
611 611
612 612 self._verbosenote(_('uncompressed size of bundle content:\n'))
613 613 size = 0
614 614 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
615 615 size += len(chunk)
616 616 yield chunk
617 617 self._verbosenote(_('%8.i (changelog)\n') % size)
618 618
619 619 # We need to make sure that the linkrev in the changegroup refers to
620 620 # the first changeset that introduced the manifest or file revision.
621 621 # The fastpath is usually safer than the slowpath, because the filelogs
622 622 # are walked in revlog order.
623 623 #
624 624 # When taking the slowpath with reorder=None and the manifest revlog
625 625 # uses generaldelta, the manifest may be walked in the "wrong" order.
626 626 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
627 627 # cc0ff93d0c0c).
628 628 #
629 629 # When taking the fastpath, we are only vulnerable to reordering
630 630 # of the changelog itself. The changelog never uses generaldelta, so
631 631 # it is only reordered when reorder=True. To handle this case, we
632 632 # simply take the slowpath, which already has the 'clrevorder' logic.
633 633 # This was also fixed in cc0ff93d0c0c.
634 634 fastpathlinkrev = fastpathlinkrev and not self._reorder
635 635 # Treemanifests don't work correctly with fastpathlinkrev
636 636 # either, because we don't discover which directory nodes to
637 637 # send along with files. This could probably be fixed.
638 638 fastpathlinkrev = fastpathlinkrev and (
639 639 'treemanifest' not in repo.requirements)
640 640
641 641 for chunk in self.generatemanifests(commonrevs, clrevorder,
642 642 fastpathlinkrev, mfs, fnodes):
643 643 yield chunk
644 644 mfs.clear()
645 645 clrevs = set(cl.rev(x) for x in clnodes)
646 646
647 647 if not fastpathlinkrev:
648 648 def linknodes(unused, fname):
649 649 return fnodes.get(fname, {})
650 650 else:
651 651 cln = cl.node
652 652 def linknodes(filerevlog, fname):
653 653 llr = filerevlog.linkrev
654 654 fln = filerevlog.node
655 655 revs = ((r, llr(r)) for r in filerevlog)
656 656 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
657 657
658 658 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
659 659 source):
660 660 yield chunk
661 661
662 662 yield self.close()
663 663
664 664 if clnodes:
665 665 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
666 666
667 667 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
668 668 fnodes):
669 669 repo = self._repo
670 670 mfl = repo.manifestlog
671 671 dirlog = mfl._revlog.dirlog
672 672 tmfnodes = {'': mfs}
673 673
674 674 # Callback for the manifest, used to collect linkrevs for filelog
675 675 # revisions.
676 676 # Returns the linkrev node (collected in lookupcl).
677 677 def makelookupmflinknode(dir):
678 678 if fastpathlinkrev:
679 679 assert not dir
680 680 return mfs.__getitem__
681 681
682 682 def lookupmflinknode(x):
683 683 """Callback for looking up the linknode for manifests.
684 684
685 685 Returns the linkrev node for the specified manifest.
686 686
687 687 SIDE EFFECT:
688 688
689 689 1) fclnodes gets populated with the list of relevant
690 690 file nodes if we're not using fastpathlinkrev
691 691 2) When treemanifests are in use, collects treemanifest nodes
692 692 to send
693 693
694 694 Note that this means manifests must be completely sent to
695 695 the client before you can trust the list of files and
696 696 treemanifests to send.
697 697 """
698 698 clnode = tmfnodes[dir][x]
699 699 mdata = mfl.get(dir, x).readfast(shallow=True)
700 700 for p, n, fl in mdata.iterentries():
701 701 if fl == 't': # subdirectory manifest
702 702 subdir = dir + p + '/'
703 703 tmfclnodes = tmfnodes.setdefault(subdir, {})
704 704 tmfclnode = tmfclnodes.setdefault(n, clnode)
705 705 if clrevorder[clnode] < clrevorder[tmfclnode]:
706 706 tmfclnodes[n] = clnode
707 707 else:
708 708 f = dir + p
709 709 fclnodes = fnodes.setdefault(f, {})
710 710 fclnode = fclnodes.setdefault(n, clnode)
711 711 if clrevorder[clnode] < clrevorder[fclnode]:
712 712 fclnodes[n] = clnode
713 713 return clnode
714 714 return lookupmflinknode
715 715
716 716 size = 0
717 717 while tmfnodes:
718 718 dir = min(tmfnodes)
719 719 nodes = tmfnodes[dir]
720 720 prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
721 721 if not dir or prunednodes:
722 722 for x in self._packmanifests(dir, prunednodes,
723 723 makelookupmflinknode(dir)):
724 724 size += len(x)
725 725 yield x
726 726 del tmfnodes[dir]
727 727 self._verbosenote(_('%8.i (manifests)\n') % size)
728 728 yield self._manifestsdone()
729 729
730 730 # The 'source' parameter is useful for extensions
731 731 def generatefiles(self, changedfiles, linknodes, commonrevs, source):
732 732 repo = self._repo
733 733 progress = self._progress
734 734 msgbundling = _('bundling')
735 735
736 736 total = len(changedfiles)
737 737 # for progress output
738 738 msgfiles = _('files')
739 739 for i, fname in enumerate(sorted(changedfiles)):
740 740 filerevlog = repo.file(fname)
741 741 if not filerevlog:
742 742 raise error.Abort(_("empty or missing revlog for %s") % fname)
743 743
744 744 linkrevnodes = linknodes(filerevlog, fname)
745 745 # Lookup for filenodes, we collected the linkrev nodes above in the
746 746 # fastpath case and with lookupmf in the slowpath case.
747 747 def lookupfilelog(x):
748 748 return linkrevnodes[x]
749 749
750 750 filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
751 751 if filenodes:
752 752 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
753 753 total=total)
754 754 h = self.fileheader(fname)
755 755 size = len(h)
756 756 yield h
757 757 for chunk in self.group(filenodes, filerevlog, lookupfilelog):
758 758 size += len(chunk)
759 759 yield chunk
760 760 self._verbosenote(_('%8.i %s\n') % (size, fname))
761 761 progress(msgbundling, None)
762 762
763 763 def deltaparent(self, revlog, rev, p1, p2, prev):
764 764 return prev
765 765
766 766 def revchunk(self, revlog, rev, prev, linknode):
767 767 node = revlog.node(rev)
768 768 p1, p2 = revlog.parentrevs(rev)
769 769 base = self.deltaparent(revlog, rev, p1, p2, prev)
770 770
771 771 prefix = ''
772 772 if revlog.iscensored(base) or revlog.iscensored(rev):
773 773 try:
774 774 delta = revlog.revision(node, raw=True)
775 775 except error.CensoredNodeError as e:
776 776 delta = e.tombstone
777 777 if base == nullrev:
778 778 prefix = mdiff.trivialdiffheader(len(delta))
779 779 else:
780 780 baselen = revlog.rawsize(base)
781 781 prefix = mdiff.replacediffheader(baselen, len(delta))
782 782 elif base == nullrev:
783 783 delta = revlog.revision(node, raw=True)
784 784 prefix = mdiff.trivialdiffheader(len(delta))
785 785 else:
786 786 delta = revlog.revdiff(base, rev)
787 787 p1n, p2n = revlog.parents(node)
788 788 basenode = revlog.node(base)
789 789 flags = revlog.flags(rev)
790 790 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
791 791 meta += prefix
792 792 l = len(meta) + len(delta)
793 793 yield chunkheader(l)
794 794 yield meta
795 795 yield delta
796 796 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
797 797 # do nothing with basenode, it is implicitly the previous one in HG10
798 798 # do nothing with flags, it is implicitly 0 for cg1 and cg2
799 799 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
800 800
801 801 class cg2packer(cg1packer):
802 802 version = '02'
803 803 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
804 804
805 805 def __init__(self, repo, bundlecaps=None):
806 806 super(cg2packer, self).__init__(repo, bundlecaps)
807 807 if self._reorder is None:
808 808 # Since generaldelta is directly supported by cg2, reordering
809 809 # generally doesn't help, so we disable it by default (treating
810 810 # bundle.reorder=auto just like bundle.reorder=False).
811 811 self._reorder = False
812 812
813 813 def deltaparent(self, revlog, rev, p1, p2, prev):
814 814 dp = revlog.deltaparent(rev)
815 815 if dp == nullrev and revlog.storedeltachains:
816 816 # Avoid sending full revisions when delta parent is null. Pick prev
817 817 # in that case. It's tempting to pick p1 in this case, as p1 will
818 818 # be smaller in the common case. However, computing a delta against
819 819 # p1 may require resolving the raw text of p1, which could be
820 820 # expensive. The revlog caches should have prev cached, meaning
821 821 # less CPU for changegroup generation. There is likely room to add
822 822 # a flag and/or config option to control this behavior.
823 823 return prev
824 824 elif dp == nullrev:
825 825 # revlog is configured to use full snapshot for a reason,
826 826 # stick to full snapshot.
827 827 return nullrev
828 828 elif dp not in (p1, p2, prev):
829 829 # Pick prev when we can't be sure remote has the base revision.
830 830 return prev
831 831 else:
832 832 return dp
833 833
834 834 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
835 835 # Do nothing with flags, it is implicitly 0 in cg1 and cg2
836 836 return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
837 837
838 838 class cg3packer(cg2packer):
839 839 version = '03'
840 840 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
841 841
842 842 def _packmanifests(self, dir, mfnodes, lookuplinknode):
843 843 if dir:
844 844 yield self.fileheader(dir)
845 845
846 846 dirlog = self._repo.manifestlog._revlog.dirlog(dir)
847 847 for chunk in self.group(mfnodes, dirlog, lookuplinknode,
848 848 units=_('manifests')):
849 849 yield chunk
850 850
851 851 def _manifestsdone(self):
852 852 return self.close()
853 853
854 854 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
855 855 return struct.pack(
856 856 self.deltaheader, node, p1n, p2n, basenode, linknode, flags)
857 857
858 858 _packermap = {'01': (cg1packer, cg1unpacker),
859 859 # cg2 adds support for exchanging generaldelta
860 860 '02': (cg2packer, cg2unpacker),
861 861 # cg3 adds support for exchanging revlog flags and treemanifests
862 862 '03': (cg3packer, cg3unpacker),
863 863 }
864 864
865 865 def allsupportedversions(repo):
866 866 versions = set(_packermap.keys())
867 867 if not (repo.ui.configbool('experimental', 'changegroup3') or
868 868 repo.ui.configbool('experimental', 'treemanifest') or
869 869 'treemanifest' in repo.requirements):
870 870 versions.discard('03')
871 871 return versions
872 872
873 873 # Changegroup versions that can be applied to the repo
874 874 def supportedincomingversions(repo):
875 875 return allsupportedversions(repo)
876 876
877 877 # Changegroup versions that can be created from the repo
878 878 def supportedoutgoingversions(repo):
879 879 versions = allsupportedversions(repo)
880 880 if 'treemanifest' in repo.requirements:
881 881 # Versions 01 and 02 support only flat manifests and it's just too
882 882 # expensive to convert between the flat manifest and tree manifest on
883 883 # the fly. Since tree manifests are hashed differently, all of history
884 884 # would have to be converted. Instead, we simply don't even pretend to
885 885 # support versions 01 and 02.
886 886 versions.discard('01')
887 887 versions.discard('02')
888 888 return versions
889 889
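For example, on a repository with 'treemanifest' in its requirements, supportedincomingversions() still contains '01', '02' and '03', while supportedoutgoingversions() is reduced to {'03'}.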
890 890 def safeversion(repo):
891 891 # Finds the smallest version that it's safe to assume clients of the repo
892 892 # will support. For example, all hg versions that support generaldelta also
893 893 # support changegroup 02.
894 894 versions = supportedoutgoingversions(repo)
895 895 if 'generaldelta' in repo.requirements:
896 896 versions.discard('01')
897 897 assert versions
898 898 return min(versions)
899 899
900 900 def getbundler(version, repo, bundlecaps=None):
901 901 assert version in supportedoutgoingversions(repo)
902 902 return _packermap[version][0](repo, bundlecaps)
903 903
904 904 def getunbundler(version, fh, alg, extras=None):
905 905 return _packermap[version][1](fh, alg, extras=extras)
906 906
907 907 def _changegroupinfo(repo, nodes, source):
908 908 if repo.ui.verbose or source == 'bundle':
909 909 repo.ui.status(_("%d changesets found\n") % len(nodes))
910 910 if repo.ui.debugflag:
911 911 repo.ui.debug("list of changesets:\n")
912 912 for node in nodes:
913 913 repo.ui.debug("%s\n" % hex(node))
914 914
915 def makestream(repo, outgoing, version, source, fastpath=False,
916 bundlecaps=None):
917 bundler = getbundler(version, repo, bundlecaps=bundlecaps)
918 return getsubsetraw(repo, outgoing, bundler, source, fastpath=fastpath)
919
920 def makechangegroup(repo, outgoing, version, source, fastpath=False,
921 bundlecaps=None):
922 cgstream = makestream(repo, outgoing, version, source,
923 fastpath=fastpath, bundlecaps=bundlecaps)
924 return getunbundler(version, util.chunkbuffer(cgstream), None,
925 {'clcount': len(outgoing.missing) })
926
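A hedged usage sketch of the new makechangegroup() API; repo, roots, heads, destrepo and tr are assumed to be supplied by the caller (a local repository, a discovery run, and an open transaction):

    outgoing = discovery.outgoing(repo, missingroots=roots,
                                  missingheads=heads)
    cg = makechangegroup(repo, outgoing, '02', 'push')
    # cg is a cg2unpacker wrapping the generated stream; apply it directly:
    ret = cg.apply(destrepo, tr, 'push', repo.url())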
915 927 def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
916 928 repo = repo.unfiltered()
917 929 commonrevs = outgoing.common
918 930 csets = outgoing.missing
919 931 heads = outgoing.missingheads
920 932 # We go through the fast path if we get told to, or if all (unfiltered)
921 933 # heads have been requested (since we then know all linkrevs will
922 934 # be pulled by the client).
923 935 heads.sort()
924 936 fastpathlinkrev = fastpath or (
925 937 repo.filtername is None and heads == sorted(repo.heads()))
926 938
927 939 repo.hook('preoutgoing', throw=True, source=source)
928 940 _changegroupinfo(repo, csets, source)
929 941 return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
930 942
931 def getsubset(repo, outgoing, bundler, source, fastpath=False):
932 gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
933 return getunbundler(bundler.version, util.chunkbuffer(gengroup), None,
934 {'clcount': len(outgoing.missing)})
935
936 943 def changegroupsubset(repo, roots, heads, source, version='01'):
937 944 """Compute a changegroup consisting of all the nodes that are
938 945 descendants of any of the roots and ancestors of any of the heads.
939 946 Return a chunkbuffer object whose read() method will return
940 947 successive changegroup chunks.
941 948
942 949 It is fairly complex as determining which filenodes and which
943 950 manifest nodes need to be included for the changeset to be complete
944 951 is non-trivial.
945 952
946 953 Another wrinkle is doing the reverse, figuring out which changeset in
947 954 the changegroup a particular filenode or manifestnode belongs to.
948 955 """
949 956 outgoing = discovery.outgoing(repo, missingroots=roots, missingheads=heads)
950 bundler = getbundler(version, repo)
951 return getsubset(repo, outgoing, bundler, source)
957 return makechangegroup(repo, outgoing, version, source)
952 958
953 959 def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
954 960 version='01'):
955 961 """Like getbundle, but taking a discovery.outgoing as an argument.
956 962
957 963 This is only implemented for local repos and reuses potentially
958 964 precomputed sets in outgoing. Returns a raw changegroup generator."""
959 965 if not outgoing.missing:
960 966 return None
961 967 bundler = getbundler(version, repo, bundlecaps)
962 968 return getsubsetraw(repo, outgoing, bundler, source)
963 969
964 970 def getchangegroup(repo, source, outgoing, bundlecaps=None,
965 971 version='01'):
966 972 """Like getbundle, but taking a discovery.outgoing as an argument.
967 973
968 974 This is only implemented for local repos and reuses potentially
969 975 precomputed sets in outgoing."""
970 976 if not outgoing.missing:
971 977 return None
972 bundler = getbundler(version, repo, bundlecaps)
973 return getsubset(repo, outgoing, bundler, source)
978 return makechangegroup(repo, outgoing, version, source,
979 bundlecaps=bundlecaps)
974 980
975 981 def getlocalchangegroup(repo, *args, **kwargs):
976 982 repo.ui.deprecwarn('getlocalchangegroup is deprecated, use getchangegroup',
977 983 '4.3')
978 984 return getchangegroup(repo, *args, **kwargs)
979 985
980 986 def changegroup(repo, basenodes, source):
981 987 # to avoid a race we use changegroupsubset() (issue1320)
982 988 return changegroupsubset(repo, basenodes, repo.heads(), source)
983 989
984 990 def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
985 991 revisions = 0
986 992 files = 0
987 993 for chunkdata in iter(source.filelogheader, {}):
988 994 files += 1
989 995 f = chunkdata["filename"]
990 996 repo.ui.debug("adding %s revisions\n" % f)
991 997 repo.ui.progress(_('files'), files, unit=_('files'),
992 998 total=expectedfiles)
993 999 fl = repo.file(f)
994 1000 o = len(fl)
995 1001 try:
996 1002 if not fl.addgroup(source, revmap, trp):
997 1003 raise error.Abort(_("received file revlog group is empty"))
998 1004 except error.CensoredBaseError as e:
999 1005 raise error.Abort(_("received delta base is censored: %s") % e)
1000 1006 revisions += len(fl) - o
1001 1007 if f in needfiles:
1002 1008 needs = needfiles[f]
1003 1009 for new in xrange(o, len(fl)):
1004 1010 n = fl.node(new)
1005 1011 if n in needs:
1006 1012 needs.remove(n)
1007 1013 else:
1008 1014 raise error.Abort(
1009 1015 _("received spurious file revlog entry"))
1010 1016 if not needs:
1011 1017 del needfiles[f]
1012 1018 repo.ui.progress(_('files'), None)
1013 1019
1014 1020 for f, needs in needfiles.iteritems():
1015 1021 fl = repo.file(f)
1016 1022 for n in needs:
1017 1023 try:
1018 1024 fl.rev(n)
1019 1025 except error.LookupError:
1020 1026 raise error.Abort(
1021 1027 _('missing file data for %s:%s - run hg verify') %
1022 1028 (f, hex(n)))
1023 1029
1024 1030 return revisions, files
@@ -1,2017 +1,2013 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 hex,
16 16 nullid,
17 17 )
18 18 from . import (
19 19 bookmarks as bookmod,
20 20 bundle2,
21 21 changegroup,
22 22 discovery,
23 23 error,
24 24 lock as lockmod,
25 25 obsolete,
26 26 phases,
27 27 pushkey,
28 28 pycompat,
29 29 scmutil,
30 30 sslutil,
31 31 streamclone,
32 32 url as urlmod,
33 33 util,
34 34 )
35 35
36 36 urlerr = util.urlerr
37 37 urlreq = util.urlreq
38 38
39 39 # Maps bundle version human names to changegroup versions.
40 40 _bundlespeccgversions = {'v1': '01',
41 41 'v2': '02',
42 42 'packed1': 's1',
43 43 'bundle2': '02', #legacy
44 44 }
45 45
46 46 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
47 47 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
48 48
49 49 def parsebundlespec(repo, spec, strict=True, externalnames=False):
50 50 """Parse a bundle string specification into parts.
51 51
52 52 Bundle specifications denote a well-defined bundle/exchange format.
53 53 The content of a given specification should not change over time in
54 54 order to ensure that bundles produced by a newer version of Mercurial are
55 55 readable from an older version.
56 56
57 57 The string currently has the form:
58 58
59 59 <compression>-<type>[;<parameter0>[;<parameter1>]]
60 60
61 61 Where <compression> is one of the supported compression formats
62 62 and <type> is (currently) a version string. A ";" can follow the type and
63 63 all text afterwards is interpreted as URI encoded, ";" delimited key=value
64 64 pairs.
65 65
66 66 If ``strict`` is True (the default) <compression> is required. Otherwise,
67 67 it is optional.
68 68
69 69 If ``externalnames`` is False (the default), the human-centric names will
70 70 be converted to their internal representation.
71 71
72 72 Returns a 3-tuple of (compression, version, parameters). Compression will
73 73 be ``None`` if not in strict mode and a compression isn't defined.
74 74
75 75 An ``InvalidBundleSpecification`` is raised when the specification is
76 76 not syntactically well formed.
77 77
78 78 An ``UnsupportedBundleSpecification`` is raised when the compression or
79 79 bundle type/version is not recognized.
80 80
81 81 Note: this function will likely eventually return a more complex data
82 82 structure, including bundle2 part information.
83 83 """
84 84 def parseparams(s):
85 85 if ';' not in s:
86 86 return s, {}
87 87
88 88 params = {}
89 89 version, paramstr = s.split(';', 1)
90 90
91 91 for p in paramstr.split(';'):
92 92 if '=' not in p:
93 93 raise error.InvalidBundleSpecification(
94 94 _('invalid bundle specification: '
95 95 'missing "=" in parameter: %s') % p)
96 96
97 97 key, value = p.split('=', 1)
98 98 key = urlreq.unquote(key)
99 99 value = urlreq.unquote(value)
100 100 params[key] = value
101 101
102 102 return version, params
103 103
104 104
105 105 if strict and '-' not in spec:
106 106 raise error.InvalidBundleSpecification(
107 107 _('invalid bundle specification; '
108 108 'must be prefixed with compression: %s') % spec)
109 109
110 110 if '-' in spec:
111 111 compression, version = spec.split('-', 1)
112 112
113 113 if compression not in util.compengines.supportedbundlenames:
114 114 raise error.UnsupportedBundleSpecification(
115 115 _('%s compression is not supported') % compression)
116 116
117 117 version, params = parseparams(version)
118 118
119 119 if version not in _bundlespeccgversions:
120 120 raise error.UnsupportedBundleSpecification(
121 121 _('%s is not a recognized bundle version') % version)
122 122 else:
123 123 # Value could be just the compression or just the version, in which
124 124 # case some defaults are assumed (but only when not in strict mode).
125 125 assert not strict
126 126
127 127 spec, params = parseparams(spec)
128 128
129 129 if spec in util.compengines.supportedbundlenames:
130 130 compression = spec
131 131 version = 'v1'
132 132 # Generaldelta repos require v2.
133 133 if 'generaldelta' in repo.requirements:
134 134 version = 'v2'
135 135 # Modern compression engines require v2.
136 136 if compression not in _bundlespecv1compengines:
137 137 version = 'v2'
138 138 elif spec in _bundlespeccgversions:
139 139 if spec == 'packed1':
140 140 compression = 'none'
141 141 else:
142 142 compression = 'bzip2'
143 143 version = spec
144 144 else:
145 145 raise error.UnsupportedBundleSpecification(
146 146 _('%s is not a recognized bundle specification') % spec)
147 147
148 148 # Bundle version 1 only supports a known set of compression engines.
149 149 if version == 'v1' and compression not in _bundlespecv1compengines:
150 150 raise error.UnsupportedBundleSpecification(
151 151 _('compression engine %s is not supported on v1 bundles') %
152 152 compression)
153 153
154 154 # The specification for packed1 can optionally declare the data formats
155 155 # required to apply it. If we see this metadata, compare against what the
156 156 # repo supports and error if the bundle isn't compatible.
157 157 if version == 'packed1' and 'requirements' in params:
158 158 requirements = set(params['requirements'].split(','))
159 159 missingreqs = requirements - repo.supportedformats
160 160 if missingreqs:
161 161 raise error.UnsupportedBundleSpecification(
162 162 _('missing support for repository features: %s') %
163 163 ', '.join(sorted(missingreqs)))
164 164
165 165 if not externalnames:
166 166 engine = util.compengines.forbundlename(compression)
167 167 compression = engine.bundletype()[1]
168 168 version = _bundlespeccgversions[version]
169 169 return compression, version, params
170 170
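Hedged examples of spec strings accepted by parsebundlespec(); repo is an assumed local repository object, and the returned names reflect the conversion done when externalnames is False:

    parsebundlespec(repo, 'gzip-v2')
    # -> ('GZ', '02', {})
    parsebundlespec(repo, 'none-packed1;requirements=revlogv1')
    # -> ('UN', 's1', {'requirements': 'revlogv1'})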
171 171 def readbundle(ui, fh, fname, vfs=None):
172 172 header = changegroup.readexactly(fh, 4)
173 173
174 174 alg = None
175 175 if not fname:
176 176 fname = "stream"
177 177 if not header.startswith('HG') and header.startswith('\0'):
178 178 fh = changegroup.headerlessfixup(fh, header)
179 179 header = "HG10"
180 180 alg = 'UN'
181 181 elif vfs:
182 182 fname = vfs.join(fname)
183 183
184 184 magic, version = header[0:2], header[2:4]
185 185
186 186 if magic != 'HG':
187 187 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
188 188 if version == '10':
189 189 if alg is None:
190 190 alg = changegroup.readexactly(fh, 2)
191 191 return changegroup.cg1unpacker(fh, alg)
192 192 elif version.startswith('2'):
193 193 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
194 194 elif version == 'S1':
195 195 return streamclone.streamcloneapplier(fh)
196 196 else:
197 197 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
198 198
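Summarizing the dispatch above (header bytes are illustrative):

    # 'HG10' + 2-byte compression ('UN'/'GZ'/'BZ') -> changegroup.cg1unpacker
    # 'HG2...'                                     -> bundle2.getunbundler
    # 'HGS1'                                       -> streamclone.streamcloneapplier
    # leading '\0' (headerless)                    -> fixed up as 'HG10' + 'UN'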
199 199 def getbundlespec(ui, fh):
200 200 """Infer the bundlespec from a bundle file handle.
201 201
202 202 The input file handle is seeked and the original seek position is not
203 203 restored.
204 204 """
205 205 def speccompression(alg):
206 206 try:
207 207 return util.compengines.forbundletype(alg).bundletype()[0]
208 208 except KeyError:
209 209 return None
210 210
211 211 b = readbundle(ui, fh, None)
212 212 if isinstance(b, changegroup.cg1unpacker):
213 213 alg = b._type
214 214 if alg == '_truncatedBZ':
215 215 alg = 'BZ'
216 216 comp = speccompression(alg)
217 217 if not comp:
218 218 raise error.Abort(_('unknown compression algorithm: %s') % alg)
219 219 return '%s-v1' % comp
220 220 elif isinstance(b, bundle2.unbundle20):
221 221 if 'Compression' in b.params:
222 222 comp = speccompression(b.params['Compression'])
223 223 if not comp:
224 224 raise error.Abort(_('unknown compression algorithm: %s') % comp)
225 225 else:
226 226 comp = 'none'
227 227
228 228 version = None
229 229 for part in b.iterparts():
230 230 if part.type == 'changegroup':
231 231 version = part.params['version']
232 232 if version in ('01', '02'):
233 233 version = 'v2'
234 234 else:
235 235 raise error.Abort(_('changegroup version %s does not have '
236 236 'a known bundlespec') % version,
237 237 hint=_('try upgrading your Mercurial '
238 238 'client'))
239 239
240 240 if not version:
241 241 raise error.Abort(_('could not identify changegroup version in '
242 242 'bundle'))
243 243
244 244 return '%s-%s' % (comp, version)
245 245 elif isinstance(b, streamclone.streamcloneapplier):
246 246 requirements = streamclone.readbundle1header(fh)[2]
247 247 params = 'requirements=%s' % ','.join(sorted(requirements))
248 248 return 'none-packed1;%s' % urlreq.quote(params)
249 249 else:
250 250 raise error.Abort(_('unknown bundle type: %s') % b)
251 251
252 252 def _computeoutgoing(repo, heads, common):
253 253 """Computes which revs are outgoing given a set of common
254 254 and a set of heads.
255 255
256 256 This is a separate function so extensions can have access to
257 257 the logic.
258 258
259 259 Returns a discovery.outgoing object.
260 260 """
261 261 cl = repo.changelog
262 262 if common:
263 263 hasnode = cl.hasnode
264 264 common = [n for n in common if hasnode(n)]
265 265 else:
266 266 common = [nullid]
267 267 if not heads:
268 268 heads = cl.heads()
269 269 return discovery.outgoing(repo, common, heads)
270 270
271 271 def _forcebundle1(op):
272 272 """return true if a pull/push must use bundle1
273 273
274 274 This function is used to allow testing of the older bundle version"""
275 275 ui = op.repo.ui
276 276 forcebundle1 = False
277 277 # The goal of this config is to allow developers to choose the bundle
278 278 # version used during exchange. This is especially handy for tests.
279 279 # Value is a list of bundle versions to pick from; the highest version
280 280 # should be used.
281 281 #
282 282 # developer config: devel.legacy.exchange
283 283 exchange = ui.configlist('devel', 'legacy.exchange')
284 284 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
285 285 return forcebundle1 or not op.remote.capable('bundle2')
286 286
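For instance, per the list handling above, a test can force the old exchange format with a config such as: hg push --config devel.legacy.exchange=bundle1.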
287 287 class pushoperation(object):
288 288 """A object that represent a single push operation
289 289
290 290 Its purpose is to carry push related state and very common operations.
291 291
292 292 A new pushoperation should be created at the beginning of each push and
293 293 discarded afterward.
294 294 """
295 295
296 296 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
297 297 bookmarks=(), pushvars=None):
298 298 # repo we push from
299 299 self.repo = repo
300 300 self.ui = repo.ui
301 301 # repo we push to
302 302 self.remote = remote
303 303 # force option provided
304 304 self.force = force
305 305 # revs to be pushed (None is "all")
306 306 self.revs = revs
307 307 # bookmark explicitly pushed
308 308 self.bookmarks = bookmarks
309 309 # allow push of new branch
310 310 self.newbranch = newbranch
311 311 # step already performed
312 312 # (used to check what steps have been already performed through bundle2)
313 313 self.stepsdone = set()
314 314 # Integer version of the changegroup push result
315 315 # - None means nothing to push
316 316 # - 0 means HTTP error
317 317 # - 1 means we pushed and remote head count is unchanged *or*
318 318 # we have outgoing changesets but refused to push
319 319 # - other values as described by addchangegroup()
320 320 self.cgresult = None
321 321 # Boolean value for the bookmark push
322 322 self.bkresult = None
323 323 # discover.outgoing object (contains common and outgoing data)
324 324 self.outgoing = None
325 325 # all remote topological heads before the push
326 326 self.remoteheads = None
327 327 # Details of the remote branch pre and post push
328 328 #
329 329 # mapping: {'branch': ([remoteheads],
330 330 # [newheads],
331 331 # [unsyncedheads],
332 332 # [discardedheads])}
333 333 # - branch: the branch name
334 334 # - remoteheads: the list of remote heads known locally
335 335 # None if the branch is new
336 336 # - newheads: the new remote heads (known locally) with outgoing pushed
337 337 # - unsyncedheads: the list of remote heads unknown locally.
338 338 # - discardedheads: the list of remote heads made obsolete by the push
339 339 self.pushbranchmap = None
340 340 # testable as a boolean indicating if any nodes are missing locally.
341 341 self.incoming = None
342 342 # phase changes that must be pushed alongside the changesets
343 343 self.outdatedphases = None
344 344 # phase changes that must be pushed if the changeset push fails
345 345 self.fallbackoutdatedphases = None
346 346 # outgoing obsmarkers
347 347 self.outobsmarkers = set()
348 348 # outgoing bookmarks
349 349 self.outbookmarks = []
350 350 # transaction manager
351 351 self.trmanager = None
352 352 # map { pushkey partid -> callback handling failure}
353 353 # used to handle exception from mandatory pushkey part failure
354 354 self.pkfailcb = {}
355 355 # an iterable of pushvars or None
356 356 self.pushvars = pushvars
357 357
358 358 @util.propertycache
359 359 def futureheads(self):
360 360 """future remote heads if the changeset push succeeds"""
361 361 return self.outgoing.missingheads
362 362
363 363 @util.propertycache
364 364 def fallbackheads(self):
365 365 """future remote heads if the changeset push fails"""
366 366 if self.revs is None:
367 367 # no target to push, all common heads are relevant
368 368 return self.outgoing.commonheads
369 369 unfi = self.repo.unfiltered()
370 370 # I want cheads = heads(::missingheads and ::commonheads)
371 371 # (missingheads is revs with secret changeset filtered out)
372 372 #
373 373 # This can be expressed as:
374 374 # cheads = ( (missingheads and ::commonheads)
375 375 # + (commonheads and ::missingheads)
376 376 # )
377 377 #
378 378 # while trying to push we already computed the following:
379 379 # common = (::commonheads)
380 380 # missing = ((commonheads::missingheads) - commonheads)
381 381 #
382 382 # We can pick:
383 383 # * missingheads part of common (::commonheads)
384 384 common = self.outgoing.common
385 385 nm = self.repo.changelog.nodemap
386 386 cheads = [node for node in self.revs if nm[node] in common]
387 387 # and
388 388 # * commonheads parents on missing
389 389 revset = unfi.set('%ln and parents(roots(%ln))',
390 390 self.outgoing.commonheads,
391 391 self.outgoing.missing)
392 392 cheads.extend(c.node() for c in revset)
393 393 return cheads
394 394
395 395 @property
396 396 def commonheads(self):
397 397 """set of all common heads after changeset bundle push"""
398 398 if self.cgresult:
399 399 return self.futureheads
400 400 else:
401 401 return self.fallbackheads
402 402
403 403 # mapping of message used when pushing bookmark
404 404 bookmsgmap = {'update': (_("updating bookmark %s\n"),
405 405 _('updating bookmark %s failed!\n')),
406 406 'export': (_("exporting bookmark %s\n"),
407 407 _('exporting bookmark %s failed!\n')),
408 408 'delete': (_("deleting remote bookmark %s\n"),
409 409 _('deleting remote bookmark %s failed!\n')),
410 410 }
411 411
412 412
413 413 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
414 414 opargs=None):
415 415 '''Push outgoing changesets (limited by revs) from a local
416 416 repository to remote. Return an integer:
417 417 - None means nothing to push
418 418 - 0 means HTTP error
419 419 - 1 means we pushed and remote head count is unchanged *or*
420 420 we have outgoing changesets but refused to push
421 421 - other values as described by addchangegroup()
422 422 '''
423 423 if opargs is None:
424 424 opargs = {}
425 425 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
426 426 **opargs)
427 427 if pushop.remote.local():
428 428 missing = (set(pushop.repo.requirements)
429 429 - pushop.remote.local().supported)
430 430 if missing:
431 431 msg = _("required features are not"
432 432 " supported in the destination:"
433 433 " %s") % (', '.join(sorted(missing)))
434 434 raise error.Abort(msg)
435 435
436 436 if not pushop.remote.canpush():
437 437 raise error.Abort(_("destination does not support push"))
438 438
439 439 if not pushop.remote.capable('unbundle'):
440 440 raise error.Abort(_('cannot push: destination does not support the '
441 441 'unbundle wire protocol command'))
442 442
443 443 # get lock as we might write phase data
444 444 wlock = lock = None
445 445 try:
446 446 # bundle2 push may receive a reply bundle touching bookmarks or other
447 447 # things requiring the wlock. Take it now to ensure proper ordering.
448 448 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
449 449 if (not _forcebundle1(pushop)) and maypushback:
450 450 wlock = pushop.repo.wlock()
451 451 lock = pushop.repo.lock()
452 452 pushop.trmanager = transactionmanager(pushop.repo,
453 453 'push-response',
454 454 pushop.remote.url())
455 455 except IOError as err:
456 456 if err.errno != errno.EACCES:
457 457 raise
458 458 # source repo cannot be locked.
459 459 # We do not abort the push, but just disable the local phase
460 460 # synchronisation.
461 461 msg = 'cannot lock source repository: %s\n' % err
462 462 pushop.ui.debug(msg)
463 463
464 464 with wlock or util.nullcontextmanager(), \
465 465 lock or util.nullcontextmanager(), \
466 466 pushop.trmanager or util.nullcontextmanager():
467 467 pushop.repo.checkpush(pushop)
468 468 _pushdiscovery(pushop)
469 469 if not _forcebundle1(pushop):
470 470 _pushbundle2(pushop)
471 471 _pushchangeset(pushop)
472 472 _pushsyncphase(pushop)
473 473 _pushobsolete(pushop)
474 474 _pushbookmark(pushop)
475 475
476 476 return pushop
477 477
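# A minimal usage sketch (hypothetical caller code) of push() and the
# result attributes documented above:
#
#   pushop = push(repo, remote, revs=[node], bookmarks=('stable',))
#   if pushop.cgresult == 0:
#       repo.ui.warn('push failed over HTTP\n')
#   newheads = pushop.commonheads   # remote heads after the push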
478 478 # list of steps to perform discovery before push
479 479 pushdiscoveryorder = []
480 480
481 481 # Mapping between step name and function
482 482 #
483 483 # This exists to help extensions wrap steps if necessary
484 484 pushdiscoverymapping = {}
485 485
486 486 def pushdiscovery(stepname):
487 487 """decorator for function performing discovery before push
488 488
489 489 The function is added to the step -> function mapping and appended to the
490 490 list of steps. Beware that decorated functions will be added in order (this
491 491 may matter).
492 492
493 493 You can only use this decorator for a new step; if you want to wrap a step
494 494 from an extension, change the pushdiscoverymapping dictionary directly."""
495 495 def dec(func):
496 496 assert stepname not in pushdiscoverymapping
497 497 pushdiscoverymapping[stepname] = func
498 498 pushdiscoveryorder.append(stepname)
499 499 return func
500 500 return dec
501 501
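# A minimal sketch (hypothetical extension code) of wrapping an
# existing discovery step by editing pushdiscoverymapping directly,
# as the docstring above recommends:
#
#   origstep = pushdiscoverymapping['changeset']
#   def wrappedstep(pushop):
#       pushop.ui.debug('about to discover changesets\n')
#       origstep(pushop)
#   pushdiscoverymapping['changeset'] = wrappedstep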
502 502 def _pushdiscovery(pushop):
503 503 """Run all discovery steps"""
504 504 for stepname in pushdiscoveryorder:
505 505 step = pushdiscoverymapping[stepname]
506 506 step(pushop)
507 507
508 508 @pushdiscovery('changeset')
509 509 def _pushdiscoverychangeset(pushop):
510 510 """discover the changeset that need to be pushed"""
511 511 fci = discovery.findcommonincoming
512 512 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
513 513 common, inc, remoteheads = commoninc
514 514 fco = discovery.findcommonoutgoing
515 515 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
516 516 commoninc=commoninc, force=pushop.force)
517 517 pushop.outgoing = outgoing
518 518 pushop.remoteheads = remoteheads
519 519 pushop.incoming = inc
520 520
521 521 @pushdiscovery('phase')
522 522 def _pushdiscoveryphase(pushop):
523 523 """discover the phase that needs to be pushed
524 524
525 525 (computed for both success and failure case for changesets push)"""
526 526 outgoing = pushop.outgoing
527 527 unfi = pushop.repo.unfiltered()
528 528 remotephases = pushop.remote.listkeys('phases')
529 529 publishing = remotephases.get('publishing', False)
530 530 if (pushop.ui.configbool('ui', '_usedassubrepo')
531 531 and remotephases # server supports phases
532 532 and not pushop.outgoing.missing # no changesets to be pushed
533 533 and publishing):
534 534 # When:
535 535 # - this is a subrepo push
536 536 # - and the remote supports phases
537 537 # - and no changesets are to be pushed
538 538 # - and the remote is publishing
539 539 # We may be in the issue 3871 case!
540 540 # We drop the possible phase synchronisation done by
541 541 # courtesy in order to publish changesets that are possibly
542 542 # locally draft on the remote.
543 543 remotephases = {'publishing': 'True'}
544 544 ana = phases.analyzeremotephases(pushop.repo,
545 545 pushop.fallbackheads,
546 546 remotephases)
547 547 pheads, droots = ana
548 548 extracond = ''
549 549 if not publishing:
550 550 extracond = ' and public()'
551 551 revset = 'heads((%%ln::%%ln) %s)' % extracond
552 552 # Get the list of all revs draft on remote but public here.
553 553 # XXX Beware that the revset breaks if droots is not strictly
554 554 # XXX roots; we may want to ensure it is, but that is costly
555 555 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
556 556 if not outgoing.missing:
557 557 future = fallback
558 558 else:
559 559 # adds changesets we are going to push as draft
560 560 #
561 561 # should not be necessary for a publishing server, but because of an
562 562 # issue fixed in xxxxx we have to do it anyway.
563 563 fdroots = list(unfi.set('roots(%ln + %ln::)',
564 564 outgoing.missing, droots))
565 565 fdroots = [f.node() for f in fdroots]
566 566 future = list(unfi.set(revset, fdroots, pushop.futureheads))
567 567 pushop.outdatedphases = future
568 568 pushop.fallbackoutdatedphases = fallback
569 569
570 570 @pushdiscovery('obsmarker')
571 571 def _pushdiscoveryobsmarkers(pushop):
572 572 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
573 573 and pushop.repo.obsstore
574 574 and 'obsolete' in pushop.remote.listkeys('namespaces')):
575 575 repo = pushop.repo
576 576 # very naive computation that can be quite expensive on big repos.
577 577 # However, evolution is currently slow on them anyway.
578 578 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
579 579 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
580 580
581 581 @pushdiscovery('bookmarks')
582 582 def _pushdiscoverybookmarks(pushop):
583 583 ui = pushop.ui
584 584 repo = pushop.repo.unfiltered()
585 585 remote = pushop.remote
586 586 ui.debug("checking for updated bookmarks\n")
587 587 ancestors = ()
588 588 if pushop.revs:
589 589 revnums = map(repo.changelog.rev, pushop.revs)
590 590 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
591 591 remotebookmark = remote.listkeys('bookmarks')
592 592
593 593 explicit = set([repo._bookmarks.expandname(bookmark)
594 594 for bookmark in pushop.bookmarks])
595 595
596 596 remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
597 597 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
598 598
599 599 def safehex(x):
600 600 if x is None:
601 601 return x
602 602 return hex(x)
603 603
604 604 def hexifycompbookmarks(bookmarks):
605 605 for b, scid, dcid in bookmarks:
606 606 yield b, safehex(scid), safehex(dcid)
607 607
608 608 comp = [hexifycompbookmarks(marks) for marks in comp]
609 609 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
610 610
611 611 for b, scid, dcid in advsrc:
612 612 if b in explicit:
613 613 explicit.remove(b)
614 614 if not ancestors or repo[scid].rev() in ancestors:
615 615 pushop.outbookmarks.append((b, dcid, scid))
616 616 # search for added bookmarks
617 617 for b, scid, dcid in addsrc:
618 618 if b in explicit:
619 619 explicit.remove(b)
620 620 pushop.outbookmarks.append((b, '', scid))
621 621 # search for overwritten bookmarks
622 622 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
623 623 if b in explicit:
624 624 explicit.remove(b)
625 625 pushop.outbookmarks.append((b, dcid, scid))
626 626 # search for bookmarks to delete
627 627 for b, scid, dcid in adddst:
628 628 if b in explicit:
629 629 explicit.remove(b)
630 630 # treat as "deleted locally"
631 631 pushop.outbookmarks.append((b, dcid, ''))
632 632 # identical bookmarks shouldn't get reported
633 633 for b, scid, dcid in same:
634 634 if b in explicit:
635 635 explicit.remove(b)
636 636
637 637 if explicit:
638 638 explicit = sorted(explicit)
639 639 # we should probably list all of them
640 640 ui.warn(_('bookmark %s does not exist on the local '
641 641 'or remote repository!\n') % explicit[0])
642 642 pushop.bkresult = 2
643 643
644 644 pushop.outbookmarks.sort()
645 645
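# For reference, each pushop.outbookmarks entry built above is a
# (name, old, new) triple of hex nodes where an empty 'old' means the
# bookmark is new on the remote (export) and an empty 'new' means it
# should be removed there (delete). A hypothetical example:
#
#   [('@', '', 'a1b2c3...'),                # export '@'
#    ('stable', 'c3d4e5...', 'e5f6a7...'),  # update 'stable'
#    ('old-topic', '9a8b7c...', '')]        # delete 'old-topic'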
646 646 def _pushcheckoutgoing(pushop):
647 647 outgoing = pushop.outgoing
648 648 unfi = pushop.repo.unfiltered()
649 649 if not outgoing.missing:
650 650 # nothing to push
651 651 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
652 652 return False
653 653 # something to push
654 654 if not pushop.force:
655 655 # if repo.obsstore is falsy --> no obsolete markers;
656 656 # then we can skip the iteration
657 657 if unfi.obsstore:
658 658 # these messages are defined here because of the 80-char line limit
659 659 mso = _("push includes obsolete changeset: %s!")
660 660 mspd = _("push includes phase-divergent changeset: %s!")
661 661 mscd = _("push includes content-divergent changeset: %s!")
662 662 mst = {"orphan": _("push includes orphan changeset: %s!"),
663 663 "phase-divergent": mspd,
664 664 "content-divergent": mscd}
665 665 # If there is at least one obsolete or unstable
666 666 # changeset in missing, then at least one of the
667 667 # missingheads will be obsolete or unstable. So
668 668 # checking heads only is ok
669 669 for node in outgoing.missingheads:
670 670 ctx = unfi[node]
671 671 if ctx.obsolete():
672 672 raise error.Abort(mso % ctx)
673 673 elif ctx.isunstable():
674 674 # TODO print more than one instability in the abort
675 675 # message
676 676 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
677 677
678 678 discovery.checkheads(pushop)
679 679 return True
680 680
681 681 # List of names of steps to perform for an outgoing bundle2, order matters.
682 682 b2partsgenorder = []
683 683
684 684 # Mapping between step name and function
685 685 #
686 686 # This exists to help extensions wrap steps if necessary
687 687 b2partsgenmapping = {}
688 688
689 689 def b2partsgenerator(stepname, idx=None):
690 690 """decorator for function generating bundle2 part
691 691
692 692 The function is added to the step -> function mapping and appended to the
693 693 list of steps. Beware that decorated functions will be added in order
694 694 (this may matter).
695 695
696 696 You can only use this decorator for new steps; if you want to wrap a step
697 697 from an extension, change the b2partsgenmapping dictionary directly."""
698 698 def dec(func):
699 699 assert stepname not in b2partsgenmapping
700 700 b2partsgenmapping[stepname] = func
701 701 if idx is None:
702 702 b2partsgenorder.append(stepname)
703 703 else:
704 704 b2partsgenorder.insert(idx, stepname)
705 705 return func
706 706 return dec
707 707
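# A minimal sketch (hypothetical extension code) of registering a new
# part generator at the front of the pipeline, mirroring how the
# 'pushvars' step below uses idx=0:
#
#   @b2partsgenerator('mypart', idx=0)
#   def _pushb2mypart(pushop, bundler):
#       if 'mypart' in pushop.stepsdone:
#           return
#       pushop.stepsdone.add('mypart')
#       bundler.newpart('mypart', data='payload')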
708 708 def _pushb2ctxcheckheads(pushop, bundler):
709 709 """Generate race condition checking parts
710 710
711 711 Exists as an independent function to aid extensions
712 712 """
713 713 # * with 'force', do not check for push races,
714 714 # * if we don't push anything, there is nothing to check.
715 715 if not pushop.force and pushop.outgoing.missingheads:
716 716 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
717 717 emptyremote = pushop.pushbranchmap is None
718 718 if not allowunrelated or emptyremote:
719 719 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
720 720 else:
721 721 affected = set()
722 722 for branch, heads in pushop.pushbranchmap.iteritems():
723 723 remoteheads, newheads, unsyncedheads, discardedheads = heads
724 724 if remoteheads is not None:
725 725 remote = set(remoteheads)
726 726 affected |= set(discardedheads) & remote
727 727 affected |= remote - set(newheads)
728 728 if affected:
729 729 data = iter(sorted(affected))
730 730 bundler.newpart('check:updated-heads', data=data)
731 731
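# A worked example (hypothetical branchmap) of the
# 'check:updated-heads' data computed above: for one branch with
# remoteheads = {A, B}, newheads = {B, C}, discardedheads = {A},
# affected = ({A} & {A, B}) | ({A, B} - {B, C}) = {A}, so only A is
# sent for the server-side race check.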
732 732 @b2partsgenerator('changeset')
733 733 def _pushb2ctx(pushop, bundler):
734 734 """handle changegroup push through bundle2
735 735
736 736 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
737 737 """
738 738 if 'changesets' in pushop.stepsdone:
739 739 return
740 740 pushop.stepsdone.add('changesets')
741 741 # Send known heads to the server for race detection.
742 742 if not _pushcheckoutgoing(pushop):
743 743 return
744 744 pushop.repo.prepushoutgoinghooks(pushop)
745 745
746 746 _pushb2ctxcheckheads(pushop, bundler)
747 747
748 748 b2caps = bundle2.bundle2caps(pushop.remote)
749 749 version = '01'
750 750 cgversions = b2caps.get('changegroup')
751 751 if cgversions: # 3.1 and 3.2 ship with an empty value
752 752 cgversions = [v for v in cgversions
753 753 if v in changegroup.supportedoutgoingversions(
754 754 pushop.repo)]
755 755 if not cgversions:
756 756 raise ValueError(_('no common changegroup version'))
757 757 version = max(cgversions)
758 758 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
759 759 pushop.outgoing,
760 760 version=version)
761 761 cgpart = bundler.newpart('changegroup', data=cg)
762 762 if cgversions:
763 763 cgpart.addparam('version', version)
764 764 if 'treemanifest' in pushop.repo.requirements:
765 765 cgpart.addparam('treemanifest', '1')
766 766 def handlereply(op):
767 767 """extract addchangegroup returns from server reply"""
768 768 cgreplies = op.records.getreplies(cgpart.id)
769 769 assert len(cgreplies['changegroup']) == 1
770 770 pushop.cgresult = cgreplies['changegroup'][0]['return']
771 771 return handlereply
772 772
773 773 @b2partsgenerator('phase')
774 774 def _pushb2phases(pushop, bundler):
775 775 """handle phase push through bundle2"""
776 776 if 'phases' in pushop.stepsdone:
777 777 return
778 778 b2caps = bundle2.bundle2caps(pushop.remote)
779 779 if 'pushkey' not in b2caps:
780 780 return
781 781 pushop.stepsdone.add('phases')
782 782 part2node = []
783 783
784 784 def handlefailure(pushop, exc):
785 785 targetid = int(exc.partid)
786 786 for partid, node in part2node:
787 787 if partid == targetid:
788 788 raise error.Abort(_('updating %s to public failed') % node)
789 789
790 790 enc = pushkey.encode
791 791 for newremotehead in pushop.outdatedphases:
792 792 part = bundler.newpart('pushkey')
793 793 part.addparam('namespace', enc('phases'))
794 794 part.addparam('key', enc(newremotehead.hex()))
795 795 part.addparam('old', enc(str(phases.draft)))
796 796 part.addparam('new', enc(str(phases.public)))
797 797 part2node.append((part.id, newremotehead))
798 798 pushop.pkfailcb[part.id] = handlefailure
799 799
800 800 def handlereply(op):
801 801 for partid, node in part2node:
802 802 partrep = op.records.getreplies(partid)
803 803 results = partrep['pushkey']
804 804 assert len(results) <= 1
805 805 msg = None
806 806 if not results:
807 807 msg = _('server ignored update of %s to public!\n') % node
808 808 elif not int(results[0]['return']):
809 809 msg = _('updating %s to public failed!\n') % node
810 810 if msg is not None:
811 811 pushop.ui.warn(msg)
812 812 return handlereply
813 813
814 814 @b2partsgenerator('obsmarkers')
815 815 def _pushb2obsmarkers(pushop, bundler):
816 816 if 'obsmarkers' in pushop.stepsdone:
817 817 return
818 818 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
819 819 if obsolete.commonversion(remoteversions) is None:
820 820 return
821 821 pushop.stepsdone.add('obsmarkers')
822 822 if pushop.outobsmarkers:
823 823 markers = sorted(pushop.outobsmarkers)
824 824 bundle2.buildobsmarkerspart(bundler, markers)
825 825
826 826 @b2partsgenerator('bookmarks')
827 827 def _pushb2bookmarks(pushop, bundler):
828 828 """handle bookmark push through bundle2"""
829 829 if 'bookmarks' in pushop.stepsdone:
830 830 return
831 831 b2caps = bundle2.bundle2caps(pushop.remote)
832 832 if 'pushkey' not in b2caps:
833 833 return
834 834 pushop.stepsdone.add('bookmarks')
835 835 part2book = []
836 836 enc = pushkey.encode
837 837
838 838 def handlefailure(pushop, exc):
839 839 targetid = int(exc.partid)
840 840 for partid, book, action in part2book:
841 841 if partid == targetid:
842 842 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
843 843 # we should not be called for parts we did not generate
844 844 assert False
845 845
846 846 for book, old, new in pushop.outbookmarks:
847 847 part = bundler.newpart('pushkey')
848 848 part.addparam('namespace', enc('bookmarks'))
849 849 part.addparam('key', enc(book))
850 850 part.addparam('old', enc(old))
851 851 part.addparam('new', enc(new))
852 852 action = 'update'
853 853 if not old:
854 854 action = 'export'
855 855 elif not new:
856 856 action = 'delete'
857 857 part2book.append((part.id, book, action))
858 858 pushop.pkfailcb[part.id] = handlefailure
859 859
860 860 def handlereply(op):
861 861 ui = pushop.ui
862 862 for partid, book, action in part2book:
863 863 partrep = op.records.getreplies(partid)
864 864 results = partrep['pushkey']
865 865 assert len(results) <= 1
866 866 if not results:
867 867 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
868 868 else:
869 869 ret = int(results[0]['return'])
870 870 if ret:
871 871 ui.status(bookmsgmap[action][0] % book)
872 872 else:
873 873 ui.warn(bookmsgmap[action][1] % book)
874 874 if pushop.bkresult is not None:
875 875 pushop.bkresult = 1
876 876 return handlereply
877 877
878 878 @b2partsgenerator('pushvars', idx=0)
879 879 def _getbundlesendvars(pushop, bundler):
880 880 '''send shellvars via bundle2'''
881 881 pushvars = pushop.pushvars
882 882 if pushvars:
883 883 shellvars = {}
884 884 for raw in pushvars:
885 885 if '=' not in raw:
886 886 msg = ("unable to parse variable '%s', should follow "
887 887 "'KEY=VALUE' or 'KEY=' format")
888 888 raise error.Abort(msg % raw)
889 889 k, v = raw.split('=', 1)
890 890 shellvars[k] = v
891 891
892 892 part = bundler.newpart('pushvars')
893 893
894 894 for key, value in shellvars.iteritems():
895 895 part.addparam(key, value, mandatory=False)
896 896
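# Each "KEY=VALUE" string in pushop.pushvars becomes a non-mandatory
# parameter on the 'pushvars' part above, so a hypothetical invocation
# like
#
#   hg push --pushvars "DEBUG=1" --pushvars "REASON=hotfix"
#
# would (assuming --pushvars is what populates pushop.pushvars) yield
# a part carrying DEBUG=1 and REASON=hotfix for server-side hooks.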
897 897 def _pushbundle2(pushop):
898 898 """push data to the remote using bundle2
899 899
900 900 The only currently supported type of data is changegroup but this will
901 901 evolve in the future."""
902 902 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
903 903 pushback = (pushop.trmanager
904 904 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
905 905
906 906 # create reply capability
907 907 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
908 908 allowpushback=pushback))
909 909 bundler.newpart('replycaps', data=capsblob)
910 910 replyhandlers = []
911 911 for partgenname in b2partsgenorder:
912 912 partgen = b2partsgenmapping[partgenname]
913 913 ret = partgen(pushop, bundler)
914 914 if callable(ret):
915 915 replyhandlers.append(ret)
916 916 # do not push if nothing to push
917 917 if bundler.nbparts <= 1:
918 918 return
919 919 stream = util.chunkbuffer(bundler.getchunks())
920 920 try:
921 921 try:
922 922 reply = pushop.remote.unbundle(
923 923 stream, ['force'], pushop.remote.url())
924 924 except error.BundleValueError as exc:
925 925 raise error.Abort(_('missing support for %s') % exc)
926 926 try:
927 927 trgetter = None
928 928 if pushback:
929 929 trgetter = pushop.trmanager.transaction
930 930 op = bundle2.processbundle(pushop.repo, reply, trgetter)
931 931 except error.BundleValueError as exc:
932 932 raise error.Abort(_('missing support for %s') % exc)
933 933 except bundle2.AbortFromPart as exc:
934 934 pushop.ui.status(_('remote: %s\n') % exc)
935 935 if exc.hint is not None:
936 936 pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
937 937 raise error.Abort(_('push failed on remote'))
938 938 except error.PushkeyFailed as exc:
939 939 partid = int(exc.partid)
940 940 if partid not in pushop.pkfailcb:
941 941 raise
942 942 pushop.pkfailcb[partid](pushop, exc)
943 943 for rephand in replyhandlers:
944 944 rephand(op)
945 945
946 946 def _pushchangeset(pushop):
947 947 """Make the actual push of changeset bundle to remote repo"""
948 948 if 'changesets' in pushop.stepsdone:
949 949 return
950 950 pushop.stepsdone.add('changesets')
951 951 if not _pushcheckoutgoing(pushop):
952 952 return
953 953
954 954 # Should have verified this in push().
955 955 assert pushop.remote.capable('unbundle')
956 956
957 957 pushop.repo.prepushoutgoinghooks(pushop)
958 958 outgoing = pushop.outgoing
959 959 # TODO: get bundlecaps from remote
960 960 bundlecaps = None
961 961 # create a changegroup from local
962 962 if pushop.revs is None and not (outgoing.excluded
963 963 or pushop.repo.changelog.filteredrevs):
964 964 # push everything,
965 965 # use the fast path, no race possible on push
966 bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
967 cg = changegroup.getsubset(pushop.repo,
968 outgoing,
969 bundler,
970 'push',
971 fastpath=True)
966 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
967 fastpath=True, bundlecaps=bundlecaps)
972 968 else:
973 969 cg = changegroup.getchangegroup(pushop.repo, 'push', outgoing,
974 970 bundlecaps=bundlecaps)
975 971
976 972 # apply changegroup to remote
977 973 # local repo finds heads on server, finds out what
978 974 # revs it must push. once revs transferred, if server
979 975 # finds it has different heads (someone else won
980 976 # commit/push race), server aborts.
981 977 if pushop.force:
982 978 remoteheads = ['force']
983 979 else:
984 980 remoteheads = pushop.remoteheads
985 981 # ssh: return remote's addchangegroup()
986 982 # http: return remote's addchangegroup() or 0 for error
987 983 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
988 984 pushop.repo.url())
989 985
990 986 def _pushsyncphase(pushop):
991 987 """synchronise phase information locally and remotely"""
992 988 cheads = pushop.commonheads
993 989 # even when we don't push, exchanging phase data is useful
994 990 remotephases = pushop.remote.listkeys('phases')
995 991 if (pushop.ui.configbool('ui', '_usedassubrepo')
996 992 and remotephases # server supports phases
997 993 and pushop.cgresult is None # nothing was pushed
998 994 and remotephases.get('publishing', False)):
999 995 # When:
1000 996 # - this is a subrepo push
1001 997 # - and the remote supports phases
1002 998 # - and no changeset was pushed
1003 999 # - and the remote is publishing
1004 1000 # We may be in the issue 3871 case!
1005 1001 # We drop the possible phase synchronisation done by
1006 1002 # courtesy in order to publish changesets that are possibly
1007 1003 # locally draft on the remote.
1008 1004 remotephases = {'publishing': 'True'}
1009 1005 if not remotephases: # old server or public-only reply from non-publishing
1010 1006 _localphasemove(pushop, cheads)
1011 1007 # don't push any phase data as there is nothing to push
1012 1008 else:
1013 1009 ana = phases.analyzeremotephases(pushop.repo, cheads,
1014 1010 remotephases)
1015 1011 pheads, droots = ana
1016 1012 ### Apply remote phase on local
1017 1013 if remotephases.get('publishing', False):
1018 1014 _localphasemove(pushop, cheads)
1019 1015 else: # publish = False
1020 1016 _localphasemove(pushop, pheads)
1021 1017 _localphasemove(pushop, cheads, phases.draft)
1022 1018 ### Apply local phase on remote
1023 1019
1024 1020 if pushop.cgresult:
1025 1021 if 'phases' in pushop.stepsdone:
1026 1022 # phases already pushed through bundle2
1027 1023 return
1028 1024 outdated = pushop.outdatedphases
1029 1025 else:
1030 1026 outdated = pushop.fallbackoutdatedphases
1031 1027
1032 1028 pushop.stepsdone.add('phases')
1033 1029
1034 1030 # filter heads already turned public by the push
1035 1031 outdated = [c for c in outdated if c.node() not in pheads]
1036 1032 # fallback to independent pushkey command
1037 1033 for newremotehead in outdated:
1038 1034 r = pushop.remote.pushkey('phases',
1039 1035 newremotehead.hex(),
1040 1036 str(phases.draft),
1041 1037 str(phases.public))
1042 1038 if not r:
1043 1039 pushop.ui.warn(_('updating %s to public failed!\n')
1044 1040 % newremotehead)
1045 1041
1046 1042 def _localphasemove(pushop, nodes, phase=phases.public):
1047 1043 """move <nodes> to <phase> in the local source repo"""
1048 1044 if pushop.trmanager:
1049 1045 phases.advanceboundary(pushop.repo,
1050 1046 pushop.trmanager.transaction(),
1051 1047 phase,
1052 1048 nodes)
1053 1049 else:
1054 1050 # repo is not locked, do not change any phases!
1056 1052 # Inform the user that phases should have been moved when
1057 1053 # applicable.
1057 1053 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1058 1054 phasestr = phases.phasenames[phase]
1059 1055 if actualmoves:
1060 1056 pushop.ui.status(_('cannot lock source repo, skipping '
1061 1057 'local %s phase update\n') % phasestr)
1062 1058
1063 1059 def _pushobsolete(pushop):
1064 1060 """utility function to push obsolete markers to a remote"""
1065 1061 if 'obsmarkers' in pushop.stepsdone:
1066 1062 return
1067 1063 repo = pushop.repo
1068 1064 remote = pushop.remote
1069 1065 pushop.stepsdone.add('obsmarkers')
1070 1066 if pushop.outobsmarkers:
1071 1067 pushop.ui.debug('try to push obsolete markers to remote\n')
1072 1068 rslts = []
1073 1069 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1074 1070 for key in sorted(remotedata, reverse=True):
1075 1071 # reverse sort to ensure we end with dump0
1076 1072 data = remotedata[key]
1077 1073 rslts.append(remote.pushkey('obsolete', key, '', data))
1078 1074 if [r for r in rslts if not r]:
1079 1075 msg = _('failed to push some obsolete markers!\n')
1080 1076 repo.ui.warn(msg)
1081 1077
1082 1078 def _pushbookmark(pushop):
1083 1079 """Update bookmark position on remote"""
1084 1080 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1085 1081 return
1086 1082 pushop.stepsdone.add('bookmarks')
1087 1083 ui = pushop.ui
1088 1084 remote = pushop.remote
1089 1085
1090 1086 for b, old, new in pushop.outbookmarks:
1091 1087 action = 'update'
1092 1088 if not old:
1093 1089 action = 'export'
1094 1090 elif not new:
1095 1091 action = 'delete'
1096 1092 if remote.pushkey('bookmarks', b, old, new):
1097 1093 ui.status(bookmsgmap[action][0] % b)
1098 1094 else:
1099 1095 ui.warn(bookmsgmap[action][1] % b)
1100 1096 # discovery may have set the value from an invalid entry
1101 1097 if pushop.bkresult is not None:
1102 1098 pushop.bkresult = 1
1103 1099
1104 1100 class pulloperation(object):
1105 1101 """A object that represent a single pull operation
1106 1102
1107 1103 It purpose is to carry pull related state and very common operation.
1108 1104
1109 1105 A new should be created at the beginning of each pull and discarded
1110 1106 afterward.
1111 1107 """
1112 1108
1113 1109 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1114 1110 remotebookmarks=None, streamclonerequested=None):
1115 1111 # repo we pull into
1116 1112 self.repo = repo
1117 1113 # repo we pull from
1118 1114 self.remote = remote
1119 1115 # revision we try to pull (None is "all")
1120 1116 self.heads = heads
1122 1118 # bookmarks pulled explicitly
1122 1118 self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
1123 1119 for bookmark in bookmarks]
1124 1120 # do we force pull?
1125 1121 self.force = force
1126 1122 # whether a streaming clone was requested
1127 1123 self.streamclonerequested = streamclonerequested
1128 1124 # transaction manager
1129 1125 self.trmanager = None
1131 1127 # set of common changesets between local and remote before pull
1131 1127 self.common = None
1133 1129 # set of pulled heads
1133 1129 self.rheads = None
1135 1131 # list of missing changesets to fetch remotely
1135 1131 self.fetch = None
1136 1132 # remote bookmarks data
1137 1133 self.remotebookmarks = remotebookmarks
1138 1134 # result of changegroup pulling (used as return code by pull)
1139 1135 self.cgresult = None
1141 1137 # list of steps already done
1141 1137 self.stepsdone = set()
1142 1138 # Whether we attempted a clone from pre-generated bundles.
1143 1139 self.clonebundleattempted = False
1144 1140
1145 1141 @util.propertycache
1146 1142 def pulledsubset(self):
1147 1143 """heads of the set of changeset target by the pull"""
1148 1144 # compute target subset
1149 1145 if self.heads is None:
1150 1146 # We pulled everything possible
1151 1147 # sync on everything common
1152 1148 c = set(self.common)
1153 1149 ret = list(self.common)
1154 1150 for n in self.rheads:
1155 1151 if n not in c:
1156 1152 ret.append(n)
1157 1153 return ret
1158 1154 else:
1159 1155 # We pulled a specific subset
1160 1156 # sync on this subset
1161 1157 return self.heads
1162 1158
1163 1159 @util.propertycache
1164 1160 def canusebundle2(self):
1165 1161 return not _forcebundle1(self)
1166 1162
1167 1163 @util.propertycache
1168 1164 def remotebundle2caps(self):
1169 1165 return bundle2.bundle2caps(self.remote)
1170 1166
1171 1167 def gettransaction(self):
1172 1168 # deprecated; talk to trmanager directly
1173 1169 return self.trmanager.transaction()
1174 1170
1175 1171 class transactionmanager(util.transactional):
1176 1172 """An object to manage the life cycle of a transaction
1177 1173
1178 1174 It creates the transaction on demand and calls the appropriate hooks when
1179 1175 closing the transaction."""
1180 1176 def __init__(self, repo, source, url):
1181 1177 self.repo = repo
1182 1178 self.source = source
1183 1179 self.url = url
1184 1180 self._tr = None
1185 1181
1186 1182 def transaction(self):
1187 1183 """Return an open transaction object, constructing if necessary"""
1188 1184 if not self._tr:
1189 1185 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1190 1186 self._tr = self.repo.transaction(trname)
1191 1187 self._tr.hookargs['source'] = self.source
1192 1188 self._tr.hookargs['url'] = self.url
1193 1189 return self._tr
1194 1190
1195 1191 def close(self):
1196 1192 """close transaction if created"""
1197 1193 if self._tr is not None:
1198 1194 self._tr.close()
1199 1195
1200 1196 def release(self):
1201 1197 """release transaction if created"""
1202 1198 if self._tr is not None:
1203 1199 self._tr.release()
1204 1200
1205 1201 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1206 1202 streamclonerequested=None):
1207 1203 """Fetch repository data from a remote.
1208 1204
1209 1205 This is the main function used to retrieve data from a remote repository.
1210 1206
1211 1207 ``repo`` is the local repository to clone into.
1212 1208 ``remote`` is a peer instance.
1213 1209 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1214 1210 default) means to pull everything from the remote.
1215 1211 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1216 1212 default, all remote bookmarks are pulled.
1217 1213 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1218 1214 initialization.
1219 1215 ``streamclonerequested`` is a boolean indicating whether a "streaming
1220 1216 clone" is requested. A "streaming clone" is essentially a raw file copy
1221 1217 of revlogs from the server. This only works when the local repository is
1222 1218 empty. The default value of ``None`` means to respect the server
1223 1219 configuration for preferring stream clones.
1224 1220
1225 1221 Returns the ``pulloperation`` created for this pull.
1226 1222 """
1227 1223 if opargs is None:
1228 1224 opargs = {}
1229 1225 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1230 1226 streamclonerequested=streamclonerequested, **opargs)
1231 1227
1232 1228 peerlocal = pullop.remote.local()
1233 1229 if peerlocal:
1234 1230 missing = set(peerlocal.requirements) - pullop.repo.supported
1235 1231 if missing:
1236 1232 msg = _("required features are not"
1237 1233 " supported in the destination:"
1238 1234 " %s") % (', '.join(sorted(missing)))
1239 1235 raise error.Abort(msg)
1240 1236
1241 1237 wlock = lock = None
1242 1238 try:
1243 1239 wlock = pullop.repo.wlock()
1244 1240 lock = pullop.repo.lock()
1245 1241 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1246 1242 streamclone.maybeperformlegacystreamclone(pullop)
1247 1243 # This should ideally be in _pullbundle2(). However, it needs to run
1248 1244 # before discovery to avoid extra work.
1249 1245 _maybeapplyclonebundle(pullop)
1250 1246 _pulldiscovery(pullop)
1251 1247 if pullop.canusebundle2:
1252 1248 _pullbundle2(pullop)
1253 1249 _pullchangeset(pullop)
1254 1250 _pullphase(pullop)
1255 1251 _pullbookmarks(pullop)
1256 1252 _pullobsolete(pullop)
1257 1253 pullop.trmanager.close()
1258 1254 finally:
1259 1255 lockmod.release(pullop.trmanager, lock, wlock)
1260 1256
1261 1257 return pullop
1262 1258
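# A minimal usage sketch (hypothetical caller code) of pull():
#
#   pullop = pull(repo, remote)     # heads=None pulls everything
#   ret = pullop.cgresult           # changegroup result (return code)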
1263 1259 # list of steps to perform discovery before pull
1264 1260 pulldiscoveryorder = []
1265 1261
1266 1262 # Mapping between step name and function
1267 1263 #
1268 1264 # This exists to help extensions wrap steps if necessary
1269 1265 pulldiscoverymapping = {}
1270 1266
1271 1267 def pulldiscovery(stepname):
1272 1268 """decorator for function performing discovery before pull
1273 1269
1274 1270 The function is added to the step -> function mapping and appended to the
1275 1271 list of steps. Beware that decorated functions will be added in order (this
1276 1272 may matter).
1277 1273
1278 1274 You can only use this decorator for a new step; if you want to wrap a step
1279 1275 from an extension, change the pulldiscoverymapping dictionary directly."""
1280 1276 def dec(func):
1281 1277 assert stepname not in pulldiscoverymapping
1282 1278 pulldiscoverymapping[stepname] = func
1283 1279 pulldiscoveryorder.append(stepname)
1284 1280 return func
1285 1281 return dec
1286 1282
1287 1283 def _pulldiscovery(pullop):
1288 1284 """Run all discovery steps"""
1289 1285 for stepname in pulldiscoveryorder:
1290 1286 step = pulldiscoverymapping[stepname]
1291 1287 step(pullop)
1292 1288
1293 1289 @pulldiscovery('b1:bookmarks')
1294 1290 def _pullbookmarkbundle1(pullop):
1295 1291 """fetch bookmark data in bundle1 case
1296 1292
1297 1293 If not using bundle2, we have to fetch bookmarks before changeset
1298 1294 discovery to reduce the chance and impact of race conditions."""
1299 1295 if pullop.remotebookmarks is not None:
1300 1296 return
1301 1297 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1302 1298 # all known bundle2 servers now support listkeys, but let's be nice
1303 1299 # with new implementations.
1304 1300 return
1305 1301 pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1306 1302
1307 1303
1308 1304 @pulldiscovery('changegroup')
1309 1305 def _pulldiscoverychangegroup(pullop):
1310 1306 """discovery phase for the pull
1311 1307
1312 1308 Currently handles changeset discovery only; will change to handle all
1313 1309 discovery at some point."""
1314 1310 tmp = discovery.findcommonincoming(pullop.repo,
1315 1311 pullop.remote,
1316 1312 heads=pullop.heads,
1317 1313 force=pullop.force)
1318 1314 common, fetch, rheads = tmp
1319 1315 nm = pullop.repo.unfiltered().changelog.nodemap
1320 1316 if fetch and rheads:
1321 1317 # If a remote head is filtered locally, let's drop it from the unknown
1322 1318 # remote heads and put it back in common.
1323 1319 #
1324 1320 # This is a hackish solution to catch most of the "common but locally
1325 1321 # hidden" situations. We do not perform discovery on the unfiltered
1326 1322 # repository because it ends up doing a pathological amount of round
1327 1323 # trips for a huge amount of changesets we do not care about.
1328 1324 #
1329 1325 # If a set of such "common but filtered" changesets exists on the server
1330 1326 # but does not include a remote head, we'll not be able to detect it,
1331 1327 scommon = set(common)
1332 1328 filteredrheads = []
1333 1329 for n in rheads:
1334 1330 if n in nm:
1335 1331 if n not in scommon:
1336 1332 common.append(n)
1337 1333 else:
1338 1334 filteredrheads.append(n)
1339 1335 if not filteredrheads:
1340 1336 fetch = []
1341 1337 rheads = filteredrheads
1342 1338 pullop.common = common
1343 1339 pullop.fetch = fetch
1344 1340 pullop.rheads = rheads
1345 1341
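# A worked example (hypothetical) of the filtering above: suppose the
# remote advertises rheads = {X, Y}, X is known locally but hidden
# (X is in nm yet not in scommon) and Y is unknown. Then X is moved
# back into common and dropped from rheads, while Y survives in
# filteredrheads; if every advertised head had been known locally,
# filteredrheads would be empty and fetch would collapse to [].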
1346 1342 def _pullbundle2(pullop):
1347 1343 """pull data using bundle2
1348 1344
1349 1345 For now, the only supported type of data is the changegroup."""
1350 1346 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1351 1347
1352 1348 # At the moment we don't do stream clones over bundle2. If that is
1353 1349 # implemented then here's where the check for that will go.
1354 1350 streaming = False
1355 1351
1356 1352 # pulling changegroup
1357 1353 pullop.stepsdone.add('changegroup')
1358 1354
1359 1355 kwargs['common'] = pullop.common
1360 1356 kwargs['heads'] = pullop.heads or pullop.rheads
1361 1357 kwargs['cg'] = pullop.fetch
1362 1358 if 'listkeys' in pullop.remotebundle2caps:
1363 1359 kwargs['listkeys'] = ['phases']
1364 1360 if pullop.remotebookmarks is None:
1365 1361 # make sure to always include bookmark data when migrating
1366 1362 # `hg incoming --bundle` to using this function.
1367 1363 kwargs['listkeys'].append('bookmarks')
1368 1364
1369 1365 # If this is a full pull / clone and the server supports the clone bundles
1370 1366 # feature, tell the server whether we attempted a clone bundle. The
1371 1367 # presence of this flag indicates the client supports clone bundles. This
1372 1368 # will enable the server to treat clients that support clone bundles
1373 1369 # differently from those that don't.
1374 1370 if (pullop.remote.capable('clonebundles')
1375 1371 and pullop.heads is None and list(pullop.common) == [nullid]):
1376 1372 kwargs['cbattempted'] = pullop.clonebundleattempted
1377 1373
1378 1374 if streaming:
1379 1375 pullop.repo.ui.status(_('streaming all changes\n'))
1380 1376 elif not pullop.fetch:
1381 1377 pullop.repo.ui.status(_("no changes found\n"))
1382 1378 pullop.cgresult = 0
1383 1379 else:
1384 1380 if pullop.heads is None and list(pullop.common) == [nullid]:
1385 1381 pullop.repo.ui.status(_("requesting all changes\n"))
1386 1382 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1387 1383 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1388 1384 if obsolete.commonversion(remoteversions) is not None:
1389 1385 kwargs['obsmarkers'] = True
1390 1386 pullop.stepsdone.add('obsmarkers')
1391 1387 _pullbundle2extraprepare(pullop, kwargs)
1392 1388 bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
1393 1389 try:
1394 1390 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1395 1391 except bundle2.AbortFromPart as exc:
1396 1392 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1397 1393 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1398 1394 except error.BundleValueError as exc:
1399 1395 raise error.Abort(_('missing support for %s') % exc)
1400 1396
1401 1397 if pullop.fetch:
1402 1398 pullop.cgresult = bundle2.combinechangegroupresults(op)
1403 1399
1404 1400 # If the bundle had a phase-heads part, then phase exchange is already done
1405 1401 if op.records['phase-heads']:
1406 1402 pullop.stepsdone.add('phases')
1407 1403
1408 1404 # processing phases change
1409 1405 for namespace, value in op.records['listkeys']:
1410 1406 if namespace == 'phases':
1411 1407 _pullapplyphases(pullop, value)
1412 1408
1413 1409 # processing bookmark update
1414 1410 for namespace, value in op.records['listkeys']:
1415 1411 if namespace == 'bookmarks':
1416 1412 pullop.remotebookmarks = value
1417 1413
1418 1414 # bookmark data were either already there or pulled in the bundle
1419 1415 if pullop.remotebookmarks is not None:
1420 1416 _pullbookmarks(pullop)
1421 1417
1422 1418 def _pullbundle2extraprepare(pullop, kwargs):
1423 1419 """hook function so that extensions can extend the getbundle call"""
1424 1420 pass
1425 1421
1426 1422 def _pullchangeset(pullop):
1427 1423 """pull changeset from unbundle into the local repo"""
1428 1424 # We delay opening the transaction as late as possible so we
1429 1425 # don't open a transaction for nothing and don't break a future
1430 1426 # useful rollback call
1431 1427 if 'changegroup' in pullop.stepsdone:
1432 1428 return
1433 1429 pullop.stepsdone.add('changegroup')
1434 1430 if not pullop.fetch:
1435 1431 pullop.repo.ui.status(_("no changes found\n"))
1436 1432 pullop.cgresult = 0
1437 1433 return
1438 1434 tr = pullop.gettransaction()
1439 1435 if pullop.heads is None and list(pullop.common) == [nullid]:
1440 1436 pullop.repo.ui.status(_("requesting all changes\n"))
1441 1437 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1442 1438 # issue1320, avoid a race if remote changed after discovery
1443 1439 pullop.heads = pullop.rheads
1444 1440
1445 1441 if pullop.remote.capable('getbundle'):
1446 1442 # TODO: get bundlecaps from remote
1447 1443 cg = pullop.remote.getbundle('pull', common=pullop.common,
1448 1444 heads=pullop.heads or pullop.rheads)
1449 1445 elif pullop.heads is None:
1450 1446 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1451 1447 elif not pullop.remote.capable('changegroupsubset'):
1452 1448 raise error.Abort(_("partial pull cannot be done because "
1453 1449 "other repository doesn't support "
1454 1450 "changegroupsubset."))
1455 1451 else:
1456 1452 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1457 1453 bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
1458 1454 pullop.remote.url())
1459 1455 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1460 1456
1461 1457 def _pullphase(pullop):
1462 1458 # Get remote phases data from remote
1463 1459 if 'phases' in pullop.stepsdone:
1464 1460 return
1465 1461 remotephases = pullop.remote.listkeys('phases')
1466 1462 _pullapplyphases(pullop, remotephases)
1467 1463
1468 1464 def _pullapplyphases(pullop, remotephases):
1469 1465 """apply phase movement from observed remote state"""
1470 1466 if 'phases' in pullop.stepsdone:
1471 1467 return
1472 1468 pullop.stepsdone.add('phases')
1473 1469 publishing = bool(remotephases.get('publishing', False))
1474 1470 if remotephases and not publishing:
1475 1471 # remote is new and non-publishing
1476 1472 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1477 1473 pullop.pulledsubset,
1478 1474 remotephases)
1479 1475 dheads = pullop.pulledsubset
1480 1476 else:
1481 1477 # Remote is old or publishing all common changesets
1482 1478 # should be seen as public
1483 1479 pheads = pullop.pulledsubset
1484 1480 dheads = []
1485 1481 unfi = pullop.repo.unfiltered()
1486 1482 phase = unfi._phasecache.phase
1487 1483 rev = unfi.changelog.nodemap.get
1488 1484 public = phases.public
1489 1485 draft = phases.draft
1490 1486
1491 1487 # exclude changesets already public locally and update the others
1492 1488 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1493 1489 if pheads:
1494 1490 tr = pullop.gettransaction()
1495 1491 phases.advanceboundary(pullop.repo, tr, public, pheads)
1496 1492
1497 1493 # exclude changesets already draft locally and update the others
1498 1494 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1499 1495 if dheads:
1500 1496 tr = pullop.gettransaction()
1501 1497 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1502 1498
1503 1499 def _pullbookmarks(pullop):
1504 1500 """process the remote bookmark information to update the local one"""
1505 1501 if 'bookmarks' in pullop.stepsdone:
1506 1502 return
1507 1503 pullop.stepsdone.add('bookmarks')
1508 1504 repo = pullop.repo
1509 1505 remotebookmarks = pullop.remotebookmarks
1510 1506 remotebookmarks = bookmod.unhexlifybookmarks(remotebookmarks)
1511 1507 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1512 1508 pullop.remote.url(),
1513 1509 pullop.gettransaction,
1514 1510 explicit=pullop.explicitbookmarks)
1515 1511
1516 1512 def _pullobsolete(pullop):
1517 1513 """utility function to pull obsolete markers from a remote
1518 1514
1519 1515 The `gettransaction` function returns the pull transaction, creating
1520 1516 one if necessary. We return the transaction to inform the calling code that
1521 1517 a new transaction has been created (when applicable).
1522 1518
1523 1519 Exists mostly to allow overriding for experimentation purposes"""
1524 1520 if 'obsmarkers' in pullop.stepsdone:
1525 1521 return
1526 1522 pullop.stepsdone.add('obsmarkers')
1527 1523 tr = None
1528 1524 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1529 1525 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1530 1526 remoteobs = pullop.remote.listkeys('obsolete')
1531 1527 if 'dump0' in remoteobs:
1532 1528 tr = pullop.gettransaction()
1533 1529 markers = []
1534 1530 for key in sorted(remoteobs, reverse=True):
1535 1531 if key.startswith('dump'):
1536 1532 data = util.b85decode(remoteobs[key])
1537 1533 version, newmarks = obsolete._readmarkers(data)
1538 1534 markers += newmarks
1539 1535 if markers:
1540 1536 pullop.repo.obsstore.add(tr, markers)
1541 1537 pullop.repo.invalidatevolatilesets()
1542 1538 return tr
1543 1539
1544 1540 def caps20to10(repo):
1545 1541 """return a set with appropriate options to use bundle20 during getbundle"""
1546 1542 caps = {'HG20'}
1547 1543 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1548 1544 caps.add('bundle2=' + urlreq.quote(capsblob))
1549 1545 return caps
1550 1546
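# The resulting set looks like (hypothetical capability blob):
#
#   {'HG20', 'bundle2=HG20%0Achangegroup%3D01%2C02'}
#
# i.e. the repo's bundle2 capabilities are URL-quoted into a single
# 'bundle2=' token suitable for the 'bundlecaps' getbundle argument.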
1551 1547 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1552 1548 getbundle2partsorder = []
1553 1549
1554 1550 # Mapping between step name and function
1555 1551 #
1556 1552 # This exists to help extensions wrap steps if necessary
1557 1553 getbundle2partsmapping = {}
1558 1554
1559 1555 def getbundle2partsgenerator(stepname, idx=None):
1560 1556 """decorator for function generating bundle2 part for getbundle
1561 1557
1562 1558 The function is added to the step -> function mapping and appended to the
1563 1559 list of steps. Beware that decorated functions will be added in order
1564 1560 (this may matter).
1565 1561
1566 1562 You can only use this decorator for new steps; if you want to wrap a step
1567 1563 from an extension, change the getbundle2partsmapping dictionary directly."""
1568 1564 def dec(func):
1569 1565 assert stepname not in getbundle2partsmapping
1570 1566 getbundle2partsmapping[stepname] = func
1571 1567 if idx is None:
1572 1568 getbundle2partsorder.append(stepname)
1573 1569 else:
1574 1570 getbundle2partsorder.insert(idx, stepname)
1575 1571 return func
1576 1572 return dec
1577 1573
1578 1574 def bundle2requested(bundlecaps):
1579 1575 if bundlecaps is not None:
1580 1576 return any(cap.startswith('HG2') for cap in bundlecaps)
1581 1577 return False
1582 1578
1583 1579 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
1584 1580 **kwargs):
1585 1581 """Return chunks constituting a bundle's raw data.
1586 1582
1587 1583 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1588 1584 passed.
1589 1585
1590 1586 Returns an iterator over raw chunks (of varying sizes).
1591 1587 """
1592 1588 kwargs = pycompat.byteskwargs(kwargs)
1593 1589 usebundle2 = bundle2requested(bundlecaps)
1594 1590 # bundle10 case
1595 1591 if not usebundle2:
1596 1592 if bundlecaps and not kwargs.get('cg', True):
1597 1593 raise ValueError(_('request for bundle10 must include changegroup'))
1598 1594
1599 1595 if kwargs:
1600 1596 raise ValueError(_('unsupported getbundle arguments: %s')
1601 1597 % ', '.join(sorted(kwargs.keys())))
1602 1598 outgoing = _computeoutgoing(repo, heads, common)
1603 1599 bundler = changegroup.getbundler('01', repo, bundlecaps)
1604 1600 return changegroup.getsubsetraw(repo, outgoing, bundler, source)
1605 1601
1606 1602 # bundle20 case
1607 1603 b2caps = {}
1608 1604 for bcaps in bundlecaps:
1609 1605 if bcaps.startswith('bundle2='):
1610 1606 blob = urlreq.unquote(bcaps[len('bundle2='):])
1611 1607 b2caps.update(bundle2.decodecaps(blob))
1612 1608 bundler = bundle2.bundle20(repo.ui, b2caps)
1613 1609
1614 1610 kwargs['heads'] = heads
1615 1611 kwargs['common'] = common
1616 1612
1617 1613 for name in getbundle2partsorder:
1618 1614 func = getbundle2partsmapping[name]
1619 1615 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1620 1616 **pycompat.strkwargs(kwargs))
1621 1617
1622 1618 return bundler.getchunks()
1623 1619
1624 1620 @getbundle2partsgenerator('changegroup')
1625 1621 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1626 1622 b2caps=None, heads=None, common=None, **kwargs):
1627 1623 """add a changegroup part to the requested bundle"""
1628 1624 cg = None
1629 1625 if kwargs.get('cg', True):
1630 1626 # build changegroup bundle here.
1631 1627 version = '01'
1632 1628 cgversions = b2caps.get('changegroup')
1633 1629 if cgversions: # 3.1 and 3.2 ship with an empty value
1634 1630 cgversions = [v for v in cgversions
1635 1631 if v in changegroup.supportedoutgoingversions(repo)]
1636 1632 if not cgversions:
1637 1633 raise ValueError(_('no common changegroup version'))
1638 1634 version = max(cgversions)
1639 1635 outgoing = _computeoutgoing(repo, heads, common)
1640 1636 cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
1641 1637 bundlecaps=bundlecaps,
1642 1638 version=version)
1643 1639
1644 1640 if cg:
1645 1641 part = bundler.newpart('changegroup', data=cg)
1646 1642 if cgversions:
1647 1643 part.addparam('version', version)
1648 1644 part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
1649 1645 if 'treemanifest' in repo.requirements:
1650 1646 part.addparam('treemanifest', '1')
1651 1647
1652 1648 @getbundle2partsgenerator('listkeys')
1653 1649 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1654 1650 b2caps=None, **kwargs):
1655 1651 """add parts containing listkeys namespaces to the requested bundle"""
1656 1652 listkeys = kwargs.get('listkeys', ())
1657 1653 for namespace in listkeys:
1658 1654 part = bundler.newpart('listkeys')
1659 1655 part.addparam('namespace', namespace)
1660 1656 keys = repo.listkeys(namespace).items()
1661 1657 part.data = pushkey.encodekeys(keys)
1662 1658
1663 1659 @getbundle2partsgenerator('obsmarkers')
1664 1660 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1665 1661 b2caps=None, heads=None, **kwargs):
1666 1662 """add an obsolescence markers part to the requested bundle"""
1667 1663 if kwargs.get('obsmarkers', False):
1668 1664 if heads is None:
1669 1665 heads = repo.heads()
1670 1666 subset = [c.node() for c in repo.set('::%ln', heads)]
1671 1667 markers = repo.obsstore.relevantmarkers(subset)
1672 1668 markers = sorted(markers)
1673 1669 bundle2.buildobsmarkerspart(bundler, markers)
1674 1670
1675 1671 @getbundle2partsgenerator('hgtagsfnodes')
1676 1672 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1677 1673 b2caps=None, heads=None, common=None,
1678 1674 **kwargs):
1679 1675 """Transfer the .hgtags filenodes mapping.
1680 1676
1681 1677 Only values for heads in this bundle will be transferred.
1682 1678
1683 1679 The part data consists of pairs of 20 byte changeset node and .hgtags
1684 1680 filenodes raw values.
1685 1681 """
1686 1682 # Don't send unless:
1687 1683 # - changesets are being exchanged,
1688 1684 # - the client supports it.
1689 1685 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1690 1686 return
1691 1687
1692 1688 outgoing = _computeoutgoing(repo, heads, common)
1693 1689 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
1694 1690
1695 1691 def _getbookmarks(repo, **kwargs):
1696 1692 """Returns bookmark to node mapping.
1697 1693
1698 1694 This function is primarily used to generate `bookmarks` bundle2 part.
1699 1695 It is a separate function in order to make it easy to wrap it
1700 1696 in extensions. Passing `kwargs` to the function makes it easy to
1701 1697 add new parameters in extensions.
1702 1698 """
1703 1699
1704 1700 return dict(bookmod.listbinbookmarks(repo))
1705 1701
1706 1702 def check_heads(repo, their_heads, context):
1707 1703 """check if the heads of a repo have been modified
1708 1704
1709 1705 Used by peer for unbundling.
1710 1706 """
1711 1707 heads = repo.heads()
1712 1708 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
1713 1709 if not (their_heads == ['force'] or their_heads == heads or
1714 1710 their_heads == ['hashed', heads_hash]):
1715 1711 # someone else committed/pushed/unbundled while we
1716 1712 # were transferring data
1717 1713 raise error.PushRaced('repository changed while %s - '
1718 1714 'please try again' % context)
1719 1715
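# A client-side sketch of producing the 'hashed' form accepted above,
# assuming 'heads' is the list of 20-byte head nodes it last saw:
#
#   import hashlib
#   their_heads = ['hashed',
#                  hashlib.sha1(''.join(sorted(heads))).digest()]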
1720 1716 def unbundle(repo, cg, heads, source, url):
1721 1717 """Apply a bundle to a repo.
1722 1718
1723 1719 This function makes sure the repo is locked during the application and has
1724 1720 a mechanism to check that no push race occurred between the creation of the
1725 1721 bundle and its application.
1726 1722
1727 1723 If the push was raced, a PushRaced exception is raised."""
1728 1724 r = 0
1729 1725 # need a transaction when processing a bundle2 stream
1730 1726 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
1731 1727 lockandtr = [None, None, None]
1732 1728 recordout = None
1733 1729 # quick fix for output mismatch with bundle2 in 3.4
1734 1730 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
1735 1731 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1736 1732 captureoutput = True
1737 1733 try:
1738 1734 # note: outside bundle1, 'heads' is expected to be empty and this
1739 1735 # 'check_heads' call will be a no-op
1740 1736 check_heads(repo, heads, 'uploading changes')
1741 1737 # push can proceed
1742 1738 if not isinstance(cg, bundle2.unbundle20):
1743 1739 # legacy case: bundle1 (changegroup 01)
1744 1740 txnname = "\n".join([source, util.hidepassword(url)])
1745 1741 with repo.lock(), repo.transaction(txnname) as tr:
1746 1742 op = bundle2.applybundle(repo, cg, tr, source, url)
1747 1743 r = bundle2.combinechangegroupresults(op)
1748 1744 else:
1749 1745 r = None
1750 1746 try:
1751 1747 def gettransaction():
1752 1748 if not lockandtr[2]:
1753 1749 lockandtr[0] = repo.wlock()
1754 1750 lockandtr[1] = repo.lock()
1755 1751 lockandtr[2] = repo.transaction(source)
1756 1752 lockandtr[2].hookargs['source'] = source
1757 1753 lockandtr[2].hookargs['url'] = url
1758 1754 lockandtr[2].hookargs['bundle2'] = '1'
1759 1755 return lockandtr[2]
1760 1756
1761 1757 # Do greedy locking by default until we're satisfied with lazy
1762 1758 # locking.
1763 1759 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
1764 1760 gettransaction()
1765 1761
1766 1762 op = bundle2.bundleoperation(repo, gettransaction,
1767 1763 captureoutput=captureoutput)
1768 1764 try:
1769 1765 op = bundle2.processbundle(repo, cg, op=op)
1770 1766 finally:
1771 1767 r = op.reply
1772 1768 if captureoutput and r is not None:
1773 1769 repo.ui.pushbuffer(error=True, subproc=True)
1774 1770 def recordout(output):
1775 1771 r.newpart('output', data=output, mandatory=False)
1776 1772 if lockandtr[2] is not None:
1777 1773 lockandtr[2].close()
1778 1774 except BaseException as exc:
1779 1775 exc.duringunbundle2 = True
1780 1776 if captureoutput and r is not None:
1781 1777 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1782 1778 def recordout(output):
1783 1779 part = bundle2.bundlepart('output', data=output,
1784 1780 mandatory=False)
1785 1781 parts.append(part)
1786 1782 raise
1787 1783 finally:
1788 1784 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
1789 1785 if recordout is not None:
1790 1786 recordout(repo.ui.popbuffer())
1791 1787 return r
1792 1788
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('ui', 'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    res = remote._call('clonebundles')

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(repo, entries)
    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback'):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))

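# For illustration only: the client-side knobs consulted above correspond to
# hgrc settings along these lines (the values shown are assumptions for the
# sketch, not necessarily the shipped defaults):
#
#   [ui]
#   clonebundles = true          # set false to opt out of clone bundles
#   clonebundlefallback = false  # abort instead of regular clone on failure
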
def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    m = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    comp, version, params = parsebundlespec(repo, value,
                                                            externalnames=True)
                    attrs['COMPRESSION'] = comp
                    attrs['VERSION'] = version
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        m.append(attrs)

    return m

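# A hedged sketch of the manifest format parsed above; the URL and attribute
# values are invented. Each line is a URL followed by KEY=VALUE pairs, with
# BUNDLESPEC additionally expanded into COMPRESSION/VERSION:
#
#   manifest = ('https://example.com/full.hg '
#               'BUNDLESPEC=gzip-v2 REQUIRESNI=true\n')
#   parseclonebundlesmanifest(repo, manifest)
#   # -> [{'URL': 'https://example.com/full.hg', 'BUNDLESPEC': 'gzip-v2',
#   #      'COMPRESSION': 'gzip', 'VERSION': 'v2', 'REQUIRESNI': 'true'}]
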
def filterclonebundleentries(repo, entries):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                parsebundlespec(repo, spec, strict=True)
            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (entry['URL'], str(e)))
                continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries

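# Illustrative call (entry values invented): an entry whose bundle spec this
# client cannot parse is dropped, with the reason left in debug output:
#
#   entries = [{'URL': 'https://example.com/a.hg', 'BUNDLESPEC': 'gzip-v2'},
#              {'URL': 'https://example.com/b.hg', 'BUNDLESPEC': 'bogus'}]
#   filterclonebundleentries(repo, entries)
#   # -> [{'URL': 'https://example.com/a.hg', 'BUNDLESPEC': 'gzip-v2'}]
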
class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        for prefkey, prefvalue in self.prefers:
            avalue = self.value.get(prefkey)
            bvalue = other.value.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0

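# Comparison sketch (values invented): with a preference for zstd
# compression, an entry matching the preference exactly sorts before one
# missing the attribute entirely, per the first special case in _cmp():
#
#   prefers = [('COMPRESSION', 'zstd')]
#   a = clonebundleentry({'URL': 'a', 'COMPRESSION': 'zstd'}, prefers)
#   b = clonebundleentry({'URL': 'b'}, prefers)
#   assert a < b
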
def sortclonebundleentries(ui, entries):
    prefers = ui.configlist('ui', 'clonebundleprefers')
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    items = sorted(clonebundleentry(v, prefers) for v in entries)
    return [i.value for i in items]

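# End-to-end sorting sketch (attributes invented): with ui.clonebundleprefers
# set to COMPRESSION=zstd, the zstd entry wins even though it appears later
# in the manifest; sorted() is stable, so ties keep their manifest order:
#
#   ui.setconfig('ui', 'clonebundleprefers', 'COMPRESSION=zstd')
#   entries = [{'URL': 'a', 'COMPRESSION': 'gzip'},
#              {'URL': 'b', 'COMPRESSION': 'zstd'}]
#   sortclonebundleentries(ui, entries)
#   # -> entry 'b' first, then entry 'a'
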
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL."""
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, 'stream')

            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') % e.reason)

    return False
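
# Hedged usage sketch (the URL is made up): network and HTTP failures are
# reported as ui warnings and surface as a False return rather than an
# exception, leaving the fallback policy to the caller:
#
#   if not trypullbundlefromurl(repo.ui, repo, 'https://example.com/full.hg'):
#       repo.ui.warn('bundle fetch failed; falling back or aborting\n')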