exchange: set 'treemanifest' param on pushed changegroups too...
Martin von Zweigbergk
r27938:cabac7df stable
@@ -1,1933 +1,1935 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import urllib
12 12 import urllib2
13 13
14 14 from .i18n import _
15 15 from .node import (
16 16 hex,
17 17 nullid,
18 18 )
19 19 from . import (
20 20 base85,
21 21 bookmarks as bookmod,
22 22 bundle2,
23 23 changegroup,
24 24 discovery,
25 25 error,
26 26 lock as lockmod,
27 27 obsolete,
28 28 phases,
29 29 pushkey,
30 30 scmutil,
31 31 sslutil,
32 32 streamclone,
33 33 tags,
34 34 url as urlmod,
35 35 util,
36 36 )
37 37
38 38 # Maps bundle compression human names to internal representation.
39 39 _bundlespeccompressions = {'none': None,
40 40 'bzip2': 'BZ',
41 41 'gzip': 'GZ',
42 42 }
43 43
44 44 # Maps bundle version human names to changegroup versions.
45 45 _bundlespeccgversions = {'v1': '01',
46 46 'v2': '02',
47 47 'packed1': 's1',
48 48 'bundle2': '02', #legacy
49 49 }
50 50
51 51 def parsebundlespec(repo, spec, strict=True, externalnames=False):
52 52 """Parse a bundle string specification into parts.
53 53
54 54 Bundle specifications denote a well-defined bundle/exchange format.
55 55 The content of a given specification should not change over time in
56 56 order to ensure that bundles produced by a newer version of Mercurial are
57 57 readable from an older version.
58 58
59 59 The string currently has the form:
60 60
61 61 <compression>-<type>[;<parameter0>[;<parameter1>]]
62 62
63 63 Where <compression> is one of the supported compression formats
64 64 and <type> is (currently) a version string. A ";" can follow the type and
65 65 all text afterwards is interpreted as URI encoded, ";" delimited key=value
66 66 pairs.
67 67
68 68 If ``strict`` is True (the default) <compression> is required. Otherwise,
69 69 it is optional.
70 70
71 71 If ``externalnames`` is False (the default), the human-centric names will
72 72 be converted to their internal representation.
73 73
74 74 Returns a 3-tuple of (compression, version, parameters). Compression will
75 75 be ``None`` if not in strict mode and a compression isn't defined.
76 76
77 77 An ``InvalidBundleSpecification`` is raised when the specification is
78 78 not syntactically well formed.
79 79
80 80 An ``UnsupportedBundleSpecification`` is raised when the compression or
81 81 bundle type/version is not recognized.
82 82
83 83 Note: this function will likely eventually return a more complex data
84 84 structure, including bundle2 part information.
85 85 """
86 86 def parseparams(s):
87 87 if ';' not in s:
88 88 return s, {}
89 89
90 90 params = {}
91 91 version, paramstr = s.split(';', 1)
92 92
93 93 for p in paramstr.split(';'):
94 94 if '=' not in p:
95 95 raise error.InvalidBundleSpecification(
96 96 _('invalid bundle specification: '
97 97 'missing "=" in parameter: %s') % p)
98 98
99 99 key, value = p.split('=', 1)
100 100 key = urllib.unquote(key)
101 101 value = urllib.unquote(value)
102 102 params[key] = value
103 103
104 104 return version, params
105 105
106 106
107 107 if strict and '-' not in spec:
108 108 raise error.InvalidBundleSpecification(
109 109 _('invalid bundle specification; '
110 110 'must be prefixed with compression: %s') % spec)
111 111
112 112 if '-' in spec:
113 113 compression, version = spec.split('-', 1)
114 114
115 115 if compression not in _bundlespeccompressions:
116 116 raise error.UnsupportedBundleSpecification(
117 117 _('%s compression is not supported') % compression)
118 118
119 119 version, params = parseparams(version)
120 120
121 121 if version not in _bundlespeccgversions:
122 122 raise error.UnsupportedBundleSpecification(
123 123 _('%s is not a recognized bundle version') % version)
124 124 else:
125 125 # Value could be just the compression or just the version, in which
126 126 # case some defaults are assumed (but only when not in strict mode).
127 127 assert not strict
128 128
129 129 spec, params = parseparams(spec)
130 130
131 131 if spec in _bundlespeccompressions:
132 132 compression = spec
133 133 version = 'v1'
134 134 if 'generaldelta' in repo.requirements:
135 135 version = 'v2'
136 136 elif spec in _bundlespeccgversions:
137 137 if spec == 'packed1':
138 138 compression = 'none'
139 139 else:
140 140 compression = 'bzip2'
141 141 version = spec
142 142 else:
143 143 raise error.UnsupportedBundleSpecification(
144 144 _('%s is not a recognized bundle specification') % spec)
145 145
146 146 # The specification for packed1 can optionally declare the data formats
147 147 # required to apply it. If we see this metadata, compare against what the
148 148 # repo supports and error if the bundle isn't compatible.
149 149 if version == 'packed1' and 'requirements' in params:
150 150 requirements = set(params['requirements'].split(','))
151 151 missingreqs = requirements - repo.supportedformats
152 152 if missingreqs:
153 153 raise error.UnsupportedBundleSpecification(
154 154 _('missing support for repository features: %s') %
155 155 ', '.join(sorted(missingreqs)))
156 156
157 157 if not externalnames:
158 158 compression = _bundlespeccompressions[compression]
159 159 version = _bundlespeccgversions[version]
160 160 return compression, version, params
161 161
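# A minimal usage sketch of parsebundlespec (values hypothetical; assumes the
# repo's supportedformats include generaldelta):
#
#     comp, cgversion, params = parsebundlespec(repo, 'gzip-v2')
#     # comp == 'GZ', cgversion == '02', params == {}
#     comp, cgversion, params = parsebundlespec(
#         repo, 'none-packed1;requirements=generaldelta', externalnames=True)
#     # comp == 'none', cgversion == 'packed1'
#     # params == {'requirements': 'generaldelta'}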
162 162 def readbundle(ui, fh, fname, vfs=None):
163 163 header = changegroup.readexactly(fh, 4)
164 164
165 165 alg = None
166 166 if not fname:
167 167 fname = "stream"
168 168 if not header.startswith('HG') and header.startswith('\0'):
169 169 fh = changegroup.headerlessfixup(fh, header)
170 170 header = "HG10"
171 171 alg = 'UN'
172 172 elif vfs:
173 173 fname = vfs.join(fname)
174 174
175 175 magic, version = header[0:2], header[2:4]
176 176
177 177 if magic != 'HG':
178 178 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
179 179 if version == '10':
180 180 if alg is None:
181 181 alg = changegroup.readexactly(fh, 2)
182 182 return changegroup.cg1unpacker(fh, alg)
183 183 elif version.startswith('2'):
184 184 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
185 185 elif version == 'S1':
186 186 return streamclone.streamcloneapplier(fh)
187 187 else:
188 188 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
189 189
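# Usage sketch (hypothetical file): the returned object depends on the magic
# header: cg1unpacker for HG10, unbundle20 for HG2x, and streamcloneapplier
# for HGS1 stream clone bundles.
#
#     fh = open('bundle.hg', 'rb')
#     gen = readbundle(ui, fh, 'bundle.hg')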
190 190 def getbundlespec(ui, fh):
191 191 """Infer the bundlespec from a bundle file handle.
192 192
193 193 The input file handle is seeked and the original seek position is not
194 194 restored.
195 195 """
196 196 def speccompression(alg):
197 197 for k, v in _bundlespeccompressions.items():
198 198 if v == alg:
199 199 return k
200 200 return None
201 201
202 202 b = readbundle(ui, fh, None)
203 203 if isinstance(b, changegroup.cg1unpacker):
204 204 alg = b._type
205 205 if alg == '_truncatedBZ':
206 206 alg = 'BZ'
207 207 comp = speccompression(alg)
208 208 if not comp:
209 209 raise error.Abort(_('unknown compression algorithm: %s') % alg)
210 210 return '%s-v1' % comp
211 211 elif isinstance(b, bundle2.unbundle20):
212 212 if 'Compression' in b.params:
213 213 comp = speccompression(b.params['Compression'])
214 214 if not comp:
215 215 raise error.Abort(_('unknown compression algorithm: %s') % comp)
216 216 else:
217 217 comp = 'none'
218 218
219 219 version = None
220 220 for part in b.iterparts():
221 221 if part.type == 'changegroup':
222 222 version = part.params['version']
223 223 if version in ('01', '02'):
224 224 version = 'v2'
225 225 else:
226 226 raise error.Abort(_('changegroup version %s does not have '
227 227 'a known bundlespec') % version,
228 228 hint=_('try upgrading your Mercurial '
229 229 'client'))
230 230
231 231 if not version:
232 232 raise error.Abort(_('could not identify changegroup version in '
233 233 'bundle'))
234 234
235 235 return '%s-%s' % (comp, version)
236 236 elif isinstance(b, streamclone.streamcloneapplier):
237 237 requirements = streamclone.readbundle1header(fh)[2]
238 238 params = 'requirements=%s' % ','.join(sorted(requirements))
239 239 return 'none-packed1;%s' % urllib.quote(params)
240 240 else:
241 241 raise error.Abort(_('unknown bundle type: %s') % b)
242 242
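# Usage sketch (hypothetical file name): infer the spec of an on-disk bundle.
# Per the docstring above, the file handle is seeked and not restored.
#
#     with open('changesets.hg', 'rb') as fh:
#         spec = getbundlespec(ui, fh)
#     # e.g. 'bzip2-v1', 'gzip-v2' or 'none-packed1;requirements=...'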
243 243 def buildobsmarkerspart(bundler, markers):
244 244 """add an obsmarker part to the bundler with <markers>
245 245
246 246 No part is created if markers is empty.
247 247 Raises ValueError if the bundler doesn't support any known obsmarker format.
248 248 """
249 249 if markers:
250 250 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
251 251 version = obsolete.commonversion(remoteversions)
252 252 if version is None:
253 253 raise ValueError('bundler does not support common obsmarker format')
254 254 stream = obsolete.encodemarkers(markers, True, version=version)
255 255 return bundler.newpart('obsmarkers', data=stream)
256 256 return None
257 257
258 258 def _canusebundle2(op):
259 259 """return true if a pull/push can use bundle2
260 260
261 261 Feel free to nuke this function when we drop the experimental option"""
262 262 return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
263 263 and op.remote.capable('bundle2'))
264 264
265 265
266 266 class pushoperation(object):
267 267 """A object that represent a single push operation
268 268
269 269 It purpose is to carry push related state and very common operation.
270 270
271 271 A new should be created at the beginning of each push and discarded
272 272 afterward.
273 273 """
274 274
275 275 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
276 276 bookmarks=()):
277 277 # repo we push from
278 278 self.repo = repo
279 279 self.ui = repo.ui
280 280 # repo we push to
281 281 self.remote = remote
282 282 # force option provided
283 283 self.force = force
284 284 # revs to be pushed (None is "all")
285 285 self.revs = revs
286 286 # bookmark explicitly pushed
287 287 self.bookmarks = bookmarks
288 288 # allow push of new branch
289 289 self.newbranch = newbranch
290 290 # did a local lock get acquired?
291 291 self.locallocked = None
292 292 # steps already performed
293 293 # (used to check what steps have been already performed through bundle2)
294 294 self.stepsdone = set()
295 295 # Integer version of the changegroup push result
296 296 # - None means nothing to push
297 297 # - 0 means HTTP error
298 298 # - 1 means we pushed and remote head count is unchanged *or*
299 299 # we have outgoing changesets but refused to push
300 300 # - other values as described by addchangegroup()
301 301 self.cgresult = None
302 302 # Boolean value for the bookmark push
303 303 self.bkresult = None
304 304 # discover.outgoing object (contains common and outgoing data)
305 305 self.outgoing = None
306 306 # all remote heads before the push
307 307 self.remoteheads = None
308 308 # testable as a boolean indicating if any nodes are missing locally.
309 309 self.incoming = None
310 310 # phases changes that must be pushed alongside the changesets
311 311 self.outdatedphases = None
312 312 # phases changes that must be pushed if changeset push fails
313 313 self.fallbackoutdatedphases = None
314 314 # outgoing obsmarkers
315 315 self.outobsmarkers = set()
316 316 # outgoing bookmarks
317 317 self.outbookmarks = []
318 318 # transaction manager
319 319 self.trmanager = None
320 320 # map { pushkey partid -> callback handling failure}
321 321 # used to handle exception from mandatory pushkey part failure
322 322 self.pkfailcb = {}
323 323
324 324 @util.propertycache
325 325 def futureheads(self):
326 326 """future remote heads if the changeset push succeeds"""
327 327 return self.outgoing.missingheads
328 328
329 329 @util.propertycache
330 330 def fallbackheads(self):
331 331 """future remote heads if the changeset push fails"""
332 332 if self.revs is None:
333 333 # no target to push, all common heads are relevant
334 334 return self.outgoing.commonheads
335 335 unfi = self.repo.unfiltered()
336 336 # I want cheads = heads(::missingheads and ::commonheads)
337 337 # (missingheads is revs with secret changeset filtered out)
338 338 #
339 339 # This can be expressed as:
340 340 # cheads = ( (missingheads and ::commonheads)
341 341 # + (commonheads and ::missingheads)
342 342 # )
343 343 #
344 344 # while trying to push we already computed the following:
345 345 # common = (::commonheads)
346 346 # missing = ((commonheads::missingheads) - commonheads)
347 347 #
348 348 # We can pick:
349 349 # * missingheads part of common (::commonheads)
350 350 common = self.outgoing.common
351 351 nm = self.repo.changelog.nodemap
352 352 cheads = [node for node in self.revs if nm[node] in common]
353 353 # and
354 354 # * commonheads parents on missing
355 355 revset = unfi.set('%ln and parents(roots(%ln))',
356 356 self.outgoing.commonheads,
357 357 self.outgoing.missing)
358 358 cheads.extend(c.node() for c in revset)
359 359 return cheads
360 360
361 361 @property
362 362 def commonheads(self):
363 363 """set of all common heads after changeset bundle push"""
364 364 if self.cgresult:
365 365 return self.futureheads
366 366 else:
367 367 return self.fallbackheads
368 368
369 369 # mapping of messages used when pushing bookmarks
370 370 bookmsgmap = {'update': (_("updating bookmark %s\n"),
371 371 _('updating bookmark %s failed!\n')),
372 372 'export': (_("exporting bookmark %s\n"),
373 373 _('exporting bookmark %s failed!\n')),
374 374 'delete': (_("deleting remote bookmark %s\n"),
375 375 _('deleting remote bookmark %s failed!\n')),
376 376 }
377 377
378 378
379 379 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
380 380 opargs=None):
381 381 '''Push outgoing changesets (limited by revs) from a local
382 382 repository to remote. Return an integer:
383 383 - None means nothing to push
384 384 - 0 means HTTP error
385 385 - 1 means we pushed and remote head count is unchanged *or*
386 386 we have outgoing changesets but refused to push
387 387 - other values as described by addchangegroup()
388 388 '''
389 389 if opargs is None:
390 390 opargs = {}
391 391 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
392 392 **opargs)
393 393 if pushop.remote.local():
394 394 missing = (set(pushop.repo.requirements)
395 395 - pushop.remote.local().supported)
396 396 if missing:
397 397 msg = _("required features are not"
398 398 " supported in the destination:"
399 399 " %s") % (', '.join(sorted(missing)))
400 400 raise error.Abort(msg)
401 401
402 402 # there are two ways to push to remote repo:
403 403 #
404 404 # addchangegroup assumes local user can lock remote
405 405 # repo (local filesystem, old ssh servers).
406 406 #
407 407 # unbundle assumes local user cannot lock remote repo (new ssh
408 408 # servers, http servers).
409 409
410 410 if not pushop.remote.canpush():
411 411 raise error.Abort(_("destination does not support push"))
412 412 # get local lock as we might write phase data
413 413 localwlock = locallock = None
414 414 try:
415 415 # bundle2 push may receive a reply bundle touching bookmarks or other
416 416 # things requiring the wlock. Take it now to ensure proper ordering.
417 417 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
418 418 if _canusebundle2(pushop) and maypushback:
419 419 localwlock = pushop.repo.wlock()
420 420 locallock = pushop.repo.lock()
421 421 pushop.locallocked = True
422 422 except IOError as err:
423 423 pushop.locallocked = False
424 424 if err.errno != errno.EACCES:
425 425 raise
426 426 # source repo cannot be locked.
427 427 # We do not abort the push, but just disable the local phase
428 428 # synchronisation.
429 429 msg = 'cannot lock source repository: %s\n' % err
430 430 pushop.ui.debug(msg)
431 431 try:
432 432 if pushop.locallocked:
433 433 pushop.trmanager = transactionmanager(pushop.repo,
434 434 'push-response',
435 435 pushop.remote.url())
436 436 pushop.repo.checkpush(pushop)
437 437 lock = None
438 438 unbundle = pushop.remote.capable('unbundle')
439 439 if not unbundle:
440 440 lock = pushop.remote.lock()
441 441 try:
442 442 _pushdiscovery(pushop)
443 443 if _canusebundle2(pushop):
444 444 _pushbundle2(pushop)
445 445 _pushchangeset(pushop)
446 446 _pushsyncphase(pushop)
447 447 _pushobsolete(pushop)
448 448 _pushbookmark(pushop)
449 449 finally:
450 450 if lock is not None:
451 451 lock.release()
452 452 if pushop.trmanager:
453 453 pushop.trmanager.close()
454 454 finally:
455 455 if pushop.trmanager:
456 456 pushop.trmanager.release()
457 457 if locallock is not None:
458 458 locallock.release()
459 459 if localwlock is not None:
460 460 localwlock.release()
461 461
462 462 return pushop
463 463
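# Example call (setup hypothetical): push everything to a peer and check the
# changegroup result recorded on the returned pushoperation.
#
#     from mercurial import hg
#     other = hg.peer(repo, {}, 'ssh://example.com/repo')
#     pushop = push(repo, other, newbranch=True)
#     if pushop.cgresult == 0:
#         repo.ui.warn('push failed\n')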
464 464 # list of steps to perform discovery before push
465 465 pushdiscoveryorder = []
466 466
467 467 # Mapping between step name and function
468 468 #
469 469 # This exists to help extensions wrap steps if necessary
470 470 pushdiscoverymapping = {}
471 471
472 472 def pushdiscovery(stepname):
473 473 """decorator for function performing discovery before push
474 474
475 475 The function is added to the step -> function mapping and appended to the
476 476 list of steps. Beware that decorated functions will be added in order (this
477 477 may matter).
478 478
479 479 You can only use this decorator for a new step; if you want to wrap a step
480 480 from an extension, change the pushdiscoverymapping dictionary directly."""
481 481 def dec(func):
482 482 assert stepname not in pushdiscoverymapping
483 483 pushdiscoverymapping[stepname] = func
484 484 pushdiscoveryorder.append(stepname)
485 485 return func
486 486 return dec
487 487
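# Sketch of how an extension could register an extra discovery step through
# the decorator above (step name and function are hypothetical):
#
#     @pushdiscovery('mydata')
#     def _pushdiscoverymydata(pushop):
#         pushop.ui.debug('discovering mydata\n')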
488 488 def _pushdiscovery(pushop):
489 489 """Run all discovery steps"""
490 490 for stepname in pushdiscoveryorder:
491 491 step = pushdiscoverymapping[stepname]
492 492 step(pushop)
493 493
494 494 @pushdiscovery('changeset')
495 495 def _pushdiscoverychangeset(pushop):
496 496 """discover the changeset that need to be pushed"""
497 497 fci = discovery.findcommonincoming
498 498 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
499 499 common, inc, remoteheads = commoninc
500 500 fco = discovery.findcommonoutgoing
501 501 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
502 502 commoninc=commoninc, force=pushop.force)
503 503 pushop.outgoing = outgoing
504 504 pushop.remoteheads = remoteheads
505 505 pushop.incoming = inc
506 506
507 507 @pushdiscovery('phase')
508 508 def _pushdiscoveryphase(pushop):
509 509 """discover the phase that needs to be pushed
510 510
511 511 (computed for both success and failure case for changesets push)"""
512 512 outgoing = pushop.outgoing
513 513 unfi = pushop.repo.unfiltered()
514 514 remotephases = pushop.remote.listkeys('phases')
515 515 publishing = remotephases.get('publishing', False)
516 516 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
517 517 and remotephases # server supports phases
518 518 and not pushop.outgoing.missing # no changesets to be pushed
519 519 and publishing):
520 520 # When:
521 521 # - this is a subrepo push
522 522 # - and the remote supports phases
523 523 # - and no changesets are to be pushed
524 524 # - and the remote is publishing
525 525 # We may be in issue 3871 case!
526 526 # We drop the phase synchronisation that is normally done as
527 527 # a courtesy to publish, on the remote, changesets that are
528 528 # possibly draft locally.
529 529 remotephases = {'publishing': 'True'}
530 530 ana = phases.analyzeremotephases(pushop.repo,
531 531 pushop.fallbackheads,
532 532 remotephases)
533 533 pheads, droots = ana
534 534 extracond = ''
535 535 if not publishing:
536 536 extracond = ' and public()'
537 537 revset = 'heads((%%ln::%%ln) %s)' % extracond
538 538 # Get the list of all revs that are draft on the remote but public here.
539 539 # XXX Beware that the revset breaks if droots is not strictly
540 540 # XXX roots; we may want to ensure it is, but that is costly.
541 541 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
542 542 if not outgoing.missing:
543 543 future = fallback
544 544 else:
545 545 # add changesets we are going to push as draft
546 546 #
547 547 # should not be necessary for publishing servers, but because of an
548 548 # issue fixed in xxxxx we have to do it anyway.
549 549 fdroots = list(unfi.set('roots(%ln + %ln::)',
550 550 outgoing.missing, droots))
551 551 fdroots = [f.node() for f in fdroots]
552 552 future = list(unfi.set(revset, fdroots, pushop.futureheads))
553 553 pushop.outdatedphases = future
554 554 pushop.fallbackoutdatedphases = fallback
555 555
556 556 @pushdiscovery('obsmarker')
557 557 def _pushdiscoveryobsmarkers(pushop):
558 558 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
559 559 and pushop.repo.obsstore
560 560 and 'obsolete' in pushop.remote.listkeys('namespaces')):
561 561 repo = pushop.repo
562 562 # very naive computation that can be quite expensive on a big repo;
563 563 # however, evolution is currently slow on them anyway.
564 564 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
565 565 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
566 566
567 567 @pushdiscovery('bookmarks')
568 568 def _pushdiscoverybookmarks(pushop):
569 569 ui = pushop.ui
570 570 repo = pushop.repo.unfiltered()
571 571 remote = pushop.remote
572 572 ui.debug("checking for updated bookmarks\n")
573 573 ancestors = ()
574 574 if pushop.revs:
575 575 revnums = map(repo.changelog.rev, pushop.revs)
576 576 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
577 577 remotebookmark = remote.listkeys('bookmarks')
578 578
579 579 explicit = set(pushop.bookmarks)
580 580
581 581 comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
582 582 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
583 583 for b, scid, dcid in advsrc:
584 584 if b in explicit:
585 585 explicit.remove(b)
586 586 if not ancestors or repo[scid].rev() in ancestors:
587 587 pushop.outbookmarks.append((b, dcid, scid))
588 588 # search for added bookmarks
589 589 for b, scid, dcid in addsrc:
590 590 if b in explicit:
591 591 explicit.remove(b)
592 592 pushop.outbookmarks.append((b, '', scid))
593 593 # search for overwritten bookmarks
594 594 for b, scid, dcid in advdst + diverge + differ:
595 595 if b in explicit:
596 596 explicit.remove(b)
597 597 pushop.outbookmarks.append((b, dcid, scid))
598 598 # search for bookmarks to delete
599 599 for b, scid, dcid in adddst:
600 600 if b in explicit:
601 601 explicit.remove(b)
602 602 # treat as "deleted locally"
603 603 pushop.outbookmarks.append((b, dcid, ''))
604 604 # identical bookmarks shouldn't get reported
605 605 for b, scid, dcid in same:
606 606 if b in explicit:
607 607 explicit.remove(b)
608 608
609 609 if explicit:
610 610 explicit = sorted(explicit)
611 611 # we should probably list all of them
612 612 ui.warn(_('bookmark %s does not exist on the local '
613 613 'or remote repository!\n') % explicit[0])
614 614 pushop.bkresult = 2
615 615
616 616 pushop.outbookmarks.sort()
617 617
618 618 def _pushcheckoutgoing(pushop):
619 619 outgoing = pushop.outgoing
620 620 unfi = pushop.repo.unfiltered()
621 621 if not outgoing.missing:
622 622 # nothing to push
623 623 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
624 624 return False
625 625 # something to push
626 626 if not pushop.force:
627 627 # if repo.obsstore == False --> no obsolete
628 628 # then, save the iteration
629 629 if unfi.obsstore:
630 630 # these messages are here for 80-char-limit reasons
631 631 mso = _("push includes obsolete changeset: %s!")
632 632 mst = {"unstable": _("push includes unstable changeset: %s!"),
633 633 "bumped": _("push includes bumped changeset: %s!"),
634 634 "divergent": _("push includes divergent changeset: %s!")}
635 635 # If there is at least one obsolete or unstable
636 636 # changeset in missing, at least one of the
637 637 # missingheads will be obsolete or unstable. So
638 638 # checking heads only is ok.
639 639 for node in outgoing.missingheads:
640 640 ctx = unfi[node]
641 641 if ctx.obsolete():
642 642 raise error.Abort(mso % ctx)
643 643 elif ctx.troubled():
644 644 raise error.Abort(mst[ctx.troubles()[0]] % ctx)
645 645
646 646 discovery.checkheads(pushop)
647 647 return True
648 648
649 649 # List of names of steps to perform for an outgoing bundle2, order matters.
650 650 b2partsgenorder = []
651 651
652 652 # Mapping between step name and function
653 653 #
654 654 # This exists to help extensions wrap steps if necessary
655 655 b2partsgenmapping = {}
656 656
657 657 def b2partsgenerator(stepname, idx=None):
658 658 """decorator for function generating bundle2 part
659 659
660 660 The function is added to the step -> function mapping and appended to the
661 661 list of steps. Beware that decorated functions will be added in order
662 662 (this may matter).
663 663
664 664 You can only use this decorator for new steps; if you want to wrap a step
665 665 from an extension, change the b2partsgenmapping dictionary directly."""
666 666 def dec(func):
667 667 assert stepname not in b2partsgenmapping
668 668 b2partsgenmapping[stepname] = func
669 669 if idx is None:
670 670 b2partsgenorder.append(stepname)
671 671 else:
672 672 b2partsgenorder.insert(idx, stepname)
673 673 return func
674 674 return dec
675 675
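# Sketch of a hypothetical extension part generator inserted at the front of
# the order via ``idx``; returning a callable registers it as a reply handler
# run by _pushbundle2 below:
#
#     @b2partsgenerator('mypart', idx=0)
#     def _pushb2mypart(pushop, bundler):
#         part = bundler.newpart('mypart', data='payload')
#         def handlereply(op):
#             pushop.ui.debug('mypart reply received\n')
#         return handlereply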
676 676 def _pushb2ctxcheckheads(pushop, bundler):
677 677 """Generate race condition checking parts
678 678
679 679 Exists as an independent function to aid extensions
680 680 """
681 681 if not pushop.force:
682 682 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
683 683
684 684 @b2partsgenerator('changeset')
685 685 def _pushb2ctx(pushop, bundler):
686 686 """handle changegroup push through bundle2
687 687
688 688 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
689 689 """
690 690 if 'changesets' in pushop.stepsdone:
691 691 return
692 692 pushop.stepsdone.add('changesets')
693 693 # Send known heads to the server for race detection.
694 694 if not _pushcheckoutgoing(pushop):
695 695 return
696 696 pushop.repo.prepushoutgoinghooks(pushop.repo,
697 697 pushop.remote,
698 698 pushop.outgoing)
699 699
700 700 _pushb2ctxcheckheads(pushop, bundler)
701 701
702 702 b2caps = bundle2.bundle2caps(pushop.remote)
703 703 version = None
704 704 cgversions = b2caps.get('changegroup')
705 705 if not cgversions: # 3.1 and 3.2 ship with an empty value
706 706 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
707 707 pushop.outgoing)
708 708 else:
709 709 cgversions = [v for v in cgversions
710 710 if v in changegroup.supportedversions(pushop.repo)]
711 711 if not cgversions:
712 712 raise ValueError(_('no common changegroup version'))
713 713 version = max(cgversions)
714 714 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
715 715 pushop.outgoing,
716 716 version=version)
717 717 cgpart = bundler.newpart('changegroup', data=cg)
718 718 if version is not None:
719 719 cgpart.addparam('version', version)
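# (added by this changeset) Mark the changegroup part so the receiving side
# knows the manifests it carries are treemanifests; per the commit message,
# this mirrors the param already set on changegroups generated for pull.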
720 if 'treemanifest' in pushop.repo.requirements:
721 cgpart.addparam('treemanifest', '1')
720 722 def handlereply(op):
721 723 """extract addchangegroup returns from server reply"""
722 724 cgreplies = op.records.getreplies(cgpart.id)
723 725 assert len(cgreplies['changegroup']) == 1
724 726 pushop.cgresult = cgreplies['changegroup'][0]['return']
725 727 return handlereply
726 728
727 729 @b2partsgenerator('phase')
728 730 def _pushb2phases(pushop, bundler):
729 731 """handle phase push through bundle2"""
730 732 if 'phases' in pushop.stepsdone:
731 733 return
732 734 b2caps = bundle2.bundle2caps(pushop.remote)
733 735 if 'pushkey' not in b2caps:
734 736 return
735 737 pushop.stepsdone.add('phases')
736 738 part2node = []
737 739
738 740 def handlefailure(pushop, exc):
739 741 targetid = int(exc.partid)
740 742 for partid, node in part2node:
741 743 if partid == targetid:
742 744 raise error.Abort(_('updating %s to public failed') % node)
743 745
744 746 enc = pushkey.encode
745 747 for newremotehead in pushop.outdatedphases:
746 748 part = bundler.newpart('pushkey')
747 749 part.addparam('namespace', enc('phases'))
748 750 part.addparam('key', enc(newremotehead.hex()))
749 751 part.addparam('old', enc(str(phases.draft)))
750 752 part.addparam('new', enc(str(phases.public)))
751 753 part2node.append((part.id, newremotehead))
752 754 pushop.pkfailcb[part.id] = handlefailure
753 755
754 756 def handlereply(op):
755 757 for partid, node in part2node:
756 758 partrep = op.records.getreplies(partid)
757 759 results = partrep['pushkey']
758 760 assert len(results) <= 1
759 761 msg = None
760 762 if not results:
761 763 msg = _('server ignored update of %s to public!\n') % node
762 764 elif not int(results[0]['return']):
763 765 msg = _('updating %s to public failed!\n') % node
764 766 if msg is not None:
765 767 pushop.ui.warn(msg)
766 768 return handlereply
767 769
768 770 @b2partsgenerator('obsmarkers')
769 771 def _pushb2obsmarkers(pushop, bundler):
770 772 if 'obsmarkers' in pushop.stepsdone:
771 773 return
772 774 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
773 775 if obsolete.commonversion(remoteversions) is None:
774 776 return
775 777 pushop.stepsdone.add('obsmarkers')
776 778 if pushop.outobsmarkers:
777 779 markers = sorted(pushop.outobsmarkers)
778 780 buildobsmarkerspart(bundler, markers)
779 781
780 782 @b2partsgenerator('bookmarks')
781 783 def _pushb2bookmarks(pushop, bundler):
782 784 """handle bookmark push through bundle2"""
783 785 if 'bookmarks' in pushop.stepsdone:
784 786 return
785 787 b2caps = bundle2.bundle2caps(pushop.remote)
786 788 if 'pushkey' not in b2caps:
787 789 return
788 790 pushop.stepsdone.add('bookmarks')
789 791 part2book = []
790 792 enc = pushkey.encode
791 793
792 794 def handlefailure(pushop, exc):
793 795 targetid = int(exc.partid)
794 796 for partid, book, action in part2book:
795 797 if partid == targetid:
796 798 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
797 799 # we should not be called for parts we did not generate
798 800 assert False
799 801
800 802 for book, old, new in pushop.outbookmarks:
801 803 part = bundler.newpart('pushkey')
802 804 part.addparam('namespace', enc('bookmarks'))
803 805 part.addparam('key', enc(book))
804 806 part.addparam('old', enc(old))
805 807 part.addparam('new', enc(new))
806 808 action = 'update'
807 809 if not old:
808 810 action = 'export'
809 811 elif not new:
810 812 action = 'delete'
811 813 part2book.append((part.id, book, action))
812 814 pushop.pkfailcb[part.id] = handlefailure
813 815
814 816 def handlereply(op):
815 817 ui = pushop.ui
816 818 for partid, book, action in part2book:
817 819 partrep = op.records.getreplies(partid)
818 820 results = partrep['pushkey']
819 821 assert len(results) <= 1
820 822 if not results:
821 823 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
822 824 else:
823 825 ret = int(results[0]['return'])
824 826 if ret:
825 827 ui.status(bookmsgmap[action][0] % book)
826 828 else:
827 829 ui.warn(bookmsgmap[action][1] % book)
828 830 if pushop.bkresult is not None:
829 831 pushop.bkresult = 1
830 832 return handlereply
831 833
832 834
833 835 def _pushbundle2(pushop):
834 836 """push data to the remote using bundle2
835 837
836 838 The only currently supported type of data is changegroup but this will
837 839 evolve in the future."""
838 840 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
839 841 pushback = (pushop.trmanager
840 842 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
841 843
842 844 # create reply capability
843 845 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
844 846 allowpushback=pushback))
845 847 bundler.newpart('replycaps', data=capsblob)
846 848 replyhandlers = []
847 849 for partgenname in b2partsgenorder:
848 850 partgen = b2partsgenmapping[partgenname]
849 851 ret = partgen(pushop, bundler)
850 852 if callable(ret):
851 853 replyhandlers.append(ret)
852 854 # do not push if nothing to push
853 855 if bundler.nbparts <= 1:
854 856 return
855 857 stream = util.chunkbuffer(bundler.getchunks())
856 858 try:
857 859 try:
858 860 reply = pushop.remote.unbundle(stream, ['force'], 'push')
859 861 except error.BundleValueError as exc:
860 862 raise error.Abort('missing support for %s' % exc)
861 863 try:
862 864 trgetter = None
863 865 if pushback:
864 866 trgetter = pushop.trmanager.transaction
865 867 op = bundle2.processbundle(pushop.repo, reply, trgetter)
866 868 except error.BundleValueError as exc:
867 869 raise error.Abort('missing support for %s' % exc)
868 870 except bundle2.AbortFromPart as exc:
869 871 pushop.ui.status(_('remote: %s\n') % exc)
870 872 raise error.Abort(_('push failed on remote'), hint=exc.hint)
871 873 except error.PushkeyFailed as exc:
872 874 partid = int(exc.partid)
873 875 if partid not in pushop.pkfailcb:
874 876 raise
875 877 pushop.pkfailcb[partid](pushop, exc)
876 878 for rephand in replyhandlers:
877 879 rephand(op)
878 880
879 881 def _pushchangeset(pushop):
880 882 """Make the actual push of changeset bundle to remote repo"""
881 883 if 'changesets' in pushop.stepsdone:
882 884 return
883 885 pushop.stepsdone.add('changesets')
884 886 if not _pushcheckoutgoing(pushop):
885 887 return
886 888 pushop.repo.prepushoutgoinghooks(pushop.repo,
887 889 pushop.remote,
888 890 pushop.outgoing)
889 891 outgoing = pushop.outgoing
890 892 unbundle = pushop.remote.capable('unbundle')
891 893 # TODO: get bundlecaps from remote
892 894 bundlecaps = None
893 895 # create a changegroup from local
894 896 if pushop.revs is None and not (outgoing.excluded
895 897 or pushop.repo.changelog.filteredrevs):
896 898 # push everything,
897 899 # use the fast path, no race possible on push
898 900 bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
899 901 cg = changegroup.getsubset(pushop.repo,
900 902 outgoing,
901 903 bundler,
902 904 'push',
903 905 fastpath=True)
904 906 else:
905 907 cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
906 908 bundlecaps)
907 909
908 910 # apply changegroup to remote
909 911 if unbundle:
910 912 # local repo finds heads on server, finds out what
911 913 # revs it must push. once revs transferred, if server
912 914 # finds it has different heads (someone else won
913 915 # commit/push race), server aborts.
914 916 if pushop.force:
915 917 remoteheads = ['force']
916 918 else:
917 919 remoteheads = pushop.remoteheads
918 920 # ssh: return remote's addchangegroup()
919 921 # http: return remote's addchangegroup() or 0 for error
920 922 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
921 923 pushop.repo.url())
922 924 else:
923 925 # we return an integer indicating remote head count
924 926 # change
925 927 pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
926 928 pushop.repo.url())
927 929
928 930 def _pushsyncphase(pushop):
929 931 """synchronise phase information locally and remotely"""
930 932 cheads = pushop.commonheads
931 933 # even when we don't push, exchanging phase data is useful
932 934 remotephases = pushop.remote.listkeys('phases')
933 935 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
934 936 and remotephases # server supports phases
935 937 and pushop.cgresult is None # nothing was pushed
936 938 and remotephases.get('publishing', False)):
937 939 # When:
938 940 # - this is a subrepo push
939 941 # - and the remote supports phases
940 942 # - and no changesets were pushed
941 943 # - and the remote is publishing
942 944 # We may be in issue 3871 case!
943 945 # We drop the phase synchronisation that is normally done as
944 946 # a courtesy to publish, on the remote, changesets that are
945 947 # possibly draft locally.
946 948 remotephases = {'publishing': 'True'}
947 949 if not remotephases: # old server, or public-only reply from non-publishing
948 950 _localphasemove(pushop, cheads)
949 951 # don't push any phase data as there is nothing to push
950 952 else:
951 953 ana = phases.analyzeremotephases(pushop.repo, cheads,
952 954 remotephases)
953 955 pheads, droots = ana
954 956 ### Apply remote phase on local
955 957 if remotephases.get('publishing', False):
956 958 _localphasemove(pushop, cheads)
957 959 else: # publish = False
958 960 _localphasemove(pushop, pheads)
959 961 _localphasemove(pushop, cheads, phases.draft)
960 962 ### Apply local phase on remote
961 963
962 964 if pushop.cgresult:
963 965 if 'phases' in pushop.stepsdone:
964 966 # phases already pushed through bundle2
965 967 return
966 968 outdated = pushop.outdatedphases
967 969 else:
968 970 outdated = pushop.fallbackoutdatedphases
969 971
970 972 pushop.stepsdone.add('phases')
971 973
972 974 # filter heads already turned public by the push
973 975 outdated = [c for c in outdated if c.node() not in pheads]
974 976 # fallback to independent pushkey command
975 977 for newremotehead in outdated:
976 978 r = pushop.remote.pushkey('phases',
977 979 newremotehead.hex(),
978 980 str(phases.draft),
979 981 str(phases.public))
980 982 if not r:
981 983 pushop.ui.warn(_('updating %s to public failed!\n')
982 984 % newremotehead)
983 985
984 986 def _localphasemove(pushop, nodes, phase=phases.public):
985 987 """move <nodes> to <phase> in the local source repo"""
986 988 if pushop.trmanager:
987 989 phases.advanceboundary(pushop.repo,
988 990 pushop.trmanager.transaction(),
989 991 phase,
990 992 nodes)
991 993 else:
992 994 # repo is not locked, do not change any phases!
993 995 # Informs the user that phases should have been moved when
994 996 # applicable.
995 997 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
996 998 phasestr = phases.phasenames[phase]
997 999 if actualmoves:
998 1000 pushop.ui.status(_('cannot lock source repo, skipping '
999 1001 'local %s phase update\n') % phasestr)
1000 1002
1001 1003 def _pushobsolete(pushop):
1002 1004 """utility function to push obsolete markers to a remote"""
1003 1005 if 'obsmarkers' in pushop.stepsdone:
1004 1006 return
1005 1007 repo = pushop.repo
1006 1008 remote = pushop.remote
1007 1009 pushop.stepsdone.add('obsmarkers')
1008 1010 if pushop.outobsmarkers:
1009 1011 pushop.ui.debug('try to push obsolete markers to remote\n')
1010 1012 rslts = []
1011 1013 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1012 1014 for key in sorted(remotedata, reverse=True):
1013 1015 # reverse sort to ensure we end with dump0
1014 1016 data = remotedata[key]
1015 1017 rslts.append(remote.pushkey('obsolete', key, '', data))
1016 1018 if [r for r in rslts if not r]:
1017 1019 msg = _('failed to push some obsolete markers!\n')
1018 1020 repo.ui.warn(msg)
1019 1021
1020 1022 def _pushbookmark(pushop):
1021 1023 """Update bookmark position on remote"""
1022 1024 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1023 1025 return
1024 1026 pushop.stepsdone.add('bookmarks')
1025 1027 ui = pushop.ui
1026 1028 remote = pushop.remote
1027 1029
1028 1030 for b, old, new in pushop.outbookmarks:
1029 1031 action = 'update'
1030 1032 if not old:
1031 1033 action = 'export'
1032 1034 elif not new:
1033 1035 action = 'delete'
1034 1036 if remote.pushkey('bookmarks', b, old, new):
1035 1037 ui.status(bookmsgmap[action][0] % b)
1036 1038 else:
1037 1039 ui.warn(bookmsgmap[action][1] % b)
1038 1040 # discovery can have set the value from an invalid entry
1039 1041 if pushop.bkresult is not None:
1040 1042 pushop.bkresult = 1
1041 1043
1042 1044 class pulloperation(object):
1043 1045 """A object that represent a single pull operation
1044 1046
1045 1047 It purpose is to carry pull related state and very common operation.
1046 1048
1047 1049 A new should be created at the beginning of each pull and discarded
1048 1050 afterward.
1049 1051 """
1050 1052
1051 1053 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1052 1054 remotebookmarks=None, streamclonerequested=None):
1053 1055 # repo we pull into
1054 1056 self.repo = repo
1055 1057 # repo we pull from
1056 1058 self.remote = remote
1057 1059 # revisions we try to pull (None is "all")
1058 1060 self.heads = heads
1059 1061 # bookmarks pulled explicitly
1060 1062 self.explicitbookmarks = bookmarks
1061 1063 # do we force pull?
1062 1064 self.force = force
1063 1065 # whether a streaming clone was requested
1064 1066 self.streamclonerequested = streamclonerequested
1065 1067 # transaction manager
1066 1068 self.trmanager = None
1067 1069 # set of common changesets between local and remote before pull
1068 1070 self.common = None
1069 1071 # set of pulled heads
1070 1072 self.rheads = None
1071 1073 # list of missing changesets to fetch remotely
1072 1074 self.fetch = None
1073 1075 # remote bookmarks data
1074 1076 self.remotebookmarks = remotebookmarks
1075 1077 # result of changegroup pulling (used as return code by pull)
1076 1078 self.cgresult = None
1077 1079 # list of steps already done
1078 1080 self.stepsdone = set()
1079 1081 # Whether we attempted a clone from pre-generated bundles.
1080 1082 self.clonebundleattempted = False
1081 1083
1082 1084 @util.propertycache
1083 1085 def pulledsubset(self):
1084 1086 """heads of the set of changeset target by the pull"""
1085 1087 # compute target subset
1086 1088 if self.heads is None:
1087 1089 # We pulled everything possible
1088 1090 # sync on everything common
1089 1091 c = set(self.common)
1090 1092 ret = list(self.common)
1091 1093 for n in self.rheads:
1092 1094 if n not in c:
1093 1095 ret.append(n)
1094 1096 return ret
1095 1097 else:
1096 1098 # We pulled a specific subset
1097 1099 # sync on this subset
1098 1100 return self.heads
1099 1101
1100 1102 @util.propertycache
1101 1103 def canusebundle2(self):
1102 1104 return _canusebundle2(self)
1103 1105
1104 1106 @util.propertycache
1105 1107 def remotebundle2caps(self):
1106 1108 return bundle2.bundle2caps(self.remote)
1107 1109
1108 1110 def gettransaction(self):
1109 1111 # deprecated; talk to trmanager directly
1110 1112 return self.trmanager.transaction()
1111 1113
1112 1114 class transactionmanager(object):
1113 1115 """An object to manage the life cycle of a transaction
1114 1116
1115 1117 It creates the transaction on demand and calls the appropriate hooks when
1116 1118 closing the transaction."""
1117 1119 def __init__(self, repo, source, url):
1118 1120 self.repo = repo
1119 1121 self.source = source
1120 1122 self.url = url
1121 1123 self._tr = None
1122 1124
1123 1125 def transaction(self):
1124 1126 """Return an open transaction object, constructing if necessary"""
1125 1127 if not self._tr:
1126 1128 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1127 1129 self._tr = self.repo.transaction(trname)
1128 1130 self._tr.hookargs['source'] = self.source
1129 1131 self._tr.hookargs['url'] = self.url
1130 1132 return self._tr
1131 1133
1132 1134 def close(self):
1133 1135 """close transaction if created"""
1134 1136 if self._tr is not None:
1135 1137 self._tr.close()
1136 1138
1137 1139 def release(self):
1138 1140 """release transaction if created"""
1139 1141 if self._tr is not None:
1140 1142 self._tr.release()
1141 1143
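# Sketch of the intended life cycle (url hypothetical): the transaction is
# created lazily, so callers that end up transferring nothing never open one.
#
#     trmanager = transactionmanager(repo, 'pull', 'https://example.com/repo')
#     try:
#         tr = trmanager.transaction()  # opened on first use
#         # ... apply incoming data inside tr ...
#         trmanager.close()
#     finally:
#         trmanager.release()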
1142 1144 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1143 1145 streamclonerequested=None):
1144 1146 """Fetch repository data from a remote.
1145 1147
1146 1148 This is the main function used to retrieve data from a remote repository.
1147 1149
1148 1150 ``repo`` is the local repository to clone into.
1149 1151 ``remote`` is a peer instance.
1150 1152 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1151 1153 default) means to pull everything from the remote.
1152 1154 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1153 1155 default, all remote bookmarks are pulled.
1154 1156 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1155 1157 initialization.
1156 1158 ``streamclonerequested`` is a boolean indicating whether a "streaming
1157 1159 clone" is requested. A "streaming clone" is essentially a raw file copy
1158 1160 of revlogs from the server. This only works when the local repository is
1159 1161 empty. The default value of ``None`` means to respect the server
1160 1162 configuration for preferring stream clones.
1161 1163
1162 1164 Returns the ``pulloperation`` created for this pull.
1163 1165 """
1164 1166 if opargs is None:
1165 1167 opargs = {}
1166 1168 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1167 1169 streamclonerequested=streamclonerequested, **opargs)
1168 1170 if pullop.remote.local():
1169 1171 missing = set(pullop.remote.requirements) - pullop.repo.supported
1170 1172 if missing:
1171 1173 msg = _("required features are not"
1172 1174 " supported in the destination:"
1173 1175 " %s") % (', '.join(sorted(missing)))
1174 1176 raise error.Abort(msg)
1175 1177
1176 1178 lock = pullop.repo.lock()
1177 1179 try:
1178 1180 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1179 1181 streamclone.maybeperformlegacystreamclone(pullop)
1180 1182 # This should ideally be in _pullbundle2(). However, it needs to run
1181 1183 # before discovery to avoid extra work.
1182 1184 _maybeapplyclonebundle(pullop)
1183 1185 _pulldiscovery(pullop)
1184 1186 if pullop.canusebundle2:
1185 1187 _pullbundle2(pullop)
1186 1188 _pullchangeset(pullop)
1187 1189 _pullphase(pullop)
1188 1190 _pullbookmarks(pullop)
1189 1191 _pullobsolete(pullop)
1190 1192 pullop.trmanager.close()
1191 1193 finally:
1192 1194 pullop.trmanager.release()
1193 1195 lock.release()
1194 1196
1195 1197 return pullop
1196 1198
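# Example call (setup hypothetical): pull everything from a peer and use the
# changegroup result recorded on the returned pulloperation.
#
#     from mercurial import hg
#     other = hg.peer(repo, {}, 'https://example.com/repo')
#     pullop = pull(repo, other)
#     if pullop.cgresult == 0:
#         repo.ui.status('no changes found\n')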
1197 1199 # list of steps to perform discovery before pull
1198 1200 pulldiscoveryorder = []
1199 1201
1200 1202 # Mapping between step name and function
1201 1203 #
1202 1204 # This exists to help extensions wrap steps if necessary
1203 1205 pulldiscoverymapping = {}
1204 1206
1205 1207 def pulldiscovery(stepname):
1206 1208 """decorator for function performing discovery before pull
1207 1209
1208 1210 The function is added to the step -> function mapping and appended to the
1209 1211 list of steps. Beware that decorated functions will be added in order (this
1210 1212 may matter).
1211 1213
1212 1214 You can only use this decorator for a new step; if you want to wrap a step
1213 1215 from an extension, change the pulldiscoverymapping dictionary directly."""
1214 1216 def dec(func):
1215 1217 assert stepname not in pulldiscoverymapping
1216 1218 pulldiscoverymapping[stepname] = func
1217 1219 pulldiscoveryorder.append(stepname)
1218 1220 return func
1219 1221 return dec
1220 1222
1221 1223 def _pulldiscovery(pullop):
1222 1224 """Run all discovery steps"""
1223 1225 for stepname in pulldiscoveryorder:
1224 1226 step = pulldiscoverymapping[stepname]
1225 1227 step(pullop)
1226 1228
1227 1229 @pulldiscovery('b1:bookmarks')
1228 1230 def _pullbookmarkbundle1(pullop):
1229 1231 """fetch bookmark data in bundle1 case
1230 1232
1231 1233 If not using bundle2, we have to fetch bookmarks before changeset
1232 1234 discovery to reduce the chance and impact of race conditions."""
1233 1235 if pullop.remotebookmarks is not None:
1234 1236 return
1235 1237 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1236 1238 # all known bundle2 servers now support listkeys, but let's be nice with
1237 1239 # new implementations.
1238 1240 return
1239 1241 pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
1240 1242
1241 1243
1242 1244 @pulldiscovery('changegroup')
1243 1245 def _pulldiscoverychangegroup(pullop):
1244 1246 """discovery phase for the pull
1245 1247
1246 1248 Currently handles changeset discovery only; will change to handle all
1247 1249 discovery at some point."""
1248 1250 tmp = discovery.findcommonincoming(pullop.repo,
1249 1251 pullop.remote,
1250 1252 heads=pullop.heads,
1251 1253 force=pullop.force)
1252 1254 common, fetch, rheads = tmp
1253 1255 nm = pullop.repo.unfiltered().changelog.nodemap
1254 1256 if fetch and rheads:
1255 1257 # If a remote head is filtered locally, let's drop it from the unknown
1256 1258 # remote heads and put it back in common.
1257 1259 #
1258 1260 # This is a hackish solution to catch most of the "common but locally
1259 1261 # hidden" situations. We do not perform discovery on the unfiltered
1260 1262 # repository because it ends up doing a pathological amount of round
1261 1263 # trips for a huge amount of changesets we do not care about.
1262 1264 #
1263 1265 # If a set of such "common but filtered" changesets exists on the server
1264 1266 # but does not include a remote head, we'll not be able to detect it.
1265 1267 scommon = set(common)
1266 1268 filteredrheads = []
1267 1269 for n in rheads:
1268 1270 if n in nm:
1269 1271 if n not in scommon:
1270 1272 common.append(n)
1271 1273 else:
1272 1274 filteredrheads.append(n)
1273 1275 if not filteredrheads:
1274 1276 fetch = []
1275 1277 rheads = filteredrheads
1276 1278 pullop.common = common
1277 1279 pullop.fetch = fetch
1278 1280 pullop.rheads = rheads
1279 1281
1280 1282 def _pullbundle2(pullop):
1281 1283 """pull data using bundle2
1282 1284
1283 1285 For now, the only supported data are changegroups."""
1284 1286 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
1285 1287
1286 1288 streaming, streamreqs = streamclone.canperformstreamclone(pullop)
1287 1289
1288 1290 # pulling changegroup
1289 1291 pullop.stepsdone.add('changegroup')
1290 1292
1291 1293 kwargs['common'] = pullop.common
1292 1294 kwargs['heads'] = pullop.heads or pullop.rheads
1293 1295 kwargs['cg'] = pullop.fetch
1294 1296 if 'listkeys' in pullop.remotebundle2caps:
1295 1297 kwargs['listkeys'] = ['phase']
1296 1298 if pullop.remotebookmarks is None:
1297 1299 # make sure to always include bookmark data when migrating
1298 1300 # `hg incoming --bundle` to using this function.
1299 1301 kwargs['listkeys'].append('bookmarks')
1300 1302
1301 1303 # If this is a full pull / clone and the server supports the clone bundles
1302 1304 # feature, tell the server whether we attempted a clone bundle. The
1303 1305 # presence of this flag indicates the client supports clone bundles. This
1304 1306 # will enable the server to treat clients that support clone bundles
1305 1307 # differently from those that don't.
1306 1308 if (pullop.remote.capable('clonebundles')
1307 1309 and pullop.heads is None and list(pullop.common) == [nullid]):
1308 1310 kwargs['cbattempted'] = pullop.clonebundleattempted
1309 1311
1310 1312 if streaming:
1311 1313 pullop.repo.ui.status(_('streaming all changes\n'))
1312 1314 elif not pullop.fetch:
1313 1315 pullop.repo.ui.status(_("no changes found\n"))
1314 1316 pullop.cgresult = 0
1315 1317 else:
1316 1318 if pullop.heads is None and list(pullop.common) == [nullid]:
1317 1319 pullop.repo.ui.status(_("requesting all changes\n"))
1318 1320 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1319 1321 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1320 1322 if obsolete.commonversion(remoteversions) is not None:
1321 1323 kwargs['obsmarkers'] = True
1322 1324 pullop.stepsdone.add('obsmarkers')
1323 1325 _pullbundle2extraprepare(pullop, kwargs)
1324 1326 bundle = pullop.remote.getbundle('pull', **kwargs)
1325 1327 try:
1326 1328 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
1327 1329 except error.BundleValueError as exc:
1328 1330 raise error.Abort('missing support for %s' % exc)
1329 1331
1330 1332 if pullop.fetch:
1331 1333 results = [cg['return'] for cg in op.records['changegroup']]
1332 1334 pullop.cgresult = changegroup.combineresults(results)
1333 1335
1334 1336 # processing phases change
1335 1337 for namespace, value in op.records['listkeys']:
1336 1338 if namespace == 'phases':
1337 1339 _pullapplyphases(pullop, value)
1338 1340
1339 1341 # processing bookmark update
1340 1342 for namespace, value in op.records['listkeys']:
1341 1343 if namespace == 'bookmarks':
1342 1344 pullop.remotebookmarks = value
1343 1345
1344 1346 # bookmark data were either already there or pulled in the bundle
1345 1347 if pullop.remotebookmarks is not None:
1346 1348 _pullbookmarks(pullop)
1347 1349
1348 1350 def _pullbundle2extraprepare(pullop, kwargs):
1349 1351 """hook function so that extensions can extend the getbundle call"""
1350 1352 pass
1351 1353
1352 1354 def _pullchangeset(pullop):
1353 1355 """pull changeset from unbundle into the local repo"""
1354 1356 # We delay opening the transaction as late as possible so we
1355 1357 # don't open a transaction for nothing and don't break future useful
1356 1358 # rollback calls
1357 1359 if 'changegroup' in pullop.stepsdone:
1358 1360 return
1359 1361 pullop.stepsdone.add('changegroup')
1360 1362 if not pullop.fetch:
1361 1363 pullop.repo.ui.status(_("no changes found\n"))
1362 1364 pullop.cgresult = 0
1363 1365 return
1364 1366 pullop.gettransaction()
1365 1367 if pullop.heads is None and list(pullop.common) == [nullid]:
1366 1368 pullop.repo.ui.status(_("requesting all changes\n"))
1367 1369 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1368 1370 # issue1320, avoid a race if remote changed after discovery
1369 1371 pullop.heads = pullop.rheads
1370 1372
1371 1373 if pullop.remote.capable('getbundle'):
1372 1374 # TODO: get bundlecaps from remote
1373 1375 cg = pullop.remote.getbundle('pull', common=pullop.common,
1374 1376 heads=pullop.heads or pullop.rheads)
1375 1377 elif pullop.heads is None:
1376 1378 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1377 1379 elif not pullop.remote.capable('changegroupsubset'):
1378 1380 raise error.Abort(_("partial pull cannot be done because "
1379 1381 "other repository doesn't support "
1380 1382 "changegroupsubset."))
1381 1383 else:
1382 1384 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1383 1385 pullop.cgresult = cg.apply(pullop.repo, 'pull', pullop.remote.url())
1384 1386
1385 1387 def _pullphase(pullop):
1386 1388 # Get remote phases data from remote
1387 1389 if 'phases' in pullop.stepsdone:
1388 1390 return
1389 1391 remotephases = pullop.remote.listkeys('phases')
1390 1392 _pullapplyphases(pullop, remotephases)
1391 1393
1392 1394 def _pullapplyphases(pullop, remotephases):
1393 1395 """apply phase movement from observed remote state"""
1394 1396 if 'phases' in pullop.stepsdone:
1395 1397 return
1396 1398 pullop.stepsdone.add('phases')
1397 1399 publishing = bool(remotephases.get('publishing', False))
1398 1400 if remotephases and not publishing:
1399 1401 # remote is new and non-publishing
1400 1402 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1401 1403 pullop.pulledsubset,
1402 1404 remotephases)
1403 1405 dheads = pullop.pulledsubset
1404 1406 else:
1405 1407 # Remote is old or publishing; all common changesets
1406 1408 # should be seen as public
1407 1409 pheads = pullop.pulledsubset
1408 1410 dheads = []
1409 1411 unfi = pullop.repo.unfiltered()
1410 1412 phase = unfi._phasecache.phase
1411 1413 rev = unfi.changelog.nodemap.get
1412 1414 public = phases.public
1413 1415 draft = phases.draft
1414 1416
1415 1417 # exclude changesets already public locally and update the others
1416 1418 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1417 1419 if pheads:
1418 1420 tr = pullop.gettransaction()
1419 1421 phases.advanceboundary(pullop.repo, tr, public, pheads)
1420 1422
1421 1423 # exclude changesets already draft locally and update the others
1422 1424 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1423 1425 if dheads:
1424 1426 tr = pullop.gettransaction()
1425 1427 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1426 1428
1427 1429 def _pullbookmarks(pullop):
1428 1430 """process the remote bookmark information to update the local one"""
1429 1431 if 'bookmarks' in pullop.stepsdone:
1430 1432 return
1431 1433 pullop.stepsdone.add('bookmarks')
1432 1434 repo = pullop.repo
1433 1435 remotebookmarks = pullop.remotebookmarks
1434 1436 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1435 1437 pullop.remote.url(),
1436 1438 pullop.gettransaction,
1437 1439 explicit=pullop.explicitbookmarks)
1438 1440
1439 1441 def _pullobsolete(pullop):
1440 1442 """utility function to pull obsolete markers from a remote
1441 1443
1442 1444 `gettransaction` is a function that returns the pull transaction, creating
1443 1445 one if necessary. We return the transaction to inform the calling code that
1444 1446 a new transaction has been created (when applicable).
1445 1447
1446 1448 Exists mostly to allow overriding for experimentation purposes"""
1447 1449 if 'obsmarkers' in pullop.stepsdone:
1448 1450 return
1449 1451 pullop.stepsdone.add('obsmarkers')
1450 1452 tr = None
1451 1453 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1452 1454 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1453 1455 remoteobs = pullop.remote.listkeys('obsolete')
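# Markers arrive base85-encoded under pushkey entries named 'dump0',
# 'dump1', and so on; the presence of 'dump0' signals that there is
# anything to apply at all.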
1454 1456 if 'dump0' in remoteobs:
1455 1457 tr = pullop.gettransaction()
1456 1458 markers = []
1457 1459 for key in sorted(remoteobs, reverse=True):
1458 1460 if key.startswith('dump'):
1459 1461 data = base85.b85decode(remoteobs[key])
1460 1462 version, newmarks = obsolete._readmarkers(data)
1461 1463 markers += newmarks
1462 1464 if markers:
1463 1465 pullop.repo.obsstore.add(tr, markers)
1464 1466 pullop.repo.invalidatevolatilesets()
1465 1467 return tr
1466 1468
1467 1469 def caps20to10(repo):
1468 1470 """return a set with appropriate options to use bundle20 during getbundle"""
1469 1471 caps = set(['HG20'])
1470 1472 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1471 1473 caps.add('bundle2=' + urllib.quote(capsblob))
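# The result looks like set(['HG20', 'bundle2=<url-quoted caps blob>']).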
1472 1474 return caps
1473 1475
1474 1476 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1475 1477 getbundle2partsorder = []
1476 1478
1477 1479 # Mapping between step name and function
1478 1480 #
1479 1481 # This exists to help extensions wrap steps if necessary
1480 1482 getbundle2partsmapping = {}
1481 1483
1482 1484 def getbundle2partsgenerator(stepname, idx=None):
1483 1485 """decorator for function generating bundle2 part for getbundle
1484 1486
1485 1487 The function is added to the step -> function mapping and appended to the
1486 1488 list of steps. Beware that decorated functions will be added in order
1487 1489 (this may matter).
1488 1490
1489 1491 You can only use this decorator for new steps; if you want to wrap a step
1490 1492 from an extension, modify the getbundle2partsmapping dictionary directly."""
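# A minimal sketch of registering a hypothetical new step; the
# 'myextdata' part name and its payload are illustrative only:
#
#   @getbundle2partsgenerator('myextdata')
#   def _getbundlemyextdatapart(bundler, repo, source, bundlecaps=None,
#                               b2caps=None, **kwargs):
#       bundler.newpart('myextdata', data='...', mandatory=False)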
1491 1493 def dec(func):
1492 1494 assert stepname not in getbundle2partsmapping
1493 1495 getbundle2partsmapping[stepname] = func
1494 1496 if idx is None:
1495 1497 getbundle2partsorder.append(stepname)
1496 1498 else:
1497 1499 getbundle2partsorder.insert(idx, stepname)
1498 1500 return func
1499 1501 return dec
1500 1502
1501 1503 def bundle2requested(bundlecaps):
1502 1504 if bundlecaps is not None:
1503 1505 return any(cap.startswith('HG2') for cap in bundlecaps)
1504 1506 return False
1505 1507
1506 1508 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1507 1509 **kwargs):
1508 1510 """return a full bundle (with potentially multiple kind of parts)
1509 1511
1510 1512 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1511 1513 passed. For now, the bundle can contain only changegroup, but this will
1512 1514 changes when more part type will be available for bundle2.
1513 1515
1514 1516 This is different from changegroup.getchangegroup that only returns an HG10
1515 1517 changegroup bundle. They may eventually get reunited in the future when we
1516 1518 have a clearer idea of the API we what to query different data.
1517 1519
1518 1520 The implementation is at a very early stage and will get massive rework
1519 1521 when the API of bundle is refined.
1520 1522 """
1521 1523 usebundle2 = bundle2requested(bundlecaps)
1522 1524 # bundle10 case
1523 1525 if not usebundle2:
1524 1526 if bundlecaps and not kwargs.get('cg', True):
1525 1527 raise ValueError(_('request for bundle10 must include changegroup'))
1526 1528
1527 1529 if kwargs:
1528 1530 raise ValueError(_('unsupported getbundle arguments: %s')
1529 1531 % ', '.join(sorted(kwargs.keys())))
1530 1532 return changegroup.getchangegroup(repo, source, heads=heads,
1531 1533 common=common, bundlecaps=bundlecaps)
1532 1534
1533 1535 # bundle20 case
1534 1536 b2caps = {}
1535 1537 for bcaps in bundlecaps:
1536 1538 if bcaps.startswith('bundle2='):
1537 1539 blob = urllib.unquote(bcaps[len('bundle2='):])
1538 1540 b2caps.update(bundle2.decodecaps(blob))
1539 1541 bundler = bundle2.bundle20(repo.ui, b2caps)
1540 1542
1541 1543 kwargs['heads'] = heads
1542 1544 kwargs['common'] = common
1543 1545
1544 1546 for name in getbundle2partsorder:
1545 1547 func = getbundle2partsmapping[name]
1546 1548 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1547 1549 **kwargs)
1548 1550
1549 1551 return util.chunkbuffer(bundler.getchunks())
1550 1552
1551 1553 @getbundle2partsgenerator('changegroup')
1552 1554 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1553 1555 b2caps=None, heads=None, common=None, **kwargs):
1554 1556 """add a changegroup part to the requested bundle"""
1555 1557 cg = None
1556 1558 if kwargs.get('cg', True):
1557 1559 # build changegroup bundle here.
1558 1560 version = None
1559 1561 cgversions = b2caps.get('changegroup')
1560 1562 getcgkwargs = {}
1561 1563 if cgversions: # 3.1 and 3.2 ship with an empty value
1562 1564 cgversions = [v for v in cgversions
1563 1565 if v in changegroup.supportedversions(repo)]
1564 1566 if not cgversions:
1565 1567 raise ValueError(_('no common changegroup version'))
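# max() on the two-digit version strings picks the newest mutually
# supported changegroup version, e.g. ['01', '02'] -> '02'.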
1566 1568 version = getcgkwargs['version'] = max(cgversions)
1567 1569 outgoing = changegroup.computeoutgoing(repo, heads, common)
1568 1570 cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
1569 1571 bundlecaps=bundlecaps,
1570 1572 **getcgkwargs)
1571 1573
1572 1574 if cg:
1573 1575 part = bundler.newpart('changegroup', data=cg)
1574 1576 if version is not None:
1575 1577 part.addparam('version', version)
1576 1578 part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
1577 1579 if 'treemanifest' in repo.requirements:
1578 1580 part.addparam('treemanifest', '1')
1579 1581
1580 1582 @getbundle2partsgenerator('listkeys')
1581 1583 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1582 1584 b2caps=None, **kwargs):
1583 1585 """add parts containing listkeys namespaces to the requested bundle"""
1584 1586 listkeys = kwargs.get('listkeys', ())
1585 1587 for namespace in listkeys:
1586 1588 part = bundler.newpart('listkeys')
1587 1589 part.addparam('namespace', namespace)
1588 1590 keys = repo.listkeys(namespace).items()
1589 1591 part.data = pushkey.encodekeys(keys)
1590 1592
1591 1593 @getbundle2partsgenerator('obsmarkers')
1592 1594 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1593 1595 b2caps=None, heads=None, **kwargs):
1594 1596 """add an obsolescence markers part to the requested bundle"""
1595 1597 if kwargs.get('obsmarkers', False):
1596 1598 if heads is None:
1597 1599 heads = repo.heads()
1598 1600 subset = [c.node() for c in repo.set('::%ln', heads)]
1599 1601 markers = repo.obsstore.relevantmarkers(subset)
1600 1602 markers = sorted(markers)
1601 1603 buildobsmarkerspart(bundler, markers)
1602 1604
1603 1605 @getbundle2partsgenerator('hgtagsfnodes')
1604 1606 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
1605 1607 b2caps=None, heads=None, common=None,
1606 1608 **kwargs):
1607 1609 """Transfer the .hgtags filenodes mapping.
1608 1610
1609 1611 Only values for heads in this bundle will be transferred.
1610 1612
1611 1613 The part data consists of pairs of 20-byte changeset nodes and raw
1612 1614 .hgtags filenode values.
1613 1615 """
1614 1616 # Don't send unless:
1615 1617 # - changesets are being exchanged,
1616 1618 # - the client supports it.
1617 1619 if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
1618 1620 return
1619 1621
1620 1622 outgoing = changegroup.computeoutgoing(repo, heads, common)
1621 1623
1622 1624 if not outgoing.missingheads:
1623 1625 return
1624 1626
1625 1627 cache = tags.hgtagsfnodescache(repo.unfiltered())
1626 1628 chunks = []
1627 1629
1628 1630 # .hgtags fnodes are only relevant for head changesets. While we could
1629 1631 # transfer values for all known nodes, there will likely be little to
1630 1632 # no benefit.
1631 1633 #
1632 1634 # We don't bother using a generator to produce output data because
1633 1635 # a) we only have 40 bytes per head and even esoteric numbers of heads
1634 1636 # consume little memory (1M heads is 40MB) b) we don't want to send the
1635 1637 # part if we don't have entries and knowing if we have entries requires
1636 1638 # cache lookups.
1637 1639 for node in outgoing.missingheads:
1638 1640 # Don't compute missing, as this may slow down serving.
1639 1641 fnode = cache.getfnode(node, computemissing=False)
1640 1642 if fnode is not None:
1641 1643 chunks.extend([node, fnode])
1642 1644
1643 1645 if chunks:
1644 1646 bundler.newpart('hgtagsfnodes', data=''.join(chunks))
1645 1647
1646 1648 def check_heads(repo, their_heads, context):
1647 1649 """check if the heads of a repo have been modified
1648 1650
1649 1651 Used by peer for unbundling.
1650 1652 """
1651 1653 heads = repo.heads()
1652 1654 heads_hash = util.sha1(''.join(sorted(heads))).digest()
1653 1655 if not (their_heads == ['force'] or their_heads == heads or
1654 1656 their_heads == ['hashed', heads_hash]):
1655 1657 # someone else committed/pushed/unbundled while we
1656 1658 # were transferring data
1657 1659 raise error.PushRaced('repository changed while %s - '
1658 1660 'please try again' % context)
1659 1661
1660 1662 def unbundle(repo, cg, heads, source, url):
1661 1663 """Apply a bundle to a repo.
1662 1664
1663 1665 This function makes sure the repo is locked during the application and
1664 1666 has a mechanism to check that no push race occurred between the creation
1665 1667 of the bundle and its application.
1666 1668 
1667 1669 If the push was raced, a PushRaced exception is raised."""
1668 1670 r = 0
1669 1671 # need a transaction when processing a bundle2 stream
1670 1672 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
1671 1673 lockandtr = [None, None, None]
1672 1674 recordout = None
1673 1675 # quick fix for output mismatch with bundle2 in 3.4
1674 1676 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
1675 1677 False)
1676 1678 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1677 1679 captureoutput = True
1678 1680 try:
1679 1681 check_heads(repo, heads, 'uploading changes')
1680 1682 # push can proceed
1681 1683 if util.safehasattr(cg, 'params'):
1682 1684 r = None
1683 1685 try:
1684 1686 def gettransaction():
1685 1687 if not lockandtr[2]:
1686 1688 lockandtr[0] = repo.wlock()
1687 1689 lockandtr[1] = repo.lock()
1688 1690 lockandtr[2] = repo.transaction(source)
1689 1691 lockandtr[2].hookargs['source'] = source
1690 1692 lockandtr[2].hookargs['url'] = url
1691 1693 lockandtr[2].hookargs['bundle2'] = '1'
1692 1694 return lockandtr[2]
1693 1695
1694 1696 # Do greedy locking by default until we're satisfied with lazy
1695 1697 # locking.
1696 1698 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
1697 1699 gettransaction()
1698 1700
1699 1701 op = bundle2.bundleoperation(repo, gettransaction,
1700 1702 captureoutput=captureoutput)
1701 1703 try:
1702 1704 op = bundle2.processbundle(repo, cg, op=op)
1703 1705 finally:
1704 1706 r = op.reply
1705 1707 if captureoutput and r is not None:
1706 1708 repo.ui.pushbuffer(error=True, subproc=True)
1707 1709 def recordout(output):
1708 1710 r.newpart('output', data=output, mandatory=False)
1709 1711 if lockandtr[2] is not None:
1710 1712 lockandtr[2].close()
1711 1713 except BaseException as exc:
1712 1714 exc.duringunbundle2 = True
1713 1715 if captureoutput and r is not None:
1714 1716 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1715 1717 def recordout(output):
1716 1718 part = bundle2.bundlepart('output', data=output,
1717 1719 mandatory=False)
1718 1720 parts.append(part)
1719 1721 raise
1720 1722 else:
1721 1723 lockandtr[1] = repo.lock()
1722 1724 r = cg.apply(repo, source, url)
1723 1725 finally:
1724 1726 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
1725 1727 if recordout is not None:
1726 1728 recordout(repo.ui.popbuffer())
1727 1729 return r
1728 1730
1729 1731 def _maybeapplyclonebundle(pullop):
1730 1732 """Apply a clone bundle from a remote, if possible."""
1731 1733
1732 1734 repo = pullop.repo
1733 1735 remote = pullop.remote
1734 1736
1735 1737 if not repo.ui.configbool('ui', 'clonebundles', True):
1736 1738 return
1737 1739
1738 1740 # Only run if local repo is empty.
1739 1741 if len(repo):
1740 1742 return
1741 1743
1742 1744 if pullop.heads:
1743 1745 return
1744 1746
1745 1747 if not remote.capable('clonebundles'):
1746 1748 return
1747 1749
1748 1750 res = remote._call('clonebundles')
1749 1751
1750 1752 # If we call the wire protocol command, that's good enough to record the
1751 1753 # attempt.
1752 1754 pullop.clonebundleattempted = True
1753 1755
1754 1756 entries = parseclonebundlesmanifest(repo, res)
1755 1757 if not entries:
1756 1758 repo.ui.note(_('no clone bundles available on remote; '
1757 1759 'falling back to regular clone\n'))
1758 1760 return
1759 1761
1760 1762 entries = filterclonebundleentries(repo, entries)
1761 1763 if not entries:
1762 1764 # There is a thundering herd concern here. However, if a server
1763 1765 # operator doesn't advertise bundles appropriate for its clients,
1764 1766 # they deserve what's coming. Furthermore, from a client's
1765 1767 # perspective, no automatic fallback would mean not being able to
1766 1768 # clone!
1767 1769 repo.ui.warn(_('no compatible clone bundles available on server; '
1768 1770 'falling back to regular clone\n'))
1769 1771 repo.ui.warn(_('(you may want to report this to the server '
1770 1772 'operator)\n'))
1771 1773 return
1772 1774
1773 1775 entries = sortclonebundleentries(repo.ui, entries)
1774 1776
1775 1777 url = entries[0]['URL']
1776 1778 repo.ui.status(_('applying clone bundle from %s\n') % url)
1777 1779 if trypullbundlefromurl(repo.ui, repo, url):
1778 1780 repo.ui.status(_('finished applying clone bundle\n'))
1779 1781 # Bundle failed.
1780 1782 #
1781 1783 # We abort by default to avoid the thundering herd of
1782 1784 # clients flooding a server that was expecting expensive
1783 1785 # clone load to be offloaded.
1784 1786 elif repo.ui.configbool('ui', 'clonebundlefallback', False):
1785 1787 repo.ui.warn(_('falling back to normal clone\n'))
1786 1788 else:
1787 1789 raise error.Abort(_('error applying bundle'),
1788 1790 hint=_('if this error persists, consider contacting '
1789 1791 'the server operator or disable clone '
1790 1792 'bundles via '
1791 1793 '"--config ui.clonebundles=false"'))
1792 1794
1793 1795 def parseclonebundlesmanifest(repo, s):
1794 1796 """Parses the raw text of a clone bundles manifest.
1795 1797
1796 1798 Returns a list of dicts. Each dict has a ``URL`` key corresponding
1797 1799 to the URL; the remaining keys are the attributes for the entry.
1798 1800 """
1799 1801 m = []
1800 1802 for line in s.splitlines():
1801 1803 fields = line.split()
1802 1804 if not fields:
1803 1805 continue
1804 1806 attrs = {'URL': fields[0]}
1805 1807 for rawattr in fields[1:]:
1806 1808 key, value = rawattr.split('=', 1)
1807 1809 key = urllib.unquote(key)
1808 1810 value = urllib.unquote(value)
1809 1811 attrs[key] = value
1810 1812
1811 1813 # Parse BUNDLESPEC into components. This makes client-side
1812 1814 # preferences easier to specify since you can prefer a single
1813 1815 # component of the BUNDLESPEC.
1814 1816 if key == 'BUNDLESPEC':
1815 1817 try:
1816 1818 comp, version, params = parsebundlespec(repo, value,
1817 1819 externalnames=True)
1818 1820 attrs['COMPRESSION'] = comp
1819 1821 attrs['VERSION'] = version
1820 1822 except error.InvalidBundleSpecification:
1821 1823 pass
1822 1824 except error.UnsupportedBundleSpecification:
1823 1825 pass
1824 1826
1825 1827 m.append(attrs)
1826 1828
1827 1829 return m
1828 1830
1829 1831 def filterclonebundleentries(repo, entries):
1830 1832 """Remove incompatible clone bundle manifest entries.
1831 1833
1832 1834 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
1833 1835 and returns a new list consisting of only the entries that this client
1834 1836 should be able to apply.
1835 1837
1836 1838 There is no guarantee we'll be able to apply all returned entries because
1837 1839 the metadata we use to filter on may be missing or wrong.
1838 1840 """
1839 1841 newentries = []
1840 1842 for entry in entries:
1841 1843 spec = entry.get('BUNDLESPEC')
1842 1844 if spec:
1843 1845 try:
1844 1846 parsebundlespec(repo, spec, strict=True)
1845 1847 except error.InvalidBundleSpecification as e:
1846 1848 repo.ui.debug(str(e) + '\n')
1847 1849 continue
1848 1850 except error.UnsupportedBundleSpecification as e:
1849 1851 repo.ui.debug('filtering %s because unsupported bundle '
1850 1852 'spec: %s\n' % (entry['URL'], str(e)))
1851 1853 continue
1852 1854
1853 1855 if 'REQUIRESNI' in entry and not sslutil.hassni:
1854 1856 repo.ui.debug('filtering %s because SNI not supported\n' %
1855 1857 entry['URL'])
1856 1858 continue
1857 1859
1858 1860 newentries.append(entry)
1859 1861
1860 1862 return newentries
1861 1863
1862 1864 def sortclonebundleentries(ui, entries):
1863 1865 prefers = ui.configlist('ui', 'clonebundleprefers', default=[])
1864 1866 if not prefers:
1865 1867 return list(entries)
1866 1868
1867 1869 prefers = [p.split('=', 1) for p in prefers]
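# For example, ui.clonebundleprefers=VERSION=v2,COMPRESSION=gzip becomes
# [['VERSION', 'v2'], ['COMPRESSION', 'gzip']]: entries matching
# VERSION=v2 sort first, with COMPRESSION=gzip breaking ties.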
1868 1870
1869 1871 # Our sort function.
1870 1872 def compareentry(a, b):
1871 1873 for prefkey, prefvalue in prefers:
1872 1874 avalue = a.get(prefkey)
1873 1875 bvalue = b.get(prefkey)
1874 1876
1875 1877 # Special case for b missing attribute and a matches exactly.
1876 1878 if avalue is not None and bvalue is None and avalue == prefvalue:
1877 1879 return -1
1878 1880
1879 1881 # Special case for a missing attribute and b matches exactly.
1880 1882 if bvalue is not None and avalue is None and bvalue == prefvalue:
1881 1883 return 1
1882 1884
1883 1885 # We can't compare unless attribute present on both.
1884 1886 if avalue is None or bvalue is None:
1885 1887 continue
1886 1888
1887 1889 # Same values should fall back to next attribute.
1888 1890 if avalue == bvalue:
1889 1891 continue
1890 1892
1891 1893 # Exact matches come first.
1892 1894 if avalue == prefvalue:
1893 1895 return -1
1894 1896 if bvalue == prefvalue:
1895 1897 return 1
1896 1898
1897 1899 # Fall back to next attribute.
1898 1900 continue
1899 1901
1900 1902 # If we got here we couldn't sort by attributes and prefers. Fall
1901 1903 # back to index order.
1902 1904 return 0
1903 1905
1904 1906 return sorted(entries, cmp=compareentry)
1905 1907
1906 1908 def trypullbundlefromurl(ui, repo, url):
1907 1909 """Attempt to apply a bundle from a URL."""
1908 1910 lock = repo.lock()
1909 1911 try:
1910 1912 tr = repo.transaction('bundleurl')
1911 1913 try:
1912 1914 try:
1913 1915 fh = urlmod.open(ui, url)
1914 1916 cg = readbundle(ui, fh, 'stream')
1915 1917
1916 1918 if isinstance(cg, bundle2.unbundle20):
1917 1919 bundle2.processbundle(repo, cg, lambda: tr)
1918 1920 elif isinstance(cg, streamclone.streamcloneapplier):
1919 1921 cg.apply(repo)
1920 1922 else:
1921 1923 cg.apply(repo, 'clonebundles', url)
1922 1924 tr.close()
1923 1925 return True
1924 1926 except urllib2.HTTPError as e:
1925 1927 ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
1926 1928 except urllib2.URLError as e:
1927 1929 ui.warn(_('error fetching bundle: %s\n') % e.reason[1])
1928 1930
1929 1931 return False
1930 1932 finally:
1931 1933 tr.release()
1932 1934 finally:
1933 1935 lock.release()
@@ -1,471 +1,490 b''
1 1 $ cat << EOF >> $HGRCPATH
2 2 > [format]
3 3 > usegeneraldelta=yes
4 4 > EOF
5 5
6 6 Set up repo
7 7
8 8 $ hg --config experimental.treemanifest=True init repo
9 9 $ cd repo
10 10
11 11 Requirements get set on init
12 12
13 13 $ grep treemanifest .hg/requires
14 14 treemanifest
15 15
16 16 Without directories, looks like any other repo
17 17
18 18 $ echo 0 > a
19 19 $ echo 0 > b
20 20 $ hg ci -Aqm initial
21 21 $ hg debugdata -m 0
22 22 a\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
23 23 b\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
24 24
25 25 Submanifest is stored in separate revlog
26 26
27 27 $ mkdir dir1
28 28 $ echo 1 > dir1/a
29 29 $ echo 1 > dir1/b
30 30 $ echo 1 > e
31 31 $ hg ci -Aqm 'add dir1'
32 32 $ hg debugdata -m 1
33 33 a\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
34 34 b\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
35 35 dir1\x008b3ffd73f901e83304c83d33132c8e774ceac44et (esc)
36 36 e\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
37 37 $ hg debugdata --dir dir1 0
38 38 a\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
39 39 b\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
40 40
41 41 Can add nested directories
42 42
43 43 $ mkdir dir1/dir1
44 44 $ echo 2 > dir1/dir1/a
45 45 $ echo 2 > dir1/dir1/b
46 46 $ mkdir dir1/dir2
47 47 $ echo 2 > dir1/dir2/a
48 48 $ echo 2 > dir1/dir2/b
49 49 $ hg ci -Aqm 'add dir1/dir1'
50 50 $ hg files -r .
51 51 a
52 52 b
53 53 dir1/a (glob)
54 54 dir1/b (glob)
55 55 dir1/dir1/a (glob)
56 56 dir1/dir1/b (glob)
57 57 dir1/dir2/a (glob)
58 58 dir1/dir2/b (glob)
59 59 e
60 60
61 61 Revision is not created for unchanged directory
62 62
63 63 $ mkdir dir2
64 64 $ echo 3 > dir2/a
65 65 $ hg add dir2
66 66 adding dir2/a (glob)
67 67 $ hg debugindex --dir dir1 > before
68 68 $ hg ci -qm 'add dir2'
69 69 $ hg debugindex --dir dir1 > after
70 70 $ diff before after
71 71 $ rm before after
72 72
73 73 Removing a directory does not create a revlog entry
74 74
75 75 $ hg rm dir1/dir1
76 76 removing dir1/dir1/a (glob)
77 77 removing dir1/dir1/b (glob)
78 78 $ hg debugindex --dir dir1/dir1 > before
79 79 $ hg ci -qm 'remove dir1/dir1'
80 80 $ hg debugindex --dir dir1/dir1 > after
81 81 $ diff before after
82 82 $ rm before after
83 83
84 84 Check that hg files (calls treemanifest.walk()) works
85 85 without loading all directory revlogs
86 86
87 87 $ hg co 'desc("add dir2")'
88 88 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
89 89 $ mv .hg/store/meta/dir2 .hg/store/meta/dir2-backup
90 90 $ hg files -r . dir1
91 91 dir1/a (glob)
92 92 dir1/b (glob)
93 93 dir1/dir1/a (glob)
94 94 dir1/dir1/b (glob)
95 95 dir1/dir2/a (glob)
96 96 dir1/dir2/b (glob)
97 97
98 98 Check that status between revisions works (calls treemanifest.matches())
99 99 without loading all directory revlogs
100 100
101 101 $ hg status --rev 'desc("add dir1")' --rev . dir1
102 102 A dir1/dir1/a
103 103 A dir1/dir1/b
104 104 A dir1/dir2/a
105 105 A dir1/dir2/b
106 106 $ mv .hg/store/meta/dir2-backup .hg/store/meta/dir2
107 107
108 108 Merge creates 2-parent revision of directory revlog
109 109
110 110 $ echo 5 > dir1/a
111 111 $ hg ci -Aqm 'modify dir1/a'
112 112 $ hg co '.^'
113 113 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
114 114 $ echo 6 > dir1/b
115 115 $ hg ci -Aqm 'modify dir1/b'
116 116 $ hg merge 'desc("modify dir1/a")'
117 117 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
118 118 (branch merge, don't forget to commit)
119 119 $ hg ci -m 'conflict-free merge involving dir1/'
120 120 $ cat dir1/a
121 121 5
122 122 $ cat dir1/b
123 123 6
124 124 $ hg debugindex --dir dir1
125 125 rev offset length delta linkrev nodeid p1 p2
126 126 0 0 54 -1 1 8b3ffd73f901 000000000000 000000000000
127 127 1 54 68 0 2 68e9d057c5a8 8b3ffd73f901 000000000000
128 128 2 122 12 1 4 4698198d2624 68e9d057c5a8 000000000000
129 129 3 134 55 1 5 44844058ccce 68e9d057c5a8 000000000000
130 130 4 189 55 1 6 bf3d9b744927 68e9d057c5a8 000000000000
131 131 5 244 55 4 7 dde7c0af2a03 bf3d9b744927 44844058ccce
132 132
133 133 Merge keeping directory from parent 1 does not create a revlog entry. (Note that
134 134 dir1's manifest does change, but only because dir1/a's filelog changes.)
135 135
136 136 $ hg co 'desc("add dir2")'
137 137 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
138 138 $ echo 8 > dir2/a
139 139 $ hg ci -m 'modify dir2/a'
140 140 created new head
141 141
142 142 $ hg debugindex --dir dir2 > before
143 143 $ hg merge 'desc("modify dir1/a")'
144 144 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
145 145 (branch merge, don't forget to commit)
146 146 $ hg revert -r 'desc("modify dir2/a")' .
147 147 reverting dir1/a (glob)
148 148 $ hg ci -m 'merge, keeping parent 1'
149 149 $ hg debugindex --dir dir2 > after
150 150 $ diff before after
151 151 $ rm before after
152 152
153 153 Merge keeping directory from parent 2 does not create a revlog entry. (Note that
154 154 dir2's manifest does change, but only because dir2/a's filelog changes.)
155 155
156 156 $ hg co 'desc("modify dir2/a")'
157 157 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
158 158 $ hg debugindex --dir dir1 > before
159 159 $ hg merge 'desc("modify dir1/a")'
160 160 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
161 161 (branch merge, don't forget to commit)
162 162 $ hg revert -r 'desc("modify dir1/a")' .
163 163 reverting dir2/a (glob)
164 164 $ hg ci -m 'merge, keeping parent 2'
165 165 created new head
166 166 $ hg debugindex --dir dir1 > after
167 167 $ diff before after
168 168 $ rm before after
169 169
170 170 Create flat source repo for tests with mixed flat/tree manifests
171 171
172 172 $ cd ..
173 173 $ hg init repo-flat
174 174 $ cd repo-flat
175 175
176 176 Create a few commits with flat manifest
177 177
178 178 $ echo 0 > a
179 179 $ echo 0 > b
180 180 $ echo 0 > e
181 181 $ for d in dir1 dir1/dir1 dir1/dir2 dir2
182 182 > do
183 183 > mkdir $d
184 184 > echo 0 > $d/a
185 185 > echo 0 > $d/b
186 186 > done
187 187 $ hg ci -Aqm initial
188 188
189 189 $ echo 1 > a
190 190 $ echo 1 > dir1/a
191 191 $ echo 1 > dir1/dir1/a
192 192 $ hg ci -Aqm 'modify on branch 1'
193 193
194 194 $ hg co 0
195 195 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
196 196 $ echo 2 > b
197 197 $ echo 2 > dir1/b
198 198 $ echo 2 > dir1/dir1/b
199 199 $ hg ci -Aqm 'modify on branch 2'
200 200
201 201 $ hg merge 1
202 202 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
203 203 (branch merge, don't forget to commit)
204 204 $ hg ci -m 'merge of flat manifests to new flat manifest'
205 205
206 206 Create clone with tree manifests enabled
207 207
208 208 $ cd ..
209 209 $ hg clone --pull --config experimental.treemanifest=1 repo-flat repo-mixed
210 210 requesting all changes
211 211 adding changesets
212 212 adding manifests
213 213 adding file changes
214 214 added 4 changesets with 17 changes to 11 files
215 215 updating to branch default
216 216 11 files updated, 0 files merged, 0 files removed, 0 files unresolved
217 217 $ cd repo-mixed
218 218 $ test -f .hg/store/meta
219 219 [1]
220 220 $ grep treemanifest .hg/requires
221 221 treemanifest
222 222
223 223 Commit should store revlog per directory
224 224
225 225 $ hg co 1
226 226 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
227 227 $ echo 3 > a
228 228 $ echo 3 > dir1/a
229 229 $ echo 3 > dir1/dir1/a
230 230 $ hg ci -m 'first tree'
231 231 created new head
232 232 $ find .hg/store/meta | sort
233 233 .hg/store/meta
234 234 .hg/store/meta/dir1
235 235 .hg/store/meta/dir1/00manifest.i
236 236 .hg/store/meta/dir1/dir1
237 237 .hg/store/meta/dir1/dir1/00manifest.i
238 238 .hg/store/meta/dir1/dir2
239 239 .hg/store/meta/dir1/dir2/00manifest.i
240 240 .hg/store/meta/dir2
241 241 .hg/store/meta/dir2/00manifest.i
242 242
243 243 Merge of two trees
244 244
245 245 $ hg co 2
246 246 6 files updated, 0 files merged, 0 files removed, 0 files unresolved
247 247 $ hg merge 1
248 248 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
249 249 (branch merge, don't forget to commit)
250 250 $ hg ci -m 'merge of flat manifests to new tree manifest'
251 251 created new head
252 252 $ hg diff -r 3
253 253
254 254 Parent of the tree root manifest should be the flat manifest, and two parents for a merge
255 255
256 256 $ hg debugindex -m
257 257 rev offset length delta linkrev nodeid p1 p2
258 258 0 0 80 -1 0 40536115ed9e 000000000000 000000000000
259 259 1 80 83 0 1 f3376063c255 40536115ed9e 000000000000
260 260 2 163 89 0 2 5d9b9da231a2 40536115ed9e 000000000000
261 261 3 252 83 2 3 d17d663cbd8a 5d9b9da231a2 f3376063c255
262 262 4 335 124 1 4 51e32a8c60ee f3376063c255 000000000000
263 263 5 459 126 2 5 cc5baa78b230 5d9b9da231a2 f3376063c255
264 264
265 265
266 266 Status across flat/tree boundary should work
267 267
268 268 $ hg status --rev '.^' --rev .
269 269 M a
270 270 M dir1/a
271 271 M dir1/dir1/a
272 272
273 273
274 274 Turning off treemanifest config has no effect
275 275
276 276 $ hg debugindex .hg/store/meta/dir1/00manifest.i
277 277 rev offset length delta linkrev nodeid p1 p2
278 278 0 0 127 -1 4 064927a0648a 000000000000 000000000000
279 279 1 127 111 0 5 25ecb8cb8618 000000000000 000000000000
280 280 $ echo 2 > dir1/a
281 281 $ hg --config experimental.treemanifest=False ci -qm 'modify dir1/a'
282 282 $ hg debugindex .hg/store/meta/dir1/00manifest.i
283 283 rev offset length delta linkrev nodeid p1 p2
284 284 0 0 127 -1 4 064927a0648a 000000000000 000000000000
285 285 1 127 111 0 5 25ecb8cb8618 000000000000 000000000000
286 286 2 238 55 1 6 5b16163a30c6 25ecb8cb8618 000000000000
287 287
288 288 Stripping and recovering changes should work
289 289
290 290 $ hg st --change tip
291 291 M dir1/a
292 292 $ hg --config extensions.strip= strip tip
293 293 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
294 294 saved backup bundle to $TESTTMP/repo-mixed/.hg/strip-backup/51cfd7b1e13b-78a2f3ed-backup.hg (glob)
295 295 $ hg unbundle -q .hg/strip-backup/*
296 296 $ hg st --change tip
297 297 M dir1/a
298 298
299 299 Shelving and unshelving should work
300 300
301 301 $ echo foo >> dir1/a
302 302 $ hg --config extensions.shelve= shelve
303 303 shelved as default
304 304 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
305 305 $ hg --config extensions.shelve= unshelve
306 306 unshelving change 'default'
307 307 $ hg diff --nodates
308 308 diff -r 708a273da119 dir1/a
309 309 --- a/dir1/a
310 310 +++ b/dir1/a
311 311 @@ -1,1 +1,2 @@
312 312 1
313 313 +foo
314 314
315 Pushing from a treemanifest repo to an empty repo makes it a treemanifest repo
316
317 $ cd ..
318 $ hg init empty-repo
319 $ cat << EOF >> empty-repo/.hg/hgrc
320 > [experimental]
321 > changegroup3=yes
322 > EOF
323 $ grep treemanifest empty-repo/.hg/requires
324 [1]
325 $ hg push -R repo -r 0 empty-repo
326 pushing to empty-repo
327 searching for changes
328 adding changesets
329 adding manifests
330 adding file changes
331 added 1 changesets with 2 changes to 2 files
332 $ grep treemanifest empty-repo/.hg/requires
333 treemanifest
334
315 335 Create deeper repo with tree manifests.
316 336
317 $ cd ..
318 337 $ hg --config experimental.treemanifest=True init deeprepo
319 338 $ cd deeprepo
320 339
321 340 $ mkdir a
322 341 $ mkdir b
323 342 $ mkdir b/bar
324 343 $ mkdir b/bar/orange
325 344 $ mkdir b/bar/orange/fly
326 345 $ mkdir b/foo
327 346 $ mkdir b/foo/apple
328 347 $ mkdir b/foo/apple/bees
329 348
330 349 $ touch a/one.txt
331 350 $ touch a/two.txt
332 351 $ touch b/bar/fruits.txt
333 352 $ touch b/bar/orange/fly/gnat.py
334 353 $ touch b/bar/orange/fly/housefly.txt
335 354 $ touch b/foo/apple/bees/flower.py
336 355 $ touch c.txt
337 356 $ touch d.py
338 357
339 358 $ hg ci -Aqm 'initial'
340 359
341 360 We'll see that visitdir works by removing some treemanifest revlogs and running
342 361 the files command with various parameters.
343 362
344 363 Test files from the root.
345 364
346 365 $ hg files -r .
347 366 a/one.txt (glob)
348 367 a/two.txt (glob)
349 368 b/bar/fruits.txt (glob)
350 369 b/bar/orange/fly/gnat.py (glob)
351 370 b/bar/orange/fly/housefly.txt (glob)
352 371 b/foo/apple/bees/flower.py (glob)
353 372 c.txt
354 373 d.py
355 374
356 375 Excludes with a glob should not exclude everything from the glob's root
357 376
358 377 $ hg files -r . -X 'b/fo?' b
359 378 b/bar/fruits.txt (glob)
360 379 b/bar/orange/fly/gnat.py (glob)
361 380 b/bar/orange/fly/housefly.txt (glob)
362 381
363 382 Test files for a subdirectory.
364 383
365 384 $ mv .hg/store/meta/a oldmf
366 385 $ hg files -r . b
367 386 b/bar/fruits.txt (glob)
368 387 b/bar/orange/fly/gnat.py (glob)
369 388 b/bar/orange/fly/housefly.txt (glob)
370 389 b/foo/apple/bees/flower.py (glob)
371 390 $ mv oldmf .hg/store/meta/a
372 391
373 392 Test files with just includes and excludes.
374 393
375 394 $ mv .hg/store/meta/a oldmf
376 395 $ mv .hg/store/meta/b/bar/orange/fly oldmf2
377 396 $ mv .hg/store/meta/b/foo/apple/bees oldmf3
378 397 $ hg files -r . -I path:b/bar -X path:b/bar/orange/fly -I path:b/foo -X path:b/foo/apple/bees
379 398 b/bar/fruits.txt (glob)
380 399 $ mv oldmf .hg/store/meta/a
381 400 $ mv oldmf2 .hg/store/meta/b/bar/orange/fly
382 401 $ mv oldmf3 .hg/store/meta/b/foo/apple/bees
383 402
384 403 Test files for a subdirectory, excluding a directory within it.
385 404
386 405 $ mv .hg/store/meta/a oldmf
387 406 $ mv .hg/store/meta/b/foo oldmf2
388 407 $ hg files -r . -X path:b/foo b
389 408 b/bar/fruits.txt (glob)
390 409 b/bar/orange/fly/gnat.py (glob)
391 410 b/bar/orange/fly/housefly.txt (glob)
392 411 $ mv oldmf .hg/store/meta/a
393 412 $ mv oldmf2 .hg/store/meta/b/foo
394 413
395 414 Test files for a subdirectory, including only a directory within it, and
396 415 including an unrelated directory.
397 416
398 417 $ mv .hg/store/meta/a oldmf
399 418 $ mv .hg/store/meta/b/foo oldmf2
400 419 $ hg files -r . -I path:b/bar/orange -I path:a b
401 420 b/bar/orange/fly/gnat.py (glob)
402 421 b/bar/orange/fly/housefly.txt (glob)
403 422 $ mv oldmf .hg/store/meta/a
404 423 $ mv oldmf2 .hg/store/meta/b/foo
405 424
406 425 Test files for a pattern, including a directory, and excluding a directory
407 426 within that.
408 427
409 428 $ mv .hg/store/meta/a oldmf
410 429 $ mv .hg/store/meta/b/foo oldmf2
411 430 $ mv .hg/store/meta/b/bar/orange oldmf3
412 431 $ hg files -r . glob:**.txt -I path:b/bar -X path:b/bar/orange
413 432 b/bar/fruits.txt (glob)
414 433 $ mv oldmf .hg/store/meta/a
415 434 $ mv oldmf2 .hg/store/meta/b/foo
416 435 $ mv oldmf3 .hg/store/meta/b/bar/orange
417 436
418 437 Add some more changes to the deep repo
419 438 $ echo narf >> b/bar/fruits.txt
420 439 $ hg ci -m narf
421 440 $ echo troz >> b/bar/orange/fly/gnat.py
422 441 $ hg ci -m troz
423 442
424 443 Test cloning a treemanifest repo over http.
425 444 $ hg serve -p $HGPORT -d --pid-file=hg.pid --errorlog=errors.log
426 445 $ cat hg.pid >> $DAEMON_PIDS
427 446 $ cd ..
428 447 We can clone even with the knob turned off and we'll get a treemanifest repo.
429 448 $ hg clone --config experimental.treemanifest=False \
430 449 > --config experimental.changegroup3=True \
431 450 > http://localhost:$HGPORT deepclone
432 451 requesting all changes
433 452 adding changesets
434 453 adding manifests
435 454 adding file changes
436 455 added 3 changesets with 10 changes to 8 files
437 456 updating to branch default
438 457 8 files updated, 0 files merged, 0 files removed, 0 files unresolved
439 458 No server errors.
440 459 $ cat deeprepo/errors.log
441 460 The requires file got updated to include treemanifest
442 461 $ cat deepclone/.hg/requires | grep treemanifest
443 462 treemanifest
444 463 Tree manifest revlogs exist.
445 464 $ find deepclone/.hg/store/meta | sort
446 465 deepclone/.hg/store/meta
447 466 deepclone/.hg/store/meta/a
448 467 deepclone/.hg/store/meta/a/00manifest.i
449 468 deepclone/.hg/store/meta/b
450 469 deepclone/.hg/store/meta/b/00manifest.i
451 470 deepclone/.hg/store/meta/b/bar
452 471 deepclone/.hg/store/meta/b/bar/00manifest.i
453 472 deepclone/.hg/store/meta/b/bar/orange
454 473 deepclone/.hg/store/meta/b/bar/orange/00manifest.i
455 474 deepclone/.hg/store/meta/b/bar/orange/fly
456 475 deepclone/.hg/store/meta/b/bar/orange/fly/00manifest.i
457 476 deepclone/.hg/store/meta/b/foo
458 477 deepclone/.hg/store/meta/b/foo/00manifest.i
459 478 deepclone/.hg/store/meta/b/foo/apple
460 479 deepclone/.hg/store/meta/b/foo/apple/00manifest.i
461 480 deepclone/.hg/store/meta/b/foo/apple/bees
462 481 deepclone/.hg/store/meta/b/foo/apple/bees/00manifest.i
463 482 Verify passes.
464 483 $ cd deepclone
465 484 $ hg verify
466 485 checking changesets
467 486 checking manifests
468 487 crosschecking files in changesets and manifests
469 488 checking files
470 489 8 files, 3 changesets, 10 total revisions
471 490 $ cd ..