filelog: drop `indexfile` from `filelog`...
marmoute
r47918:8a1a51d3 default
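
This changeset removes filelog's pass-through `indexfile` property (see the filelog.py hunk below); its two in-tree callers, bundlerepo.py and unionrepo.py, now reach through the wrapped revlog directly. A minimal sketch of how a caller adapts; `filelog_index_path` is a hypothetical helper used only for illustration, not part of the changeset:

    # Hypothetical helper, for illustration only: before this changeset the
    # body could be written as `flog.indexfile`, because filelog forwarded the
    # attribute to its inner revlog. After it, callers go through `_revlog`.
    def filelog_index_path(flog):
        return flog._revlog.indexfile
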
@@ -1,714 +1,714 @@ bundlerepo.py
1 1 # bundlerepo.py - repository class for viewing uncompressed bundles
2 2 #
3 3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Repository class for viewing uncompressed bundles.
9 9
10 10 This provides a read-only repository interface to bundles as if they
11 11 were part of the actual repository.
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import os
17 17 import shutil
18 18
19 19 from .i18n import _
20 20 from .node import (
21 21 hex,
22 22 nullrev,
23 23 )
24 24
25 25 from . import (
26 26 bundle2,
27 27 changegroup,
28 28 changelog,
29 29 cmdutil,
30 30 discovery,
31 31 encoding,
32 32 error,
33 33 exchange,
34 34 filelog,
35 35 localrepo,
36 36 manifest,
37 37 mdiff,
38 38 pathutil,
39 39 phases,
40 40 pycompat,
41 41 revlog,
42 42 util,
43 43 vfs as vfsmod,
44 44 )
45 45 from .utils import (
46 46 urlutil,
47 47 )
48 48
49 49 from .revlogutils import (
50 50 constants as revlog_constants,
51 51 )
52 52
53 53
54 54 class bundlerevlog(revlog.revlog):
55 55 def __init__(self, opener, target, indexfile, cgunpacker, linkmapper):
56 56 # How it works:
57 57 # To retrieve a revision, we need to know the offset of the revision in
58 58 # the bundle (an unbundle object). We store this offset in the index
59 59 # (start). The base of the delta is stored in the base field.
60 60 #
61 61 # To differentiate a rev in the bundle from a rev in the revlog, we
62 62 # check revision against repotiprev.
63 63 opener = vfsmod.readonlyvfs(opener)
64 64 revlog.revlog.__init__(self, opener, target=target, indexfile=indexfile)
65 65 self.bundle = cgunpacker
66 66 n = len(self)
67 67 self.repotiprev = n - 1
68 68 self.bundlerevs = set() # used by 'bundle()' revset expression
69 69 for deltadata in cgunpacker.deltaiter():
70 70 node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
71 71
72 72 size = len(delta)
73 73 start = cgunpacker.tell() - size
74 74
75 75 if self.index.has_node(node):
76 76 # this can happen if two branches make the same change
77 77 self.bundlerevs.add(self.index.rev(node))
78 78 continue
79 79 if cs == node:
80 80 linkrev = nullrev
81 81 else:
82 82 linkrev = linkmapper(cs)
83 83
84 84 for p in (p1, p2):
85 85 if not self.index.has_node(p):
86 86 raise error.LookupError(
87 87 p, self.indexfile, _(b"unknown parent")
88 88 )
89 89
90 90 if not self.index.has_node(deltabase):
91 91 raise LookupError(
92 92 deltabase, self.indexfile, _(b'unknown delta base')
93 93 )
94 94
95 95 baserev = self.rev(deltabase)
96 96 # start, size, full unc. size, base (unused), link, p1, p2, node, sidedata_offset (unused), sidedata_size (unused)
97 97 e = (
98 98 revlog.offset_type(start, flags),
99 99 size,
100 100 -1,
101 101 baserev,
102 102 linkrev,
103 103 self.rev(p1),
104 104 self.rev(p2),
105 105 node,
106 106 0,
107 107 0,
108 108 )
109 109 self.index.append(e)
110 110 self.bundlerevs.add(n)
111 111 n += 1
112 112
113 113 def _chunk(self, rev, df=None):
114 114 # Warning: in case of bundle, the diff is against what we stored as
115 115 # delta base, not against rev - 1
116 116 # XXX: could use some caching
117 117 if rev <= self.repotiprev:
118 118 return revlog.revlog._chunk(self, rev)
119 119 self.bundle.seek(self.start(rev))
120 120 return self.bundle.read(self.length(rev))
121 121
122 122 def revdiff(self, rev1, rev2):
123 123 """return or calculate a delta between two revisions"""
124 124 if rev1 > self.repotiprev and rev2 > self.repotiprev:
125 125 # hot path for bundle
126 126 revb = self.index[rev2][3]
127 127 if revb == rev1:
128 128 return self._chunk(rev2)
129 129 elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
130 130 return revlog.revlog.revdiff(self, rev1, rev2)
131 131
132 132 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
133 133
134 134 def _rawtext(self, node, rev, _df=None):
135 135 if rev is None:
136 136 rev = self.rev(node)
137 137 validated = False
138 138 rawtext = None
139 139 chain = []
140 140 iterrev = rev
141 141 # reconstruct the revision if it is from a changegroup
142 142 while iterrev > self.repotiprev:
143 143 if self._revisioncache and self._revisioncache[1] == iterrev:
144 144 rawtext = self._revisioncache[2]
145 145 break
146 146 chain.append(iterrev)
147 147 iterrev = self.index[iterrev][3]
148 148 if iterrev == nullrev:
149 149 rawtext = b''
150 150 elif rawtext is None:
151 151 r = super(bundlerevlog, self)._rawtext(
152 152 self.node(iterrev), iterrev, _df=_df
153 153 )
154 154 __, rawtext, validated = r
155 155 if chain:
156 156 validated = False
157 157 while chain:
158 158 delta = self._chunk(chain.pop())
159 159 rawtext = mdiff.patches(rawtext, [delta])
160 160 return rev, rawtext, validated
161 161
162 162 def addrevision(self, *args, **kwargs):
163 163 raise NotImplementedError
164 164
165 165 def addgroup(self, *args, **kwargs):
166 166 raise NotImplementedError
167 167
168 168 def strip(self, *args, **kwargs):
169 169 raise NotImplementedError
170 170
171 171 def checksize(self):
172 172 raise NotImplementedError
173 173
174 174
175 175 class bundlechangelog(bundlerevlog, changelog.changelog):
176 176 def __init__(self, opener, cgunpacker):
177 177 changelog.changelog.__init__(self, opener)
178 178 linkmapper = lambda x: x
179 179 bundlerevlog.__init__(
180 180 self,
181 181 opener,
182 182 (revlog_constants.KIND_CHANGELOG, None),
183 183 self.indexfile,
184 184 cgunpacker,
185 185 linkmapper,
186 186 )
187 187
188 188
189 189 class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
190 190 def __init__(
191 191 self,
192 192 nodeconstants,
193 193 opener,
194 194 cgunpacker,
195 195 linkmapper,
196 196 dirlogstarts=None,
197 197 dir=b'',
198 198 ):
199 199 manifest.manifestrevlog.__init__(self, nodeconstants, opener, tree=dir)
200 200 bundlerevlog.__init__(
201 201 self,
202 202 opener,
203 203 (revlog_constants.KIND_MANIFESTLOG, dir),
204 204 self._revlog.indexfile,
205 205 cgunpacker,
206 206 linkmapper,
207 207 )
208 208 if dirlogstarts is None:
209 209 dirlogstarts = {}
210 210 if self.bundle.version == b"03":
211 211 dirlogstarts = _getfilestarts(self.bundle)
212 212 self._dirlogstarts = dirlogstarts
213 213 self._linkmapper = linkmapper
214 214
215 215 def dirlog(self, d):
216 216 if d in self._dirlogstarts:
217 217 self.bundle.seek(self._dirlogstarts[d])
218 218 return bundlemanifest(
219 219 self.nodeconstants,
220 220 self.opener,
221 221 self.bundle,
222 222 self._linkmapper,
223 223 self._dirlogstarts,
224 224 dir=d,
225 225 )
226 226 return super(bundlemanifest, self).dirlog(d)
227 227
228 228
229 229 class bundlefilelog(filelog.filelog):
230 230 def __init__(self, opener, path, cgunpacker, linkmapper):
231 231 filelog.filelog.__init__(self, opener, path)
232 232 self._revlog = bundlerevlog(
233 233 opener,
234 234 # XXX should use the unencoded path
235 235 target=(revlog_constants.KIND_FILELOG, path),
236 indexfile=self.indexfile,
236 indexfile=self._revlog.indexfile,
237 237 cgunpacker=cgunpacker,
238 238 linkmapper=linkmapper,
239 239 )
240 240
241 241
242 242 class bundlepeer(localrepo.localpeer):
243 243 def canpush(self):
244 244 return False
245 245
246 246
247 247 class bundlephasecache(phases.phasecache):
248 248 def __init__(self, *args, **kwargs):
249 249 super(bundlephasecache, self).__init__(*args, **kwargs)
250 250 if util.safehasattr(self, 'opener'):
251 251 self.opener = vfsmod.readonlyvfs(self.opener)
252 252
253 253 def write(self):
254 254 raise NotImplementedError
255 255
256 256 def _write(self, fp):
257 257 raise NotImplementedError
258 258
259 259 def _updateroots(self, phase, newroots, tr):
260 260 self.phaseroots[phase] = newroots
261 261 self.invalidate()
262 262 self.dirty = True
263 263
264 264
265 265 def _getfilestarts(cgunpacker):
266 266 filespos = {}
267 267 for chunkdata in iter(cgunpacker.filelogheader, {}):
268 268 fname = chunkdata[b'filename']
269 269 filespos[fname] = cgunpacker.tell()
270 270 for chunk in iter(lambda: cgunpacker.deltachunk(None), {}):
271 271 pass
272 272 return filespos
273 273
274 274
275 275 class bundlerepository(object):
276 276 """A repository instance that is a union of a local repo and a bundle.
277 277
278 278 Instances represent a read-only repository composed of a local repository
279 279 with the contents of a bundle file applied. The repository instance is
280 280 conceptually similar to the state of a repository after an
281 281 ``hg unbundle`` operation. However, the contents of the bundle are never
282 282 applied to the actual base repository.
283 283
284 284 Instances constructed directly are not usable as repository objects.
285 285 Use instance() or makebundlerepository() to create instances.
286 286 """
287 287
288 288 def __init__(self, bundlepath, url, tempparent):
289 289 self._tempparent = tempparent
290 290 self._url = url
291 291
292 292 self.ui.setconfig(b'phases', b'publish', False, b'bundlerepo')
293 293
294 294 self.tempfile = None
295 295 f = util.posixfile(bundlepath, b"rb")
296 296 bundle = exchange.readbundle(self.ui, f, bundlepath)
297 297
298 298 if isinstance(bundle, bundle2.unbundle20):
299 299 self._bundlefile = bundle
300 300 self._cgunpacker = None
301 301
302 302 cgpart = None
303 303 for part in bundle.iterparts(seekable=True):
304 304 if part.type == b'changegroup':
305 305 if cgpart:
306 306 raise NotImplementedError(
307 307 b"can't process multiple changegroups"
308 308 )
309 309 cgpart = part
310 310
311 311 self._handlebundle2part(bundle, part)
312 312
313 313 if not cgpart:
314 314 raise error.Abort(_(b"No changegroups found"))
315 315
316 316 # This is required to placate a later consumer, which expects
317 317 # the payload offset to be at the beginning of the changegroup.
318 318 # We need to do this after the iterparts() generator advances
319 319 # because iterparts() will seek to end of payload after the
320 320 # generator returns control to iterparts().
321 321 cgpart.seek(0, os.SEEK_SET)
322 322
323 323 elif isinstance(bundle, changegroup.cg1unpacker):
324 324 if bundle.compressed():
325 325 f = self._writetempbundle(
326 326 bundle.read, b'.hg10un', header=b'HG10UN'
327 327 )
328 328 bundle = exchange.readbundle(self.ui, f, bundlepath, self.vfs)
329 329
330 330 self._bundlefile = bundle
331 331 self._cgunpacker = bundle
332 332 else:
333 333 raise error.Abort(
334 334 _(b'bundle type %s cannot be read') % type(bundle)
335 335 )
336 336
337 337 # dict with the mapping 'filename' -> position in the changegroup.
338 338 self._cgfilespos = {}
339 339
340 340 self.firstnewrev = self.changelog.repotiprev + 1
341 341 phases.retractboundary(
342 342 self,
343 343 None,
344 344 phases.draft,
345 345 [ctx.node() for ctx in self[self.firstnewrev :]],
346 346 )
347 347
348 348 def _handlebundle2part(self, bundle, part):
349 349 if part.type != b'changegroup':
350 350 return
351 351
352 352 cgstream = part
353 353 version = part.params.get(b'version', b'01')
354 354 legalcgvers = changegroup.supportedincomingversions(self)
355 355 if version not in legalcgvers:
356 356 msg = _(b'Unsupported changegroup version: %s')
357 357 raise error.Abort(msg % version)
358 358 if bundle.compressed():
359 359 cgstream = self._writetempbundle(part.read, b'.cg%sun' % version)
360 360
361 361 self._cgunpacker = changegroup.getunbundler(version, cgstream, b'UN')
362 362
363 363 def _writetempbundle(self, readfn, suffix, header=b''):
364 364 """Write a temporary file to disk"""
365 365 fdtemp, temp = self.vfs.mkstemp(prefix=b"hg-bundle-", suffix=suffix)
366 366 self.tempfile = temp
367 367
368 368 with os.fdopen(fdtemp, 'wb') as fptemp:
369 369 fptemp.write(header)
370 370 while True:
371 371 chunk = readfn(2 ** 18)
372 372 if not chunk:
373 373 break
374 374 fptemp.write(chunk)
375 375
376 376 return self.vfs.open(self.tempfile, mode=b"rb")
377 377
378 378 @localrepo.unfilteredpropertycache
379 379 def _phasecache(self):
380 380 return bundlephasecache(self, self._phasedefaults)
381 381
382 382 @localrepo.unfilteredpropertycache
383 383 def changelog(self):
384 384 # consume the header if it exists
385 385 self._cgunpacker.changelogheader()
386 386 c = bundlechangelog(self.svfs, self._cgunpacker)
387 387 self.manstart = self._cgunpacker.tell()
388 388 return c
389 389
390 390 def _refreshchangelog(self):
391 391 # changelog for bundle repo are not filecache, this method is not
392 392 # applicable.
393 393 pass
394 394
395 395 @localrepo.unfilteredpropertycache
396 396 def manifestlog(self):
397 397 self._cgunpacker.seek(self.manstart)
398 398 # consume the header if it exists
399 399 self._cgunpacker.manifestheader()
400 400 linkmapper = self.unfiltered().changelog.rev
401 401 rootstore = bundlemanifest(
402 402 self.nodeconstants, self.svfs, self._cgunpacker, linkmapper
403 403 )
404 404 self.filestart = self._cgunpacker.tell()
405 405
406 406 return manifest.manifestlog(
407 407 self.svfs, self, rootstore, self.narrowmatch()
408 408 )
409 409
410 410 def _consumemanifest(self):
411 411 """Consumes the manifest portion of the bundle, setting filestart so the
412 412 file portion can be read."""
413 413 self._cgunpacker.seek(self.manstart)
414 414 self._cgunpacker.manifestheader()
415 415 for delta in self._cgunpacker.deltaiter():
416 416 pass
417 417 self.filestart = self._cgunpacker.tell()
418 418
419 419 @localrepo.unfilteredpropertycache
420 420 def manstart(self):
421 421 self.changelog
422 422 return self.manstart
423 423
424 424 @localrepo.unfilteredpropertycache
425 425 def filestart(self):
426 426 self.manifestlog
427 427
428 428 # If filestart was not set by self.manifestlog, that means the
429 429 # manifestlog implementation did not consume the manifests from the
430 430 # changegroup (ex: it might be consuming trees from a separate bundle2
431 431 # part instead). So we need to manually consume it.
432 432 if 'filestart' not in self.__dict__:
433 433 self._consumemanifest()
434 434
435 435 return self.filestart
436 436
437 437 def url(self):
438 438 return self._url
439 439
440 440 def file(self, f):
441 441 if not self._cgfilespos:
442 442 self._cgunpacker.seek(self.filestart)
443 443 self._cgfilespos = _getfilestarts(self._cgunpacker)
444 444
445 445 if f in self._cgfilespos:
446 446 self._cgunpacker.seek(self._cgfilespos[f])
447 447 linkmapper = self.unfiltered().changelog.rev
448 448 return bundlefilelog(self.svfs, f, self._cgunpacker, linkmapper)
449 449 else:
450 450 return super(bundlerepository, self).file(f)
451 451
452 452 def close(self):
453 453 """Close assigned bundle file immediately."""
454 454 self._bundlefile.close()
455 455 if self.tempfile is not None:
456 456 self.vfs.unlink(self.tempfile)
457 457 if self._tempparent:
458 458 shutil.rmtree(self._tempparent, True)
459 459
460 460 def cancopy(self):
461 461 return False
462 462
463 463 def peer(self):
464 464 return bundlepeer(self)
465 465
466 466 def getcwd(self):
467 467 return encoding.getcwd() # always outside the repo
468 468
469 469 # Check if parents exist in localrepo before setting
470 470 def setparents(self, p1, p2=None):
471 471 if p2 is None:
472 472 p2 = self.nullid
473 473 p1rev = self.changelog.rev(p1)
474 474 p2rev = self.changelog.rev(p2)
475 475 msg = _(b"setting parent to node %s that only exists in the bundle\n")
476 476 if self.changelog.repotiprev < p1rev:
477 477 self.ui.warn(msg % hex(p1))
478 478 if self.changelog.repotiprev < p2rev:
479 479 self.ui.warn(msg % hex(p2))
480 480 return super(bundlerepository, self).setparents(p1, p2)
481 481
482 482
483 483 def instance(ui, path, create, intents=None, createopts=None):
484 484 if create:
485 485 raise error.Abort(_(b'cannot create new bundle repository'))
486 486 # internal config: bundle.mainreporoot
487 487 parentpath = ui.config(b"bundle", b"mainreporoot")
488 488 if not parentpath:
489 489 # try to find the correct path to the working directory repo
490 490 parentpath = cmdutil.findrepo(encoding.getcwd())
491 491 if parentpath is None:
492 492 parentpath = b''
493 493 if parentpath:
494 494 # Try to make the full path relative so we get a nice, short URL.
495 495 # In particular, we don't want temp dir names in test outputs.
496 496 cwd = encoding.getcwd()
497 497 if parentpath == cwd:
498 498 parentpath = b''
499 499 else:
500 500 cwd = pathutil.normasprefix(cwd)
501 501 if parentpath.startswith(cwd):
502 502 parentpath = parentpath[len(cwd) :]
503 503 u = urlutil.url(path)
504 504 path = u.localpath()
505 505 if u.scheme == b'bundle':
506 506 s = path.split(b"+", 1)
507 507 if len(s) == 1:
508 508 repopath, bundlename = parentpath, s[0]
509 509 else:
510 510 repopath, bundlename = s
511 511 else:
512 512 repopath, bundlename = parentpath, path
513 513
514 514 return makebundlerepository(ui, repopath, bundlename)
515 515
516 516
517 517 def makebundlerepository(ui, repopath, bundlepath):
518 518 """Make a bundle repository object based on repo and bundle paths."""
519 519 if repopath:
520 520 url = b'bundle:%s+%s' % (util.expandpath(repopath), bundlepath)
521 521 else:
522 522 url = b'bundle:%s' % bundlepath
523 523
524 524 # Because we can't make any guarantees about the type of the base
525 525 # repository, we can't have a static class representing the bundle
526 526 # repository. We also can't make any guarantees about how to even
527 527 # call the base repository's constructor!
528 528 #
529 529 # So, our strategy is to go through ``localrepo.instance()`` to construct
530 530 # a repo instance. Then, we dynamically create a new type derived from
531 531 # both it and our ``bundlerepository`` class which overrides some
532 532 # functionality. We then change the type of the constructed repository
533 533 # to this new type and initialize the bundle-specific bits of it.
534 534
535 535 try:
536 536 repo = localrepo.instance(ui, repopath, create=False)
537 537 tempparent = None
538 538 except error.RepoError:
539 539 tempparent = pycompat.mkdtemp()
540 540 try:
541 541 repo = localrepo.instance(ui, tempparent, create=True)
542 542 except Exception:
543 543 shutil.rmtree(tempparent)
544 544 raise
545 545
546 546 class derivedbundlerepository(bundlerepository, repo.__class__):
547 547 pass
548 548
549 549 repo.__class__ = derivedbundlerepository
550 550 bundlerepository.__init__(repo, bundlepath, url, tempparent)
551 551
552 552 return repo
553 553
554 554
555 555 class bundletransactionmanager(object):
556 556 def transaction(self):
557 557 return None
558 558
559 559 def close(self):
560 560 raise NotImplementedError
561 561
562 562 def release(self):
563 563 raise NotImplementedError
564 564
565 565
566 566 def getremotechanges(
567 567 ui, repo, peer, onlyheads=None, bundlename=None, force=False
568 568 ):
569 569 """obtains a bundle of changes incoming from peer
570 570
571 571 "onlyheads" restricts the returned changes to those reachable from the
572 572 specified heads.
573 573 "bundlename", if given, stores the bundle to this file path permanently;
574 574 otherwise it's stored to a temp file and gets deleted again when you call
575 575 the returned "cleanupfn".
576 576 "force" indicates whether to proceed on unrelated repos.
577 577
578 578 Returns a tuple (local, csets, cleanupfn):
579 579
580 580 "local" is a local repo from which to obtain the actual incoming
581 581 changesets; it is a bundlerepo for the obtained bundle when the
582 582 original "peer" is remote.
583 583 "csets" lists the incoming changeset node ids.
584 584 "cleanupfn" must be called without arguments when you're done processing
585 585 the changes; it closes both the original "peer" and the one returned
586 586 here.
587 587 """
588 588 tmp = discovery.findcommonincoming(repo, peer, heads=onlyheads, force=force)
589 589 common, incoming, rheads = tmp
590 590 if not incoming:
591 591 try:
592 592 if bundlename:
593 593 os.unlink(bundlename)
594 594 except OSError:
595 595 pass
596 596 return repo, [], peer.close
597 597
598 598 commonset = set(common)
599 599 rheads = [x for x in rheads if x not in commonset]
600 600
601 601 bundle = None
602 602 bundlerepo = None
603 603 localrepo = peer.local()
604 604 if bundlename or not localrepo:
605 605 # create a bundle (uncompressed if peer repo is not local)
606 606
607 607 # developer config: devel.legacy.exchange
608 608 legexc = ui.configlist(b'devel', b'legacy.exchange')
609 609 forcebundle1 = b'bundle2' not in legexc and b'bundle1' in legexc
610 610 canbundle2 = (
611 611 not forcebundle1
612 612 and peer.capable(b'getbundle')
613 613 and peer.capable(b'bundle2')
614 614 )
615 615 if canbundle2:
616 616 with peer.commandexecutor() as e:
617 617 b2 = e.callcommand(
618 618 b'getbundle',
619 619 {
620 620 b'source': b'incoming',
621 621 b'common': common,
622 622 b'heads': rheads,
623 623 b'bundlecaps': exchange.caps20to10(
624 624 repo, role=b'client'
625 625 ),
626 626 b'cg': True,
627 627 },
628 628 ).result()
629 629
630 630 fname = bundle = changegroup.writechunks(
631 631 ui, b2._forwardchunks(), bundlename
632 632 )
633 633 else:
634 634 if peer.capable(b'getbundle'):
635 635 with peer.commandexecutor() as e:
636 636 cg = e.callcommand(
637 637 b'getbundle',
638 638 {
639 639 b'source': b'incoming',
640 640 b'common': common,
641 641 b'heads': rheads,
642 642 },
643 643 ).result()
644 644 elif onlyheads is None and not peer.capable(b'changegroupsubset'):
645 645 # compat with older servers when pulling all remote heads
646 646
647 647 with peer.commandexecutor() as e:
648 648 cg = e.callcommand(
649 649 b'changegroup',
650 650 {
651 651 b'nodes': incoming,
652 652 b'source': b'incoming',
653 653 },
654 654 ).result()
655 655
656 656 rheads = None
657 657 else:
658 658 with peer.commandexecutor() as e:
659 659 cg = e.callcommand(
660 660 b'changegroupsubset',
661 661 {
662 662 b'bases': incoming,
663 663 b'heads': rheads,
664 664 b'source': b'incoming',
665 665 },
666 666 ).result()
667 667
668 668 if localrepo:
669 669 bundletype = b"HG10BZ"
670 670 else:
671 671 bundletype = b"HG10UN"
672 672 fname = bundle = bundle2.writebundle(ui, cg, bundlename, bundletype)
673 673 # keep written bundle?
674 674 if bundlename:
675 675 bundle = None
676 676 if not localrepo:
677 677 # use the created uncompressed bundlerepo
678 678 localrepo = bundlerepo = makebundlerepository(
679 679 repo.baseui, repo.root, fname
680 680 )
681 681
682 682 # this repo contains local and peer now, so filter out local again
683 683 common = repo.heads()
684 684 if localrepo:
685 685 # Part of common may be remotely filtered
686 686 # So use an unfiltered version
687 687 # The discovery process probably need cleanup to avoid that
688 688 localrepo = localrepo.unfiltered()
689 689
690 690 csets = localrepo.changelog.findmissing(common, rheads)
691 691
692 692 if bundlerepo:
693 693 reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev :]]
694 694
695 695 with peer.commandexecutor() as e:
696 696 remotephases = e.callcommand(
697 697 b'listkeys',
698 698 {
699 699 b'namespace': b'phases',
700 700 },
701 701 ).result()
702 702
703 703 pullop = exchange.pulloperation(bundlerepo, peer, heads=reponodes)
704 704 pullop.trmanager = bundletransactionmanager()
705 705 exchange._pullapplyphases(pullop, remotephases)
706 706
707 707 def cleanup():
708 708 if bundlerepo:
709 709 bundlerepo.close()
710 710 if bundle:
711 711 os.unlink(bundle)
712 712 peer.close()
713 713
714 714 return (localrepo, csets, cleanup)
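
The bundle repository machinery above is normally reached through makebundlerepository(), defined in this hunk. A minimal usage sketch, assuming hypothetical paths for the base repository and the bundle file:

    from mercurial import ui as uimod
    from mercurial import bundlerepo

    ui = uimod.ui.load()
    # Overlay '/path/to/changes.hg' on '/path/to/repo' as a read-only repository.
    repo = bundlerepo.makebundlerepository(
        ui, b'/path/to/repo', b'/path/to/changes.hg'
    )
    try:
        # Revisions at firstnewrev and above exist only in the bundle.
        for rev in repo.revs(b'%d:', repo.firstnewrev):
            ctx = repo[rev]
            ui.write(b'%s %s\n' % (ctx.hex()[:12], ctx.description().split(b'\n')[0]))
    finally:
        repo.close()
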
@@ -1,298 +1,286 @@ filelog.py
1 1 # filelog.py - file history class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from .i18n import _
11 11 from .node import nullrev
12 12 from . import (
13 13 error,
14 14 revlog,
15 15 )
16 16 from .interfaces import (
17 17 repository,
18 18 util as interfaceutil,
19 19 )
20 20 from .utils import storageutil
21 21 from .revlogutils import (
22 22 constants as revlog_constants,
23 23 )
24 24
25 25
26 26 @interfaceutil.implementer(repository.ifilestorage)
27 27 class filelog(object):
28 28 def __init__(self, opener, path):
29 29 self._revlog = revlog.revlog(
30 30 opener,
31 31 # XXX should use the unencoded path
32 32 target=(revlog_constants.KIND_FILELOG, path),
33 33 indexfile=b'/'.join((b'data', path + b'.i')),
34 34 censorable=True,
35 35 )
36 36 # Full name of the user visible file, relative to the repository root.
37 37 # Used by LFS.
38 38 self._revlog.filename = path
39 39 self.nullid = self._revlog.nullid
40 40
41 41 def __len__(self):
42 42 return len(self._revlog)
43 43
44 44 def __iter__(self):
45 45 return self._revlog.__iter__()
46 46
47 47 def hasnode(self, node):
48 48 if node in (self.nullid, nullrev):
49 49 return False
50 50
51 51 try:
52 52 self._revlog.rev(node)
53 53 return True
54 54 except (TypeError, ValueError, IndexError, error.LookupError):
55 55 return False
56 56
57 57 def revs(self, start=0, stop=None):
58 58 return self._revlog.revs(start=start, stop=stop)
59 59
60 60 def parents(self, node):
61 61 return self._revlog.parents(node)
62 62
63 63 def parentrevs(self, rev):
64 64 return self._revlog.parentrevs(rev)
65 65
66 66 def rev(self, node):
67 67 return self._revlog.rev(node)
68 68
69 69 def node(self, rev):
70 70 return self._revlog.node(rev)
71 71
72 72 def lookup(self, node):
73 73 return storageutil.fileidlookup(
74 74 self._revlog, node, self._revlog.indexfile
75 75 )
76 76
77 77 def linkrev(self, rev):
78 78 return self._revlog.linkrev(rev)
79 79
80 80 def commonancestorsheads(self, node1, node2):
81 81 return self._revlog.commonancestorsheads(node1, node2)
82 82
83 83 # Used by dagop.blockdescendants().
84 84 def descendants(self, revs):
85 85 return self._revlog.descendants(revs)
86 86
87 87 def heads(self, start=None, stop=None):
88 88 return self._revlog.heads(start, stop)
89 89
90 90 # Used by hgweb, children extension.
91 91 def children(self, node):
92 92 return self._revlog.children(node)
93 93
94 94 def iscensored(self, rev):
95 95 return self._revlog.iscensored(rev)
96 96
97 97 def revision(self, node, _df=None, raw=False):
98 98 return self._revlog.revision(node, _df=_df, raw=raw)
99 99
100 100 def rawdata(self, node, _df=None):
101 101 return self._revlog.rawdata(node, _df=_df)
102 102
103 103 def emitrevisions(
104 104 self,
105 105 nodes,
106 106 nodesorder=None,
107 107 revisiondata=False,
108 108 assumehaveparentrevisions=False,
109 109 deltamode=repository.CG_DELTAMODE_STD,
110 110 sidedata_helpers=None,
111 111 ):
112 112 return self._revlog.emitrevisions(
113 113 nodes,
114 114 nodesorder=nodesorder,
115 115 revisiondata=revisiondata,
116 116 assumehaveparentrevisions=assumehaveparentrevisions,
117 117 deltamode=deltamode,
118 118 sidedata_helpers=sidedata_helpers,
119 119 )
120 120
121 121 def addrevision(
122 122 self,
123 123 revisiondata,
124 124 transaction,
125 125 linkrev,
126 126 p1,
127 127 p2,
128 128 node=None,
129 129 flags=revlog.REVIDX_DEFAULT_FLAGS,
130 130 cachedelta=None,
131 131 ):
132 132 return self._revlog.addrevision(
133 133 revisiondata,
134 134 transaction,
135 135 linkrev,
136 136 p1,
137 137 p2,
138 138 node=node,
139 139 flags=flags,
140 140 cachedelta=cachedelta,
141 141 )
142 142
143 143 def addgroup(
144 144 self,
145 145 deltas,
146 146 linkmapper,
147 147 transaction,
148 148 addrevisioncb=None,
149 149 duplicaterevisioncb=None,
150 150 maybemissingparents=False,
151 151 ):
152 152 if maybemissingparents:
153 153 raise error.Abort(
154 154 _(
155 155 b'revlog storage does not support missing '
156 156 b'parents write mode'
157 157 )
158 158 )
159 159
160 160 return self._revlog.addgroup(
161 161 deltas,
162 162 linkmapper,
163 163 transaction,
164 164 addrevisioncb=addrevisioncb,
165 165 duplicaterevisioncb=duplicaterevisioncb,
166 166 )
167 167
168 168 def getstrippoint(self, minlink):
169 169 return self._revlog.getstrippoint(minlink)
170 170
171 171 def strip(self, minlink, transaction):
172 172 return self._revlog.strip(minlink, transaction)
173 173
174 174 def censorrevision(self, tr, node, tombstone=b''):
175 175 return self._revlog.censorrevision(tr, node, tombstone=tombstone)
176 176
177 177 def files(self):
178 178 return self._revlog.files()
179 179
180 180 def read(self, node):
181 181 return storageutil.filtermetadata(self.revision(node))
182 182
183 183 def add(self, text, meta, transaction, link, p1=None, p2=None):
184 184 if meta or text.startswith(b'\1\n'):
185 185 text = storageutil.packmeta(meta, text)
186 186 rev = self.addrevision(text, transaction, link, p1, p2)
187 187 return self.node(rev)
188 188
189 189 def renamed(self, node):
190 190 return storageutil.filerevisioncopied(self, node)
191 191
192 192 def size(self, rev):
193 193 """return the size of a given revision"""
194 194
195 195 # for revisions with renames, we have to go the slow way
196 196 node = self.node(rev)
197 197 if self.renamed(node):
198 198 return len(self.read(node))
199 199 if self.iscensored(rev):
200 200 return 0
201 201
202 202 # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
203 203 return self._revlog.size(rev)
204 204
205 205 def cmp(self, node, text):
206 206 """compare text with a given file revision
207 207
208 208 returns True if text is different than what is stored.
209 209 """
210 210 return not storageutil.filedataequivalent(self, node, text)
211 211
212 212 def verifyintegrity(self, state):
213 213 return self._revlog.verifyintegrity(state)
214 214
215 215 def storageinfo(
216 216 self,
217 217 exclusivefiles=False,
218 218 sharedfiles=False,
219 219 revisionscount=False,
220 220 trackedsize=False,
221 221 storedsize=False,
222 222 ):
223 223 return self._revlog.storageinfo(
224 224 exclusivefiles=exclusivefiles,
225 225 sharedfiles=sharedfiles,
226 226 revisionscount=revisionscount,
227 227 trackedsize=trackedsize,
228 228 storedsize=storedsize,
229 229 )
230 230
231 # TODO these aren't part of the interface and aren't internal methods.
232 # Callers should be fixed to not use them.
233
234 # Used by bundlefilelog, unionfilelog.
235 @property
236 def indexfile(self):
237 return self._revlog.indexfile
238
239 @indexfile.setter
240 def indexfile(self, value):
241 self._revlog.indexfile = value
242
243 231 # Used by repo upgrade.
244 232 def clone(self, tr, destrevlog, **kwargs):
245 233 if not isinstance(destrevlog, filelog):
246 234 raise error.ProgrammingError(b'expected filelog to clone()')
247 235
248 236 return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
249 237
250 238
251 239 class narrowfilelog(filelog):
252 240 """Filelog variation to be used with narrow stores."""
253 241
254 242 def __init__(self, opener, path, narrowmatch):
255 243 super(narrowfilelog, self).__init__(opener, path)
256 244 self._narrowmatch = narrowmatch
257 245
258 246 def renamed(self, node):
259 247 res = super(narrowfilelog, self).renamed(node)
260 248
261 249 # Renames that come from outside the narrowspec are problematic
262 250 # because we may lack the base text for the rename. This can result
263 251 # in code attempting to walk the ancestry or compute a diff
264 252 # encountering a missing revision. We address this by silently
265 253 # removing rename metadata if the source file is outside the
266 254 # narrow spec.
267 255 #
268 256 # A better solution would be to see if the base revision is available,
269 257 # rather than assuming it isn't.
270 258 #
271 259 # An even better solution would be to teach all consumers of rename
272 260 # metadata that the base revision may not be available.
273 261 #
274 262 # TODO consider better ways of doing this.
275 263 if res and not self._narrowmatch(res[0]):
276 264 return None
277 265
278 266 return res
279 267
280 268 def size(self, rev):
281 269 # Because we have a custom renamed() that may lie, we need to call
282 270 # the base renamed() to report accurate results.
283 271 node = self.node(rev)
284 272 if super(narrowfilelog, self).renamed(node):
285 273 return len(self.read(node))
286 274 else:
287 275 return super(narrowfilelog, self).size(rev)
288 276
289 277 def cmp(self, node, text):
290 278 # We don't call `super` because narrow parents can be buggy in case of a
291 279 # ambiguous dirstate. Always take the slow path until there is a better
292 280 # fix, see issue6150.
293 281
294 282 # Censored files compare against the empty file.
295 283 if self.iscensored(self.rev(node)):
296 284 return text != b''
297 285
298 286 return self.read(node) != text
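
For context on the wrapper trimmed above: filelog stores each tracked file's history in a revlog whose index path is built in the constructor under the store's data/ directory. A minimal sketch, assuming an existing repository at a hypothetical path that tracks a file named a.txt:

    from mercurial import hg, ui as uimod

    ui = uimod.ui.load()
    repo = hg.repository(ui, b'/path/to/repo')   # hypothetical path
    flog = repo.file(b'a.txt')                   # a filelog (or a subclass)
    tip = flog.node(len(flog) - 1)               # node of the file's tip revision
    data = flog.read(tip)                        # content, copy metadata stripped
    # After this changeset the index path is reached via the inner revlog:
    index_path = flog._revlog.indexfile          # e.g. b'data/a.txt.i'
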
@@ -1,308 +1,308 @@ unionrepo.py
1 1 # unionrepo.py - repository class for viewing union of repository changesets
2 2 #
3 3 # Derived from bundlerepo.py
4 4 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
5 5 # Copyright 2013 Unity Technologies, Mads Kiilerich <madski@unity3d.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Repository class for "in-memory pull" of one local repository to another,
11 11 allowing operations like diff and log with revsets.
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 from .i18n import _
17 17 from .pycompat import getattr
18 18
19 19 from . import (
20 20 changelog,
21 21 cmdutil,
22 22 encoding,
23 23 error,
24 24 filelog,
25 25 localrepo,
26 26 manifest,
27 27 mdiff,
28 28 pathutil,
29 29 revlog,
30 30 util,
31 31 vfs as vfsmod,
32 32 )
33 33
34 34
35 35 class unionrevlog(revlog.revlog):
36 36 def __init__(self, opener, indexfile, revlog2, linkmapper):
37 37 # How it works:
38 38 # To retrieve a revision, we just need to know the node id so we can
39 39 # look it up in revlog2.
40 40 #
41 41 # To differentiate a rev in the second revlog from a rev in the revlog,
42 42 # we check revision against repotiprev.
43 43 opener = vfsmod.readonlyvfs(opener)
44 44 target = getattr(revlog2, 'target', None)
45 45 if target is None:
46 46 # a revlog wrapper, eg: the manifestlog that is not an actual revlog
47 47 target = revlog2._revlog.target
48 48 revlog.revlog.__init__(self, opener, target=target, indexfile=indexfile)
49 49 self.revlog2 = revlog2
50 50
51 51 n = len(self)
52 52 self.repotiprev = n - 1
53 53 self.bundlerevs = set() # used by 'bundle()' revset expression
54 54 for rev2 in self.revlog2:
55 55 rev = self.revlog2.index[rev2]
56 56 # rev numbers - in revlog2, very different from self.rev
57 57 (
58 58 _start,
59 59 _csize,
60 60 rsize,
61 61 base,
62 62 linkrev,
63 63 p1rev,
64 64 p2rev,
65 65 node,
66 66 _sdo,
67 67 _sds,
68 68 ) = rev
69 69 flags = _start & 0xFFFF
70 70
71 71 if linkmapper is None: # link is to same revlog
72 72 assert linkrev == rev2 # we never link back
73 73 link = n
74 74 else: # rev must be mapped from repo2 cl to unified cl by linkmapper
75 75 link = linkmapper(linkrev)
76 76
77 77 if linkmapper is not None: # link is to same revlog
78 78 base = linkmapper(base)
79 79
80 80 this_rev = self.index.get_rev(node)
81 81 if this_rev is not None:
82 82 # this happens for the common revlog revisions
83 83 self.bundlerevs.add(this_rev)
84 84 continue
85 85
86 86 p1node = self.revlog2.node(p1rev)
87 87 p2node = self.revlog2.node(p2rev)
88 88
89 89 # TODO: it's probably wrong to set compressed length to -1, but
90 90 # I have no idea if csize is valid in the base revlog context.
91 91 e = (
92 92 flags,
93 93 -1,
94 94 rsize,
95 95 base,
96 96 link,
97 97 self.rev(p1node),
98 98 self.rev(p2node),
99 99 node,
100 100 0, # sidedata offset
101 101 0, # sidedata size
102 102 )
103 103 self.index.append(e)
104 104 self.bundlerevs.add(n)
105 105 n += 1
106 106
107 107 def _chunk(self, rev):
108 108 if rev <= self.repotiprev:
109 109 return revlog.revlog._chunk(self, rev)
110 110 return self.revlog2._chunk(self.node(rev))
111 111
112 112 def revdiff(self, rev1, rev2):
113 113 """return or calculate a delta between two revisions"""
114 114 if rev1 > self.repotiprev and rev2 > self.repotiprev:
115 115 return self.revlog2.revdiff(
116 116 self.revlog2.rev(self.node(rev1)),
117 117 self.revlog2.rev(self.node(rev2)),
118 118 )
119 119 elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
120 120 return super(unionrevlog, self).revdiff(rev1, rev2)
121 121
122 122 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
123 123
124 124 def _revisiondata(self, nodeorrev, _df=None, raw=False):
125 125 if isinstance(nodeorrev, int):
126 126 rev = nodeorrev
127 127 node = self.node(rev)
128 128 else:
129 129 node = nodeorrev
130 130 rev = self.rev(node)
131 131
132 132 if rev > self.repotiprev:
133 133 # work around manifestrevlog NOT being a revlog
134 134 revlog2 = getattr(self.revlog2, '_revlog', self.revlog2)
135 135 func = revlog2._revisiondata
136 136 else:
137 137 func = super(unionrevlog, self)._revisiondata
138 138 return func(node, _df=_df, raw=raw)
139 139
140 140 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
141 141 raise NotImplementedError
142 142
143 143 def addgroup(
144 144 self,
145 145 deltas,
146 146 linkmapper,
147 147 transaction,
148 148 alwayscache=False,
149 149 addrevisioncb=None,
150 150 duplicaterevisioncb=None,
151 151 maybemissingparents=False,
152 152 ):
153 153 raise NotImplementedError
154 154
155 155 def strip(self, minlink, transaction):
156 156 raise NotImplementedError
157 157
158 158 def checksize(self):
159 159 raise NotImplementedError
160 160
161 161
162 162 class unionchangelog(unionrevlog, changelog.changelog):
163 163 def __init__(self, opener, opener2):
164 164 changelog.changelog.__init__(self, opener)
165 165 linkmapper = None
166 166 changelog2 = changelog.changelog(opener2)
167 167 unionrevlog.__init__(
168 168 self, opener, self.indexfile, changelog2, linkmapper
169 169 )
170 170
171 171
172 172 class unionmanifest(unionrevlog, manifest.manifestrevlog):
173 173 def __init__(self, nodeconstants, opener, opener2, linkmapper):
174 174 manifest.manifestrevlog.__init__(self, nodeconstants, opener)
175 175 manifest2 = manifest.manifestrevlog(nodeconstants, opener2)
176 176 unionrevlog.__init__(
177 177 self, opener, self._revlog.indexfile, manifest2, linkmapper
178 178 )
179 179
180 180
181 181 class unionfilelog(filelog.filelog):
182 182 def __init__(self, opener, path, opener2, linkmapper, repo):
183 183 filelog.filelog.__init__(self, opener, path)
184 184 filelog2 = filelog.filelog(opener2, path)
185 185 self._revlog = unionrevlog(
186 opener, self.indexfile, filelog2._revlog, linkmapper
186 opener, self._revlog.indexfile, filelog2._revlog, linkmapper
187 187 )
188 188 self._repo = repo
189 189 self.repotiprev = self._revlog.repotiprev
190 190 self.revlog2 = self._revlog.revlog2
191 191
192 192 def iscensored(self, rev):
193 193 """Check if a revision is censored."""
194 194 if rev <= self.repotiprev:
195 195 return filelog.filelog.iscensored(self, rev)
196 196 node = self.node(rev)
197 197 return self.revlog2.iscensored(self.revlog2.rev(node))
198 198
199 199
200 200 class unionpeer(localrepo.localpeer):
201 201 def canpush(self):
202 202 return False
203 203
204 204
205 205 class unionrepository(object):
206 206 """Represents the union of data in 2 repositories.
207 207
208 208 Instances are not usable if constructed directly. Use ``instance()``
209 209 or ``makeunionrepository()`` to create a usable instance.
210 210 """
211 211
212 212 def __init__(self, repo2, url):
213 213 self.repo2 = repo2
214 214 self._url = url
215 215
216 216 self.ui.setconfig(b'phases', b'publish', False, b'unionrepo')
217 217
218 218 @localrepo.unfilteredpropertycache
219 219 def changelog(self):
220 220 return unionchangelog(self.svfs, self.repo2.svfs)
221 221
222 222 @localrepo.unfilteredpropertycache
223 223 def manifestlog(self):
224 224 rootstore = unionmanifest(
225 225 self.nodeconstants,
226 226 self.svfs,
227 227 self.repo2.svfs,
228 228 self.unfiltered()._clrev,
229 229 )
230 230 return manifest.manifestlog(
231 231 self.svfs, self, rootstore, self.narrowmatch()
232 232 )
233 233
234 234 def _clrev(self, rev2):
235 235 """map from repo2 changelog rev to temporary rev in self.changelog"""
236 236 node = self.repo2.changelog.node(rev2)
237 237 return self.changelog.rev(node)
238 238
239 239 def url(self):
240 240 return self._url
241 241
242 242 def file(self, f):
243 243 return unionfilelog(
244 244 self.svfs, f, self.repo2.svfs, self.unfiltered()._clrev, self
245 245 )
246 246
247 247 def close(self):
248 248 self.repo2.close()
249 249
250 250 def cancopy(self):
251 251 return False
252 252
253 253 def peer(self):
254 254 return unionpeer(self)
255 255
256 256 def getcwd(self):
257 257 return encoding.getcwd() # always outside the repo
258 258
259 259
260 260 def instance(ui, path, create, intents=None, createopts=None):
261 261 if create:
262 262 raise error.Abort(_(b'cannot create new union repository'))
263 263 parentpath = ui.config(b"bundle", b"mainreporoot")
264 264 if not parentpath:
265 265 # try to find the correct path to the working directory repo
266 266 parentpath = cmdutil.findrepo(encoding.getcwd())
267 267 if parentpath is None:
268 268 parentpath = b''
269 269 if parentpath:
270 270 # Try to make the full path relative so we get a nice, short URL.
271 271 # In particular, we don't want temp dir names in test outputs.
272 272 cwd = encoding.getcwd()
273 273 if parentpath == cwd:
274 274 parentpath = b''
275 275 else:
276 276 cwd = pathutil.normasprefix(cwd)
277 277 if parentpath.startswith(cwd):
278 278 parentpath = parentpath[len(cwd) :]
279 279 if path.startswith(b'union:'):
280 280 s = path.split(b":", 1)[1].split(b"+", 1)
281 281 if len(s) == 1:
282 282 repopath, repopath2 = parentpath, s[0]
283 283 else:
284 284 repopath, repopath2 = s
285 285 else:
286 286 repopath, repopath2 = parentpath, path
287 287
288 288 return makeunionrepository(ui, repopath, repopath2)
289 289
290 290
291 291 def makeunionrepository(ui, repopath1, repopath2):
292 292 """Make a union repository object from 2 local repo paths."""
293 293 repo1 = localrepo.instance(ui, repopath1, create=False)
294 294 repo2 = localrepo.instance(ui, repopath2, create=False)
295 295
296 296 url = b'union:%s+%s' % (
297 297 util.expandpath(repopath1),
298 298 util.expandpath(repopath2),
299 299 )
300 300
301 301 class derivedunionrepository(unionrepository, repo1.__class__):
302 302 pass
303 303
304 304 repo = repo1
305 305 repo.__class__ = derivedunionrepository
306 306 unionrepository.__init__(repo1, repo2, url)
307 307
308 308 return repo
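
The union repository above is normally constructed through makeunionrepository(), or via a union:repo1+repo2 path handled by instance(). A minimal sketch with hypothetical paths, running a revset across the combined changelogs:

    from mercurial import ui as uimod
    from mercurial import unionrepo

    ui = uimod.ui.load()
    repo = unionrepo.makeunionrepository(ui, b'/path/to/repo1', b'/path/to/repo2')
    try:
        # Heads of the combined (read-only) history of both repositories.
        for rev in repo.revs(b'head()'):
            ui.write(b'%d:%s\n' % (rev, repo[rev].hex()[:12]))
    finally:
        repo.close()
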