##// END OF EJS Templates
filelog: drop `indexfile` from `filelog`...
marmoute -
r47918:8a1a51d3 default
parent child Browse files
Show More
@@ -1,714 +1,714 b''
# bundlerepo.py - repository class for viewing uncompressed bundles
#
# Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Repository class for viewing uncompressed bundles.

This provides a read-only repository interface to bundles as if they
were part of the actual repository.
"""
13
13
from __future__ import absolute_import

import os
import shutil

from .i18n import _
from .node import (
    hex,
    nullrev,
)

from . import (
    bundle2,
    changegroup,
    changelog,
    cmdutil,
    discovery,
    encoding,
    error,
    exchange,
    filelog,
    localrepo,
    manifest,
    mdiff,
    pathutil,
    phases,
    pycompat,
    revlog,
    util,
    vfs as vfsmod,
)
from .utils import (
    urlutil,
)

from .revlogutils import (
    constants as revlog_constants,
)
52
52
53
53
class bundlerevlog(revlog.revlog):
    """A revlog backed by a local revlog plus the contents of a changegroup.

    Revisions already present in the on-disk revlog are served normally;
    revisions coming from the bundle are reconstructed on the fly by
    reading deltas straight out of the changegroup stream.
    """

    def __init__(self, opener, target, indexfile, cgunpacker, linkmapper):
        # How it works:
        # To retrieve a revision, we need to know the offset of the revision in
        # the bundle (an unbundle object). We store this offset in the index
        # (start). The base of the delta is stored in the base field.
        #
        # To differentiate a rev in the bundle from a rev in the revlog, we
        # check revision against repotiprev.
        opener = vfsmod.readonlyvfs(opener)
        revlog.revlog.__init__(self, opener, target=target, indexfile=indexfile)
        self.bundle = cgunpacker
        n = len(self)
        # highest rev that lives in the underlying repo (not the bundle)
        self.repotiprev = n - 1
        self.bundlerevs = set()  # used by 'bundle()' revset expression
        for deltadata in cgunpacker.deltaiter():
            node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata

            size = len(delta)
            # the unpacker is positioned just past the delta we read
            start = cgunpacker.tell() - size

            if self.index.has_node(node):
                # this can happen if two branches make the same change
                self.bundlerevs.add(self.index.rev(node))
                continue
            if cs == node:
                linkrev = nullrev
            else:
                linkrev = linkmapper(cs)

            for p in (p1, p2):
                if not self.index.has_node(p):
                    raise error.LookupError(
                        p, self.indexfile, _(b"unknown parent")
                    )

            if not self.index.has_node(deltabase):
                # use error.LookupError for consistency with the parent
                # check above (was a bare builtin LookupError)
                raise error.LookupError(
                    deltabase, self.indexfile, _(b'unknown delta base')
                )

            baserev = self.rev(deltabase)
            # start, size, full unc. size, base (unused), link, p1, p2, node, sidedata_offset (unused), sidedata_size (unused)
            e = (
                revlog.offset_type(start, flags),
                size,
                -1,
                baserev,
                linkrev,
                self.rev(p1),
                self.rev(p2),
                node,
                0,
                0,
            )
            self.index.append(e)
            self.bundlerevs.add(n)
            n += 1

    def _chunk(self, rev, df=None):
        # Warning: in case of bundle, the diff is against what we stored as
        # delta base, not against rev - 1
        # XXX: could use some caching
        if rev <= self.repotiprev:
            return revlog.revlog._chunk(self, rev)
        self.bundle.seek(self.start(rev))
        return self.bundle.read(self.length(rev))

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if rev1 > self.repotiprev and rev2 > self.repotiprev:
            # hot path for bundle
            revb = self.index[rev2][3]
            if revb == rev1:
                return self._chunk(rev2)
        elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
            return revlog.revlog.revdiff(self, rev1, rev2)

        # fall back to computing a full text diff
        return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))

    def _rawtext(self, node, rev, _df=None):
        # Returns (rev, rawtext, validated); bundle revisions are rebuilt
        # by walking the delta chain back to a repo-resident base.
        if rev is None:
            rev = self.rev(node)
        validated = False
        rawtext = None
        chain = []
        iterrev = rev
        # reconstruct the revision if it is from a changegroup
        while iterrev > self.repotiprev:
            if self._revisioncache and self._revisioncache[1] == iterrev:
                rawtext = self._revisioncache[2]
                break
            chain.append(iterrev)
            iterrev = self.index[iterrev][3]
        if iterrev == nullrev:
            rawtext = b''
        elif rawtext is None:
            r = super(bundlerevlog, self)._rawtext(
                self.node(iterrev), iterrev, _df=_df
            )
            __, rawtext, validated = r
        if chain:
            validated = False
        while chain:
            delta = self._chunk(chain.pop())
            rawtext = mdiff.patches(rawtext, [delta])
        return rev, rawtext, validated

    # bundle revlogs are read-only: all mutating operations are disabled

    def addrevision(self, *args, **kwargs):
        raise NotImplementedError

    def addgroup(self, *args, **kwargs):
        raise NotImplementedError

    def strip(self, *args, **kwargs):
        raise NotImplementedError

    def checksize(self):
        raise NotImplementedError
173
173
174
174
class bundlechangelog(bundlerevlog, changelog.changelog):
    """Changelog that overlays a changegroup's changesets on the local one."""

    def __init__(self, opener, cgunpacker):
        changelog.changelog.__init__(self, opener)
        # changelog entries link to themselves, so the link mapper is identity
        linkmapper = lambda x: x
        bundlerevlog.__init__(
            self,
            opener,
            (revlog_constants.KIND_CHANGELOG, None),
            self.indexfile,
            cgunpacker,
            linkmapper,
        )
187
187
188
188
class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
    """Manifest revlog that overlays a changegroup's manifests.

    For changegroup version 03, per-directory (tree) manifest positions are
    recorded so ``dirlog`` can serve sub-directory manifests from the bundle.
    """

    def __init__(
        self,
        nodeconstants,
        opener,
        cgunpacker,
        linkmapper,
        dirlogstarts=None,
        dir=b'',
    ):
        manifest.manifestrevlog.__init__(self, nodeconstants, opener, tree=dir)
        bundlerevlog.__init__(
            self,
            opener,
            (revlog_constants.KIND_MANIFESTLOG, dir),
            self._revlog.indexfile,
            cgunpacker,
            linkmapper,
        )
        if dirlogstarts is None:
            dirlogstarts = {}
            if self.bundle.version == b"03":
                # cg3 carries tree manifests; index their stream positions
                dirlogstarts = _getfilestarts(self.bundle)
        self._dirlogstarts = dirlogstarts
        self._linkmapper = linkmapper

    def dirlog(self, d):
        # serve sub-directory manifests from the bundle when present there
        if d in self._dirlogstarts:
            self.bundle.seek(self._dirlogstarts[d])
            return bundlemanifest(
                self.nodeconstants,
                self.opener,
                self.bundle,
                self._linkmapper,
                self._dirlogstarts,
                dir=d,
            )
        return super(bundlemanifest, self).dirlog(d)
227
227
228
228
class bundlefilelog(filelog.filelog):
    """Filelog whose backing revlog overlays a changegroup's file revisions."""

    def __init__(self, opener, path, cgunpacker, linkmapper):
        filelog.filelog.__init__(self, opener, path)
        # replace the plain revlog built by filelog with a bundle-aware one;
        # the index file name now lives on the inner revlog object
        self._revlog = bundlerevlog(
            opener,
            # XXX should use the unencoded path
            target=(revlog_constants.KIND_FILELOG, path),
            indexfile=self._revlog.indexfile,
            cgunpacker=cgunpacker,
            linkmapper=linkmapper,
        )
240
240
241
241
class bundlepeer(localrepo.localpeer):
    """Peer for a bundle repository; pushing into a bundle is impossible."""

    def canpush(self):
        return False
245
245
246
246
class bundlephasecache(phases.phasecache):
    """Phase cache that never writes to disk.

    Phase roots can be updated in memory (needed when drafting the bundle's
    changesets) but persistence is disabled.
    """

    def __init__(self, *args, **kwargs):
        super(bundlephasecache, self).__init__(*args, **kwargs)
        if util.safehasattr(self, 'opener'):
            # make sure nothing accidentally writes through our opener
            self.opener = vfsmod.readonlyvfs(self.opener)

    def write(self):
        raise NotImplementedError

    def _write(self, fp):
        raise NotImplementedError

    def _updateroots(self, phase, newroots, tr):
        # update in memory only; never touches the transaction
        self.phaseroots[phase] = newroots
        self.invalidate()
        self.dirty = True
263
263
264
264
265 def _getfilestarts(cgunpacker):
265 def _getfilestarts(cgunpacker):
266 filespos = {}
266 filespos = {}
267 for chunkdata in iter(cgunpacker.filelogheader, {}):
267 for chunkdata in iter(cgunpacker.filelogheader, {}):
268 fname = chunkdata[b'filename']
268 fname = chunkdata[b'filename']
269 filespos[fname] = cgunpacker.tell()
269 filespos[fname] = cgunpacker.tell()
270 for chunk in iter(lambda: cgunpacker.deltachunk(None), {}):
270 for chunk in iter(lambda: cgunpacker.deltachunk(None), {}):
271 pass
271 pass
272 return filespos
272 return filespos
273
273
274
274
class bundlerepository(object):
    """A repository instance that is a union of a local repo and a bundle.

    Instances represent a read-only repository composed of a local repository
    with the contents of a bundle file applied. The repository instance is
    conceptually similar to the state of a repository after an
    ``hg unbundle`` operation. However, the contents of the bundle are never
    applied to the actual base repository.

    Instances constructed directly are not usable as repository objects.
    Use instance() or makebundlerepository() to create instances.
    """

    def __init__(self, bundlepath, url, tempparent):
        # NOTE: this class is mixed into a localrepo-derived class (see
        # makebundlerepository), which supplies self.ui, self.vfs, self.svfs...
        self._tempparent = tempparent
        self._url = url

        self.ui.setconfig(b'phases', b'publish', False, b'bundlerepo')

        self.tempfile = None
        f = util.posixfile(bundlepath, b"rb")
        bundle = exchange.readbundle(self.ui, f, bundlepath)

        if isinstance(bundle, bundle2.unbundle20):
            self._bundlefile = bundle
            self._cgunpacker = None

            cgpart = None
            for part in bundle.iterparts(seekable=True):
                if part.type == b'changegroup':
                    if cgpart:
                        raise NotImplementedError(
                            b"can't process multiple changegroups"
                        )
                    cgpart = part

                self._handlebundle2part(bundle, part)

            if not cgpart:
                raise error.Abort(_(b"No changegroups found"))

            # This is required to placate a later consumer, which expects
            # the payload offset to be at the beginning of the changegroup.
            # We need to do this after the iterparts() generator advances
            # because iterparts() will seek to end of payload after the
            # generator returns control to iterparts().
            cgpart.seek(0, os.SEEK_SET)

        elif isinstance(bundle, changegroup.cg1unpacker):
            if bundle.compressed():
                # decompress to a temp file; cg1 unpackers need raw access
                f = self._writetempbundle(
                    bundle.read, b'.hg10un', header=b'HG10UN'
                )
                bundle = exchange.readbundle(self.ui, f, bundlepath, self.vfs)

            self._bundlefile = bundle
            self._cgunpacker = bundle
        else:
            raise error.Abort(
                _(b'bundle type %s cannot be read') % type(bundle)
            )

        # dict with the mapping 'filename' -> position in the changegroup.
        self._cgfilespos = {}

        self.firstnewrev = self.changelog.repotiprev + 1
        # everything coming from the bundle is at most draft
        phases.retractboundary(
            self,
            None,
            phases.draft,
            [ctx.node() for ctx in self[self.firstnewrev :]],
        )

    def _handlebundle2part(self, bundle, part):
        """Set up the changegroup unpacker from a bundle2 changegroup part."""
        if part.type != b'changegroup':
            return

        cgstream = part
        version = part.params.get(b'version', b'01')
        legalcgvers = changegroup.supportedincomingversions(self)
        if version not in legalcgvers:
            msg = _(b'Unsupported changegroup version: %s')
            raise error.Abort(msg % version)
        if bundle.compressed():
            cgstream = self._writetempbundle(part.read, b'.cg%sun' % version)

        self._cgunpacker = changegroup.getunbundler(version, cgstream, b'UN')

    def _writetempbundle(self, readfn, suffix, header=b''):
        """Write a temporary file to disk"""
        fdtemp, temp = self.vfs.mkstemp(prefix=b"hg-bundle-", suffix=suffix)
        # remembered so close() can unlink it
        self.tempfile = temp

        with os.fdopen(fdtemp, 'wb') as fptemp:
            fptemp.write(header)
            while True:
                chunk = readfn(2 ** 18)
                if not chunk:
                    break
                fptemp.write(chunk)

        return self.vfs.open(self.tempfile, mode=b"rb")

    @localrepo.unfilteredpropertycache
    def _phasecache(self):
        return bundlephasecache(self, self._phasedefaults)

    @localrepo.unfilteredpropertycache
    def changelog(self):
        # consume the header if it exists
        self._cgunpacker.changelogheader()
        c = bundlechangelog(self.svfs, self._cgunpacker)
        # record where the manifest section starts in the stream
        self.manstart = self._cgunpacker.tell()
        return c

    def _refreshchangelog(self):
        # changelog for bundle repo are not filecache, this method is not
        # applicable.
        pass

    @localrepo.unfilteredpropertycache
    def manifestlog(self):
        self._cgunpacker.seek(self.manstart)
        # consume the header if it exists
        self._cgunpacker.manifestheader()
        linkmapper = self.unfiltered().changelog.rev
        rootstore = bundlemanifest(
            self.nodeconstants, self.svfs, self._cgunpacker, linkmapper
        )
        self.filestart = self._cgunpacker.tell()

        return manifest.manifestlog(
            self.svfs, self, rootstore, self.narrowmatch()
        )

    def _consumemanifest(self):
        """Consumes the manifest portion of the bundle, setting filestart so the
        file portion can be read."""
        self._cgunpacker.seek(self.manstart)
        self._cgunpacker.manifestheader()
        for delta in self._cgunpacker.deltaiter():
            pass
        self.filestart = self._cgunpacker.tell()

    @localrepo.unfilteredpropertycache
    def manstart(self):
        # accessing self.changelog populates manstart as a side effect
        self.changelog
        return self.manstart

    @localrepo.unfilteredpropertycache
    def filestart(self):
        self.manifestlog

        # If filestart was not set by self.manifestlog, that means the
        # manifestlog implementation did not consume the manifests from the
        # changegroup (ex: it might be consuming trees from a separate bundle2
        # part instead). So we need to manually consume it.
        if 'filestart' not in self.__dict__:
            self._consumemanifest()

        return self.filestart

    def url(self):
        return self._url

    def file(self, f):
        """Return a filelog for *f*, bundle-backed if *f* is in the bundle."""
        if not self._cgfilespos:
            # lazily index the file sections of the changegroup
            self._cgunpacker.seek(self.filestart)
            self._cgfilespos = _getfilestarts(self._cgunpacker)

        if f in self._cgfilespos:
            self._cgunpacker.seek(self._cgfilespos[f])
            linkmapper = self.unfiltered().changelog.rev
            return bundlefilelog(self.svfs, f, self._cgunpacker, linkmapper)
        else:
            return super(bundlerepository, self).file(f)

    def close(self):
        """Close assigned bundle file immediately."""
        self._bundlefile.close()
        if self.tempfile is not None:
            self.vfs.unlink(self.tempfile)
        if self._tempparent:
            shutil.rmtree(self._tempparent, True)

    def cancopy(self):
        return False

    def peer(self):
        return bundlepeer(self)

    def getcwd(self):
        return encoding.getcwd()  # always outside the repo

    # Check if parents exist in localrepo before setting
    def setparents(self, p1, p2=None):
        if p2 is None:
            p2 = self.nullid
        p1rev = self.changelog.rev(p1)
        p2rev = self.changelog.rev(p2)
        msg = _(b"setting parent to node %s that only exists in the bundle\n")
        if self.changelog.repotiprev < p1rev:
            self.ui.warn(msg % hex(p1))
        if self.changelog.repotiprev < p2rev:
            self.ui.warn(msg % hex(p2))
        return super(bundlerepository, self).setparents(p1, p2)
481
481
482
482
def instance(ui, path, create, intents=None, createopts=None):
    """repository-type entry point: open *path* as a bundle repository.

    *create* must be false; bundle repositories cannot be created. The base
    repository is taken from the ``bundle.mainreporoot`` config, the working
    directory, or a ``bundle:repo+file`` URL.
    """
    if create:
        raise error.Abort(_(b'cannot create new bundle repository'))
    # internal config: bundle.mainreporoot
    parentpath = ui.config(b"bundle", b"mainreporoot")
    if not parentpath:
        # try to find the correct path to the working directory repo
        parentpath = cmdutil.findrepo(encoding.getcwd())
        if parentpath is None:
            parentpath = b''
    if parentpath:
        # Try to make the full path relative so we get a nice, short URL.
        # In particular, we don't want temp dir names in test outputs.
        cwd = encoding.getcwd()
        if parentpath == cwd:
            parentpath = b''
        else:
            cwd = pathutil.normasprefix(cwd)
            if parentpath.startswith(cwd):
                parentpath = parentpath[len(cwd) :]
    u = urlutil.url(path)
    path = u.localpath()
    if u.scheme == b'bundle':
        # 'bundle:repopath+bundlename' or just 'bundle:bundlename'
        s = path.split(b"+", 1)
        if len(s) == 1:
            repopath, bundlename = parentpath, s[0]
        else:
            repopath, bundlename = s
    else:
        repopath, bundlename = parentpath, path

    return makebundlerepository(ui, repopath, bundlename)
515
515
516
516
def makebundlerepository(ui, repopath, bundlepath):
    """Make a bundle repository object based on repo and bundle paths."""
    if repopath:
        url = b'bundle:%s+%s' % (util.expandpath(repopath), bundlepath)
    else:
        url = b'bundle:%s' % bundlepath

    # Because we can't make any guarantees about the type of the base
    # repository, we can't have a static class representing the bundle
    # repository. We also can't make any guarantees about how to even
    # call the base repository's constructor!
    #
    # So, our strategy is to go through ``localrepo.instance()`` to construct
    # a repo instance. Then, we dynamically create a new type derived from
    # both it and our ``bundlerepository`` class which overrides some
    # functionality. We then change the type of the constructed repository
    # to this new type and initialize the bundle-specific bits of it.

    try:
        repo = localrepo.instance(ui, repopath, create=False)
        tempparent = None
    except error.RepoError:
        # no base repo available: create a throwaway one to overlay onto
        tempparent = pycompat.mkdtemp()
        try:
            repo = localrepo.instance(ui, tempparent, create=True)
        except Exception:
            shutil.rmtree(tempparent)
            raise

    class derivedbundlerepository(bundlerepository, repo.__class__):
        pass

    repo.__class__ = derivedbundlerepository
    bundlerepository.__init__(repo, bundlepath, url, tempparent)

    return repo
553
553
554
554
class bundletransactionmanager(object):
    """Transaction manager stub for read-only bundle repositories.

    Nothing is ever persisted through a bundle repo, so no real
    transaction exists; attempting to close or release one is a
    programming error.
    """

    def transaction(self):
        # There is never an active transaction to return.
        return None

    def close(self):
        raise NotImplementedError

    def release(self):
        raise NotImplementedError
564
564
565
565
def getremotechanges(
    ui, repo, peer, onlyheads=None, bundlename=None, force=False
):
    """obtains a bundle of changes incoming from peer

    "onlyheads" restricts the returned changes to those reachable from the
    specified heads.
    "bundlename", if given, stores the bundle to this file path permanently;
    otherwise it's stored to a temp file and gets deleted again when you call
    the returned "cleanupfn".
    "force" indicates whether to proceed on unrelated repos.

    Returns a tuple (local, csets, cleanupfn):

    "local" is a local repo from which to obtain the actual incoming
    changesets; it is a bundlerepo for the obtained bundle when the
    original "peer" is remote.
    "csets" lists the incoming changeset node ids.
    "cleanupfn" must be called without arguments when you're done processing
    the changes; it closes both the original "peer" and the one returned
    here.
    """
    tmp = discovery.findcommonincoming(repo, peer, heads=onlyheads, force=force)
    common, incoming, rheads = tmp
    if not incoming:
        # Nothing incoming: drop any previously written bundle file of
        # the requested name (best effort) and hand back the local repo.
        try:
            if bundlename:
                os.unlink(bundlename)
        except OSError:
            pass
        return repo, [], peer.close

    # Remote heads that we already have are not interesting.
    commonset = set(common)
    rheads = [x for x in rheads if x not in commonset]

    bundle = None
    bundlerepo = None
    localrepo = peer.local()
    if bundlename or not localrepo:
        # create a bundle (uncompressed if peer repo is not local)

        # developer config: devel.legacy.exchange
        legexc = ui.configlist(b'devel', b'legacy.exchange')
        forcebundle1 = b'bundle2' not in legexc and b'bundle1' in legexc
        canbundle2 = (
            not forcebundle1
            and peer.capable(b'getbundle')
            and peer.capable(b'bundle2')
        )
        if canbundle2:
            # Modern path: fetch a bundle2 stream and forward its chunks
            # verbatim to disk.
            with peer.commandexecutor() as e:
                b2 = e.callcommand(
                    b'getbundle',
                    {
                        b'source': b'incoming',
                        b'common': common,
                        b'heads': rheads,
                        b'bundlecaps': exchange.caps20to10(
                            repo, role=b'client'
                        ),
                        b'cg': True,
                    },
                ).result()

                fname = bundle = changegroup.writechunks(
                    ui, b2._forwardchunks(), bundlename
                )
        else:
            # Legacy (bundle1) path: pick the widest command the peer
            # supports, from newest to oldest.
            if peer.capable(b'getbundle'):
                with peer.commandexecutor() as e:
                    cg = e.callcommand(
                        b'getbundle',
                        {
                            b'source': b'incoming',
                            b'common': common,
                            b'heads': rheads,
                        },
                    ).result()
            elif onlyheads is None and not peer.capable(b'changegroupsubset'):
                # compat with older servers when pulling all remote heads

                with peer.commandexecutor() as e:
                    cg = e.callcommand(
                        b'changegroup',
                        {
                            b'nodes': incoming,
                            b'source': b'incoming',
                        },
                    ).result()

                # 'changegroup' returns everything; there is no head
                # restriction to apply below.
                rheads = None
            else:
                with peer.commandexecutor() as e:
                    cg = e.callcommand(
                        b'changegroupsubset',
                        {
                            b'bases': incoming,
                            b'heads': rheads,
                            b'source': b'incoming',
                        },
                    ).result()

            # Compress only when the peer is local; remote transfers are
            # written uncompressed so the bundlerepo can read them directly.
            if localrepo:
                bundletype = b"HG10BZ"
            else:
                bundletype = b"HG10UN"
            fname = bundle = bundle2.writebundle(ui, cg, bundlename, bundletype)
        # keep written bundle?
        if bundlename:
            # Caller asked for a permanent file; do not delete it in cleanup().
            bundle = None
        if not localrepo:
            # use the created uncompressed bundlerepo
            localrepo = bundlerepo = makebundlerepository(
                repo.baseui, repo.root, fname
            )

            # this repo contains local and peer now, so filter out local again
            common = repo.heads()
    if localrepo:
        # Part of common may be remotely filtered
        # So use an unfiltered version
        # The discovery process probably need cleanup to avoid that
        localrepo = localrepo.unfiltered()

    csets = localrepo.changelog.findmissing(common, rheads)

    if bundlerepo:
        # Apply the remote phase information to the changesets that only
        # exist in the bundle overlay.
        reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev :]]

        with peer.commandexecutor() as e:
            remotephases = e.callcommand(
                b'listkeys',
                {
                    b'namespace': b'phases',
                },
            ).result()

        pullop = exchange.pulloperation(bundlerepo, peer, heads=reponodes)
        pullop.trmanager = bundletransactionmanager()
        exchange._pullapplyphases(pullop, remotephases)

    def cleanup():
        # Close the bundle overlay, remove any temporary bundle file,
        # and close the peer connection.
        if bundlerepo:
            bundlerepo.close()
        if bundle:
            os.unlink(bundle)
        peer.close()

    return (localrepo, csets, cleanup)
@@ -1,298 +1,286 b''
1 # filelog.py - file history class for mercurial
1 # filelog.py - file history class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .i18n import _
10 from .i18n import _
11 from .node import nullrev
11 from .node import nullrev
12 from . import (
12 from . import (
13 error,
13 error,
14 revlog,
14 revlog,
15 )
15 )
16 from .interfaces import (
16 from .interfaces import (
17 repository,
17 repository,
18 util as interfaceutil,
18 util as interfaceutil,
19 )
19 )
20 from .utils import storageutil
20 from .utils import storageutil
21 from .revlogutils import (
21 from .revlogutils import (
22 constants as revlog_constants,
22 constants as revlog_constants,
23 )
23 )
24
24
25
25
@interfaceutil.implementer(repository.ifilestorage)
class filelog(object):
    """Per-file history storage backed by a revlog.

    Thin adapter exposing the ``repository.ifilestorage`` interface on
    top of a ``revlog.revlog`` whose index lives at ``data/<path>.i``
    in the store.
    """

    def __init__(self, opener, path):
        # ``opener`` is the store vfs; ``path`` is the tracked file's
        # repository-relative path.
        self._revlog = revlog.revlog(
            opener,
            # XXX should use the unencoded path
            target=(revlog_constants.KIND_FILELOG, path),
            indexfile=b'/'.join((b'data', path + b'.i')),
            censorable=True,
        )
        # Full name of the user visible file, relative to the repository root.
        # Used by LFS.
        self._revlog.filename = path
        self.nullid = self._revlog.nullid

    def __len__(self):
        return len(self._revlog)

    def __iter__(self):
        return self._revlog.__iter__()

    def hasnode(self, node):
        # The null node is reported as absent by convention.
        if node in (self.nullid, nullrev):
            return False

        try:
            self._revlog.rev(node)
            return True
        except (TypeError, ValueError, IndexError, error.LookupError):
            return False

    def revs(self, start=0, stop=None):
        return self._revlog.revs(start=start, stop=stop)

    def parents(self, node):
        return self._revlog.parents(node)

    def parentrevs(self, rev):
        return self._revlog.parentrevs(rev)

    def rev(self, node):
        return self._revlog.rev(node)

    def node(self, rev):
        return self._revlog.node(rev)

    def lookup(self, node):
        # The index file name is passed along, presumably for use in
        # lookup error messages — see storageutil.fileidlookup.
        return storageutil.fileidlookup(
            self._revlog, node, self._revlog.indexfile
        )

    def linkrev(self, rev):
        return self._revlog.linkrev(rev)

    def commonancestorsheads(self, node1, node2):
        return self._revlog.commonancestorsheads(node1, node2)

    # Used by dagop.blockdescendants().
    def descendants(self, revs):
        return self._revlog.descendants(revs)

    def heads(self, start=None, stop=None):
        return self._revlog.heads(start, stop)

    # Used by hgweb, children extension.
    def children(self, node):
        return self._revlog.children(node)

    def iscensored(self, rev):
        return self._revlog.iscensored(rev)

    def revision(self, node, _df=None, raw=False):
        return self._revlog.revision(node, _df=_df, raw=raw)

    def rawdata(self, node, _df=None):
        return self._revlog.rawdata(node, _df=_df)

    def emitrevisions(
        self,
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=repository.CG_DELTAMODE_STD,
        sidedata_helpers=None,
    ):
        return self._revlog.emitrevisions(
            nodes,
            nodesorder=nodesorder,
            revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
            deltamode=deltamode,
            sidedata_helpers=sidedata_helpers,
        )

    def addrevision(
        self,
        revisiondata,
        transaction,
        linkrev,
        p1,
        p2,
        node=None,
        flags=revlog.REVIDX_DEFAULT_FLAGS,
        cachedelta=None,
    ):
        return self._revlog.addrevision(
            revisiondata,
            transaction,
            linkrev,
            p1,
            p2,
            node=node,
            flags=flags,
            cachedelta=cachedelta,
        )

    def addgroup(
        self,
        deltas,
        linkmapper,
        transaction,
        addrevisioncb=None,
        duplicaterevisioncb=None,
        maybemissingparents=False,
    ):
        # Revlog-backed storage cannot represent revisions whose parents
        # are absent, so that mode is rejected up front.
        if maybemissingparents:
            raise error.Abort(
                _(
                    b'revlog storage does not support missing '
                    b'parents write mode'
                )
            )

        return self._revlog.addgroup(
            deltas,
            linkmapper,
            transaction,
            addrevisioncb=addrevisioncb,
            duplicaterevisioncb=duplicaterevisioncb,
        )

    def getstrippoint(self, minlink):
        return self._revlog.getstrippoint(minlink)

    def strip(self, minlink, transaction):
        return self._revlog.strip(minlink, transaction)

    def censorrevision(self, tr, node, tombstone=b''):
        return self._revlog.censorrevision(tr, node, tombstone=tombstone)

    def files(self):
        return self._revlog.files()

    def read(self, node):
        # Strip the copy/rename metadata header from the stored text.
        return storageutil.filtermetadata(self.revision(node))

    def add(self, text, meta, transaction, link, p1=None, p2=None):
        # Texts that themselves start with the metadata marker must be
        # wrapped even without metadata, so reading round-trips.
        if meta or text.startswith(b'\1\n'):
            text = storageutil.packmeta(meta, text)
        rev = self.addrevision(text, transaction, link, p1, p2)
        return self.node(rev)

    def renamed(self, node):
        return storageutil.filerevisioncopied(self, node)

    def size(self, rev):
        """return the size of a given revision"""

        # for revisions with renames, we have to go the slow way
        node = self.node(rev)
        if self.renamed(node):
            return len(self.read(node))
        if self.iscensored(rev):
            return 0

        # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
        return self._revlog.size(rev)

    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """
        return not storageutil.filedataequivalent(self, node, text)

    def verifyintegrity(self, state):
        return self._revlog.verifyintegrity(state)

    def storageinfo(
        self,
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        return self._revlog.storageinfo(
            exclusivefiles=exclusivefiles,
            sharedfiles=sharedfiles,
            revisionscount=revisionscount,
            trackedsize=trackedsize,
            storedsize=storedsize,
        )

    # Used by repo upgrade.
    def clone(self, tr, destrevlog, **kwargs):
        if not isinstance(destrevlog, filelog):
            raise error.ProgrammingError(b'expected filelog to clone()')

        return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
249
237
250
238
class narrowfilelog(filelog):
    """Filelog variation to be used with narrow stores."""

    def __init__(self, opener, path, narrowmatch):
        super(narrowfilelog, self).__init__(opener, path)
        # Matcher deciding which paths are inside the narrow spec.
        self._narrowmatch = narrowmatch

    def renamed(self, node):
        """Return rename metadata, hiding copies from outside the spec."""
        res = super(narrowfilelog, self).renamed(node)

        # Renames that come from outside the narrowspec are problematic
        # because we may lack the base text for the rename. This can result
        # in code attempting to walk the ancestry or compute a diff
        # encountering a missing revision. We address this by silently
        # removing rename metadata if the source file is outside the
        # narrow spec.
        #
        # A better solution would be to see if the base revision is available,
        # rather than assuming it isn't.
        #
        # An even better solution would be to teach all consumers of rename
        # metadata that the base revision may not be available.
        #
        # TODO consider better ways of doing this.
        if res and not self._narrowmatch(res[0]):
            return None

        return res

    def size(self, rev):
        """Return the size of a revision, trusting the real rename data."""
        # Because we have a custom renamed() that may lie, we need to call
        # the base renamed() to report accurate results.
        node = self.node(rev)
        if super(narrowfilelog, self).renamed(node):
            return len(self.read(node))
        else:
            return super(narrowfilelog, self).size(rev)

    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """
        # We don't call `super` because narrow parents can be buggy in case of a
        # ambiguous dirstate. Always take the slow path until there is a better
        # fix, see issue6150.

        # Censored files compare against the empty file.
        if self.iscensored(self.rev(node)):
            return text != b''

        return self.read(node) != text
@@ -1,308 +1,308 b''
1 # unionrepo.py - repository class for viewing union of repository changesets
1 # unionrepo.py - repository class for viewing union of repository changesets
2 #
2 #
3 # Derived from bundlerepo.py
3 # Derived from bundlerepo.py
4 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
4 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
5 # Copyright 2013 Unity Technologies, Mads Kiilerich <madski@unity3d.com>
5 # Copyright 2013 Unity Technologies, Mads Kiilerich <madski@unity3d.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Repository class for "in-memory pull" of one local repository to another,
10 """Repository class for "in-memory pull" of one local repository to another,
11 allowing operations like diff and log with revsets.
11 allowing operations like diff and log with revsets.
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import getattr
17 from .pycompat import getattr
18
18
19 from . import (
19 from . import (
20 changelog,
20 changelog,
21 cmdutil,
21 cmdutil,
22 encoding,
22 encoding,
23 error,
23 error,
24 filelog,
24 filelog,
25 localrepo,
25 localrepo,
26 manifest,
26 manifest,
27 mdiff,
27 mdiff,
28 pathutil,
28 pathutil,
29 revlog,
29 revlog,
30 util,
30 util,
31 vfs as vfsmod,
31 vfs as vfsmod,
32 )
32 )
33
33
34
34
35 class unionrevlog(revlog.revlog):
35 class unionrevlog(revlog.revlog):
36 def __init__(self, opener, indexfile, revlog2, linkmapper):
36 def __init__(self, opener, indexfile, revlog2, linkmapper):
37 # How it works:
37 # How it works:
38 # To retrieve a revision, we just need to know the node id so we can
38 # To retrieve a revision, we just need to know the node id so we can
39 # look it up in revlog2.
39 # look it up in revlog2.
40 #
40 #
41 # To differentiate a rev in the second revlog from a rev in the revlog,
41 # To differentiate a rev in the second revlog from a rev in the revlog,
42 # we check revision against repotiprev.
42 # we check revision against repotiprev.
43 opener = vfsmod.readonlyvfs(opener)
43 opener = vfsmod.readonlyvfs(opener)
44 target = getattr(revlog2, 'target', None)
44 target = getattr(revlog2, 'target', None)
45 if target is None:
45 if target is None:
46 # a revlog wrapper, eg: the manifestlog that is not an actual revlog
46 # a revlog wrapper, eg: the manifestlog that is not an actual revlog
47 target = revlog2._revlog.target
47 target = revlog2._revlog.target
48 revlog.revlog.__init__(self, opener, target=target, indexfile=indexfile)
48 revlog.revlog.__init__(self, opener, target=target, indexfile=indexfile)
49 self.revlog2 = revlog2
49 self.revlog2 = revlog2
50
50
51 n = len(self)
51 n = len(self)
52 self.repotiprev = n - 1
52 self.repotiprev = n - 1
53 self.bundlerevs = set() # used by 'bundle()' revset expression
53 self.bundlerevs = set() # used by 'bundle()' revset expression
54 for rev2 in self.revlog2:
54 for rev2 in self.revlog2:
55 rev = self.revlog2.index[rev2]
55 rev = self.revlog2.index[rev2]
56 # rev numbers - in revlog2, very different from self.rev
56 # rev numbers - in revlog2, very different from self.rev
57 (
57 (
58 _start,
58 _start,
59 _csize,
59 _csize,
60 rsize,
60 rsize,
61 base,
61 base,
62 linkrev,
62 linkrev,
63 p1rev,
63 p1rev,
64 p2rev,
64 p2rev,
65 node,
65 node,
66 _sdo,
66 _sdo,
67 _sds,
67 _sds,
68 ) = rev
68 ) = rev
69 flags = _start & 0xFFFF
69 flags = _start & 0xFFFF
70
70
71 if linkmapper is None: # link is to same revlog
71 if linkmapper is None: # link is to same revlog
72 assert linkrev == rev2 # we never link back
72 assert linkrev == rev2 # we never link back
73 link = n
73 link = n
74 else: # rev must be mapped from repo2 cl to unified cl by linkmapper
74 else: # rev must be mapped from repo2 cl to unified cl by linkmapper
75 link = linkmapper(linkrev)
75 link = linkmapper(linkrev)
76
76
77 if linkmapper is not None: # link is to same revlog
77 if linkmapper is not None: # link is to same revlog
78 base = linkmapper(base)
78 base = linkmapper(base)
79
79
80 this_rev = self.index.get_rev(node)
80 this_rev = self.index.get_rev(node)
81 if this_rev is not None:
81 if this_rev is not None:
82 # this happens for the common revlog revisions
82 # this happens for the common revlog revisions
83 self.bundlerevs.add(this_rev)
83 self.bundlerevs.add(this_rev)
84 continue
84 continue
85
85
86 p1node = self.revlog2.node(p1rev)
86 p1node = self.revlog2.node(p1rev)
87 p2node = self.revlog2.node(p2rev)
87 p2node = self.revlog2.node(p2rev)
88
88
89 # TODO: it's probably wrong to set compressed length to -1, but
89 # TODO: it's probably wrong to set compressed length to -1, but
90 # I have no idea if csize is valid in the base revlog context.
90 # I have no idea if csize is valid in the base revlog context.
91 e = (
91 e = (
92 flags,
92 flags,
93 -1,
93 -1,
94 rsize,
94 rsize,
95 base,
95 base,
96 link,
96 link,
97 self.rev(p1node),
97 self.rev(p1node),
98 self.rev(p2node),
98 self.rev(p2node),
99 node,
99 node,
100 0, # sidedata offset
100 0, # sidedata offset
101 0, # sidedata size
101 0, # sidedata size
102 )
102 )
103 self.index.append(e)
103 self.index.append(e)
104 self.bundlerevs.add(n)
104 self.bundlerevs.add(n)
105 n += 1
105 n += 1
106
106
107 def _chunk(self, rev):
107 def _chunk(self, rev):
108 if rev <= self.repotiprev:
108 if rev <= self.repotiprev:
109 return revlog.revlog._chunk(self, rev)
109 return revlog.revlog._chunk(self, rev)
110 return self.revlog2._chunk(self.node(rev))
110 return self.revlog2._chunk(self.node(rev))
111
111
112 def revdiff(self, rev1, rev2):
112 def revdiff(self, rev1, rev2):
113 """return or calculate a delta between two revisions"""
113 """return or calculate a delta between two revisions"""
114 if rev1 > self.repotiprev and rev2 > self.repotiprev:
114 if rev1 > self.repotiprev and rev2 > self.repotiprev:
115 return self.revlog2.revdiff(
115 return self.revlog2.revdiff(
116 self.revlog2.rev(self.node(rev1)),
116 self.revlog2.rev(self.node(rev1)),
117 self.revlog2.rev(self.node(rev2)),
117 self.revlog2.rev(self.node(rev2)),
118 )
118 )
119 elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
119 elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
120 return super(unionrevlog, self).revdiff(rev1, rev2)
120 return super(unionrevlog, self).revdiff(rev1, rev2)
121
121
122 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
122 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
123
123
124 def _revisiondata(self, nodeorrev, _df=None, raw=False):
124 def _revisiondata(self, nodeorrev, _df=None, raw=False):
125 if isinstance(nodeorrev, int):
125 if isinstance(nodeorrev, int):
126 rev = nodeorrev
126 rev = nodeorrev
127 node = self.node(rev)
127 node = self.node(rev)
128 else:
128 else:
129 node = nodeorrev
129 node = nodeorrev
130 rev = self.rev(node)
130 rev = self.rev(node)
131
131
132 if rev > self.repotiprev:
132 if rev > self.repotiprev:
133 # work around manifestrevlog NOT being a revlog
133 # work around manifestrevlog NOT being a revlog
134 revlog2 = getattr(self.revlog2, '_revlog', self.revlog2)
134 revlog2 = getattr(self.revlog2, '_revlog', self.revlog2)
135 func = revlog2._revisiondata
135 func = revlog2._revisiondata
136 else:
136 else:
137 func = super(unionrevlog, self)._revisiondata
137 func = super(unionrevlog, self)._revisiondata
138 return func(node, _df=_df, raw=raw)
138 return func(node, _df=_df, raw=raw)
139
139
140 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
140 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
141 raise NotImplementedError
141 raise NotImplementedError
142
142
143 def addgroup(
143 def addgroup(
144 self,
144 self,
145 deltas,
145 deltas,
146 linkmapper,
146 linkmapper,
147 transaction,
147 transaction,
148 alwayscache=False,
148 alwayscache=False,
149 addrevisioncb=None,
149 addrevisioncb=None,
150 duplicaterevisioncb=None,
150 duplicaterevisioncb=None,
151 maybemissingparents=False,
151 maybemissingparents=False,
152 ):
152 ):
153 raise NotImplementedError
153 raise NotImplementedError
154
154
155 def strip(self, minlink, transaction):
155 def strip(self, minlink, transaction):
156 raise NotImplementedError
156 raise NotImplementedError
157
157
158 def checksize(self):
158 def checksize(self):
159 raise NotImplementedError
159 raise NotImplementedError
160
160
161
161
class unionchangelog(unionrevlog, changelog.changelog):
    """Changelog presenting the union of a local changelog and a second one."""

    def __init__(self, opener, opener2):
        changelog.changelog.__init__(self, opener)
        other = changelog.changelog(opener2)
        # the changelog needs no link mapping: it *is* the link target
        unionrevlog.__init__(self, opener, self.indexfile, other, None)
170
170
171
171
class unionmanifest(unionrevlog, manifest.manifestrevlog):
    """Manifest revlog presenting the union of two manifest revlogs."""

    def __init__(self, nodeconstants, opener, opener2, linkmapper):
        manifest.manifestrevlog.__init__(self, nodeconstants, opener)
        other = manifest.manifestrevlog(nodeconstants, opener2)
        unionrevlog.__init__(
            self, opener, self._revlog.indexfile, other, linkmapper
        )
179
179
180
180
class unionfilelog(filelog.filelog):
    """Filelog presenting the union of a local filelog and a second one."""

    def __init__(self, opener, path, opener2, linkmapper, repo):
        filelog.filelog.__init__(self, opener, path)
        filelog2 = filelog.filelog(opener2, path)
        # ``filelog`` no longer exposes ``indexfile`` directly (it was
        # dropped from the filelog interface); reach through the
        # underlying revlog instead, matching ``unionmanifest`` above.
        self._revlog = unionrevlog(
            opener, self._revlog.indexfile, filelog2._revlog, linkmapper
        )
        self._repo = repo
        # mirror the union revlog's bookkeeping so method dispatch below
        # can tell which side a revision lives on
        self.repotiprev = self._revlog.repotiprev
        self.revlog2 = self._revlog.revlog2

    def iscensored(self, rev):
        """Check if a revision is censored."""
        if rev <= self.repotiprev:
            return filelog.filelog.iscensored(self, rev)
        node = self.node(rev)
        return self.revlog2.iscensored(self.revlog2.rev(node))
198
198
199
199
class unionpeer(localrepo.localpeer):
    """Peer for a union repository; pushing is never possible."""

    def canpush(self):
        return False
203
203
204
204
class unionrepository(object):
    """Represents the union of data in 2 repositories.

    Instances are not usable if constructed directly. Use ``instance()``
    or ``makeunionrepository()`` to create a usable instance.
    """

    def __init__(self, repo2, url):
        self.repo2 = repo2
        self._url = url
        # a union view is read-only; never publish phases through it
        self.ui.setconfig(b'phases', b'publish', False, b'unionrepo')

    @localrepo.unfilteredpropertycache
    def changelog(self):
        return unionchangelog(self.svfs, self.repo2.svfs)

    @localrepo.unfilteredpropertycache
    def manifestlog(self):
        rootstore = unionmanifest(
            self.nodeconstants,
            self.svfs,
            self.repo2.svfs,
            self.unfiltered()._clrev,
        )
        return manifest.manifestlog(
            self.svfs, self, rootstore, self.narrowmatch()
        )

    def _clrev(self, rev2):
        """map from repo2 changelog rev to temporary rev in self.changelog"""
        return self.changelog.rev(self.repo2.changelog.node(rev2))

    def url(self):
        return self._url

    def file(self, f):
        return unionfilelog(
            self.svfs, f, self.repo2.svfs, self.unfiltered()._clrev, self
        )

    def close(self):
        self.repo2.close()

    def cancopy(self):
        return False

    def peer(self):
        return unionpeer(self)

    def getcwd(self):
        return encoding.getcwd()  # always outside the repo
258
258
259
259
def instance(ui, path, create, intents=None, createopts=None):
    """Open a union repository from a ``union:`` URL.

    Creating a union repository is refused; the two member paths are
    parsed from ``path`` (falling back to the working-directory repo
    for the first member).
    """
    if create:
        raise error.Abort(_(b'cannot create new union repository'))
    parentpath = ui.config(b"bundle", b"mainreporoot")
    if not parentpath:
        # try to find the correct path to the working directory repo
        parentpath = cmdutil.findrepo(encoding.getcwd()) or b''
    if parentpath:
        # Try to make the full path relative so we get a nice, short URL.
        # In particular, we don't want temp dir names in test outputs.
        cwd = encoding.getcwd()
        if parentpath == cwd:
            parentpath = b''
        else:
            cwd = pathutil.normasprefix(cwd)
            if parentpath.startswith(cwd):
                parentpath = parentpath[len(cwd):]
    if path.startswith(b'union:'):
        parts = path.split(b":", 1)[1].split(b"+", 1)
        if len(parts) == 1:
            repopath, repopath2 = parentpath, parts[0]
        else:
            repopath, repopath2 = parts
    else:
        repopath, repopath2 = parentpath, path

    return makeunionrepository(ui, repopath, repopath2)
289
289
290
290
def makeunionrepository(ui, repopath1, repopath2):
    """Make a union repository object from 2 local repo paths."""
    repo1 = localrepo.instance(ui, repopath1, create=False)
    repo2 = localrepo.instance(ui, repopath2, create=False)

    url = b'union:%s+%s' % (
        util.expandpath(repopath1),
        util.expandpath(repopath2),
    )

    class derivedunionrepository(unionrepository, repo1.__class__):
        pass

    # graft the union behaviour onto the first repository in place
    repo = repo1
    repo.__class__ = derivedunionrepository
    unionrepository.__init__(repo, repo2, url)

    return repo
General Comments 0
You need to be logged in to leave comments. Login now