bundlerepo: move most attribute declaration earlier in __init__...
marmoute
r51093:a6a8946d default
@@ -1,714 +1,714 b''
1 # bundlerepo.py - repository class for viewing uncompressed bundles
1 # bundlerepo.py - repository class for viewing uncompressed bundles
2 #
2 #
3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Repository class for viewing uncompressed bundles.
8 """Repository class for viewing uncompressed bundles.
9
9
10 This provides a read-only repository interface to bundles as if they
10 This provides a read-only repository interface to bundles as if they
11 were part of the actual repository.
11 were part of the actual repository.
12 """
12 """
13
13
14
14
15 import os
15 import os
16 import shutil
16 import shutil
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullrev,
21 nullrev,
22 )
22 )
23
23
24 from . import (
24 from . import (
25 bundle2,
25 bundle2,
26 changegroup,
26 changegroup,
27 changelog,
27 changelog,
28 cmdutil,
28 cmdutil,
29 discovery,
29 discovery,
30 encoding,
30 encoding,
31 error,
31 error,
32 exchange,
32 exchange,
33 filelog,
33 filelog,
34 localrepo,
34 localrepo,
35 manifest,
35 manifest,
36 mdiff,
36 mdiff,
37 pathutil,
37 pathutil,
38 phases,
38 phases,
39 pycompat,
39 pycompat,
40 revlog,
40 revlog,
41 revlogutils,
41 revlogutils,
42 util,
42 util,
43 vfs as vfsmod,
43 vfs as vfsmod,
44 )
44 )
45 from .utils import (
45 from .utils import (
46 urlutil,
46 urlutil,
47 )
47 )
48
48
49 from .revlogutils import (
49 from .revlogutils import (
50 constants as revlog_constants,
50 constants as revlog_constants,
51 )
51 )
52
52
53
53
54 class bundlerevlog(revlog.revlog):
54 class bundlerevlog(revlog.revlog):
55 def __init__(self, opener, target, radix, cgunpacker, linkmapper):
55 def __init__(self, opener, target, radix, cgunpacker, linkmapper):
56 # How it works:
56 # How it works:
57 # To retrieve a revision, we need to know the offset of the revision in
57 # To retrieve a revision, we need to know the offset of the revision in
58 # the bundle (an unbundle object). We store this offset in the index
58 # the bundle (an unbundle object). We store this offset in the index
59 # (start). The base of the delta is stored in the base field.
59 # (start). The base of the delta is stored in the base field.
60 #
60 #
61 # To differentiate a rev in the bundle from a rev in the revlog, we
61 # To differentiate a rev in the bundle from a rev in the revlog, we
62 # check revision against repotiprev.
62 # check revision against repotiprev.
63 opener = vfsmod.readonlyvfs(opener)
63 opener = vfsmod.readonlyvfs(opener)
64 revlog.revlog.__init__(self, opener, target=target, radix=radix)
64 revlog.revlog.__init__(self, opener, target=target, radix=radix)
65 self.bundle = cgunpacker
65 self.bundle = cgunpacker
66 n = len(self)
66 n = len(self)
67 self.repotiprev = n - 1
67 self.repotiprev = n - 1
68 self.bundlerevs = set() # used by 'bundle()' revset expression
68 self.bundlerevs = set() # used by 'bundle()' revset expression
69 for deltadata in cgunpacker.deltaiter():
69 for deltadata in cgunpacker.deltaiter():
70 node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
70 node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
71
71
72 size = len(delta)
72 size = len(delta)
73 start = cgunpacker.tell() - size
73 start = cgunpacker.tell() - size
74
74
75 if self.index.has_node(node):
75 if self.index.has_node(node):
76 # this can happen if two branches make the same change
76 # this can happen if two branches make the same change
77 self.bundlerevs.add(self.index.rev(node))
77 self.bundlerevs.add(self.index.rev(node))
78 continue
78 continue
79 if cs == node:
79 if cs == node:
80 linkrev = nullrev
80 linkrev = nullrev
81 else:
81 else:
82 linkrev = linkmapper(cs)
82 linkrev = linkmapper(cs)
83
83
84 for p in (p1, p2):
84 for p in (p1, p2):
85 if not self.index.has_node(p):
85 if not self.index.has_node(p):
86 raise error.LookupError(
86 raise error.LookupError(
87 p, self.display_id, _(b"unknown parent")
87 p, self.display_id, _(b"unknown parent")
88 )
88 )
89
89
90 if not self.index.has_node(deltabase):
90 if not self.index.has_node(deltabase):
91 raise error.LookupError(
91 raise error.LookupError(
92 deltabase, self.display_id, _(b'unknown delta base')
92 deltabase, self.display_id, _(b'unknown delta base')
93 )
93 )
94
94
95 baserev = self.rev(deltabase)
95 baserev = self.rev(deltabase)
96 # start, size, full unc. size, base (unused), link, p1, p2, node, sidedata_offset (unused), sidedata_size (unused)
96 # start, size, full unc. size, base (unused), link, p1, p2, node, sidedata_offset (unused), sidedata_size (unused)
97 e = revlogutils.entry(
97 e = revlogutils.entry(
98 flags=flags,
98 flags=flags,
99 data_offset=start,
99 data_offset=start,
100 data_compressed_length=size,
100 data_compressed_length=size,
101 data_delta_base=baserev,
101 data_delta_base=baserev,
102 link_rev=linkrev,
102 link_rev=linkrev,
103 parent_rev_1=self.rev(p1),
103 parent_rev_1=self.rev(p1),
104 parent_rev_2=self.rev(p2),
104 parent_rev_2=self.rev(p2),
105 node_id=node,
105 node_id=node,
106 )
106 )
107 self.index.append(e)
107 self.index.append(e)
108 self.bundlerevs.add(n)
108 self.bundlerevs.add(n)
109 n += 1
109 n += 1
110
110
111 def _chunk(self, rev, df=None):
111 def _chunk(self, rev, df=None):
112 # Warning: in case of bundle, the diff is against what we stored as
112 # Warning: in case of bundle, the diff is against what we stored as
113 # delta base, not against rev - 1
113 # delta base, not against rev - 1
114 # XXX: could use some caching
114 # XXX: could use some caching
115 if rev <= self.repotiprev:
115 if rev <= self.repotiprev:
116 return revlog.revlog._chunk(self, rev)
116 return revlog.revlog._chunk(self, rev)
117 self.bundle.seek(self.start(rev))
117 self.bundle.seek(self.start(rev))
118 return self.bundle.read(self.length(rev))
118 return self.bundle.read(self.length(rev))
119
119
120 def revdiff(self, rev1, rev2):
120 def revdiff(self, rev1, rev2):
121 """return or calculate a delta between two revisions"""
121 """return or calculate a delta between two revisions"""
122 if rev1 > self.repotiprev and rev2 > self.repotiprev:
122 if rev1 > self.repotiprev and rev2 > self.repotiprev:
123 # hot path for bundle
123 # hot path for bundle
124 revb = self.index[rev2][3]
124 revb = self.index[rev2][3]
125 if revb == rev1:
125 if revb == rev1:
126 return self._chunk(rev2)
126 return self._chunk(rev2)
127 elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
127 elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
128 return revlog.revlog.revdiff(self, rev1, rev2)
128 return revlog.revlog.revdiff(self, rev1, rev2)
129
129
130 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
130 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
131
131
132 def _rawtext(self, node, rev, _df=None):
132 def _rawtext(self, node, rev, _df=None):
133 if rev is None:
133 if rev is None:
134 rev = self.rev(node)
134 rev = self.rev(node)
135 validated = False
135 validated = False
136 rawtext = None
136 rawtext = None
137 chain = []
137 chain = []
138 iterrev = rev
138 iterrev = rev
139 # reconstruct the revision if it is from a changegroup
139 # reconstruct the revision if it is from a changegroup
140 while iterrev > self.repotiprev:
140 while iterrev > self.repotiprev:
141 if self._revisioncache and self._revisioncache[1] == iterrev:
141 if self._revisioncache and self._revisioncache[1] == iterrev:
142 rawtext = self._revisioncache[2]
142 rawtext = self._revisioncache[2]
143 break
143 break
144 chain.append(iterrev)
144 chain.append(iterrev)
145 iterrev = self.index[iterrev][3]
145 iterrev = self.index[iterrev][3]
146 if iterrev == nullrev:
146 if iterrev == nullrev:
147 rawtext = b''
147 rawtext = b''
148 elif rawtext is None:
148 elif rawtext is None:
149 r = super(bundlerevlog, self)._rawtext(
149 r = super(bundlerevlog, self)._rawtext(
150 self.node(iterrev), iterrev, _df=_df
150 self.node(iterrev), iterrev, _df=_df
151 )
151 )
152 __, rawtext, validated = r
152 __, rawtext, validated = r
153 if chain:
153 if chain:
154 validated = False
154 validated = False
155 while chain:
155 while chain:
156 delta = self._chunk(chain.pop())
156 delta = self._chunk(chain.pop())
157 rawtext = mdiff.patches(rawtext, [delta])
157 rawtext = mdiff.patches(rawtext, [delta])
158 return rev, rawtext, validated
158 return rev, rawtext, validated
159
159
160 def addrevision(self, *args, **kwargs):
160 def addrevision(self, *args, **kwargs):
161 raise NotImplementedError
161 raise NotImplementedError
162
162
163 def addgroup(self, *args, **kwargs):
163 def addgroup(self, *args, **kwargs):
164 raise NotImplementedError
164 raise NotImplementedError
165
165
166 def strip(self, *args, **kwargs):
166 def strip(self, *args, **kwargs):
167 raise NotImplementedError
167 raise NotImplementedError
168
168
169 def checksize(self):
169 def checksize(self):
170 raise NotImplementedError
170 raise NotImplementedError
171
171
172
172
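As the 'How it works' comment in bundlerevlog.__init__ explains, revisions from the underlying revlog and revisions appended from the changegroup stream share one index and are told apart by comparing against repotiprev. A hypothetical helper (not part of this module) that spells out that boundary:

def rev_source(bundle_rlog, rev):
    # Illustration only: revs up to repotiprev are stored in the on-disk
    # revlog, while anything above was appended from the bundle, so its
    # data offset points into the changegroup stream instead of the revlog.
    if rev <= bundle_rlog.repotiprev:
        return b'revlog'
    return b'bundle'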
173 class bundlechangelog(bundlerevlog, changelog.changelog):
173 class bundlechangelog(bundlerevlog, changelog.changelog):
174 def __init__(self, opener, cgunpacker):
174 def __init__(self, opener, cgunpacker):
175 changelog.changelog.__init__(self, opener)
175 changelog.changelog.__init__(self, opener)
176 linkmapper = lambda x: x
176 linkmapper = lambda x: x
177 bundlerevlog.__init__(
177 bundlerevlog.__init__(
178 self,
178 self,
179 opener,
179 opener,
180 (revlog_constants.KIND_CHANGELOG, None),
180 (revlog_constants.KIND_CHANGELOG, None),
181 self.radix,
181 self.radix,
182 cgunpacker,
182 cgunpacker,
183 linkmapper,
183 linkmapper,
184 )
184 )
185
185
186
186
187 class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
187 class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
188 def __init__(
188 def __init__(
189 self,
189 self,
190 nodeconstants,
190 nodeconstants,
191 opener,
191 opener,
192 cgunpacker,
192 cgunpacker,
193 linkmapper,
193 linkmapper,
194 dirlogstarts=None,
194 dirlogstarts=None,
195 dir=b'',
195 dir=b'',
196 ):
196 ):
197 manifest.manifestrevlog.__init__(self, nodeconstants, opener, tree=dir)
197 manifest.manifestrevlog.__init__(self, nodeconstants, opener, tree=dir)
198 bundlerevlog.__init__(
198 bundlerevlog.__init__(
199 self,
199 self,
200 opener,
200 opener,
201 (revlog_constants.KIND_MANIFESTLOG, dir),
201 (revlog_constants.KIND_MANIFESTLOG, dir),
202 self._revlog.radix,
202 self._revlog.radix,
203 cgunpacker,
203 cgunpacker,
204 linkmapper,
204 linkmapper,
205 )
205 )
206 if dirlogstarts is None:
206 if dirlogstarts is None:
207 dirlogstarts = {}
207 dirlogstarts = {}
208 if self.bundle.version == b"03":
208 if self.bundle.version == b"03":
209 dirlogstarts = _getfilestarts(self.bundle)
209 dirlogstarts = _getfilestarts(self.bundle)
210 self._dirlogstarts = dirlogstarts
210 self._dirlogstarts = dirlogstarts
211 self._linkmapper = linkmapper
211 self._linkmapper = linkmapper
212
212
213 def dirlog(self, d):
213 def dirlog(self, d):
214 if d in self._dirlogstarts:
214 if d in self._dirlogstarts:
215 self.bundle.seek(self._dirlogstarts[d])
215 self.bundle.seek(self._dirlogstarts[d])
216 return bundlemanifest(
216 return bundlemanifest(
217 self.nodeconstants,
217 self.nodeconstants,
218 self.opener,
218 self.opener,
219 self.bundle,
219 self.bundle,
220 self._linkmapper,
220 self._linkmapper,
221 self._dirlogstarts,
221 self._dirlogstarts,
222 dir=d,
222 dir=d,
223 )
223 )
224 return super(bundlemanifest, self).dirlog(d)
224 return super(bundlemanifest, self).dirlog(d)
225
225
226
226
227 class bundlefilelog(filelog.filelog):
227 class bundlefilelog(filelog.filelog):
228 def __init__(self, opener, path, cgunpacker, linkmapper):
228 def __init__(self, opener, path, cgunpacker, linkmapper):
229 filelog.filelog.__init__(self, opener, path)
229 filelog.filelog.__init__(self, opener, path)
230 self._revlog = bundlerevlog(
230 self._revlog = bundlerevlog(
231 opener,
231 opener,
232 # XXX should use the unencoded path
232 # XXX should use the unencoded path
233 target=(revlog_constants.KIND_FILELOG, path),
233 target=(revlog_constants.KIND_FILELOG, path),
234 radix=self._revlog.radix,
234 radix=self._revlog.radix,
235 cgunpacker=cgunpacker,
235 cgunpacker=cgunpacker,
236 linkmapper=linkmapper,
236 linkmapper=linkmapper,
237 )
237 )
238
238
239
239
240 class bundlepeer(localrepo.localpeer):
240 class bundlepeer(localrepo.localpeer):
241 def canpush(self):
241 def canpush(self):
242 return False
242 return False
243
243
244
244
245 class bundlephasecache(phases.phasecache):
245 class bundlephasecache(phases.phasecache):
246 def __init__(self, *args, **kwargs):
246 def __init__(self, *args, **kwargs):
247 super(bundlephasecache, self).__init__(*args, **kwargs)
247 super(bundlephasecache, self).__init__(*args, **kwargs)
248 if util.safehasattr(self, 'opener'):
248 if util.safehasattr(self, 'opener'):
249 self.opener = vfsmod.readonlyvfs(self.opener)
249 self.opener = vfsmod.readonlyvfs(self.opener)
250
250
251 def write(self):
251 def write(self):
252 raise NotImplementedError
252 raise NotImplementedError
253
253
254 def _write(self, fp):
254 def _write(self, fp):
255 raise NotImplementedError
255 raise NotImplementedError
256
256
257 def _updateroots(self, phase, newroots, tr):
257 def _updateroots(self, phase, newroots, tr):
258 self.phaseroots[phase] = newroots
258 self.phaseroots[phase] = newroots
259 self.invalidate()
259 self.invalidate()
260 self.dirty = True
260 self.dirty = True
261
261
262
262
263 def _getfilestarts(cgunpacker):
263 def _getfilestarts(cgunpacker):
264 filespos = {}
264 filespos = {}
265 for chunkdata in iter(cgunpacker.filelogheader, {}):
265 for chunkdata in iter(cgunpacker.filelogheader, {}):
266 fname = chunkdata[b'filename']
266 fname = chunkdata[b'filename']
267 filespos[fname] = cgunpacker.tell()
267 filespos[fname] = cgunpacker.tell()
268 for chunk in iter(lambda: cgunpacker.deltachunk(None), {}):
268 for chunk in iter(lambda: cgunpacker.deltachunk(None), {}):
269 pass
269 pass
270 return filespos
270 return filespos
271
271
272
272
273 class bundlerepository:
273 class bundlerepository:
274 """A repository instance that is a union of a local repo and a bundle.
274 """A repository instance that is a union of a local repo and a bundle.
275
275
276 Instances represent a read-only repository composed of a local repository
276 Instances represent a read-only repository composed of a local repository
277 with the contents of a bundle file applied. The repository instance is
277 with the contents of a bundle file applied. The repository instance is
278 conceptually similar to the state of a repository after an
278 conceptually similar to the state of a repository after an
279 ``hg unbundle`` operation. However, the contents of the bundle are never
279 ``hg unbundle`` operation. However, the contents of the bundle are never
280 applied to the actual base repository.
280 applied to the actual base repository.
281
281
282 Instances constructed directly are not usable as repository objects.
282 Instances constructed directly are not usable as repository objects.
283 Use instance() or makebundlerepository() to create instances.
283 Use instance() or makebundlerepository() to create instances.
284 """
284 """
285
285
286 def __init__(self, bundlepath, url, tempparent):
286 def __init__(self, bundlepath, url, tempparent):
287 self._tempparent = tempparent
287 self._tempparent = tempparent
288 self._url = url
288 self._url = url
289
289
290 self.ui.setconfig(b'phases', b'publish', False, b'bundlerepo')
290 self.ui.setconfig(b'phases', b'publish', False, b'bundlerepo')
291
291
292 # dict with the mapping 'filename' -> position in the changegroup.
293 self._cgfilespos = {}
294 self._bundlefile = None
295 self._cgunpacker = None
292 self.tempfile = None
296 self.tempfile = None
293 f = util.posixfile(bundlepath, b"rb")
297 f = util.posixfile(bundlepath, b"rb")
294 bundle = exchange.readbundle(self.ui, f, bundlepath)
298 bundle = exchange.readbundle(self.ui, f, bundlepath)
295
299
296 if isinstance(bundle, bundle2.unbundle20):
300 if isinstance(bundle, bundle2.unbundle20):
297 self._bundlefile = bundle
301 self._bundlefile = bundle
298 self._cgunpacker = None
299
302
300 cgpart = None
303 cgpart = None
301 for part in bundle.iterparts(seekable=True):
304 for part in bundle.iterparts(seekable=True):
302 if part.type == b'changegroup':
305 if part.type == b'changegroup':
303 if cgpart:
306 if cgpart:
304 raise NotImplementedError(
307 raise NotImplementedError(
305 b"can't process multiple changegroups"
308 b"can't process multiple changegroups"
306 )
309 )
307 cgpart = part
310 cgpart = part
308 self._handle_bundle2_cg_part(bundle, part)
311 self._handle_bundle2_cg_part(bundle, part)
309
312
310 if not cgpart:
313 if not cgpart:
311 raise error.Abort(_(b"No changegroups found"))
314 raise error.Abort(_(b"No changegroups found"))
312
315
313 # This is required to placate a later consumer, which expects
316 # This is required to placate a later consumer, which expects
314 # the payload offset to be at the beginning of the changegroup.
317 # the payload offset to be at the beginning of the changegroup.
315 # We need to do this after the iterparts() generator advances
318 # We need to do this after the iterparts() generator advances
316 # because iterparts() will seek to end of payload after the
319 # because iterparts() will seek to end of payload after the
317 # generator returns control to iterparts().
320 # generator returns control to iterparts().
318 cgpart.seek(0, os.SEEK_SET)
321 cgpart.seek(0, os.SEEK_SET)
319
322
320 elif isinstance(bundle, changegroup.cg1unpacker):
323 elif isinstance(bundle, changegroup.cg1unpacker):
321 self._handle_bundle1(bundle, bundlepath)
324 self._handle_bundle1(bundle, bundlepath)
322 else:
325 else:
323 raise error.Abort(
326 raise error.Abort(
324 _(b'bundle type %s cannot be read') % type(bundle)
327 _(b'bundle type %s cannot be read') % type(bundle)
325 )
328 )
326
329
327 # dict with the mapping 'filename' -> position in the changegroup.
328 self._cgfilespos = {}
329
330 self.firstnewrev = self.changelog.repotiprev + 1
330 self.firstnewrev = self.changelog.repotiprev + 1
331 phases.retractboundary(
331 phases.retractboundary(
332 self,
332 self,
333 None,
333 None,
334 phases.draft,
334 phases.draft,
335 [ctx.node() for ctx in self[self.firstnewrev :]],
335 [ctx.node() for ctx in self[self.firstnewrev :]],
336 )
336 )
337
337
338 def _handle_bundle1(self, bundle, bundlepath):
338 def _handle_bundle1(self, bundle, bundlepath):
339 if bundle.compressed():
339 if bundle.compressed():
340 f = self._writetempbundle(bundle.read, b'.hg10un', header=b'HG10UN')
340 f = self._writetempbundle(bundle.read, b'.hg10un', header=b'HG10UN')
341 bundle = exchange.readbundle(self.ui, f, bundlepath, self.vfs)
341 bundle = exchange.readbundle(self.ui, f, bundlepath, self.vfs)
342
342
343 self._bundlefile = bundle
343 self._bundlefile = bundle
344 self._cgunpacker = bundle
344 self._cgunpacker = bundle
345
345
346 def _handle_bundle2_cg_part(self, bundle, part):
346 def _handle_bundle2_cg_part(self, bundle, part):
347 assert part.type == b'changegroup'
347 assert part.type == b'changegroup'
348 cgstream = part
348 cgstream = part
349 version = part.params.get(b'version', b'01')
349 version = part.params.get(b'version', b'01')
350 legalcgvers = changegroup.supportedincomingversions(self)
350 legalcgvers = changegroup.supportedincomingversions(self)
351 if version not in legalcgvers:
351 if version not in legalcgvers:
352 msg = _(b'Unsupported changegroup version: %s')
352 msg = _(b'Unsupported changegroup version: %s')
353 raise error.Abort(msg % version)
353 raise error.Abort(msg % version)
354 if bundle.compressed():
354 if bundle.compressed():
355 cgstream = self._writetempbundle(part.read, b'.cg%sun' % version)
355 cgstream = self._writetempbundle(part.read, b'.cg%sun' % version)
356
356
357 self._cgunpacker = changegroup.getunbundler(version, cgstream, b'UN')
357 self._cgunpacker = changegroup.getunbundler(version, cgstream, b'UN')
358
358
359 def _writetempbundle(self, readfn, suffix, header=b''):
359 def _writetempbundle(self, readfn, suffix, header=b''):
360 """Write a temporary file to disk"""
360 """Write a temporary file to disk"""
361 fdtemp, temp = self.vfs.mkstemp(prefix=b"hg-bundle-", suffix=suffix)
361 fdtemp, temp = self.vfs.mkstemp(prefix=b"hg-bundle-", suffix=suffix)
362 self.tempfile = temp
362 self.tempfile = temp
363
363
364 with os.fdopen(fdtemp, 'wb') as fptemp:
364 with os.fdopen(fdtemp, 'wb') as fptemp:
365 fptemp.write(header)
365 fptemp.write(header)
366 while True:
366 while True:
367 chunk = readfn(2 ** 18)
367 chunk = readfn(2 ** 18)
368 if not chunk:
368 if not chunk:
369 break
369 break
370 fptemp.write(chunk)
370 fptemp.write(chunk)
371
371
372 return self.vfs.open(self.tempfile, mode=b"rb")
372 return self.vfs.open(self.tempfile, mode=b"rb")
373
373
374 @localrepo.unfilteredpropertycache
374 @localrepo.unfilteredpropertycache
375 def _phasecache(self):
375 def _phasecache(self):
376 return bundlephasecache(self, self._phasedefaults)
376 return bundlephasecache(self, self._phasedefaults)
377
377
378 @localrepo.unfilteredpropertycache
378 @localrepo.unfilteredpropertycache
379 def changelog(self):
379 def changelog(self):
380 # consume the header if it exists
380 # consume the header if it exists
381 self._cgunpacker.changelogheader()
381 self._cgunpacker.changelogheader()
382 c = bundlechangelog(self.svfs, self._cgunpacker)
382 c = bundlechangelog(self.svfs, self._cgunpacker)
383 self.manstart = self._cgunpacker.tell()
383 self.manstart = self._cgunpacker.tell()
384 return c
384 return c
385
385
386 def _refreshchangelog(self):
386 def _refreshchangelog(self):
387 # changelog for bundle repo are not filecache, this method is not
387 # changelog for bundle repo are not filecache, this method is not
388 # applicable.
388 # applicable.
389 pass
389 pass
390
390
391 @localrepo.unfilteredpropertycache
391 @localrepo.unfilteredpropertycache
392 def manifestlog(self):
392 def manifestlog(self):
393 self._cgunpacker.seek(self.manstart)
393 self._cgunpacker.seek(self.manstart)
394 # consume the header if it exists
394 # consume the header if it exists
395 self._cgunpacker.manifestheader()
395 self._cgunpacker.manifestheader()
396 linkmapper = self.unfiltered().changelog.rev
396 linkmapper = self.unfiltered().changelog.rev
397 rootstore = bundlemanifest(
397 rootstore = bundlemanifest(
398 self.nodeconstants, self.svfs, self._cgunpacker, linkmapper
398 self.nodeconstants, self.svfs, self._cgunpacker, linkmapper
399 )
399 )
400 self.filestart = self._cgunpacker.tell()
400 self.filestart = self._cgunpacker.tell()
401
401
402 return manifest.manifestlog(
402 return manifest.manifestlog(
403 self.svfs, self, rootstore, self.narrowmatch()
403 self.svfs, self, rootstore, self.narrowmatch()
404 )
404 )
405
405
406 def _consumemanifest(self):
406 def _consumemanifest(self):
407 """Consumes the manifest portion of the bundle, setting filestart so the
407 """Consumes the manifest portion of the bundle, setting filestart so the
408 file portion can be read."""
408 file portion can be read."""
409 self._cgunpacker.seek(self.manstart)
409 self._cgunpacker.seek(self.manstart)
410 self._cgunpacker.manifestheader()
410 self._cgunpacker.manifestheader()
411 for delta in self._cgunpacker.deltaiter():
411 for delta in self._cgunpacker.deltaiter():
412 pass
412 pass
413 self.filestart = self._cgunpacker.tell()
413 self.filestart = self._cgunpacker.tell()
414
414
415 @localrepo.unfilteredpropertycache
415 @localrepo.unfilteredpropertycache
416 def manstart(self):
416 def manstart(self):
417 self.changelog
417 self.changelog
418 return self.manstart
418 return self.manstart
419
419
420 @localrepo.unfilteredpropertycache
420 @localrepo.unfilteredpropertycache
421 def filestart(self):
421 def filestart(self):
422 self.manifestlog
422 self.manifestlog
423
423
424 # If filestart was not set by self.manifestlog, that means the
424 # If filestart was not set by self.manifestlog, that means the
425 # manifestlog implementation did not consume the manifests from the
425 # manifestlog implementation did not consume the manifests from the
426 # changegroup (ex: it might be consuming trees from a separate bundle2
426 # changegroup (ex: it might be consuming trees from a separate bundle2
427 # part instead). So we need to manually consume it.
427 # part instead). So we need to manually consume it.
428 if 'filestart' not in self.__dict__:
428 if 'filestart' not in self.__dict__:
429 self._consumemanifest()
429 self._consumemanifest()
430
430
431 return self.filestart
431 return self.filestart
432
432
433 def url(self):
433 def url(self):
434 return self._url
434 return self._url
435
435
436 def file(self, f):
436 def file(self, f):
437 if not self._cgfilespos:
437 if not self._cgfilespos:
438 self._cgunpacker.seek(self.filestart)
438 self._cgunpacker.seek(self.filestart)
439 self._cgfilespos = _getfilestarts(self._cgunpacker)
439 self._cgfilespos = _getfilestarts(self._cgunpacker)
440
440
441 if f in self._cgfilespos:
441 if f in self._cgfilespos:
442 self._cgunpacker.seek(self._cgfilespos[f])
442 self._cgunpacker.seek(self._cgfilespos[f])
443 linkmapper = self.unfiltered().changelog.rev
443 linkmapper = self.unfiltered().changelog.rev
444 return bundlefilelog(self.svfs, f, self._cgunpacker, linkmapper)
444 return bundlefilelog(self.svfs, f, self._cgunpacker, linkmapper)
445 else:
445 else:
446 return super(bundlerepository, self).file(f)
446 return super(bundlerepository, self).file(f)
447
447
448 def close(self):
448 def close(self):
449 """Close assigned bundle file immediately."""
449 """Close assigned bundle file immediately."""
450 self._bundlefile.close()
450 self._bundlefile.close()
451 if self.tempfile is not None:
451 if self.tempfile is not None:
452 self.vfs.unlink(self.tempfile)
452 self.vfs.unlink(self.tempfile)
453 if self._tempparent:
453 if self._tempparent:
454 shutil.rmtree(self._tempparent, True)
454 shutil.rmtree(self._tempparent, True)
455
455
456 def cancopy(self):
456 def cancopy(self):
457 return False
457 return False
458
458
459 def peer(self, path=None):
459 def peer(self, path=None):
460 return bundlepeer(self, path=path)
460 return bundlepeer(self, path=path)
461
461
462 def getcwd(self):
462 def getcwd(self):
463 return encoding.getcwd() # always outside the repo
463 return encoding.getcwd() # always outside the repo
464
464
465 # Check if parents exist in localrepo before setting
465 # Check if parents exist in localrepo before setting
466 def setparents(self, p1, p2=None):
466 def setparents(self, p1, p2=None):
467 if p2 is None:
467 if p2 is None:
468 p2 = self.nullid
468 p2 = self.nullid
469 p1rev = self.changelog.rev(p1)
469 p1rev = self.changelog.rev(p1)
470 p2rev = self.changelog.rev(p2)
470 p2rev = self.changelog.rev(p2)
471 msg = _(b"setting parent to node %s that only exists in the bundle\n")
471 msg = _(b"setting parent to node %s that only exists in the bundle\n")
472 if self.changelog.repotiprev < p1rev:
472 if self.changelog.repotiprev < p1rev:
473 self.ui.warn(msg % hex(p1))
473 self.ui.warn(msg % hex(p1))
474 if self.changelog.repotiprev < p2rev:
474 if self.changelog.repotiprev < p2rev:
475 self.ui.warn(msg % hex(p2))
475 self.ui.warn(msg % hex(p2))
476 return super(bundlerepository, self).setparents(p1, p2)
476 return super(bundlerepository, self).setparents(p1, p2)
477
477
478
478
479 def instance(ui, path, create, intents=None, createopts=None):
479 def instance(ui, path, create, intents=None, createopts=None):
480 if create:
480 if create:
481 raise error.Abort(_(b'cannot create new bundle repository'))
481 raise error.Abort(_(b'cannot create new bundle repository'))
482 # internal config: bundle.mainreporoot
482 # internal config: bundle.mainreporoot
483 parentpath = ui.config(b"bundle", b"mainreporoot")
483 parentpath = ui.config(b"bundle", b"mainreporoot")
484 if not parentpath:
484 if not parentpath:
485 # try to find the correct path to the working directory repo
485 # try to find the correct path to the working directory repo
486 parentpath = cmdutil.findrepo(encoding.getcwd())
486 parentpath = cmdutil.findrepo(encoding.getcwd())
487 if parentpath is None:
487 if parentpath is None:
488 parentpath = b''
488 parentpath = b''
489 if parentpath:
489 if parentpath:
490 # Try to make the full path relative so we get a nice, short URL.
490 # Try to make the full path relative so we get a nice, short URL.
491 # In particular, we don't want temp dir names in test outputs.
491 # In particular, we don't want temp dir names in test outputs.
492 cwd = encoding.getcwd()
492 cwd = encoding.getcwd()
493 if parentpath == cwd:
493 if parentpath == cwd:
494 parentpath = b''
494 parentpath = b''
495 else:
495 else:
496 cwd = pathutil.normasprefix(cwd)
496 cwd = pathutil.normasprefix(cwd)
497 if parentpath.startswith(cwd):
497 if parentpath.startswith(cwd):
498 parentpath = parentpath[len(cwd) :]
498 parentpath = parentpath[len(cwd) :]
499 u = urlutil.url(path)
499 u = urlutil.url(path)
500 path = u.localpath()
500 path = u.localpath()
501 if u.scheme == b'bundle':
501 if u.scheme == b'bundle':
502 s = path.split(b"+", 1)
502 s = path.split(b"+", 1)
503 if len(s) == 1:
503 if len(s) == 1:
504 repopath, bundlename = parentpath, s[0]
504 repopath, bundlename = parentpath, s[0]
505 else:
505 else:
506 repopath, bundlename = s
506 repopath, bundlename = s
507 else:
507 else:
508 repopath, bundlename = parentpath, path
508 repopath, bundlename = parentpath, path
509
509
510 return makebundlerepository(ui, repopath, bundlename)
510 return makebundlerepository(ui, repopath, bundlename)
511
511
512
512
513 def makebundlerepository(ui, repopath, bundlepath):
513 def makebundlerepository(ui, repopath, bundlepath):
514 """Make a bundle repository object based on repo and bundle paths."""
514 """Make a bundle repository object based on repo and bundle paths."""
515 if repopath:
515 if repopath:
516 url = b'bundle:%s+%s' % (util.expandpath(repopath), bundlepath)
516 url = b'bundle:%s+%s' % (util.expandpath(repopath), bundlepath)
517 else:
517 else:
518 url = b'bundle:%s' % bundlepath
518 url = b'bundle:%s' % bundlepath
519
519
520 # Because we can't make any guarantees about the type of the base
520 # Because we can't make any guarantees about the type of the base
521 # repository, we can't have a static class representing the bundle
521 # repository, we can't have a static class representing the bundle
522 # repository. We also can't make any guarantees about how to even
522 # repository. We also can't make any guarantees about how to even
523 # call the base repository's constructor!
523 # call the base repository's constructor!
524 #
524 #
525 # So, our strategy is to go through ``localrepo.instance()`` to construct
525 # So, our strategy is to go through ``localrepo.instance()`` to construct
526 # a repo instance. Then, we dynamically create a new type derived from
526 # a repo instance. Then, we dynamically create a new type derived from
527 # both it and our ``bundlerepository`` class which overrides some
527 # both it and our ``bundlerepository`` class which overrides some
528 # functionality. We then change the type of the constructed repository
528 # functionality. We then change the type of the constructed repository
529 # to this new type and initialize the bundle-specific bits of it.
529 # to this new type and initialize the bundle-specific bits of it.
530
530
531 try:
531 try:
532 repo = localrepo.instance(ui, repopath, create=False)
532 repo = localrepo.instance(ui, repopath, create=False)
533 tempparent = None
533 tempparent = None
534 except error.RequirementError:
534 except error.RequirementError:
535 raise # no fallback if the backing repo is unsupported
535 raise # no fallback if the backing repo is unsupported
536 except error.RepoError:
536 except error.RepoError:
537 tempparent = pycompat.mkdtemp()
537 tempparent = pycompat.mkdtemp()
538 try:
538 try:
539 repo = localrepo.instance(ui, tempparent, create=True)
539 repo = localrepo.instance(ui, tempparent, create=True)
540 except Exception:
540 except Exception:
541 shutil.rmtree(tempparent)
541 shutil.rmtree(tempparent)
542 raise
542 raise
543
543
544 class derivedbundlerepository(bundlerepository, repo.__class__):
544 class derivedbundlerepository(bundlerepository, repo.__class__):
545 pass
545 pass
546
546
547 repo.__class__ = derivedbundlerepository
547 repo.__class__ = derivedbundlerepository
548 bundlerepository.__init__(repo, bundlepath, url, tempparent)
548 bundlerepository.__init__(repo, bundlepath, url, tempparent)
549
549
550 return repo
550 return repo
551
551
552
552
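A hedged usage sketch for the factory above, following the bundlerepository docstring's advice to go through instance() or makebundlerepository() rather than constructing instances directly (the paths and the ui object are placeholders, not taken from this patch):

from mercurial import bundlerepo, ui as uimod

ui = uimod.ui.load()
repo = bundlerepo.makebundlerepository(
    ui, b'/path/to/repo', b'/path/to/incoming.hg'
)
try:
    # url() reports the synthetic bundle: URL assembled above.
    ui.write(repo.url() + b'\n')
    # firstnewrev marks the first revision that only exists in the bundle.
    for ctx in repo[repo.firstnewrev :]:
        ui.write(b'%d:%s\n' % (ctx.rev(), ctx.hex()[:12]))
finally:
    repo.close()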
553 class bundletransactionmanager:
553 class bundletransactionmanager:
554 def transaction(self):
554 def transaction(self):
555 return None
555 return None
556
556
557 def close(self):
557 def close(self):
558 raise NotImplementedError
558 raise NotImplementedError
559
559
560 def release(self):
560 def release(self):
561 raise NotImplementedError
561 raise NotImplementedError
562
562
563
563
564 def getremotechanges(
564 def getremotechanges(
565 ui, repo, peer, onlyheads=None, bundlename=None, force=False
565 ui, repo, peer, onlyheads=None, bundlename=None, force=False
566 ):
566 ):
567 """obtains a bundle of changes incoming from peer
567 """obtains a bundle of changes incoming from peer
568
568
569 "onlyheads" restricts the returned changes to those reachable from the
569 "onlyheads" restricts the returned changes to those reachable from the
570 specified heads.
570 specified heads.
571 "bundlename", if given, stores the bundle to this file path permanently;
571 "bundlename", if given, stores the bundle to this file path permanently;
572 otherwise it's stored to a temp file and gets deleted again when you call
572 otherwise it's stored to a temp file and gets deleted again when you call
573 the returned "cleanupfn".
573 the returned "cleanupfn".
574 "force" indicates whether to proceed on unrelated repos.
574 "force" indicates whether to proceed on unrelated repos.
575
575
576 Returns a tuple (local, csets, cleanupfn):
576 Returns a tuple (local, csets, cleanupfn):
577
577
578 "local" is a local repo from which to obtain the actual incoming
578 "local" is a local repo from which to obtain the actual incoming
579 changesets; it is a bundlerepo for the obtained bundle when the
579 changesets; it is a bundlerepo for the obtained bundle when the
580 original "peer" is remote.
580 original "peer" is remote.
581 "csets" lists the incoming changeset node ids.
581 "csets" lists the incoming changeset node ids.
582 "cleanupfn" must be called without arguments when you're done processing
582 "cleanupfn" must be called without arguments when you're done processing
583 the changes; it closes both the original "peer" and the one returned
583 the changes; it closes both the original "peer" and the one returned
584 here.
584 here.
585 """
585 """
586 tmp = discovery.findcommonincoming(repo, peer, heads=onlyheads, force=force)
586 tmp = discovery.findcommonincoming(repo, peer, heads=onlyheads, force=force)
587 common, incoming, rheads = tmp
587 common, incoming, rheads = tmp
588 if not incoming:
588 if not incoming:
589 try:
589 try:
590 if bundlename:
590 if bundlename:
591 os.unlink(bundlename)
591 os.unlink(bundlename)
592 except OSError:
592 except OSError:
593 pass
593 pass
594 return repo, [], peer.close
594 return repo, [], peer.close
595
595
596 commonset = set(common)
596 commonset = set(common)
597 rheads = [x for x in rheads if x not in commonset]
597 rheads = [x for x in rheads if x not in commonset]
598
598
599 bundle = None
599 bundle = None
600 bundlerepo = None
600 bundlerepo = None
601 localrepo = peer.local()
601 localrepo = peer.local()
602 if bundlename or not localrepo:
602 if bundlename or not localrepo:
603 # create a bundle (uncompressed if peer repo is not local)
603 # create a bundle (uncompressed if peer repo is not local)
604
604
605 # developer config: devel.legacy.exchange
605 # developer config: devel.legacy.exchange
606 legexc = ui.configlist(b'devel', b'legacy.exchange')
606 legexc = ui.configlist(b'devel', b'legacy.exchange')
607 forcebundle1 = b'bundle2' not in legexc and b'bundle1' in legexc
607 forcebundle1 = b'bundle2' not in legexc and b'bundle1' in legexc
608 canbundle2 = (
608 canbundle2 = (
609 not forcebundle1
609 not forcebundle1
610 and peer.capable(b'getbundle')
610 and peer.capable(b'getbundle')
611 and peer.capable(b'bundle2')
611 and peer.capable(b'bundle2')
612 )
612 )
613 if canbundle2:
613 if canbundle2:
614 with peer.commandexecutor() as e:
614 with peer.commandexecutor() as e:
615 b2 = e.callcommand(
615 b2 = e.callcommand(
616 b'getbundle',
616 b'getbundle',
617 {
617 {
618 b'source': b'incoming',
618 b'source': b'incoming',
619 b'common': common,
619 b'common': common,
620 b'heads': rheads,
620 b'heads': rheads,
621 b'bundlecaps': exchange.caps20to10(
621 b'bundlecaps': exchange.caps20to10(
622 repo, role=b'client'
622 repo, role=b'client'
623 ),
623 ),
624 b'cg': True,
624 b'cg': True,
625 },
625 },
626 ).result()
626 ).result()
627
627
628 fname = bundle = changegroup.writechunks(
628 fname = bundle = changegroup.writechunks(
629 ui, b2._forwardchunks(), bundlename
629 ui, b2._forwardchunks(), bundlename
630 )
630 )
631 else:
631 else:
632 if peer.capable(b'getbundle'):
632 if peer.capable(b'getbundle'):
633 with peer.commandexecutor() as e:
633 with peer.commandexecutor() as e:
634 cg = e.callcommand(
634 cg = e.callcommand(
635 b'getbundle',
635 b'getbundle',
636 {
636 {
637 b'source': b'incoming',
637 b'source': b'incoming',
638 b'common': common,
638 b'common': common,
639 b'heads': rheads,
639 b'heads': rheads,
640 },
640 },
641 ).result()
641 ).result()
642 elif onlyheads is None and not peer.capable(b'changegroupsubset'):
642 elif onlyheads is None and not peer.capable(b'changegroupsubset'):
643 # compat with older servers when pulling all remote heads
643 # compat with older servers when pulling all remote heads
644
644
645 with peer.commandexecutor() as e:
645 with peer.commandexecutor() as e:
646 cg = e.callcommand(
646 cg = e.callcommand(
647 b'changegroup',
647 b'changegroup',
648 {
648 {
649 b'nodes': incoming,
649 b'nodes': incoming,
650 b'source': b'incoming',
650 b'source': b'incoming',
651 },
651 },
652 ).result()
652 ).result()
653
653
654 rheads = None
654 rheads = None
655 else:
655 else:
656 with peer.commandexecutor() as e:
656 with peer.commandexecutor() as e:
657 cg = e.callcommand(
657 cg = e.callcommand(
658 b'changegroupsubset',
658 b'changegroupsubset',
659 {
659 {
660 b'bases': incoming,
660 b'bases': incoming,
661 b'heads': rheads,
661 b'heads': rheads,
662 b'source': b'incoming',
662 b'source': b'incoming',
663 },
663 },
664 ).result()
664 ).result()
665
665
666 if localrepo:
666 if localrepo:
667 bundletype = b"HG10BZ"
667 bundletype = b"HG10BZ"
668 else:
668 else:
669 bundletype = b"HG10UN"
669 bundletype = b"HG10UN"
670 fname = bundle = bundle2.writebundle(ui, cg, bundlename, bundletype)
670 fname = bundle = bundle2.writebundle(ui, cg, bundlename, bundletype)
671 # keep written bundle?
671 # keep written bundle?
672 if bundlename:
672 if bundlename:
673 bundle = None
673 bundle = None
674 if not localrepo:
674 if not localrepo:
675 # use the created uncompressed bundlerepo
675 # use the created uncompressed bundlerepo
676 localrepo = bundlerepo = makebundlerepository(
676 localrepo = bundlerepo = makebundlerepository(
677 repo.baseui, repo.root, fname
677 repo.baseui, repo.root, fname
678 )
678 )
679
679
680 # this repo contains local and peer now, so filter out local again
680 # this repo contains local and peer now, so filter out local again
681 common = repo.heads()
681 common = repo.heads()
682 if localrepo:
682 if localrepo:
683 # Part of common may be remotely filtered
683 # Part of common may be remotely filtered
684 # So use an unfiltered version
684 # So use an unfiltered version
685 # The discovery process probably need cleanup to avoid that
685 # The discovery process probably need cleanup to avoid that
686 localrepo = localrepo.unfiltered()
686 localrepo = localrepo.unfiltered()
687
687
688 csets = localrepo.changelog.findmissing(common, rheads)
688 csets = localrepo.changelog.findmissing(common, rheads)
689
689
690 if bundlerepo:
690 if bundlerepo:
691 reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev :]]
691 reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev :]]
692
692
693 with peer.commandexecutor() as e:
693 with peer.commandexecutor() as e:
694 remotephases = e.callcommand(
694 remotephases = e.callcommand(
695 b'listkeys',
695 b'listkeys',
696 {
696 {
697 b'namespace': b'phases',
697 b'namespace': b'phases',
698 },
698 },
699 ).result()
699 ).result()
700
700
701 pullop = exchange.pulloperation(
701 pullop = exchange.pulloperation(
702 bundlerepo, peer, path=None, heads=reponodes
702 bundlerepo, peer, path=None, heads=reponodes
703 )
703 )
704 pullop.trmanager = bundletransactionmanager()
704 pullop.trmanager = bundletransactionmanager()
705 exchange._pullapplyphases(pullop, remotephases)
705 exchange._pullapplyphases(pullop, remotephases)
706
706
707 def cleanup():
707 def cleanup():
708 if bundlerepo:
708 if bundlerepo:
709 bundlerepo.close()
709 bundlerepo.close()
710 if bundle:
710 if bundle:
711 os.unlink(bundle)
711 os.unlink(bundle)
712 peer.close()
712 peer.close()
713
713
714 return (localrepo, csets, cleanup)
714 return (localrepo, csets, cleanup)
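A hedged sketch of the calling convention documented in the getremotechanges docstring; ui, repo and peer are assumed to already exist (for example from hg.repository() and hg.peer()), and nothing here is taken from the patch itself:

local, csets, cleanupfn = getremotechanges(ui, repo, peer)
try:
    for node in csets:
        ctx = local[node]
        ui.write(b'%d:%s %s\n' % (ctx.rev(), ctx.hex()[:12], ctx.description()))
finally:
    # Per the docstring, always call the cleanup function: it closes the
    # bundle repository (if one was created), removes any temporary bundle
    # file, and closes the original peer.
    cleanupfn()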