revlog: use the `entry` function in bundlerepo...
Author: marmoute
Changeset: r48190:53289d02 (default)
@@ -1,717 +1,713 @@
1 # bundlerepo.py - repository class for viewing uncompressed bundles
1 # bundlerepo.py - repository class for viewing uncompressed bundles
2 #
2 #
3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Repository class for viewing uncompressed bundles.
8 """Repository class for viewing uncompressed bundles.
9
9
10 This provides a read-only repository interface to bundles as if they
10 This provides a read-only repository interface to bundles as if they
11 were part of the actual repository.
11 were part of the actual repository.
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import os
16 import os
17 import shutil
17 import shutil
18
18
19 from .i18n import _
19 from .i18n import _
20 from .node import (
20 from .node import (
21 hex,
21 hex,
22 nullrev,
22 nullrev,
23 )
23 )
24
24
25 from . import (
25 from . import (
26 bundle2,
26 bundle2,
27 changegroup,
27 changegroup,
28 changelog,
28 changelog,
29 cmdutil,
29 cmdutil,
30 discovery,
30 discovery,
31 encoding,
31 encoding,
32 error,
32 error,
33 exchange,
33 exchange,
34 filelog,
34 filelog,
35 localrepo,
35 localrepo,
36 manifest,
36 manifest,
37 mdiff,
37 mdiff,
38 pathutil,
38 pathutil,
39 phases,
39 phases,
40 pycompat,
40 pycompat,
41 revlog,
41 revlog,
42 revlogutils,
42 revlogutils,
43 util,
43 util,
44 vfs as vfsmod,
44 vfs as vfsmod,
45 )
45 )
46 from .utils import (
46 from .utils import (
47 urlutil,
47 urlutil,
48 )
48 )
49
49
50 from .revlogutils import (
50 from .revlogutils import (
51 constants as revlog_constants,
51 constants as revlog_constants,
52 )
52 )
53
53
54
54
55 class bundlerevlog(revlog.revlog):
55 class bundlerevlog(revlog.revlog):
56 def __init__(self, opener, target, radix, cgunpacker, linkmapper):
56 def __init__(self, opener, target, radix, cgunpacker, linkmapper):
57 # How it works:
57 # How it works:
58 # To retrieve a revision, we need to know the offset of the revision in
58 # To retrieve a revision, we need to know the offset of the revision in
59 # the bundle (an unbundle object). We store this offset in the index
59 # the bundle (an unbundle object). We store this offset in the index
60 # (start). The base of the delta is stored in the base field.
60 # (start). The base of the delta is stored in the base field.
61 #
61 #
62 # To differentiate a rev in the bundle from a rev in the revlog, we
62 # To differentiate a rev in the bundle from a rev in the revlog, we
63 # check revision against repotiprev.
63 # check revision against repotiprev.
64 opener = vfsmod.readonlyvfs(opener)
64 opener = vfsmod.readonlyvfs(opener)
65 revlog.revlog.__init__(self, opener, target=target, radix=radix)
65 revlog.revlog.__init__(self, opener, target=target, radix=radix)
66 self.bundle = cgunpacker
66 self.bundle = cgunpacker
67 n = len(self)
67 n = len(self)
68 self.repotiprev = n - 1
68 self.repotiprev = n - 1
69 self.bundlerevs = set() # used by 'bundle()' revset expression
69 self.bundlerevs = set() # used by 'bundle()' revset expression
70 for deltadata in cgunpacker.deltaiter():
70 for deltadata in cgunpacker.deltaiter():
71 node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
71 node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
72
72
73 size = len(delta)
73 size = len(delta)
74 start = cgunpacker.tell() - size
74 start = cgunpacker.tell() - size
75
75
76 if self.index.has_node(node):
76 if self.index.has_node(node):
77 # this can happen if two branches make the same change
77 # this can happen if two branches make the same change
78 self.bundlerevs.add(self.index.rev(node))
78 self.bundlerevs.add(self.index.rev(node))
79 continue
79 continue
80 if cs == node:
80 if cs == node:
81 linkrev = nullrev
81 linkrev = nullrev
82 else:
82 else:
83 linkrev = linkmapper(cs)
83 linkrev = linkmapper(cs)
84
84
85 for p in (p1, p2):
85 for p in (p1, p2):
86 if not self.index.has_node(p):
86 if not self.index.has_node(p):
87 raise error.LookupError(
87 raise error.LookupError(
88 p, self.display_id, _(b"unknown parent")
88 p, self.display_id, _(b"unknown parent")
89 )
89 )
90
90
91 if not self.index.has_node(deltabase):
91 if not self.index.has_node(deltabase):
92 raise LookupError(
92 raise LookupError(
93 deltabase, self.display_id, _(b'unknown delta base')
93 deltabase, self.display_id, _(b'unknown delta base')
94 )
94 )
95
95
96 baserev = self.rev(deltabase)
96 baserev = self.rev(deltabase)
97 # start, size, full unc. size, base (unused), link, p1, p2, node, sidedata_offset (unused), sidedata_size (unused)
97 # start, size, full unc. size, base (unused), link, p1, p2, node, sidedata_offset (unused), sidedata_size (unused)
98 e = (
98 e = revlogutils.entry(
99 revlogutils.offset_type(start, flags),
99 flags=flags,
100 size,
100 data_offset=start,
101 -1,
101 data_compressed_length=size,
102 baserev,
102 data_delta_base=baserev,
103 linkrev,
103 link_rev=linkrev,
104 self.rev(p1),
104 parent_rev_1=self.rev(p1),
105 self.rev(p2),
105 parent_rev_2=self.rev(p2),
106 node,
106 node_id=node,
107 0,
108 0,
109 revlog_constants.COMP_MODE_INLINE,
110 revlog_constants.COMP_MODE_INLINE,
111 )
107 )
112 self.index.append(e)
108 self.index.append(e)
113 self.bundlerevs.add(n)
109 self.bundlerevs.add(n)
114 n += 1
110 n += 1
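
Editor's note: this hunk is the whole point of the changeset: the index entry is now built through revlogutils.entry() with named fields instead of a hand-assembled positional tuple. Below is a simplified stand-in, not the real mercurial.revlogutils.entry(); the keyword names used in the call are taken from the diff, while the remaining parameter names, the COMP_MODE_INLINE value, and the defaults (-1 uncompressed length, 0/0 sidedata, inline compression mode) are read off the removed tuple and should be treated as assumptions.

    COMP_MODE_INLINE = 2  # assumed value of revlog_constants.COMP_MODE_INLINE


    def entry(
        data_offset,
        data_compressed_length,
        data_delta_base,
        link_rev,
        parent_rev_1,
        parent_rev_2,
        node_id,
        flags=0,
        data_uncompressed_length=-1,
        sidedata_offset=0,
        sidedata_compressed_length=0,
        data_compression_mode=COMP_MODE_INLINE,
        sidedata_compression_mode=COMP_MODE_INLINE,
    ):
        """Build a 12-item index entry tuple from named fields."""
        # offset_type() in the removed line packs the data offset and the
        # flag bits into a single integer: offset in the high bits, flags in
        # the low 16 bits.
        offset_flags = (data_offset << 16) | flags
        return (
            offset_flags,
            data_compressed_length,
            data_uncompressed_length,
            data_delta_base,
            link_rev,
            parent_rev_1,
            parent_rev_2,
            node_id,
            sidedata_offset,
            sidedata_compressed_length,
            data_compression_mode,
            sidedata_compression_mode,
        )


    # Same shape as the call in the loop above, with dummy values:
    e = entry(
        flags=0,
        data_offset=1024,
        data_compressed_length=55,
        data_delta_base=7,
        link_rev=12,
        parent_rev_1=7,
        parent_rev_2=-1,
        node_id=b'\x11' * 20,
    )
    assert e[0] >> 16 == 1024 and e[2] == -1 and e[10] == COMP_MODE_INLINE
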
115
111
116 def _chunk(self, rev, df=None):
112 def _chunk(self, rev, df=None):
117 # Warning: in case of bundle, the diff is against what we stored as
113 # Warning: in case of bundle, the diff is against what we stored as
118 # delta base, not against rev - 1
114 # delta base, not against rev - 1
119 # XXX: could use some caching
115 # XXX: could use some caching
120 if rev <= self.repotiprev:
116 if rev <= self.repotiprev:
121 return revlog.revlog._chunk(self, rev)
117 return revlog.revlog._chunk(self, rev)
122 self.bundle.seek(self.start(rev))
118 self.bundle.seek(self.start(rev))
123 return self.bundle.read(self.length(rev))
119 return self.bundle.read(self.length(rev))
124
120
125 def revdiff(self, rev1, rev2):
121 def revdiff(self, rev1, rev2):
126 """return or calculate a delta between two revisions"""
122 """return or calculate a delta between two revisions"""
127 if rev1 > self.repotiprev and rev2 > self.repotiprev:
123 if rev1 > self.repotiprev and rev2 > self.repotiprev:
128 # hot path for bundle
124 # hot path for bundle
129 revb = self.index[rev2][3]
125 revb = self.index[rev2][3]
130 if revb == rev1:
126 if revb == rev1:
131 return self._chunk(rev2)
127 return self._chunk(rev2)
132 elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
128 elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
133 return revlog.revlog.revdiff(self, rev1, rev2)
129 return revlog.revlog.revdiff(self, rev1, rev2)
134
130
135 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
131 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
136
132
137 def _rawtext(self, node, rev, _df=None):
133 def _rawtext(self, node, rev, _df=None):
138 if rev is None:
134 if rev is None:
139 rev = self.rev(node)
135 rev = self.rev(node)
140 validated = False
136 validated = False
141 rawtext = None
137 rawtext = None
142 chain = []
138 chain = []
143 iterrev = rev
139 iterrev = rev
144 # reconstruct the revision if it is from a changegroup
140 # reconstruct the revision if it is from a changegroup
145 while iterrev > self.repotiprev:
141 while iterrev > self.repotiprev:
146 if self._revisioncache and self._revisioncache[1] == iterrev:
142 if self._revisioncache and self._revisioncache[1] == iterrev:
147 rawtext = self._revisioncache[2]
143 rawtext = self._revisioncache[2]
148 break
144 break
149 chain.append(iterrev)
145 chain.append(iterrev)
150 iterrev = self.index[iterrev][3]
146 iterrev = self.index[iterrev][3]
151 if iterrev == nullrev:
147 if iterrev == nullrev:
152 rawtext = b''
148 rawtext = b''
153 elif rawtext is None:
149 elif rawtext is None:
154 r = super(bundlerevlog, self)._rawtext(
150 r = super(bundlerevlog, self)._rawtext(
155 self.node(iterrev), iterrev, _df=_df
151 self.node(iterrev), iterrev, _df=_df
156 )
152 )
157 __, rawtext, validated = r
153 __, rawtext, validated = r
158 if chain:
154 if chain:
159 validated = False
155 validated = False
160 while chain:
156 while chain:
161 delta = self._chunk(chain.pop())
157 delta = self._chunk(chain.pop())
162 rawtext = mdiff.patches(rawtext, [delta])
158 rawtext = mdiff.patches(rawtext, [delta])
163 return rev, rawtext, validated
159 return rev, rawtext, validated
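
Editor's note: _chunk() reads raw delta bytes straight from the bundle at the stored offset, revdiff() short-circuits when rev2's delta base (index field 3) is rev1, and _rawtext() walks that delta-base chain back until it reaches the revision cache, the null revision, or content the on-disk revlog can produce, then replays the collected bundle deltas oldest-first. A toy model of that walk, using a simplified (start, end, replacement) patch format instead of mdiff's real binary deltas:

    # full_texts: revisions we can produce directly (the local-revlog side);
    # bases/deltas: revisions that only exist as deltas in the bundle.
    def apply_patch(text, patch):
        start, end, data = patch
        return text[:start] + data + text[end:]


    def reconstruct(rev, bases, deltas, full_texts):
        chain = []
        while rev not in full_texts:   # like: while iterrev > self.repotiprev
            chain.append(rev)
            rev = bases[rev]           # like: iterrev = self.index[iterrev][3]
        text = full_texts[rev]
        while chain:                   # replay deltas, oldest first
            text = apply_patch(text, deltas[chain.pop()])
        return text


    texts = {0: b'hello world'}
    bases = {1: 0, 2: 1}
    deltas = {1: (6, 11, b'there'), 2: (0, 5, b'HELLO')}
    assert reconstruct(2, bases, deltas, texts) == b'HELLO there'
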
164
160
165 def addrevision(self, *args, **kwargs):
161 def addrevision(self, *args, **kwargs):
166 raise NotImplementedError
162 raise NotImplementedError
167
163
168 def addgroup(self, *args, **kwargs):
164 def addgroup(self, *args, **kwargs):
169 raise NotImplementedError
165 raise NotImplementedError
170
166
171 def strip(self, *args, **kwargs):
167 def strip(self, *args, **kwargs):
172 raise NotImplementedError
168 raise NotImplementedError
173
169
174 def checksize(self):
170 def checksize(self):
175 raise NotImplementedError
171 raise NotImplementedError
176
172
177
173
178 class bundlechangelog(bundlerevlog, changelog.changelog):
174 class bundlechangelog(bundlerevlog, changelog.changelog):
179 def __init__(self, opener, cgunpacker):
175 def __init__(self, opener, cgunpacker):
180 changelog.changelog.__init__(self, opener)
176 changelog.changelog.__init__(self, opener)
181 linkmapper = lambda x: x
177 linkmapper = lambda x: x
182 bundlerevlog.__init__(
178 bundlerevlog.__init__(
183 self,
179 self,
184 opener,
180 opener,
185 (revlog_constants.KIND_CHANGELOG, None),
181 (revlog_constants.KIND_CHANGELOG, None),
186 self.radix,
182 self.radix,
187 cgunpacker,
183 cgunpacker,
188 linkmapper,
184 linkmapper,
189 )
185 )
190
186
191
187
192 class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
188 class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
193 def __init__(
189 def __init__(
194 self,
190 self,
195 nodeconstants,
191 nodeconstants,
196 opener,
192 opener,
197 cgunpacker,
193 cgunpacker,
198 linkmapper,
194 linkmapper,
199 dirlogstarts=None,
195 dirlogstarts=None,
200 dir=b'',
196 dir=b'',
201 ):
197 ):
202 manifest.manifestrevlog.__init__(self, nodeconstants, opener, tree=dir)
198 manifest.manifestrevlog.__init__(self, nodeconstants, opener, tree=dir)
203 bundlerevlog.__init__(
199 bundlerevlog.__init__(
204 self,
200 self,
205 opener,
201 opener,
206 (revlog_constants.KIND_MANIFESTLOG, dir),
202 (revlog_constants.KIND_MANIFESTLOG, dir),
207 self._revlog.radix,
203 self._revlog.radix,
208 cgunpacker,
204 cgunpacker,
209 linkmapper,
205 linkmapper,
210 )
206 )
211 if dirlogstarts is None:
207 if dirlogstarts is None:
212 dirlogstarts = {}
208 dirlogstarts = {}
213 if self.bundle.version == b"03":
209 if self.bundle.version == b"03":
214 dirlogstarts = _getfilestarts(self.bundle)
210 dirlogstarts = _getfilestarts(self.bundle)
215 self._dirlogstarts = dirlogstarts
211 self._dirlogstarts = dirlogstarts
216 self._linkmapper = linkmapper
212 self._linkmapper = linkmapper
217
213
218 def dirlog(self, d):
214 def dirlog(self, d):
219 if d in self._dirlogstarts:
215 if d in self._dirlogstarts:
220 self.bundle.seek(self._dirlogstarts[d])
216 self.bundle.seek(self._dirlogstarts[d])
221 return bundlemanifest(
217 return bundlemanifest(
222 self.nodeconstants,
218 self.nodeconstants,
223 self.opener,
219 self.opener,
224 self.bundle,
220 self.bundle,
225 self._linkmapper,
221 self._linkmapper,
226 self._dirlogstarts,
222 self._dirlogstarts,
227 dir=d,
223 dir=d,
228 )
224 )
229 return super(bundlemanifest, self).dirlog(d)
225 return super(bundlemanifest, self).dirlog(d)
230
226
231
227
232 class bundlefilelog(filelog.filelog):
228 class bundlefilelog(filelog.filelog):
233 def __init__(self, opener, path, cgunpacker, linkmapper):
229 def __init__(self, opener, path, cgunpacker, linkmapper):
234 filelog.filelog.__init__(self, opener, path)
230 filelog.filelog.__init__(self, opener, path)
235 self._revlog = bundlerevlog(
231 self._revlog = bundlerevlog(
236 opener,
232 opener,
237 # XXX should use the unencoded path
233 # XXX should use the unencoded path
238 target=(revlog_constants.KIND_FILELOG, path),
234 target=(revlog_constants.KIND_FILELOG, path),
239 radix=self._revlog.radix,
235 radix=self._revlog.radix,
240 cgunpacker=cgunpacker,
236 cgunpacker=cgunpacker,
241 linkmapper=linkmapper,
237 linkmapper=linkmapper,
242 )
238 )
243
239
244
240
245 class bundlepeer(localrepo.localpeer):
241 class bundlepeer(localrepo.localpeer):
246 def canpush(self):
242 def canpush(self):
247 return False
243 return False
248
244
249
245
250 class bundlephasecache(phases.phasecache):
246 class bundlephasecache(phases.phasecache):
251 def __init__(self, *args, **kwargs):
247 def __init__(self, *args, **kwargs):
252 super(bundlephasecache, self).__init__(*args, **kwargs)
248 super(bundlephasecache, self).__init__(*args, **kwargs)
253 if util.safehasattr(self, 'opener'):
249 if util.safehasattr(self, 'opener'):
254 self.opener = vfsmod.readonlyvfs(self.opener)
250 self.opener = vfsmod.readonlyvfs(self.opener)
255
251
256 def write(self):
252 def write(self):
257 raise NotImplementedError
253 raise NotImplementedError
258
254
259 def _write(self, fp):
255 def _write(self, fp):
260 raise NotImplementedError
256 raise NotImplementedError
261
257
262 def _updateroots(self, phase, newroots, tr):
258 def _updateroots(self, phase, newroots, tr):
263 self.phaseroots[phase] = newroots
259 self.phaseroots[phase] = newroots
264 self.invalidate()
260 self.invalidate()
265 self.dirty = True
261 self.dirty = True
266
262
267
263
268 def _getfilestarts(cgunpacker):
264 def _getfilestarts(cgunpacker):
269 filespos = {}
265 filespos = {}
270 for chunkdata in iter(cgunpacker.filelogheader, {}):
266 for chunkdata in iter(cgunpacker.filelogheader, {}):
271 fname = chunkdata[b'filename']
267 fname = chunkdata[b'filename']
272 filespos[fname] = cgunpacker.tell()
268 filespos[fname] = cgunpacker.tell()
273 for chunk in iter(lambda: cgunpacker.deltachunk(None), {}):
269 for chunk in iter(lambda: cgunpacker.deltachunk(None), {}):
274 pass
270 pass
275 return filespos
271 return filespos
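
Editor's note: _getfilestarts() leans on the two-argument iter(callable, sentinel) idiom twice: filelogheader() is called until it returns an empty dict, and deltachunk(None) until it does the same, which skips past a file's delta chunks while tell() records where each file section starts. The idiom in isolation:

    # Two-argument iter(): call the function repeatedly until its return
    # value equals the sentinel.  Here the sentinel is b''; in
    # _getfilestarts() it is the empty dict both unpacker methods return at
    # the end of a section.
    import io

    f = io.BytesIO(b'abcdefghij')
    blocks = list(iter(lambda: f.read(4), b''))
    assert blocks == [b'abcd', b'efgh', b'ij']
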
276
272
277
273
278 class bundlerepository(object):
274 class bundlerepository(object):
279 """A repository instance that is a union of a local repo and a bundle.
275 """A repository instance that is a union of a local repo and a bundle.
280
276
281 Instances represent a read-only repository composed of a local repository
277 Instances represent a read-only repository composed of a local repository
282 with the contents of a bundle file applied. The repository instance is
278 with the contents of a bundle file applied. The repository instance is
283 conceptually similar to the state of a repository after an
279 conceptually similar to the state of a repository after an
284 ``hg unbundle`` operation. However, the contents of the bundle are never
280 ``hg unbundle`` operation. However, the contents of the bundle are never
285 applied to the actual base repository.
281 applied to the actual base repository.
286
282
287 Instances constructed directly are not usable as repository objects.
283 Instances constructed directly are not usable as repository objects.
288 Use instance() or makebundlerepository() to create instances.
284 Use instance() or makebundlerepository() to create instances.
289 """
285 """
290
286
291 def __init__(self, bundlepath, url, tempparent):
287 def __init__(self, bundlepath, url, tempparent):
292 self._tempparent = tempparent
288 self._tempparent = tempparent
293 self._url = url
289 self._url = url
294
290
295 self.ui.setconfig(b'phases', b'publish', False, b'bundlerepo')
291 self.ui.setconfig(b'phases', b'publish', False, b'bundlerepo')
296
292
297 self.tempfile = None
293 self.tempfile = None
298 f = util.posixfile(bundlepath, b"rb")
294 f = util.posixfile(bundlepath, b"rb")
299 bundle = exchange.readbundle(self.ui, f, bundlepath)
295 bundle = exchange.readbundle(self.ui, f, bundlepath)
300
296
301 if isinstance(bundle, bundle2.unbundle20):
297 if isinstance(bundle, bundle2.unbundle20):
302 self._bundlefile = bundle
298 self._bundlefile = bundle
303 self._cgunpacker = None
299 self._cgunpacker = None
304
300
305 cgpart = None
301 cgpart = None
306 for part in bundle.iterparts(seekable=True):
302 for part in bundle.iterparts(seekable=True):
307 if part.type == b'changegroup':
303 if part.type == b'changegroup':
308 if cgpart:
304 if cgpart:
309 raise NotImplementedError(
305 raise NotImplementedError(
310 b"can't process multiple changegroups"
306 b"can't process multiple changegroups"
311 )
307 )
312 cgpart = part
308 cgpart = part
313
309
314 self._handlebundle2part(bundle, part)
310 self._handlebundle2part(bundle, part)
315
311
316 if not cgpart:
312 if not cgpart:
317 raise error.Abort(_(b"No changegroups found"))
313 raise error.Abort(_(b"No changegroups found"))
318
314
319 # This is required to placate a later consumer, which expects
315 # This is required to placate a later consumer, which expects
320 # the payload offset to be at the beginning of the changegroup.
316 # the payload offset to be at the beginning of the changegroup.
321 # We need to do this after the iterparts() generator advances
317 # We need to do this after the iterparts() generator advances
322 # because iterparts() will seek to end of payload after the
318 # because iterparts() will seek to end of payload after the
323 # generator returns control to iterparts().
319 # generator returns control to iterparts().
324 cgpart.seek(0, os.SEEK_SET)
320 cgpart.seek(0, os.SEEK_SET)
325
321
326 elif isinstance(bundle, changegroup.cg1unpacker):
322 elif isinstance(bundle, changegroup.cg1unpacker):
327 if bundle.compressed():
323 if bundle.compressed():
328 f = self._writetempbundle(
324 f = self._writetempbundle(
329 bundle.read, b'.hg10un', header=b'HG10UN'
325 bundle.read, b'.hg10un', header=b'HG10UN'
330 )
326 )
331 bundle = exchange.readbundle(self.ui, f, bundlepath, self.vfs)
327 bundle = exchange.readbundle(self.ui, f, bundlepath, self.vfs)
332
328
333 self._bundlefile = bundle
329 self._bundlefile = bundle
334 self._cgunpacker = bundle
330 self._cgunpacker = bundle
335 else:
331 else:
336 raise error.Abort(
332 raise error.Abort(
337 _(b'bundle type %s cannot be read') % type(bundle)
333 _(b'bundle type %s cannot be read') % type(bundle)
338 )
334 )
339
335
340 # dict with the mapping 'filename' -> position in the changegroup.
336 # dict with the mapping 'filename' -> position in the changegroup.
341 self._cgfilespos = {}
337 self._cgfilespos = {}
342
338
343 self.firstnewrev = self.changelog.repotiprev + 1
339 self.firstnewrev = self.changelog.repotiprev + 1
344 phases.retractboundary(
340 phases.retractboundary(
345 self,
341 self,
346 None,
342 None,
347 phases.draft,
343 phases.draft,
348 [ctx.node() for ctx in self[self.firstnewrev :]],
344 [ctx.node() for ctx in self[self.firstnewrev :]],
349 )
345 )
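
Editor's note: the constructor accepts either container format exchange.readbundle() may hand back: a bundle2 stream (unbundle20), from which it extracts the single changegroup part, or a bare bundle1 changegroup (cg1unpacker), spooled uncompressed to a temp file if needed. A rough sketch of the header sniffing that drives this split; HG10UN and HG10BZ appear elsewhere in this file, HG20 is the bundle2 magic, and the helper name is mine:

    def sniff_bundle_kind(header):
        # The first bytes of a bundle file decide which unbundler to build.
        if header.startswith(b'HG20'):
            return 'bundle2'
        if header.startswith(b'HG10'):
            return 'bundle1, %s compression' % header[4:6].decode('ascii')
        raise ValueError('not a Mercurial bundle header: %r' % header)

    assert sniff_bundle_kind(b'HG20\x00\x00\x00\x00') == 'bundle2'
    assert sniff_bundle_kind(b'HG10UN') == 'bundle1, UN compression'
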
350
346
351 def _handlebundle2part(self, bundle, part):
347 def _handlebundle2part(self, bundle, part):
352 if part.type != b'changegroup':
348 if part.type != b'changegroup':
353 return
349 return
354
350
355 cgstream = part
351 cgstream = part
356 version = part.params.get(b'version', b'01')
352 version = part.params.get(b'version', b'01')
357 legalcgvers = changegroup.supportedincomingversions(self)
353 legalcgvers = changegroup.supportedincomingversions(self)
358 if version not in legalcgvers:
354 if version not in legalcgvers:
359 msg = _(b'Unsupported changegroup version: %s')
355 msg = _(b'Unsupported changegroup version: %s')
360 raise error.Abort(msg % version)
356 raise error.Abort(msg % version)
361 if bundle.compressed():
357 if bundle.compressed():
362 cgstream = self._writetempbundle(part.read, b'.cg%sun' % version)
358 cgstream = self._writetempbundle(part.read, b'.cg%sun' % version)
363
359
364 self._cgunpacker = changegroup.getunbundler(version, cgstream, b'UN')
360 self._cgunpacker = changegroup.getunbundler(version, cgstream, b'UN')
365
361
366 def _writetempbundle(self, readfn, suffix, header=b''):
362 def _writetempbundle(self, readfn, suffix, header=b''):
367 """Write a temporary file to disk"""
363 """Write a temporary file to disk"""
368 fdtemp, temp = self.vfs.mkstemp(prefix=b"hg-bundle-", suffix=suffix)
364 fdtemp, temp = self.vfs.mkstemp(prefix=b"hg-bundle-", suffix=suffix)
369 self.tempfile = temp
365 self.tempfile = temp
370
366
371 with os.fdopen(fdtemp, 'wb') as fptemp:
367 with os.fdopen(fdtemp, 'wb') as fptemp:
372 fptemp.write(header)
368 fptemp.write(header)
373 while True:
369 while True:
374 chunk = readfn(2 ** 18)
370 chunk = readfn(2 ** 18)
375 if not chunk:
371 if not chunk:
376 break
372 break
377 fptemp.write(chunk)
373 fptemp.write(chunk)
378
374
379 return self.vfs.open(self.tempfile, mode=b"rb")
375 return self.vfs.open(self.tempfile, mode=b"rb")
380
376
381 @localrepo.unfilteredpropertycache
377 @localrepo.unfilteredpropertycache
382 def _phasecache(self):
378 def _phasecache(self):
383 return bundlephasecache(self, self._phasedefaults)
379 return bundlephasecache(self, self._phasedefaults)
384
380
385 @localrepo.unfilteredpropertycache
381 @localrepo.unfilteredpropertycache
386 def changelog(self):
382 def changelog(self):
387 # consume the header if it exists
383 # consume the header if it exists
388 self._cgunpacker.changelogheader()
384 self._cgunpacker.changelogheader()
389 c = bundlechangelog(self.svfs, self._cgunpacker)
385 c = bundlechangelog(self.svfs, self._cgunpacker)
390 self.manstart = self._cgunpacker.tell()
386 self.manstart = self._cgunpacker.tell()
391 return c
387 return c
392
388
393 def _refreshchangelog(self):
389 def _refreshchangelog(self):
394 # changelog for bundle repo are not filecache, this method is not
390 # changelog for bundle repo are not filecache, this method is not
395 # applicable.
391 # applicable.
396 pass
392 pass
397
393
398 @localrepo.unfilteredpropertycache
394 @localrepo.unfilteredpropertycache
399 def manifestlog(self):
395 def manifestlog(self):
400 self._cgunpacker.seek(self.manstart)
396 self._cgunpacker.seek(self.manstart)
401 # consume the header if it exists
397 # consume the header if it exists
402 self._cgunpacker.manifestheader()
398 self._cgunpacker.manifestheader()
403 linkmapper = self.unfiltered().changelog.rev
399 linkmapper = self.unfiltered().changelog.rev
404 rootstore = bundlemanifest(
400 rootstore = bundlemanifest(
405 self.nodeconstants, self.svfs, self._cgunpacker, linkmapper
401 self.nodeconstants, self.svfs, self._cgunpacker, linkmapper
406 )
402 )
407 self.filestart = self._cgunpacker.tell()
403 self.filestart = self._cgunpacker.tell()
408
404
409 return manifest.manifestlog(
405 return manifest.manifestlog(
410 self.svfs, self, rootstore, self.narrowmatch()
406 self.svfs, self, rootstore, self.narrowmatch()
411 )
407 )
412
408
413 def _consumemanifest(self):
409 def _consumemanifest(self):
414 """Consumes the manifest portion of the bundle, setting filestart so the
410 """Consumes the manifest portion of the bundle, setting filestart so the
415 file portion can be read."""
411 file portion can be read."""
416 self._cgunpacker.seek(self.manstart)
412 self._cgunpacker.seek(self.manstart)
417 self._cgunpacker.manifestheader()
413 self._cgunpacker.manifestheader()
418 for delta in self._cgunpacker.deltaiter():
414 for delta in self._cgunpacker.deltaiter():
419 pass
415 pass
420 self.filestart = self._cgunpacker.tell()
416 self.filestart = self._cgunpacker.tell()
421
417
422 @localrepo.unfilteredpropertycache
418 @localrepo.unfilteredpropertycache
423 def manstart(self):
419 def manstart(self):
424 self.changelog
420 self.changelog
425 return self.manstart
421 return self.manstart
426
422
427 @localrepo.unfilteredpropertycache
423 @localrepo.unfilteredpropertycache
428 def filestart(self):
424 def filestart(self):
429 self.manifestlog
425 self.manifestlog
430
426
431 # If filestart was not set by self.manifestlog, that means the
427 # If filestart was not set by self.manifestlog, that means the
432 # manifestlog implementation did not consume the manifests from the
428 # manifestlog implementation did not consume the manifests from the
433 # changegroup (ex: it might be consuming trees from a separate bundle2
429 # changegroup (ex: it might be consuming trees from a separate bundle2
434 # part instead). So we need to manually consume it.
430 # part instead). So we need to manually consume it.
435 if 'filestart' not in self.__dict__:
431 if 'filestart' not in self.__dict__:
436 self._consumemanifest()
432 self._consumemanifest()
437
433
438 return self.filestart
434 return self.filestart
439
435
440 def url(self):
436 def url(self):
441 return self._url
437 return self._url
442
438
443 def file(self, f):
439 def file(self, f):
444 if not self._cgfilespos:
440 if not self._cgfilespos:
445 self._cgunpacker.seek(self.filestart)
441 self._cgunpacker.seek(self.filestart)
446 self._cgfilespos = _getfilestarts(self._cgunpacker)
442 self._cgfilespos = _getfilestarts(self._cgunpacker)
447
443
448 if f in self._cgfilespos:
444 if f in self._cgfilespos:
449 self._cgunpacker.seek(self._cgfilespos[f])
445 self._cgunpacker.seek(self._cgfilespos[f])
450 linkmapper = self.unfiltered().changelog.rev
446 linkmapper = self.unfiltered().changelog.rev
451 return bundlefilelog(self.svfs, f, self._cgunpacker, linkmapper)
447 return bundlefilelog(self.svfs, f, self._cgunpacker, linkmapper)
452 else:
448 else:
453 return super(bundlerepository, self).file(f)
449 return super(bundlerepository, self).file(f)
454
450
455 def close(self):
451 def close(self):
456 """Close assigned bundle file immediately."""
452 """Close assigned bundle file immediately."""
457 self._bundlefile.close()
453 self._bundlefile.close()
458 if self.tempfile is not None:
454 if self.tempfile is not None:
459 self.vfs.unlink(self.tempfile)
455 self.vfs.unlink(self.tempfile)
460 if self._tempparent:
456 if self._tempparent:
461 shutil.rmtree(self._tempparent, True)
457 shutil.rmtree(self._tempparent, True)
462
458
463 def cancopy(self):
459 def cancopy(self):
464 return False
460 return False
465
461
466 def peer(self):
462 def peer(self):
467 return bundlepeer(self)
463 return bundlepeer(self)
468
464
469 def getcwd(self):
465 def getcwd(self):
470 return encoding.getcwd() # always outside the repo
466 return encoding.getcwd() # always outside the repo
471
467
472 # Check if parents exist in localrepo before setting
468 # Check if parents exist in localrepo before setting
473 def setparents(self, p1, p2=None):
469 def setparents(self, p1, p2=None):
474 if p2 is None:
470 if p2 is None:
475 p2 = self.nullid
471 p2 = self.nullid
476 p1rev = self.changelog.rev(p1)
472 p1rev = self.changelog.rev(p1)
477 p2rev = self.changelog.rev(p2)
473 p2rev = self.changelog.rev(p2)
478 msg = _(b"setting parent to node %s that only exists in the bundle\n")
474 msg = _(b"setting parent to node %s that only exists in the bundle\n")
479 if self.changelog.repotiprev < p1rev:
475 if self.changelog.repotiprev < p1rev:
480 self.ui.warn(msg % hex(p1))
476 self.ui.warn(msg % hex(p1))
481 if self.changelog.repotiprev < p2rev:
477 if self.changelog.repotiprev < p2rev:
482 self.ui.warn(msg % hex(p2))
478 self.ui.warn(msg % hex(p2))
483 return super(bundlerepository, self).setparents(p1, p2)
479 return super(bundlerepository, self).setparents(p1, p2)
484
480
485
481
486 def instance(ui, path, create, intents=None, createopts=None):
482 def instance(ui, path, create, intents=None, createopts=None):
487 if create:
483 if create:
488 raise error.Abort(_(b'cannot create new bundle repository'))
484 raise error.Abort(_(b'cannot create new bundle repository'))
489 # internal config: bundle.mainreporoot
485 # internal config: bundle.mainreporoot
490 parentpath = ui.config(b"bundle", b"mainreporoot")
486 parentpath = ui.config(b"bundle", b"mainreporoot")
491 if not parentpath:
487 if not parentpath:
492 # try to find the correct path to the working directory repo
488 # try to find the correct path to the working directory repo
493 parentpath = cmdutil.findrepo(encoding.getcwd())
489 parentpath = cmdutil.findrepo(encoding.getcwd())
494 if parentpath is None:
490 if parentpath is None:
495 parentpath = b''
491 parentpath = b''
496 if parentpath:
492 if parentpath:
497 # Try to make the full path relative so we get a nice, short URL.
493 # Try to make the full path relative so we get a nice, short URL.
498 # In particular, we don't want temp dir names in test outputs.
494 # In particular, we don't want temp dir names in test outputs.
499 cwd = encoding.getcwd()
495 cwd = encoding.getcwd()
500 if parentpath == cwd:
496 if parentpath == cwd:
501 parentpath = b''
497 parentpath = b''
502 else:
498 else:
503 cwd = pathutil.normasprefix(cwd)
499 cwd = pathutil.normasprefix(cwd)
504 if parentpath.startswith(cwd):
500 if parentpath.startswith(cwd):
505 parentpath = parentpath[len(cwd) :]
501 parentpath = parentpath[len(cwd) :]
506 u = urlutil.url(path)
502 u = urlutil.url(path)
507 path = u.localpath()
503 path = u.localpath()
508 if u.scheme == b'bundle':
504 if u.scheme == b'bundle':
509 s = path.split(b"+", 1)
505 s = path.split(b"+", 1)
510 if len(s) == 1:
506 if len(s) == 1:
511 repopath, bundlename = parentpath, s[0]
507 repopath, bundlename = parentpath, s[0]
512 else:
508 else:
513 repopath, bundlename = s
509 repopath, bundlename = s
514 else:
510 else:
515 repopath, bundlename = parentpath, path
511 repopath, bundlename = parentpath, path
516
512
517 return makebundlerepository(ui, repopath, bundlename)
513 return makebundlerepository(ui, repopath, bundlename)
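
Editor's note: instance() recognizes two spellings: a plain path to a bundle file (the parent repository is discovered from config or the working directory) and a bundle: URL in which the parent repo path and the bundle file are joined by '+'. The split logic above, extracted for clarity (the helper name is mine):

    def parse_bundle_url(path, default_parent=b''):
        parts = path.split(b'+', 1)
        if len(parts) == 1:
            return default_parent, parts[0]
        return parts[0], parts[1]

    assert parse_bundle_url(b'/repo+/tmp/x.hg') == (b'/repo', b'/tmp/x.hg')
    assert parse_bundle_url(b'/tmp/x.hg', b'/repo') == (b'/repo', b'/tmp/x.hg')
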
518
514
519
515
520 def makebundlerepository(ui, repopath, bundlepath):
516 def makebundlerepository(ui, repopath, bundlepath):
521 """Make a bundle repository object based on repo and bundle paths."""
517 """Make a bundle repository object based on repo and bundle paths."""
522 if repopath:
518 if repopath:
523 url = b'bundle:%s+%s' % (util.expandpath(repopath), bundlepath)
519 url = b'bundle:%s+%s' % (util.expandpath(repopath), bundlepath)
524 else:
520 else:
525 url = b'bundle:%s' % bundlepath
521 url = b'bundle:%s' % bundlepath
526
522
527 # Because we can't make any guarantees about the type of the base
523 # Because we can't make any guarantees about the type of the base
528 # repository, we can't have a static class representing the bundle
524 # repository, we can't have a static class representing the bundle
529 # repository. We also can't make any guarantees about how to even
525 # repository. We also can't make any guarantees about how to even
530 # call the base repository's constructor!
526 # call the base repository's constructor!
531 #
527 #
532 # So, our strategy is to go through ``localrepo.instance()`` to construct
528 # So, our strategy is to go through ``localrepo.instance()`` to construct
533 # a repo instance. Then, we dynamically create a new type derived from
529 # a repo instance. Then, we dynamically create a new type derived from
534 # both it and our ``bundlerepository`` class which overrides some
530 # both it and our ``bundlerepository`` class which overrides some
535 # functionality. We then change the type of the constructed repository
531 # functionality. We then change the type of the constructed repository
536 # to this new type and initialize the bundle-specific bits of it.
532 # to this new type and initialize the bundle-specific bits of it.
537
533
538 try:
534 try:
539 repo = localrepo.instance(ui, repopath, create=False)
535 repo = localrepo.instance(ui, repopath, create=False)
540 tempparent = None
536 tempparent = None
541 except error.RepoError:
537 except error.RepoError:
542 tempparent = pycompat.mkdtemp()
538 tempparent = pycompat.mkdtemp()
543 try:
539 try:
544 repo = localrepo.instance(ui, tempparent, create=True)
540 repo = localrepo.instance(ui, tempparent, create=True)
545 except Exception:
541 except Exception:
546 shutil.rmtree(tempparent)
542 shutil.rmtree(tempparent)
547 raise
543 raise
548
544
549 class derivedbundlerepository(bundlerepository, repo.__class__):
545 class derivedbundlerepository(bundlerepository, repo.__class__):
550 pass
546 pass
551
547
552 repo.__class__ = derivedbundlerepository
548 repo.__class__ = derivedbundlerepository
553 bundlerepository.__init__(repo, bundlepath, url, tempparent)
549 bundlerepository.__init__(repo, bundlepath, url, tempparent)
554
550
555 return repo
551 return repo
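
Editor's note: the comment block above is the interesting part of this function: because the concrete type of the base repository is only known at runtime, it constructs the repo normally, derives a throwaway class from bundlerepository plus the repo's actual class, and rebinds repo.__class__, so the existing instance picks up the bundle overrides without its base constructor running again. The trick in miniature, with made-up class names:

    class ReadOnlyMixin(object):
        def can_write(self):
            return False


    class Repo(object):
        def __init__(self, path):
            self.path = path


    repo = Repo('/tmp/demo')  # constructed the normal way first


    class DerivedRepo(ReadOnlyMixin, Repo):
        pass


    repo.__class__ = DerivedRepo  # instance keeps its state, gains the mixin
    assert repo.can_write() is False and repo.path == '/tmp/demo'
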
556
552
557
553
558 class bundletransactionmanager(object):
554 class bundletransactionmanager(object):
559 def transaction(self):
555 def transaction(self):
560 return None
556 return None
561
557
562 def close(self):
558 def close(self):
563 raise NotImplementedError
559 raise NotImplementedError
564
560
565 def release(self):
561 def release(self):
566 raise NotImplementedError
562 raise NotImplementedError
567
563
568
564
569 def getremotechanges(
565 def getremotechanges(
570 ui, repo, peer, onlyheads=None, bundlename=None, force=False
566 ui, repo, peer, onlyheads=None, bundlename=None, force=False
571 ):
567 ):
572 """obtains a bundle of changes incoming from peer
568 """obtains a bundle of changes incoming from peer
573
569
574 "onlyheads" restricts the returned changes to those reachable from the
570 "onlyheads" restricts the returned changes to those reachable from the
575 specified heads.
571 specified heads.
576 "bundlename", if given, stores the bundle to this file path permanently;
572 "bundlename", if given, stores the bundle to this file path permanently;
577 otherwise it's stored to a temp file and gets deleted again when you call
573 otherwise it's stored to a temp file and gets deleted again when you call
578 the returned "cleanupfn".
574 the returned "cleanupfn".
579 "force" indicates whether to proceed on unrelated repos.
575 "force" indicates whether to proceed on unrelated repos.
580
576
581 Returns a tuple (local, csets, cleanupfn):
577 Returns a tuple (local, csets, cleanupfn):
582
578
583 "local" is a local repo from which to obtain the actual incoming
579 "local" is a local repo from which to obtain the actual incoming
584 changesets; it is a bundlerepo for the obtained bundle when the
580 changesets; it is a bundlerepo for the obtained bundle when the
585 original "peer" is remote.
581 original "peer" is remote.
586 "csets" lists the incoming changeset node ids.
582 "csets" lists the incoming changeset node ids.
587 "cleanupfn" must be called without arguments when you're done processing
583 "cleanupfn" must be called without arguments when you're done processing
588 the changes; it closes both the original "peer" and the one returned
584 the changes; it closes both the original "peer" and the one returned
589 here.
585 here.
590 """
586 """
591 tmp = discovery.findcommonincoming(repo, peer, heads=onlyheads, force=force)
587 tmp = discovery.findcommonincoming(repo, peer, heads=onlyheads, force=force)
592 common, incoming, rheads = tmp
588 common, incoming, rheads = tmp
593 if not incoming:
589 if not incoming:
594 try:
590 try:
595 if bundlename:
591 if bundlename:
596 os.unlink(bundlename)
592 os.unlink(bundlename)
597 except OSError:
593 except OSError:
598 pass
594 pass
599 return repo, [], peer.close
595 return repo, [], peer.close
600
596
601 commonset = set(common)
597 commonset = set(common)
602 rheads = [x for x in rheads if x not in commonset]
598 rheads = [x for x in rheads if x not in commonset]
603
599
604 bundle = None
600 bundle = None
605 bundlerepo = None
601 bundlerepo = None
606 localrepo = peer.local()
602 localrepo = peer.local()
607 if bundlename or not localrepo:
603 if bundlename or not localrepo:
608 # create a bundle (uncompressed if peer repo is not local)
604 # create a bundle (uncompressed if peer repo is not local)
609
605
610 # developer config: devel.legacy.exchange
606 # developer config: devel.legacy.exchange
611 legexc = ui.configlist(b'devel', b'legacy.exchange')
607 legexc = ui.configlist(b'devel', b'legacy.exchange')
612 forcebundle1 = b'bundle2' not in legexc and b'bundle1' in legexc
608 forcebundle1 = b'bundle2' not in legexc and b'bundle1' in legexc
613 canbundle2 = (
609 canbundle2 = (
614 not forcebundle1
610 not forcebundle1
615 and peer.capable(b'getbundle')
611 and peer.capable(b'getbundle')
616 and peer.capable(b'bundle2')
612 and peer.capable(b'bundle2')
617 )
613 )
618 if canbundle2:
614 if canbundle2:
619 with peer.commandexecutor() as e:
615 with peer.commandexecutor() as e:
620 b2 = e.callcommand(
616 b2 = e.callcommand(
621 b'getbundle',
617 b'getbundle',
622 {
618 {
623 b'source': b'incoming',
619 b'source': b'incoming',
624 b'common': common,
620 b'common': common,
625 b'heads': rheads,
621 b'heads': rheads,
626 b'bundlecaps': exchange.caps20to10(
622 b'bundlecaps': exchange.caps20to10(
627 repo, role=b'client'
623 repo, role=b'client'
628 ),
624 ),
629 b'cg': True,
625 b'cg': True,
630 },
626 },
631 ).result()
627 ).result()
632
628
633 fname = bundle = changegroup.writechunks(
629 fname = bundle = changegroup.writechunks(
634 ui, b2._forwardchunks(), bundlename
630 ui, b2._forwardchunks(), bundlename
635 )
631 )
636 else:
632 else:
637 if peer.capable(b'getbundle'):
633 if peer.capable(b'getbundle'):
638 with peer.commandexecutor() as e:
634 with peer.commandexecutor() as e:
639 cg = e.callcommand(
635 cg = e.callcommand(
640 b'getbundle',
636 b'getbundle',
641 {
637 {
642 b'source': b'incoming',
638 b'source': b'incoming',
643 b'common': common,
639 b'common': common,
644 b'heads': rheads,
640 b'heads': rheads,
645 },
641 },
646 ).result()
642 ).result()
647 elif onlyheads is None and not peer.capable(b'changegroupsubset'):
643 elif onlyheads is None and not peer.capable(b'changegroupsubset'):
648 # compat with older servers when pulling all remote heads
644 # compat with older servers when pulling all remote heads
649
645
650 with peer.commandexecutor() as e:
646 with peer.commandexecutor() as e:
651 cg = e.callcommand(
647 cg = e.callcommand(
652 b'changegroup',
648 b'changegroup',
653 {
649 {
654 b'nodes': incoming,
650 b'nodes': incoming,
655 b'source': b'incoming',
651 b'source': b'incoming',
656 },
652 },
657 ).result()
653 ).result()
658
654
659 rheads = None
655 rheads = None
660 else:
656 else:
661 with peer.commandexecutor() as e:
657 with peer.commandexecutor() as e:
662 cg = e.callcommand(
658 cg = e.callcommand(
663 b'changegroupsubset',
659 b'changegroupsubset',
664 {
660 {
665 b'bases': incoming,
661 b'bases': incoming,
666 b'heads': rheads,
662 b'heads': rheads,
667 b'source': b'incoming',
663 b'source': b'incoming',
668 },
664 },
669 ).result()
665 ).result()
670
666
671 if localrepo:
667 if localrepo:
672 bundletype = b"HG10BZ"
668 bundletype = b"HG10BZ"
673 else:
669 else:
674 bundletype = b"HG10UN"
670 bundletype = b"HG10UN"
675 fname = bundle = bundle2.writebundle(ui, cg, bundlename, bundletype)
671 fname = bundle = bundle2.writebundle(ui, cg, bundlename, bundletype)
676 # keep written bundle?
672 # keep written bundle?
677 if bundlename:
673 if bundlename:
678 bundle = None
674 bundle = None
679 if not localrepo:
675 if not localrepo:
680 # use the created uncompressed bundlerepo
676 # use the created uncompressed bundlerepo
681 localrepo = bundlerepo = makebundlerepository(
677 localrepo = bundlerepo = makebundlerepository(
682 repo.baseui, repo.root, fname
678 repo.baseui, repo.root, fname
683 )
679 )
684
680
685 # this repo contains local and peer now, so filter out local again
681 # this repo contains local and peer now, so filter out local again
686 common = repo.heads()
682 common = repo.heads()
687 if localrepo:
683 if localrepo:
688 # Part of common may be remotely filtered
684 # Part of common may be remotely filtered
689 # So use an unfiltered version
685 # So use an unfiltered version
690 # The discovery process probably need cleanup to avoid that
686 # The discovery process probably need cleanup to avoid that
691 localrepo = localrepo.unfiltered()
687 localrepo = localrepo.unfiltered()
692
688
693 csets = localrepo.changelog.findmissing(common, rheads)
689 csets = localrepo.changelog.findmissing(common, rheads)
694
690
695 if bundlerepo:
691 if bundlerepo:
696 reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev :]]
692 reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev :]]
697
693
698 with peer.commandexecutor() as e:
694 with peer.commandexecutor() as e:
699 remotephases = e.callcommand(
695 remotephases = e.callcommand(
700 b'listkeys',
696 b'listkeys',
701 {
697 {
702 b'namespace': b'phases',
698 b'namespace': b'phases',
703 },
699 },
704 ).result()
700 ).result()
705
701
706 pullop = exchange.pulloperation(bundlerepo, peer, heads=reponodes)
702 pullop = exchange.pulloperation(bundlerepo, peer, heads=reponodes)
707 pullop.trmanager = bundletransactionmanager()
703 pullop.trmanager = bundletransactionmanager()
708 exchange._pullapplyphases(pullop, remotephases)
704 exchange._pullapplyphases(pullop, remotephases)
709
705
710 def cleanup():
706 def cleanup():
711 if bundlerepo:
707 if bundlerepo:
712 bundlerepo.close()
708 bundlerepo.close()
713 if bundle:
709 if bundle:
714 os.unlink(bundle)
710 os.unlink(bundle)
715 peer.close()
711 peer.close()
716
712
717 return (localrepo, csets, cleanup)
713 return (localrepo, csets, cleanup)
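
Editor's note: a hedged usage sketch of getremotechanges(), mirroring the contract spelled out in its docstring: inspect the incoming changesets, then call the cleanup function to close both peers and remove any temporary bundle. The ui/repo/peer setup and the remote URL are illustrative, not taken from this file.

    from mercurial import bundlerepo, hg, ui as uimod

    ui = uimod.ui.load()
    repo = hg.repository(ui, b'.')
    peer = hg.peer(ui, {}, b'https://example.com/some-repo')
    local, csets, cleanup = bundlerepo.getremotechanges(ui, repo, peer)
    try:
        for node in csets:
            ui.write(b'incoming changeset %s\n' % local[node].hex()[:12])
    finally:
        cleanup()
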