bundlerepo: don't insert index tuples with full nodes as linkrev...
Joerg Sonnenberger
r46418:88d5abec default
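
Judging from the hunk below, the loop in bundlerevlog.__init__ now computes the linkrev only after the duplicate-node check, and stores nullrev when the changeset node equals the node being indexed; previously linkmapper(cs) was stored unconditionally, so the changelog (whose linkmapper is the identity lambda, see bundlechangelog further down) could end up with a full binary node in the linkrev slot of the index tuple. The following is a minimal, standalone sketch of that selection logic; the helper name compute_linkrev and the inline nullrev constant are illustrative only (the real change runs inline inside __init__ and uses nullrev imported from .node):

    nullrev = -1  # same value as mercurial.node.nullrev

    def compute_linkrev(node, cs, linkmapper):
        # Mirrors the added lines in the hunk below: when the changeset node
        # is the node being added (the changelog case, where linkmapper is
        # the identity), record nullrev instead of the full binary node.
        if cs == node:
            return nullrev
        return linkmapper(cs)

    # changelog entry: the identity linkmapper would have returned the node itself
    assert compute_linkrev(b'\x11' * 20, b'\x11' * 20, lambda x: x) == nullrev
    # manifest/filelog entry: linkmapper maps the changeset node to a revision number
    assert compute_linkrev(b'\x11' * 20, b'\x22' * 20, {b'\x22' * 20: 7}.get) == 7
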
@@ -1,669 +1,672 @@
 # bundlerepo.py - repository class for viewing uncompressed bundles
 #
 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 """Repository class for viewing uncompressed bundles.

 This provides a read-only repository interface to bundles as if they
 were part of the actual repository.
 """

 from __future__ import absolute_import

 import os
 import shutil

 from .i18n import _
 from .node import nullid, nullrev

 from . import (
     bundle2,
     changegroup,
     changelog,
     cmdutil,
     discovery,
     encoding,
     error,
     exchange,
     filelog,
     localrepo,
     manifest,
     mdiff,
     node as nodemod,
     pathutil,
     phases,
     pycompat,
     revlog,
     util,
     vfs as vfsmod,
 )


 class bundlerevlog(revlog.revlog):
     def __init__(self, opener, indexfile, cgunpacker, linkmapper):
         # How it works:
         # To retrieve a revision, we need to know the offset of the revision in
         # the bundle (an unbundle object). We store this offset in the index
         # (start). The base of the delta is stored in the base field.
         #
         # To differentiate a rev in the bundle from a rev in the revlog, we
         # check revision against repotiprev.
         opener = vfsmod.readonlyvfs(opener)
         revlog.revlog.__init__(self, opener, indexfile)
         self.bundle = cgunpacker
         n = len(self)
         self.repotiprev = n - 1
         self.bundlerevs = set()  # used by 'bundle()' revset expression
         for deltadata in cgunpacker.deltaiter():
             node, p1, p2, cs, deltabase, delta, flags = deltadata

             size = len(delta)
             start = cgunpacker.tell() - size

-            link = linkmapper(cs)
             if self.index.has_node(node):
                 # this can happen if two branches make the same change
                 self.bundlerevs.add(self.index.rev(node))
                 continue
+            if cs == node:
+                linkrev = nullrev
+            else:
+                linkrev = linkmapper(cs)

             for p in (p1, p2):
                 if not self.index.has_node(p):
                     raise error.LookupError(
                         p, self.indexfile, _(b"unknown parent")
                     )

             if not self.index.has_node(deltabase):
                 raise LookupError(
                     deltabase, self.indexfile, _(b'unknown delta base')
                 )

             baserev = self.rev(deltabase)
             # start, size, full unc. size, base (unused), link, p1, p2, node
             e = (
                 revlog.offset_type(start, flags),
                 size,
                 -1,
                 baserev,
-                link,
+                linkrev,
                 self.rev(p1),
                 self.rev(p2),
                 node,
             )
             self.index.append(e)
             self.bundlerevs.add(n)
             n += 1

     def _chunk(self, rev, df=None):
         # Warning: in case of bundle, the diff is against what we stored as
         # delta base, not against rev - 1
         # XXX: could use some caching
         if rev <= self.repotiprev:
             return revlog.revlog._chunk(self, rev)
         self.bundle.seek(self.start(rev))
         return self.bundle.read(self.length(rev))

     def revdiff(self, rev1, rev2):
         """return or calculate a delta between two revisions"""
         if rev1 > self.repotiprev and rev2 > self.repotiprev:
             # hot path for bundle
             revb = self.index[rev2][3]
             if revb == rev1:
                 return self._chunk(rev2)
         elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
             return revlog.revlog.revdiff(self, rev1, rev2)

         return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))

     def _rawtext(self, node, rev, _df=None):
         if rev is None:
             rev = self.rev(node)
         validated = False
         rawtext = None
         chain = []
         iterrev = rev
         # reconstruct the revision if it is from a changegroup
         while iterrev > self.repotiprev:
             if self._revisioncache and self._revisioncache[1] == iterrev:
                 rawtext = self._revisioncache[2]
                 break
             chain.append(iterrev)
             iterrev = self.index[iterrev][3]
         if iterrev == nullrev:
             rawtext = b''
         elif rawtext is None:
             r = super(bundlerevlog, self)._rawtext(
                 self.node(iterrev), iterrev, _df=_df
             )
             __, rawtext, validated = r
         if chain:
             validated = False
             while chain:
                 delta = self._chunk(chain.pop())
                 rawtext = mdiff.patches(rawtext, [delta])
         return rev, rawtext, validated

     def addrevision(self, *args, **kwargs):
         raise NotImplementedError

     def addgroup(self, *args, **kwargs):
         raise NotImplementedError

     def strip(self, *args, **kwargs):
         raise NotImplementedError

     def checksize(self):
         raise NotImplementedError

 class bundlechangelog(bundlerevlog, changelog.changelog):
     def __init__(self, opener, cgunpacker):
         changelog.changelog.__init__(self, opener)
         linkmapper = lambda x: x
         bundlerevlog.__init__(
             self, opener, self.indexfile, cgunpacker, linkmapper
         )


 class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
     def __init__(
         self, opener, cgunpacker, linkmapper, dirlogstarts=None, dir=b''
     ):
         manifest.manifestrevlog.__init__(self, opener, tree=dir)
         bundlerevlog.__init__(
             self, opener, self.indexfile, cgunpacker, linkmapper
         )
         if dirlogstarts is None:
             dirlogstarts = {}
             if self.bundle.version == b"03":
                 dirlogstarts = _getfilestarts(self.bundle)
         self._dirlogstarts = dirlogstarts
         self._linkmapper = linkmapper

     def dirlog(self, d):
         if d in self._dirlogstarts:
             self.bundle.seek(self._dirlogstarts[d])
             return bundlemanifest(
                 self.opener,
                 self.bundle,
                 self._linkmapper,
                 self._dirlogstarts,
                 dir=d,
             )
         return super(bundlemanifest, self).dirlog(d)


 class bundlefilelog(filelog.filelog):
     def __init__(self, opener, path, cgunpacker, linkmapper):
         filelog.filelog.__init__(self, opener, path)
         self._revlog = bundlerevlog(
             opener, self.indexfile, cgunpacker, linkmapper
         )


 class bundlepeer(localrepo.localpeer):
     def canpush(self):
         return False


 class bundlephasecache(phases.phasecache):
     def __init__(self, *args, **kwargs):
         super(bundlephasecache, self).__init__(*args, **kwargs)
         if util.safehasattr(self, 'opener'):
             self.opener = vfsmod.readonlyvfs(self.opener)

     def write(self):
         raise NotImplementedError

     def _write(self, fp):
         raise NotImplementedError

     def _updateroots(self, phase, newroots, tr):
         self.phaseroots[phase] = newroots
         self.invalidate()
         self.dirty = True


 def _getfilestarts(cgunpacker):
     filespos = {}
     for chunkdata in iter(cgunpacker.filelogheader, {}):
         fname = chunkdata[b'filename']
         filespos[fname] = cgunpacker.tell()
         for chunk in iter(lambda: cgunpacker.deltachunk(None), {}):
             pass
     return filespos

 class bundlerepository(object):
     """A repository instance that is a union of a local repo and a bundle.

     Instances represent a read-only repository composed of a local repository
     with the contents of a bundle file applied. The repository instance is
     conceptually similar to the state of a repository after an
     ``hg unbundle`` operation. However, the contents of the bundle are never
     applied to the actual base repository.

     Instances constructed directly are not usable as repository objects.
     Use instance() or makebundlerepository() to create instances.
     """

     def __init__(self, bundlepath, url, tempparent):
         self._tempparent = tempparent
         self._url = url

         self.ui.setconfig(b'phases', b'publish', False, b'bundlerepo')

         self.tempfile = None
         f = util.posixfile(bundlepath, b"rb")
         bundle = exchange.readbundle(self.ui, f, bundlepath)

         if isinstance(bundle, bundle2.unbundle20):
             self._bundlefile = bundle
             self._cgunpacker = None

             cgpart = None
             for part in bundle.iterparts(seekable=True):
                 if part.type == b'changegroup':
                     if cgpart:
                         raise NotImplementedError(
                             b"can't process multiple changegroups"
                         )
                     cgpart = part

                 self._handlebundle2part(bundle, part)

             if not cgpart:
                 raise error.Abort(_(b"No changegroups found"))

             # This is required to placate a later consumer, which expects
             # the payload offset to be at the beginning of the changegroup.
             # We need to do this after the iterparts() generator advances
             # because iterparts() will seek to end of payload after the
             # generator returns control to iterparts().
             cgpart.seek(0, os.SEEK_SET)

         elif isinstance(bundle, changegroup.cg1unpacker):
             if bundle.compressed():
                 f = self._writetempbundle(
                     bundle.read, b'.hg10un', header=b'HG10UN'
                 )
                 bundle = exchange.readbundle(self.ui, f, bundlepath, self.vfs)

             self._bundlefile = bundle
             self._cgunpacker = bundle
         else:
             raise error.Abort(
                 _(b'bundle type %s cannot be read') % type(bundle)
             )

         # dict with the mapping 'filename' -> position in the changegroup.
         self._cgfilespos = {}

         self.firstnewrev = self.changelog.repotiprev + 1
         phases.retractboundary(
             self,
             None,
             phases.draft,
             [ctx.node() for ctx in self[self.firstnewrev :]],
         )

     def _handlebundle2part(self, bundle, part):
         if part.type != b'changegroup':
             return

         cgstream = part
         version = part.params.get(b'version', b'01')
         legalcgvers = changegroup.supportedincomingversions(self)
         if version not in legalcgvers:
             msg = _(b'Unsupported changegroup version: %s')
             raise error.Abort(msg % version)
         if bundle.compressed():
             cgstream = self._writetempbundle(part.read, b'.cg%sun' % version)

         self._cgunpacker = changegroup.getunbundler(version, cgstream, b'UN')

     def _writetempbundle(self, readfn, suffix, header=b''):
         """Write a temporary file to disk
         """
         fdtemp, temp = self.vfs.mkstemp(prefix=b"hg-bundle-", suffix=suffix)
         self.tempfile = temp

         with os.fdopen(fdtemp, 'wb') as fptemp:
             fptemp.write(header)
             while True:
                 chunk = readfn(2 ** 18)
                 if not chunk:
                     break
                 fptemp.write(chunk)

         return self.vfs.open(self.tempfile, mode=b"rb")

     @localrepo.unfilteredpropertycache
     def _phasecache(self):
         return bundlephasecache(self, self._phasedefaults)

     @localrepo.unfilteredpropertycache
     def changelog(self):
         # consume the header if it exists
         self._cgunpacker.changelogheader()
         c = bundlechangelog(self.svfs, self._cgunpacker)
         self.manstart = self._cgunpacker.tell()
         return c

     def _refreshchangelog(self):
         # changelog for bundle repo are not filecache, this method is not
         # applicable.
         pass

     @localrepo.unfilteredpropertycache
     def manifestlog(self):
         self._cgunpacker.seek(self.manstart)
         # consume the header if it exists
         self._cgunpacker.manifestheader()
         linkmapper = self.unfiltered().changelog.rev
         rootstore = bundlemanifest(self.svfs, self._cgunpacker, linkmapper)
         self.filestart = self._cgunpacker.tell()

         return manifest.manifestlog(
             self.svfs, self, rootstore, self.narrowmatch()
         )

     def _consumemanifest(self):
         """Consumes the manifest portion of the bundle, setting filestart so the
         file portion can be read."""
         self._cgunpacker.seek(self.manstart)
         self._cgunpacker.manifestheader()
         for delta in self._cgunpacker.deltaiter():
             pass
         self.filestart = self._cgunpacker.tell()

     @localrepo.unfilteredpropertycache
     def manstart(self):
         self.changelog
         return self.manstart

     @localrepo.unfilteredpropertycache
     def filestart(self):
         self.manifestlog

         # If filestart was not set by self.manifestlog, that means the
         # manifestlog implementation did not consume the manifests from the
         # changegroup (ex: it might be consuming trees from a separate bundle2
         # part instead). So we need to manually consume it.
         if 'filestart' not in self.__dict__:
             self._consumemanifest()

         return self.filestart

     def url(self):
         return self._url

     def file(self, f):
         if not self._cgfilespos:
             self._cgunpacker.seek(self.filestart)
             self._cgfilespos = _getfilestarts(self._cgunpacker)

         if f in self._cgfilespos:
             self._cgunpacker.seek(self._cgfilespos[f])
             linkmapper = self.unfiltered().changelog.rev
             return bundlefilelog(self.svfs, f, self._cgunpacker, linkmapper)
         else:
             return super(bundlerepository, self).file(f)

     def close(self):
         """Close assigned bundle file immediately."""
         self._bundlefile.close()
         if self.tempfile is not None:
             self.vfs.unlink(self.tempfile)
         if self._tempparent:
             shutil.rmtree(self._tempparent, True)

     def cancopy(self):
         return False

     def peer(self):
         return bundlepeer(self)

     def getcwd(self):
         return encoding.getcwd()  # always outside the repo

     # Check if parents exist in localrepo before setting
     def setparents(self, p1, p2=nullid):
         p1rev = self.changelog.rev(p1)
         p2rev = self.changelog.rev(p2)
         msg = _(b"setting parent to node %s that only exists in the bundle\n")
         if self.changelog.repotiprev < p1rev:
             self.ui.warn(msg % nodemod.hex(p1))
         if self.changelog.repotiprev < p2rev:
             self.ui.warn(msg % nodemod.hex(p2))
         return super(bundlerepository, self).setparents(p1, p2)


 def instance(ui, path, create, intents=None, createopts=None):
     if create:
         raise error.Abort(_(b'cannot create new bundle repository'))
     # internal config: bundle.mainreporoot
     parentpath = ui.config(b"bundle", b"mainreporoot")
     if not parentpath:
         # try to find the correct path to the working directory repo
         parentpath = cmdutil.findrepo(encoding.getcwd())
         if parentpath is None:
             parentpath = b''
     if parentpath:
         # Try to make the full path relative so we get a nice, short URL.
         # In particular, we don't want temp dir names in test outputs.
         cwd = encoding.getcwd()
         if parentpath == cwd:
             parentpath = b''
         else:
             cwd = pathutil.normasprefix(cwd)
             if parentpath.startswith(cwd):
                 parentpath = parentpath[len(cwd) :]
     u = util.url(path)
     path = u.localpath()
     if u.scheme == b'bundle':
         s = path.split(b"+", 1)
         if len(s) == 1:
             repopath, bundlename = parentpath, s[0]
         else:
             repopath, bundlename = s
     else:
         repopath, bundlename = parentpath, path

     return makebundlerepository(ui, repopath, bundlename)


 def makebundlerepository(ui, repopath, bundlepath):
     """Make a bundle repository object based on repo and bundle paths."""
     if repopath:
         url = b'bundle:%s+%s' % (util.expandpath(repopath), bundlepath)
     else:
         url = b'bundle:%s' % bundlepath

     # Because we can't make any guarantees about the type of the base
     # repository, we can't have a static class representing the bundle
     # repository. We also can't make any guarantees about how to even
     # call the base repository's constructor!
     #
     # So, our strategy is to go through ``localrepo.instance()`` to construct
     # a repo instance. Then, we dynamically create a new type derived from
     # both it and our ``bundlerepository`` class which overrides some
     # functionality. We then change the type of the constructed repository
     # to this new type and initialize the bundle-specific bits of it.

     try:
         repo = localrepo.instance(ui, repopath, create=False)
         tempparent = None
     except error.RepoError:
         tempparent = pycompat.mkdtemp()
         try:
             repo = localrepo.instance(ui, tempparent, create=True)
         except Exception:
             shutil.rmtree(tempparent)
             raise

     class derivedbundlerepository(bundlerepository, repo.__class__):
         pass

     repo.__class__ = derivedbundlerepository
     bundlerepository.__init__(repo, bundlepath, url, tempparent)

     return repo


 class bundletransactionmanager(object):
     def transaction(self):
         return None

     def close(self):
         raise NotImplementedError

     def release(self):
         raise NotImplementedError

 def getremotechanges(
     ui, repo, peer, onlyheads=None, bundlename=None, force=False
 ):
     '''obtains a bundle of changes incoming from peer

     "onlyheads" restricts the returned changes to those reachable from the
     specified heads.
     "bundlename", if given, stores the bundle to this file path permanently;
     otherwise it's stored to a temp file and gets deleted again when you call
     the returned "cleanupfn".
     "force" indicates whether to proceed on unrelated repos.

     Returns a tuple (local, csets, cleanupfn):

     "local" is a local repo from which to obtain the actual incoming
     changesets; it is a bundlerepo for the obtained bundle when the
     original "peer" is remote.
     "csets" lists the incoming changeset node ids.
     "cleanupfn" must be called without arguments when you're done processing
     the changes; it closes both the original "peer" and the one returned
     here.
     '''
     tmp = discovery.findcommonincoming(repo, peer, heads=onlyheads, force=force)
     common, incoming, rheads = tmp
     if not incoming:
         try:
             if bundlename:
                 os.unlink(bundlename)
         except OSError:
             pass
         return repo, [], peer.close

     commonset = set(common)
     rheads = [x for x in rheads if x not in commonset]

     bundle = None
     bundlerepo = None
     localrepo = peer.local()
     if bundlename or not localrepo:
         # create a bundle (uncompressed if peer repo is not local)

         # developer config: devel.legacy.exchange
         legexc = ui.configlist(b'devel', b'legacy.exchange')
         forcebundle1 = b'bundle2' not in legexc and b'bundle1' in legexc
         canbundle2 = (
             not forcebundle1
             and peer.capable(b'getbundle')
             and peer.capable(b'bundle2')
         )
         if canbundle2:
             with peer.commandexecutor() as e:
                 b2 = e.callcommand(
                     b'getbundle',
                     {
                         b'source': b'incoming',
                         b'common': common,
                         b'heads': rheads,
                         b'bundlecaps': exchange.caps20to10(
                             repo, role=b'client'
                         ),
                         b'cg': True,
                     },
                 ).result()

             fname = bundle = changegroup.writechunks(
                 ui, b2._forwardchunks(), bundlename
             )
         else:
             if peer.capable(b'getbundle'):
                 with peer.commandexecutor() as e:
                     cg = e.callcommand(
                         b'getbundle',
                         {
                             b'source': b'incoming',
                             b'common': common,
                             b'heads': rheads,
                         },
                     ).result()
             elif onlyheads is None and not peer.capable(b'changegroupsubset'):
                 # compat with older servers when pulling all remote heads

                 with peer.commandexecutor() as e:
                     cg = e.callcommand(
                         b'changegroup',
                         {b'nodes': incoming, b'source': b'incoming',},
                     ).result()

                 rheads = None
             else:
                 with peer.commandexecutor() as e:
                     cg = e.callcommand(
                         b'changegroupsubset',
                         {
                             b'bases': incoming,
                             b'heads': rheads,
                             b'source': b'incoming',
                         },
                     ).result()

             if localrepo:
                 bundletype = b"HG10BZ"
             else:
                 bundletype = b"HG10UN"
             fname = bundle = bundle2.writebundle(ui, cg, bundlename, bundletype)
     # keep written bundle?
     if bundlename:
         bundle = None
     if not localrepo:
         # use the created uncompressed bundlerepo
         localrepo = bundlerepo = makebundlerepository(
             repo.baseui, repo.root, fname
         )

         # this repo contains local and peer now, so filter out local again
         common = repo.heads()
     if localrepo:
         # Part of common may be remotely filtered
         # So use an unfiltered version
         # The discovery process probably need cleanup to avoid that
         localrepo = localrepo.unfiltered()

     csets = localrepo.changelog.findmissing(common, rheads)

     if bundlerepo:
         reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev :]]

         with peer.commandexecutor() as e:
             remotephases = e.callcommand(
                 b'listkeys', {b'namespace': b'phases',}
             ).result()

         pullop = exchange.pulloperation(bundlerepo, peer, heads=reponodes)
         pullop.trmanager = bundletransactionmanager()
         exchange._pullapplyphases(pullop, remotephases)

     def cleanup():
         if bundlerepo:
             bundlerepo.close()
         if bundle:
             os.unlink(bundle)
         peer.close()

     return (localrepo, csets, cleanup)