index: use `index.has_node` in `bundlerepo.bundlerevlog`...
marmoute
r43942:5f347567 default
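
The change replaces dict-style membership tests on `self.nodemap` with the revlog index's `has_node()` query. A minimal sketch of the pattern (the helper name is illustrative, not part of the commit):

def _knows(rl, node):
    # Old form, as removed by this commit:
    #     node in rl.nodemap
    # New form: ask the index object directly for node membership.
    return rl.index.has_node(node)
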
@@ -1,669 +1,669 b''
# bundlerepo.py - repository class for viewing uncompressed bundles
#
# Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Repository class for viewing uncompressed bundles.

This provides a read-only repository interface to bundles as if they
were part of the actual repository.
"""

from __future__ import absolute_import

import os
import shutil

from .i18n import _
from .node import nullid, nullrev

from . import (
    bundle2,
    changegroup,
    changelog,
    cmdutil,
    discovery,
    encoding,
    error,
    exchange,
    filelog,
    localrepo,
    manifest,
    mdiff,
    node as nodemod,
    pathutil,
    phases,
    pycompat,
    revlog,
    util,
    vfs as vfsmod,
)


class bundlerevlog(revlog.revlog):
    def __init__(self, opener, indexfile, cgunpacker, linkmapper):
        # How it works:
        # To retrieve a revision, we need to know the offset of the revision in
        # the bundle (an unbundle object). We store this offset in the index
        # (start). The base of the delta is stored in the base field.
        #
        # To differentiate a rev in the bundle from a rev in the revlog, we
        # check revision against repotiprev.
        opener = vfsmod.readonlyvfs(opener)
        revlog.revlog.__init__(self, opener, indexfile)
        self.bundle = cgunpacker
        n = len(self)
        self.repotiprev = n - 1
        self.bundlerevs = set()  # used by 'bundle()' revset expression
        for deltadata in cgunpacker.deltaiter():
            node, p1, p2, cs, deltabase, delta, flags = deltadata

            size = len(delta)
            start = cgunpacker.tell() - size

            link = linkmapper(cs)
-            if node in self.nodemap:
+            if self.index.has_node(node):
                # this can happen if two branches make the same change
                self.bundlerevs.add(self.nodemap[node])
                continue

            for p in (p1, p2):
-                if p not in self.nodemap:
+                if not self.index.has_node(p):
                    raise error.LookupError(
                        p, self.indexfile, _(b"unknown parent")
                    )

-            if deltabase not in self.nodemap:
+            if not self.index.has_node(deltabase):
                raise LookupError(
                    deltabase, self.indexfile, _(b'unknown delta base')
                )

            baserev = self.rev(deltabase)
            # start, size, full unc. size, base (unused), link, p1, p2, node
            e = (
                revlog.offset_type(start, flags),
                size,
                -1,
                baserev,
                link,
                self.rev(p1),
                self.rev(p2),
                node,
            )
            self.index.append(e)
            self.bundlerevs.add(n)
            n += 1

    def _chunk(self, rev, df=None):
        # Warning: in case of bundle, the diff is against what we stored as
        # delta base, not against rev - 1
        # XXX: could use some caching
        if rev <= self.repotiprev:
            return revlog.revlog._chunk(self, rev)
        self.bundle.seek(self.start(rev))
        return self.bundle.read(self.length(rev))

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if rev1 > self.repotiprev and rev2 > self.repotiprev:
            # hot path for bundle
            revb = self.index[rev2][3]
            if revb == rev1:
                return self._chunk(rev2)
        elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
            return revlog.revlog.revdiff(self, rev1, rev2)

        return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))

    def _rawtext(self, node, rev, _df=None):
        if rev is None:
            rev = self.rev(node)
        validated = False
        rawtext = None
        chain = []
        iterrev = rev
        # reconstruct the revision if it is from a changegroup
        while iterrev > self.repotiprev:
            if self._revisioncache and self._revisioncache[1] == iterrev:
                rawtext = self._revisioncache[2]
                break
            chain.append(iterrev)
            iterrev = self.index[iterrev][3]
        if iterrev == nullrev:
            rawtext = b''
        elif rawtext is None:
            r = super(bundlerevlog, self)._rawtext(
                self.node(iterrev), iterrev, _df=_df
            )
            __, rawtext, validated = r
        if chain:
            validated = False
        while chain:
            delta = self._chunk(chain.pop())
            rawtext = mdiff.patches(rawtext, [delta])
        return rev, rawtext, validated

    def addrevision(self, *args, **kwargs):
        raise NotImplementedError

    def addgroup(self, *args, **kwargs):
        raise NotImplementedError

    def strip(self, *args, **kwargs):
        raise NotImplementedError

    def checksize(self):
        raise NotImplementedError


class bundlechangelog(bundlerevlog, changelog.changelog):
    def __init__(self, opener, cgunpacker):
        changelog.changelog.__init__(self, opener)
        linkmapper = lambda x: x
        bundlerevlog.__init__(
            self, opener, self.indexfile, cgunpacker, linkmapper
        )


class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
    def __init__(
        self, opener, cgunpacker, linkmapper, dirlogstarts=None, dir=b''
    ):
        manifest.manifestrevlog.__init__(self, opener, tree=dir)
        bundlerevlog.__init__(
            self, opener, self.indexfile, cgunpacker, linkmapper
        )
        if dirlogstarts is None:
            dirlogstarts = {}
            if self.bundle.version == b"03":
                dirlogstarts = _getfilestarts(self.bundle)
        self._dirlogstarts = dirlogstarts
        self._linkmapper = linkmapper

    def dirlog(self, d):
        if d in self._dirlogstarts:
            self.bundle.seek(self._dirlogstarts[d])
            return bundlemanifest(
                self.opener,
                self.bundle,
                self._linkmapper,
                self._dirlogstarts,
                dir=d,
            )
        return super(bundlemanifest, self).dirlog(d)


class bundlefilelog(filelog.filelog):
    def __init__(self, opener, path, cgunpacker, linkmapper):
        filelog.filelog.__init__(self, opener, path)
        self._revlog = bundlerevlog(
            opener, self.indexfile, cgunpacker, linkmapper
        )


class bundlepeer(localrepo.localpeer):
    def canpush(self):
        return False


class bundlephasecache(phases.phasecache):
    def __init__(self, *args, **kwargs):
        super(bundlephasecache, self).__init__(*args, **kwargs)
        if util.safehasattr(self, 'opener'):
            self.opener = vfsmod.readonlyvfs(self.opener)

    def write(self):
        raise NotImplementedError

    def _write(self, fp):
        raise NotImplementedError

    def _updateroots(self, phase, newroots, tr):
        self.phaseroots[phase] = newroots
        self.invalidate()
        self.dirty = True


def _getfilestarts(cgunpacker):
    filespos = {}
    for chunkdata in iter(cgunpacker.filelogheader, {}):
        fname = chunkdata[b'filename']
        filespos[fname] = cgunpacker.tell()
        for chunk in iter(lambda: cgunpacker.deltachunk(None), {}):
            pass
    return filespos


class bundlerepository(object):
    """A repository instance that is a union of a local repo and a bundle.

    Instances represent a read-only repository composed of a local repository
    with the contents of a bundle file applied. The repository instance is
    conceptually similar to the state of a repository after an
    ``hg unbundle`` operation. However, the contents of the bundle are never
    applied to the actual base repository.

    Instances constructed directly are not usable as repository objects.
    Use instance() or makebundlerepository() to create instances.
    """

    def __init__(self, bundlepath, url, tempparent):
        self._tempparent = tempparent
        self._url = url

        self.ui.setconfig(b'phases', b'publish', False, b'bundlerepo')

        self.tempfile = None
        f = util.posixfile(bundlepath, b"rb")
        bundle = exchange.readbundle(self.ui, f, bundlepath)

        if isinstance(bundle, bundle2.unbundle20):
            self._bundlefile = bundle
            self._cgunpacker = None

            cgpart = None
            for part in bundle.iterparts(seekable=True):
                if part.type == b'changegroup':
                    if cgpart:
                        raise NotImplementedError(
                            b"can't process multiple changegroups"
                        )
                    cgpart = part

                self._handlebundle2part(bundle, part)

            if not cgpart:
                raise error.Abort(_(b"No changegroups found"))

            # This is required to placate a later consumer, which expects
            # the payload offset to be at the beginning of the changegroup.
            # We need to do this after the iterparts() generator advances
            # because iterparts() will seek to end of payload after the
            # generator returns control to iterparts().
            cgpart.seek(0, os.SEEK_SET)

        elif isinstance(bundle, changegroup.cg1unpacker):
            if bundle.compressed():
                f = self._writetempbundle(
                    bundle.read, b'.hg10un', header=b'HG10UN'
                )
                bundle = exchange.readbundle(self.ui, f, bundlepath, self.vfs)

            self._bundlefile = bundle
            self._cgunpacker = bundle
        else:
            raise error.Abort(
                _(b'bundle type %s cannot be read') % type(bundle)
            )

        # dict with the mapping 'filename' -> position in the changegroup.
        self._cgfilespos = {}

        self.firstnewrev = self.changelog.repotiprev + 1
        phases.retractboundary(
            self,
            None,
            phases.draft,
            [ctx.node() for ctx in self[self.firstnewrev :]],
        )

    def _handlebundle2part(self, bundle, part):
        if part.type != b'changegroup':
            return

        cgstream = part
        version = part.params.get(b'version', b'01')
        legalcgvers = changegroup.supportedincomingversions(self)
        if version not in legalcgvers:
            msg = _(b'Unsupported changegroup version: %s')
            raise error.Abort(msg % version)
        if bundle.compressed():
            cgstream = self._writetempbundle(part.read, b'.cg%sun' % version)

        self._cgunpacker = changegroup.getunbundler(version, cgstream, b'UN')

    def _writetempbundle(self, readfn, suffix, header=b''):
        """Write a temporary file to disk
        """
        fdtemp, temp = self.vfs.mkstemp(prefix=b"hg-bundle-", suffix=suffix)
        self.tempfile = temp

        with os.fdopen(fdtemp, 'wb') as fptemp:
            fptemp.write(header)
            while True:
                chunk = readfn(2 ** 18)
                if not chunk:
                    break
                fptemp.write(chunk)

        return self.vfs.open(self.tempfile, mode=b"rb")

    @localrepo.unfilteredpropertycache
    def _phasecache(self):
        return bundlephasecache(self, self._phasedefaults)

    @localrepo.unfilteredpropertycache
    def changelog(self):
        # consume the header if it exists
        self._cgunpacker.changelogheader()
        c = bundlechangelog(self.svfs, self._cgunpacker)
        self.manstart = self._cgunpacker.tell()
        return c

    def _refreshchangelog(self):
        # changelog for bundle repo are not filecache, this method is not
        # applicable.
        pass

    @localrepo.unfilteredpropertycache
    def manifestlog(self):
        self._cgunpacker.seek(self.manstart)
        # consume the header if it exists
        self._cgunpacker.manifestheader()
        linkmapper = self.unfiltered().changelog.rev
        rootstore = bundlemanifest(self.svfs, self._cgunpacker, linkmapper)
        self.filestart = self._cgunpacker.tell()

        return manifest.manifestlog(
            self.svfs, self, rootstore, self.narrowmatch()
        )

    def _consumemanifest(self):
        """Consumes the manifest portion of the bundle, setting filestart so the
        file portion can be read."""
        self._cgunpacker.seek(self.manstart)
        self._cgunpacker.manifestheader()
        for delta in self._cgunpacker.deltaiter():
            pass
        self.filestart = self._cgunpacker.tell()

    @localrepo.unfilteredpropertycache
    def manstart(self):
        self.changelog
        return self.manstart

    @localrepo.unfilteredpropertycache
    def filestart(self):
        self.manifestlog

        # If filestart was not set by self.manifestlog, that means the
        # manifestlog implementation did not consume the manifests from the
        # changegroup (ex: it might be consuming trees from a separate bundle2
        # part instead). So we need to manually consume it.
        if 'filestart' not in self.__dict__:
            self._consumemanifest()

        return self.filestart

    def url(self):
        return self._url

    def file(self, f):
        if not self._cgfilespos:
            self._cgunpacker.seek(self.filestart)
            self._cgfilespos = _getfilestarts(self._cgunpacker)

        if f in self._cgfilespos:
            self._cgunpacker.seek(self._cgfilespos[f])
            linkmapper = self.unfiltered().changelog.rev
            return bundlefilelog(self.svfs, f, self._cgunpacker, linkmapper)
        else:
            return super(bundlerepository, self).file(f)

    def close(self):
        """Close assigned bundle file immediately."""
        self._bundlefile.close()
        if self.tempfile is not None:
            self.vfs.unlink(self.tempfile)
        if self._tempparent:
            shutil.rmtree(self._tempparent, True)

    def cancopy(self):
        return False

    def peer(self):
        return bundlepeer(self)

    def getcwd(self):
        return encoding.getcwd()  # always outside the repo

    # Check if parents exist in localrepo before setting
    def setparents(self, p1, p2=nullid):
        p1rev = self.changelog.rev(p1)
        p2rev = self.changelog.rev(p2)
        msg = _(b"setting parent to node %s that only exists in the bundle\n")
        if self.changelog.repotiprev < p1rev:
            self.ui.warn(msg % nodemod.hex(p1))
        if self.changelog.repotiprev < p2rev:
            self.ui.warn(msg % nodemod.hex(p2))
        return super(bundlerepository, self).setparents(p1, p2)


def instance(ui, path, create, intents=None, createopts=None):
    if create:
        raise error.Abort(_(b'cannot create new bundle repository'))
    # internal config: bundle.mainreporoot
    parentpath = ui.config(b"bundle", b"mainreporoot")
    if not parentpath:
        # try to find the correct path to the working directory repo
        parentpath = cmdutil.findrepo(encoding.getcwd())
        if parentpath is None:
            parentpath = b''
    if parentpath:
        # Try to make the full path relative so we get a nice, short URL.
        # In particular, we don't want temp dir names in test outputs.
        cwd = encoding.getcwd()
        if parentpath == cwd:
            parentpath = b''
        else:
            cwd = pathutil.normasprefix(cwd)
            if parentpath.startswith(cwd):
                parentpath = parentpath[len(cwd) :]
    u = util.url(path)
    path = u.localpath()
    if u.scheme == b'bundle':
        s = path.split(b"+", 1)
        if len(s) == 1:
            repopath, bundlename = parentpath, s[0]
        else:
            repopath, bundlename = s
    else:
        repopath, bundlename = parentpath, path

    return makebundlerepository(ui, repopath, bundlename)


def makebundlerepository(ui, repopath, bundlepath):
    """Make a bundle repository object based on repo and bundle paths."""
    if repopath:
        url = b'bundle:%s+%s' % (util.expandpath(repopath), bundlepath)
    else:
        url = b'bundle:%s' % bundlepath

    # Because we can't make any guarantees about the type of the base
    # repository, we can't have a static class representing the bundle
    # repository. We also can't make any guarantees about how to even
    # call the base repository's constructor!
    #
    # So, our strategy is to go through ``localrepo.instance()`` to construct
    # a repo instance. Then, we dynamically create a new type derived from
    # both it and our ``bundlerepository`` class which overrides some
    # functionality. We then change the type of the constructed repository
    # to this new type and initialize the bundle-specific bits of it.

    try:
        repo = localrepo.instance(ui, repopath, create=False)
        tempparent = None
    except error.RepoError:
        tempparent = pycompat.mkdtemp()
        try:
            repo = localrepo.instance(ui, tempparent, create=True)
        except Exception:
            shutil.rmtree(tempparent)
            raise

    class derivedbundlerepository(bundlerepository, repo.__class__):
        pass

    repo.__class__ = derivedbundlerepository
    bundlerepository.__init__(repo, bundlepath, url, tempparent)

    return repo


class bundletransactionmanager(object):
    def transaction(self):
        return None

    def close(self):
        raise NotImplementedError

    def release(self):
        raise NotImplementedError


def getremotechanges(
    ui, repo, peer, onlyheads=None, bundlename=None, force=False
):
    '''obtains a bundle of changes incoming from peer

    "onlyheads" restricts the returned changes to those reachable from the
    specified heads.
    "bundlename", if given, stores the bundle to this file path permanently;
    otherwise it's stored to a temp file and gets deleted again when you call
    the returned "cleanupfn".
    "force" indicates whether to proceed on unrelated repos.

    Returns a tuple (local, csets, cleanupfn):

    "local" is a local repo from which to obtain the actual incoming
    changesets; it is a bundlerepo for the obtained bundle when the
    original "peer" is remote.
    "csets" lists the incoming changeset node ids.
    "cleanupfn" must be called without arguments when you're done processing
    the changes; it closes both the original "peer" and the one returned
    here.
    '''
    tmp = discovery.findcommonincoming(repo, peer, heads=onlyheads, force=force)
    common, incoming, rheads = tmp
    if not incoming:
        try:
            if bundlename:
                os.unlink(bundlename)
        except OSError:
            pass
        return repo, [], peer.close

    commonset = set(common)
    rheads = [x for x in rheads if x not in commonset]

    bundle = None
    bundlerepo = None
    localrepo = peer.local()
    if bundlename or not localrepo:
        # create a bundle (uncompressed if peer repo is not local)

        # developer config: devel.legacy.exchange
        legexc = ui.configlist(b'devel', b'legacy.exchange')
        forcebundle1 = b'bundle2' not in legexc and b'bundle1' in legexc
        canbundle2 = (
            not forcebundle1
            and peer.capable(b'getbundle')
            and peer.capable(b'bundle2')
        )
        if canbundle2:
            with peer.commandexecutor() as e:
                b2 = e.callcommand(
                    b'getbundle',
                    {
                        b'source': b'incoming',
                        b'common': common,
                        b'heads': rheads,
                        b'bundlecaps': exchange.caps20to10(
                            repo, role=b'client'
                        ),
                        b'cg': True,
                    },
                ).result()

                fname = bundle = changegroup.writechunks(
                    ui, b2._forwardchunks(), bundlename
                )
        else:
            if peer.capable(b'getbundle'):
                with peer.commandexecutor() as e:
                    cg = e.callcommand(
                        b'getbundle',
                        {
                            b'source': b'incoming',
                            b'common': common,
                            b'heads': rheads,
                        },
                    ).result()
            elif onlyheads is None and not peer.capable(b'changegroupsubset'):
                # compat with older servers when pulling all remote heads

                with peer.commandexecutor() as e:
                    cg = e.callcommand(
                        b'changegroup',
                        {b'nodes': incoming, b'source': b'incoming',},
                    ).result()

                rheads = None
            else:
                with peer.commandexecutor() as e:
                    cg = e.callcommand(
                        b'changegroupsubset',
                        {
                            b'bases': incoming,
                            b'heads': rheads,
                            b'source': b'incoming',
                        },
                    ).result()

            if localrepo:
                bundletype = b"HG10BZ"
            else:
                bundletype = b"HG10UN"
            fname = bundle = bundle2.writebundle(ui, cg, bundlename, bundletype)
        # keep written bundle?
        if bundlename:
            bundle = None
        if not localrepo:
            # use the created uncompressed bundlerepo
            localrepo = bundlerepo = makebundlerepository(
                repo.baseui, repo.root, fname
            )

            # this repo contains local and peer now, so filter out local again
            common = repo.heads()
    if localrepo:
        # Part of common may be remotely filtered
        # So use an unfiltered version
        # The discovery process probably need cleanup to avoid that
        localrepo = localrepo.unfiltered()

    csets = localrepo.changelog.findmissing(common, rheads)

    if bundlerepo:
        reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev :]]

        with peer.commandexecutor() as e:
            remotephases = e.callcommand(
                b'listkeys', {b'namespace': b'phases',}
            ).result()

        pullop = exchange.pulloperation(bundlerepo, peer, heads=reponodes)
        pullop.trmanager = bundletransactionmanager()
        exchange._pullapplyphases(pullop, remotephases)

    def cleanup():
        if bundlerepo:
            bundlerepo.close()
        if bundle:
            os.unlink(bundle)
        peer.close()

    return (localrepo, csets, cleanup)
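
For context, a hypothetical usage sketch (not part of the commit) showing how a bundle repository built with `makebundlerepository()` exposes the bundle's revisions; the two paths are placeholders:

from mercurial import ui as uimod
from mercurial import bundlerepo

ui = uimod.ui.load()
# Both arguments are bytes paths: the base local repository and a bundle file.
repo = bundlerepo.makebundlerepository(
    ui, b'/path/to/repo', b'/path/to/incoming.hg'
)
try:
    # Revisions that exist only in the bundle start at firstnewrev.
    for ctx in repo[repo.firstnewrev:]:
        ui.write(b'%s\n' % ctx.hex()[:12])
finally:
    repo.close()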