bundlerepo: factor out code for instantiating a bundle repository...
Gregory Szorc
r39639:a8d2faec default
@@ -1,622 +1,629 @@
# bundlerepo.py - repository class for viewing uncompressed bundles
#
# Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Repository class for viewing uncompressed bundles.

This provides a read-only repository interface to bundles as if they
were part of the actual repository.
"""

from __future__ import absolute_import

import os
import shutil

from .i18n import _
from .node import nullid

from . import (
    bundle2,
    changegroup,
    changelog,
    cmdutil,
    discovery,
    error,
    exchange,
    filelog,
    localrepo,
    manifest,
    mdiff,
    node as nodemod,
    pathutil,
    phases,
    pycompat,
    revlog,
    util,
    vfs as vfsmod,
)

class bundlerevlog(revlog.revlog):
    def __init__(self, opener, indexfile, cgunpacker, linkmapper):
        # How it works:
        # To retrieve a revision, we need to know the offset of the revision in
        # the bundle (an unbundle object). We store this offset in the index
        # (start). The base of the delta is stored in the base field.
        #
        # To differentiate a rev in the bundle from a rev in the revlog, we
        # check revision against repotiprev.
        opener = vfsmod.readonlyvfs(opener)
        revlog.revlog.__init__(self, opener, indexfile)
        self.bundle = cgunpacker
        n = len(self)
        self.repotiprev = n - 1
        self.bundlerevs = set() # used by 'bundle()' revset expression
        for deltadata in cgunpacker.deltaiter():
            node, p1, p2, cs, deltabase, delta, flags = deltadata

            size = len(delta)
            start = cgunpacker.tell() - size

            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                self.bundlerevs.add(self.nodemap[node])
                continue

            for p in (p1, p2):
                if p not in self.nodemap:
                    raise error.LookupError(p, self.indexfile,
                                            _("unknown parent"))

            if deltabase not in self.nodemap:
                raise LookupError(deltabase, self.indexfile,
                                  _('unknown delta base'))

            baserev = self.rev(deltabase)
            # start, size, full unc. size, base (unused), link, p1, p2, node
            e = (revlog.offset_type(start, flags), size, -1, baserev, link,
                 self.rev(p1), self.rev(p2), node)
            self.index.append(e)
            self.nodemap[node] = n
            self.bundlerevs.add(n)
            n += 1

    def _chunk(self, rev, df=None):
        # Warning: in case of bundle, the diff is against what we stored as
        # delta base, not against rev - 1
        # XXX: could use some caching
        if rev <= self.repotiprev:
            return revlog.revlog._chunk(self, rev)
        self.bundle.seek(self.start(rev))
        return self.bundle.read(self.length(rev))

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if rev1 > self.repotiprev and rev2 > self.repotiprev:
            # hot path for bundle
            revb = self.index[rev2][3]
            if revb == rev1:
                return self._chunk(rev2)
        elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
            return revlog.revlog.revdiff(self, rev1, rev2)

        return mdiff.textdiff(self.revision(rev1, raw=True),
                              self.revision(rev2, raw=True))

    def revision(self, nodeorrev, _df=None, raw=False):
        """return an uncompressed revision of a given node or revision
        number.
        """
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = self.rev(node)

        if node == nullid:
            return ""

        rawtext = None
        chain = []
        iterrev = rev
        # reconstruct the revision if it is from a changegroup
        while iterrev > self.repotiprev:
            if self._cache and self._cache[1] == iterrev:
                rawtext = self._cache[2]
                break
            chain.append(iterrev)
            iterrev = self.index[iterrev][3]
        if rawtext is None:
            rawtext = self.baserevision(iterrev)

        while chain:
            delta = self._chunk(chain.pop())
            rawtext = mdiff.patches(rawtext, [delta])

        text, validatehash = self._processflags(rawtext, self.flags(rev),
                                                'read', raw=raw)
        if validatehash:
            self.checkhash(text, node, rev=rev)
        self._cache = (node, rev, rawtext)
        return text

    def baserevision(self, nodeorrev):
        # Revlog subclasses may override 'revision' method to modify format of
        # content retrieved from revlog. To use bundlerevlog with such class one
        # needs to override 'baserevision' and make more specific call here.
        return revlog.revlog.revision(self, nodeorrev, raw=True)

    def addrevision(self, *args, **kwargs):
        raise NotImplementedError

    def addgroup(self, *args, **kwargs):
        raise NotImplementedError

    def strip(self, *args, **kwargs):
        raise NotImplementedError

    def checksize(self):
        raise NotImplementedError

class bundlechangelog(bundlerevlog, changelog.changelog):
    def __init__(self, opener, cgunpacker):
        changelog.changelog.__init__(self, opener)
        linkmapper = lambda x: x
        bundlerevlog.__init__(self, opener, self.indexfile, cgunpacker,
                              linkmapper)

    def baserevision(self, nodeorrev):
        # Although changelog doesn't override 'revision' method, some extensions
        # may replace this class with another that does. Same story with
        # manifest and filelog classes.

        # This bypasses filtering on changelog.node() and rev() because we need
        # revision text of the bundle base even if it is hidden.
        oldfilter = self.filteredrevs
        try:
            self.filteredrevs = ()
            return changelog.changelog.revision(self, nodeorrev, raw=True)
        finally:
            self.filteredrevs = oldfilter

class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
    def __init__(self, opener, cgunpacker, linkmapper, dirlogstarts=None,
                 dir=''):
        manifest.manifestrevlog.__init__(self, opener, tree=dir)
        bundlerevlog.__init__(self, opener, self.indexfile, cgunpacker,
                              linkmapper)
        if dirlogstarts is None:
            dirlogstarts = {}
            if self.bundle.version == "03":
                dirlogstarts = _getfilestarts(self.bundle)
        self._dirlogstarts = dirlogstarts
        self._linkmapper = linkmapper

    def baserevision(self, nodeorrev):
        node = nodeorrev
        if isinstance(node, int):
            node = self.node(node)

        if node in self.fulltextcache:
            result = '%s' % self.fulltextcache[node]
        else:
            result = manifest.manifestrevlog.revision(self, nodeorrev, raw=True)
        return result

    def dirlog(self, d):
        if d in self._dirlogstarts:
            self.bundle.seek(self._dirlogstarts[d])
            return bundlemanifest(
                self.opener, self.bundle, self._linkmapper,
                self._dirlogstarts, dir=d)
        return super(bundlemanifest, self).dirlog(d)

class bundlefilelog(filelog.filelog):
    def __init__(self, opener, path, cgunpacker, linkmapper):
        filelog.filelog.__init__(self, opener, path)
        self._revlog = bundlerevlog(opener, self.indexfile,
                                    cgunpacker, linkmapper)

    def baserevision(self, nodeorrev):
        return filelog.filelog.revision(self, nodeorrev, raw=True)

class bundlepeer(localrepo.localpeer):
    def canpush(self):
        return False

class bundlephasecache(phases.phasecache):
    def __init__(self, *args, **kwargs):
        super(bundlephasecache, self).__init__(*args, **kwargs)
        if util.safehasattr(self, 'opener'):
            self.opener = vfsmod.readonlyvfs(self.opener)

    def write(self):
        raise NotImplementedError

    def _write(self, fp):
        raise NotImplementedError

    def _updateroots(self, phase, newroots, tr):
        self.phaseroots[phase] = newroots
        self.invalidate()
        self.dirty = True

def _getfilestarts(cgunpacker):
    filespos = {}
    for chunkdata in iter(cgunpacker.filelogheader, {}):
        fname = chunkdata['filename']
        filespos[fname] = cgunpacker.tell()
        for chunk in iter(lambda: cgunpacker.deltachunk(None), {}):
            pass
    return filespos

class bundlerepository(localrepo.localrepository):
    """A repository instance that is a union of a local repo and a bundle.

    Instances represent a read-only repository composed of a local repository
    with the contents of a bundle file applied. The repository instance is
    conceptually similar to the state of a repository after an
    ``hg unbundle`` operation. However, the contents of the bundle are never
    applied to the actual base repository.
    """
    def __init__(self, ui, repopath, bundlepath):
        self._tempparent = None
        try:
            localrepo.localrepository.__init__(self, ui, repopath)
        except error.RepoError:
            self._tempparent = pycompat.mkdtemp()
            localrepo.instance(ui, self._tempparent, create=True)
            localrepo.localrepository.__init__(self, ui, self._tempparent)
        self.ui.setconfig('phases', 'publish', False, 'bundlerepo')

        if repopath:
            self._url = 'bundle:' + util.expandpath(repopath) + '+' + bundlepath
        else:
            self._url = 'bundle:' + bundlepath

        self.tempfile = None
        f = util.posixfile(bundlepath, "rb")
        bundle = exchange.readbundle(ui, f, bundlepath)

        if isinstance(bundle, bundle2.unbundle20):
            self._bundlefile = bundle
            self._cgunpacker = None

            cgpart = None
            for part in bundle.iterparts(seekable=True):
                if part.type == 'changegroup':
                    if cgpart:
                        raise NotImplementedError("can't process "
                                                  "multiple changegroups")
                    cgpart = part

                self._handlebundle2part(bundle, part)

            if not cgpart:
                raise error.Abort(_("No changegroups found"))

            # This is required to placate a later consumer, which expects
            # the payload offset to be at the beginning of the changegroup.
            # We need to do this after the iterparts() generator advances
            # because iterparts() will seek to end of payload after the
            # generator returns control to iterparts().
            cgpart.seek(0, os.SEEK_SET)

        elif isinstance(bundle, changegroup.cg1unpacker):
            if bundle.compressed():
                f = self._writetempbundle(bundle.read, '.hg10un',
                                          header='HG10UN')
                bundle = exchange.readbundle(ui, f, bundlepath, self.vfs)

            self._bundlefile = bundle
            self._cgunpacker = bundle
        else:
            raise error.Abort(_('bundle type %s cannot be read') %
                              type(bundle))

        # dict with the mapping 'filename' -> position in the changegroup.
        self._cgfilespos = {}

        self.firstnewrev = self.changelog.repotiprev + 1
        phases.retractboundary(self, None, phases.draft,
                               [ctx.node() for ctx in self[self.firstnewrev:]])

    def _handlebundle2part(self, bundle, part):
        if part.type != 'changegroup':
            return

        cgstream = part
        version = part.params.get('version', '01')
        legalcgvers = changegroup.supportedincomingversions(self)
        if version not in legalcgvers:
            msg = _('Unsupported changegroup version: %s')
            raise error.Abort(msg % version)
        if bundle.compressed():
            cgstream = self._writetempbundle(part.read, '.cg%sun' % version)

        self._cgunpacker = changegroup.getunbundler(version, cgstream, 'UN')

    def _writetempbundle(self, readfn, suffix, header=''):
        """Write a temporary file to disk
        """
        fdtemp, temp = self.vfs.mkstemp(prefix="hg-bundle-",
                                        suffix=suffix)
        self.tempfile = temp

        with os.fdopen(fdtemp, r'wb') as fptemp:
            fptemp.write(header)
            while True:
                chunk = readfn(2**18)
                if not chunk:
                    break
                fptemp.write(chunk)

        return self.vfs.open(self.tempfile, mode="rb")

    @localrepo.unfilteredpropertycache
    def _phasecache(self):
        return bundlephasecache(self, self._phasedefaults)

    @localrepo.unfilteredpropertycache
    def changelog(self):
        # consume the header if it exists
        self._cgunpacker.changelogheader()
        c = bundlechangelog(self.svfs, self._cgunpacker)
        self.manstart = self._cgunpacker.tell()
        return c

    def _constructmanifest(self):
        self._cgunpacker.seek(self.manstart)
        # consume the header if it exists
        self._cgunpacker.manifestheader()
        linkmapper = self.unfiltered().changelog.rev
        m = bundlemanifest(self.svfs, self._cgunpacker, linkmapper)
        self.filestart = self._cgunpacker.tell()
        return m

    def _consumemanifest(self):
        """Consumes the manifest portion of the bundle, setting filestart so the
        file portion can be read."""
        self._cgunpacker.seek(self.manstart)
        self._cgunpacker.manifestheader()
        for delta in self._cgunpacker.deltaiter():
            pass
        self.filestart = self._cgunpacker.tell()

    @localrepo.unfilteredpropertycache
    def manstart(self):
        self.changelog
        return self.manstart

    @localrepo.unfilteredpropertycache
    def filestart(self):
        self.manifestlog

        # If filestart was not set by self.manifestlog, that means the
        # manifestlog implementation did not consume the manifests from the
        # changegroup (ex: it might be consuming trees from a separate bundle2
        # part instead). So we need to manually consume it.
        if r'filestart' not in self.__dict__:
            self._consumemanifest()

        return self.filestart

    def url(self):
        return self._url

    def file(self, f):
        if not self._cgfilespos:
            self._cgunpacker.seek(self.filestart)
            self._cgfilespos = _getfilestarts(self._cgunpacker)

        if f in self._cgfilespos:
            self._cgunpacker.seek(self._cgfilespos[f])
            linkmapper = self.unfiltered().changelog.rev
            return bundlefilelog(self.svfs, f, self._cgunpacker, linkmapper)
        else:
            return super(bundlerepository, self).file(f)

    def close(self):
        """Close assigned bundle file immediately."""
        self._bundlefile.close()
        if self.tempfile is not None:
            self.vfs.unlink(self.tempfile)
        if self._tempparent:
            shutil.rmtree(self._tempparent, True)

    def cancopy(self):
        return False

    def peer(self):
        return bundlepeer(self)

    def getcwd(self):
        return pycompat.getcwd() # always outside the repo

    # Check if parents exist in localrepo before setting
    def setparents(self, p1, p2=nullid):
        p1rev = self.changelog.rev(p1)
        p2rev = self.changelog.rev(p2)
        msg = _("setting parent to node %s that only exists in the bundle\n")
        if self.changelog.repotiprev < p1rev:
            self.ui.warn(msg % nodemod.hex(p1))
        if self.changelog.repotiprev < p2rev:
            self.ui.warn(msg % nodemod.hex(p2))
        return super(bundlerepository, self).setparents(p1, p2)

def instance(ui, path, create, intents=None, createopts=None):
    if create:
        raise error.Abort(_('cannot create new bundle repository'))
    # internal config: bundle.mainreporoot
    parentpath = ui.config("bundle", "mainreporoot")
    if not parentpath:
        # try to find the correct path to the working directory repo
        parentpath = cmdutil.findrepo(pycompat.getcwd())
        if parentpath is None:
            parentpath = ''
    if parentpath:
        # Try to make the full path relative so we get a nice, short URL.
        # In particular, we don't want temp dir names in test outputs.
        cwd = pycompat.getcwd()
        if parentpath == cwd:
            parentpath = ''
        else:
            cwd = pathutil.normasprefix(cwd)
            if parentpath.startswith(cwd):
                parentpath = parentpath[len(cwd):]
    u = util.url(path)
    path = u.localpath()
    if u.scheme == 'bundle':
        s = path.split("+", 1)
        if len(s) == 1:
            repopath, bundlename = parentpath, s[0]
        else:
            repopath, bundlename = s
    else:
        repopath, bundlename = parentpath, path
-    return bundlerepository(ui, repopath, bundlename)
+
+    return makebundlerepository(ui, repopath, bundlename)
+
+def makebundlerepository(ui, repopath, bundlepath):
+    """Make a bundle repository object based on repo and bundle paths."""
+    return bundlerepository(ui, repopath, bundlepath)

class bundletransactionmanager(object):
    def transaction(self):
        return None

    def close(self):
        raise NotImplementedError

    def release(self):
        raise NotImplementedError

def getremotechanges(ui, repo, peer, onlyheads=None, bundlename=None,
                     force=False):
    '''obtains a bundle of changes incoming from peer

    "onlyheads" restricts the returned changes to those reachable from the
    specified heads.
    "bundlename", if given, stores the bundle to this file path permanently;
    otherwise it's stored to a temp file and gets deleted again when you call
    the returned "cleanupfn".
    "force" indicates whether to proceed on unrelated repos.

    Returns a tuple (local, csets, cleanupfn):

    "local" is a local repo from which to obtain the actual incoming
    changesets; it is a bundlerepo for the obtained bundle when the
    original "peer" is remote.
    "csets" lists the incoming changeset node ids.
    "cleanupfn" must be called without arguments when you're done processing
    the changes; it closes both the original "peer" and the one returned
    here.
    '''
    tmp = discovery.findcommonincoming(repo, peer, heads=onlyheads,
                                       force=force)
    common, incoming, rheads = tmp
    if not incoming:
        try:
            if bundlename:
                os.unlink(bundlename)
        except OSError:
            pass
        return repo, [], peer.close

    commonset = set(common)
    rheads = [x for x in rheads if x not in commonset]

    bundle = None
    bundlerepo = None
    localrepo = peer.local()
    if bundlename or not localrepo:
        # create a bundle (uncompressed if peer repo is not local)

        # developer config: devel.legacy.exchange
        legexc = ui.configlist('devel', 'legacy.exchange')
        forcebundle1 = 'bundle2' not in legexc and 'bundle1' in legexc
        canbundle2 = (not forcebundle1
                      and peer.capable('getbundle')
                      and peer.capable('bundle2'))
        if canbundle2:
            with peer.commandexecutor() as e:
                b2 = e.callcommand('getbundle', {
                    'source': 'incoming',
                    'common': common,
                    'heads': rheads,
                    'bundlecaps': exchange.caps20to10(repo, role='client'),
                    'cg': True,
                }).result()

                fname = bundle = changegroup.writechunks(ui,
                                                         b2._forwardchunks(),
                                                         bundlename)
        else:
            if peer.capable('getbundle'):
                with peer.commandexecutor() as e:
                    cg = e.callcommand('getbundle', {
                        'source': 'incoming',
                        'common': common,
                        'heads': rheads,
                    }).result()
            elif onlyheads is None and not peer.capable('changegroupsubset'):
                # compat with older servers when pulling all remote heads

                with peer.commandexecutor() as e:
                    cg = e.callcommand('changegroup', {
                        'nodes': incoming,
                        'source': 'incoming',
                    }).result()

                rheads = None
            else:
                with peer.commandexecutor() as e:
                    cg = e.callcommand('changegroupsubset', {
                        'bases': incoming,
                        'heads': rheads,
                        'source': 'incoming',
                    }).result()

            if localrepo:
                bundletype = "HG10BZ"
            else:
                bundletype = "HG10UN"
            fname = bundle = bundle2.writebundle(ui, cg, bundlename,
                                                 bundletype)
        # keep written bundle?
        if bundlename:
            bundle = None
        if not localrepo:
            # use the created uncompressed bundlerepo
-            localrepo = bundlerepo = bundlerepository(repo.baseui, repo.root,
-                                                      fname)
+            localrepo = bundlerepo = makebundlerepository(repo.baseui,
+                                                          repo.root,
+                                                          fname)
+
            # this repo contains local and peer now, so filter out local again
            common = repo.heads()
    if localrepo:
        # Part of common may be remotely filtered
        # So use an unfiltered version
        # The discovery process probably need cleanup to avoid that
        localrepo = localrepo.unfiltered()

    csets = localrepo.changelog.findmissing(common, rheads)

    if bundlerepo:
        reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev:]]

        with peer.commandexecutor() as e:
            remotephases = e.callcommand('listkeys', {
                'namespace': 'phases',
            }).result()

        pullop = exchange.pulloperation(bundlerepo, peer, heads=reponodes)
        pullop.trmanager = bundletransactionmanager()
        exchange._pullapplyphases(pullop, remotephases)

    def cleanup():
        if bundlerepo:
            bundlerepo.close()
        if bundle:
            os.unlink(bundle)
        peer.close()

    return (localrepo, csets, cleanup)
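
The factored-out makebundlerepository() gives callers and extensions a single module-level entry point instead of invoking the bundlerepository constructor directly. A minimal sketch of how an extension might hook it, assuming the standard extensions.wrapfunction mechanism; the wrapper name and the debug message are illustrative only, not part of this changeset:

# hypothetical extension module
from mercurial import bundlerepo, extensions

def _makebundlerepo(orig, ui, repopath, bundlepath):
    # delegate to the original factory after noting which bundle is opened
    ui.debug('opening bundle repository from %s\n' % bundlepath)
    return orig(ui, repopath, bundlepath)

def extsetup(ui):
    # replaces the factory for every caller, including instance() and
    # getremotechanges()
    extensions.wrapfunction(bundlerepo, 'makebundlerepository',
                            _makebundlerepo)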