rawdata: update callers in bundlerepo...
marmoute
r43015:e6d5ac17 default draft
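The change below is mechanical: every call site that fetched raw revision text via revision(..., raw=True) now goes through the dedicated rawdata() accessor instead (the unchanged bundlerevlog.rawdata() further down simply forwards to revision(..., raw=True)). A minimal sketch of the two equivalent spellings, using a placeholder revlog object rl and node somenode that are not part of this diff:

    # old spelling: ask revision() for the raw, unprocessed text
    rawtext = rl.revision(somenode, raw=True)

    # new spelling: the dedicated raw accessor
    rawtext = rl.rawdata(somenode)

Only the spelling of each caller changes; the returned raw text is the same.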
@@ -1,668 +1,668 b''
# bundlerepo.py - repository class for viewing uncompressed bundles
#
# Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Repository class for viewing uncompressed bundles.

This provides a read-only repository interface to bundles as if they
were part of the actual repository.
"""

from __future__ import absolute_import

import os
import shutil

from .i18n import _
from .node import nullid

from . import (
    bundle2,
    changegroup,
    changelog,
    cmdutil,
    discovery,
    encoding,
    error,
    exchange,
    filelog,
    localrepo,
    manifest,
    mdiff,
    node as nodemod,
    pathutil,
    phases,
    pycompat,
    revlog,
    util,
    vfs as vfsmod,
)

class bundlerevlog(revlog.revlog):
    def __init__(self, opener, indexfile, cgunpacker, linkmapper):
        # How it works:
        # To retrieve a revision, we need to know the offset of the revision in
        # the bundle (an unbundle object). We store this offset in the index
        # (start). The base of the delta is stored in the base field.
        #
        # To differentiate a rev in the bundle from a rev in the revlog, we
        # check revision against repotiprev.
        opener = vfsmod.readonlyvfs(opener)
        revlog.revlog.__init__(self, opener, indexfile)
        self.bundle = cgunpacker
        n = len(self)
        self.repotiprev = n - 1
        self.bundlerevs = set() # used by 'bundle()' revset expression
        for deltadata in cgunpacker.deltaiter():
            node, p1, p2, cs, deltabase, delta, flags = deltadata

            size = len(delta)
            start = cgunpacker.tell() - size

            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                self.bundlerevs.add(self.nodemap[node])
                continue

            for p in (p1, p2):
                if p not in self.nodemap:
                    raise error.LookupError(p, self.indexfile,
                                            _("unknown parent"))

            if deltabase not in self.nodemap:
                raise LookupError(deltabase, self.indexfile,
                                  _('unknown delta base'))

            baserev = self.rev(deltabase)
            # start, size, full unc. size, base (unused), link, p1, p2, node
            e = (revlog.offset_type(start, flags), size, -1, baserev, link,
                 self.rev(p1), self.rev(p2), node)
            self.index.append(e)
            self.nodemap[node] = n
            self.bundlerevs.add(n)
            n += 1

    def _chunk(self, rev, df=None):
        # Warning: in case of bundle, the diff is against what we stored as
        # delta base, not against rev - 1
        # XXX: could use some caching
        if rev <= self.repotiprev:
            return revlog.revlog._chunk(self, rev)
        self.bundle.seek(self.start(rev))
        return self.bundle.read(self.length(rev))

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if rev1 > self.repotiprev and rev2 > self.repotiprev:
            # hot path for bundle
            revb = self.index[rev2][3]
            if revb == rev1:
                return self._chunk(rev2)
        elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
            return revlog.revlog.revdiff(self, rev1, rev2)

-        return mdiff.textdiff(self.revision(rev1, raw=True),
-                              self.revision(rev2, raw=True))
+        return mdiff.textdiff(self.rawdata(rev1),
+                              self.rawdata(rev2))

    def revision(self, nodeorrev, _df=None, raw=False):
112 """return an uncompressed revision of a given node or revision
112 """return an uncompressed revision of a given node or revision
113 number.
113 number.
114 """
114 """
115 if isinstance(nodeorrev, int):
115 if isinstance(nodeorrev, int):
116 rev = nodeorrev
116 rev = nodeorrev
117 node = self.node(rev)
117 node = self.node(rev)
118 else:
118 else:
119 node = nodeorrev
119 node = nodeorrev
120 rev = self.rev(node)
120 rev = self.rev(node)
121
121
122 if node == nullid:
122 if node == nullid:
123 return ""
123 return ""
124
124
125 rawtext = None
125 rawtext = None
126 chain = []
126 chain = []
127 iterrev = rev
127 iterrev = rev
128 # reconstruct the revision if it is from a changegroup
128 # reconstruct the revision if it is from a changegroup
129 while iterrev > self.repotiprev:
129 while iterrev > self.repotiprev:
130 if self._revisioncache and self._revisioncache[1] == iterrev:
130 if self._revisioncache and self._revisioncache[1] == iterrev:
131 rawtext = self._revisioncache[2]
131 rawtext = self._revisioncache[2]
132 break
132 break
133 chain.append(iterrev)
133 chain.append(iterrev)
134 iterrev = self.index[iterrev][3]
134 iterrev = self.index[iterrev][3]
135 if rawtext is None:
135 if rawtext is None:
136 rawtext = self.baserevision(iterrev)
136 rawtext = self.baserevision(iterrev)
137
137
138 while chain:
138 while chain:
139 delta = self._chunk(chain.pop())
139 delta = self._chunk(chain.pop())
140 rawtext = mdiff.patches(rawtext, [delta])
140 rawtext = mdiff.patches(rawtext, [delta])
141
141
142 text, validatehash = self._processflags(rawtext, self.flags(rev),
142 text, validatehash = self._processflags(rawtext, self.flags(rev),
143 'read', raw=raw)
143 'read', raw=raw)
144 if validatehash:
144 if validatehash:
145 self.checkhash(text, node, rev=rev)
145 self.checkhash(text, node, rev=rev)
146 self._revisioncache = (node, rev, rawtext)
146 self._revisioncache = (node, rev, rawtext)
147 return text
147 return text
148
148
149 def rawdata(self, nodeorrev, _df=None):
149 def rawdata(self, nodeorrev, _df=None):
150 return self.revision(nodeorrev, _df=_df, raw=True)
150 return self.revision(nodeorrev, _df=_df, raw=True)
151
151
152 def baserevision(self, nodeorrev):
152 def baserevision(self, nodeorrev):
153 # Revlog subclasses may override 'revision' method to modify format of
153 # Revlog subclasses may override 'revision' method to modify format of
154 # content retrieved from revlog. To use bundlerevlog with such class one
154 # content retrieved from revlog. To use bundlerevlog with such class one
155 # needs to override 'baserevision' and make more specific call here.
155 # needs to override 'baserevision' and make more specific call here.
156 return revlog.revlog.revision(self, nodeorrev, raw=True)
156 return revlog.revlog.rawdata(self, nodeorrev)
157
157
158 def addrevision(self, *args, **kwargs):
158 def addrevision(self, *args, **kwargs):
159 raise NotImplementedError
159 raise NotImplementedError
160
160
161 def addgroup(self, *args, **kwargs):
161 def addgroup(self, *args, **kwargs):
162 raise NotImplementedError
162 raise NotImplementedError
163
163
164 def strip(self, *args, **kwargs):
164 def strip(self, *args, **kwargs):
165 raise NotImplementedError
165 raise NotImplementedError
166
166
167 def checksize(self):
167 def checksize(self):
168 raise NotImplementedError
168 raise NotImplementedError
169
169
170 class bundlechangelog(bundlerevlog, changelog.changelog):
170 class bundlechangelog(bundlerevlog, changelog.changelog):
171 def __init__(self, opener, cgunpacker):
171 def __init__(self, opener, cgunpacker):
172 changelog.changelog.__init__(self, opener)
172 changelog.changelog.__init__(self, opener)
173 linkmapper = lambda x: x
173 linkmapper = lambda x: x
174 bundlerevlog.__init__(self, opener, self.indexfile, cgunpacker,
174 bundlerevlog.__init__(self, opener, self.indexfile, cgunpacker,
175 linkmapper)
175 linkmapper)
176
176
177 def baserevision(self, nodeorrev):
177 def baserevision(self, nodeorrev):
178 # Although changelog doesn't override 'revision' method, some extensions
178 # Although changelog doesn't override 'revision' method, some extensions
179 # may replace this class with another that does. Same story with
179 # may replace this class with another that does. Same story with
180 # manifest and filelog classes.
180 # manifest and filelog classes.
181
181
182 # This bypasses filtering on changelog.node() and rev() because we need
182 # This bypasses filtering on changelog.node() and rev() because we need
183 # revision text of the bundle base even if it is hidden.
183 # revision text of the bundle base even if it is hidden.
184 oldfilter = self.filteredrevs
184 oldfilter = self.filteredrevs
185 try:
185 try:
186 self.filteredrevs = ()
186 self.filteredrevs = ()
187 return changelog.changelog.revision(self, nodeorrev, raw=True)
187 return changelog.changelog.rawdata(self, nodeorrev)
188 finally:
188 finally:
189 self.filteredrevs = oldfilter
189 self.filteredrevs = oldfilter
190
190
191 class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
191 class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
    def __init__(self, opener, cgunpacker, linkmapper, dirlogstarts=None,
                 dir=''):
        manifest.manifestrevlog.__init__(self, opener, tree=dir)
        bundlerevlog.__init__(self, opener, self.indexfile, cgunpacker,
                              linkmapper)
        if dirlogstarts is None:
            dirlogstarts = {}
            if self.bundle.version == "03":
                dirlogstarts = _getfilestarts(self.bundle)
        self._dirlogstarts = dirlogstarts
        self._linkmapper = linkmapper

    def baserevision(self, nodeorrev):
        node = nodeorrev
        if isinstance(node, int):
            node = self.node(node)

        if node in self.fulltextcache:
            result = '%s' % self.fulltextcache[node]
        else:
-            result = manifest.manifestrevlog.revision(self, nodeorrev, raw=True)
+            result = manifest.manifestrevlog.rawdata(self, nodeorrev)
        return result

    def dirlog(self, d):
        if d in self._dirlogstarts:
            self.bundle.seek(self._dirlogstarts[d])
            return bundlemanifest(
                self.opener, self.bundle, self._linkmapper,
                self._dirlogstarts, dir=d)
        return super(bundlemanifest, self).dirlog(d)

class bundlefilelog(filelog.filelog):
    def __init__(self, opener, path, cgunpacker, linkmapper):
        filelog.filelog.__init__(self, opener, path)
        self._revlog = bundlerevlog(opener, self.indexfile,
                                    cgunpacker, linkmapper)

    def baserevision(self, nodeorrev):
-        return filelog.filelog.revision(self, nodeorrev, raw=True)
+        return filelog.filelog.rawdata(self, nodeorrev)

class bundlepeer(localrepo.localpeer):
    def canpush(self):
        return False

class bundlephasecache(phases.phasecache):
    def __init__(self, *args, **kwargs):
        super(bundlephasecache, self).__init__(*args, **kwargs)
        if util.safehasattr(self, 'opener'):
            self.opener = vfsmod.readonlyvfs(self.opener)

    def write(self):
        raise NotImplementedError

    def _write(self, fp):
        raise NotImplementedError

    def _updateroots(self, phase, newroots, tr):
        self.phaseroots[phase] = newroots
        self.invalidate()
        self.dirty = True

def _getfilestarts(cgunpacker):
    filespos = {}
    for chunkdata in iter(cgunpacker.filelogheader, {}):
        fname = chunkdata['filename']
        filespos[fname] = cgunpacker.tell()
        for chunk in iter(lambda: cgunpacker.deltachunk(None), {}):
            pass
    return filespos

class bundlerepository(object):
263 """A repository instance that is a union of a local repo and a bundle.
263 """A repository instance that is a union of a local repo and a bundle.
264
264
265 Instances represent a read-only repository composed of a local repository
265 Instances represent a read-only repository composed of a local repository
266 with the contents of a bundle file applied. The repository instance is
266 with the contents of a bundle file applied. The repository instance is
267 conceptually similar to the state of a repository after an
267 conceptually similar to the state of a repository after an
268 ``hg unbundle`` operation. However, the contents of the bundle are never
268 ``hg unbundle`` operation. However, the contents of the bundle are never
269 applied to the actual base repository.
269 applied to the actual base repository.
270
270
271 Instances constructed directly are not usable as repository objects.
271 Instances constructed directly are not usable as repository objects.
272 Use instance() or makebundlerepository() to create instances.
272 Use instance() or makebundlerepository() to create instances.
273 """
273 """
274 def __init__(self, bundlepath, url, tempparent):
274 def __init__(self, bundlepath, url, tempparent):
275 self._tempparent = tempparent
275 self._tempparent = tempparent
276 self._url = url
276 self._url = url
277
277
278 self.ui.setconfig('phases', 'publish', False, 'bundlerepo')
278 self.ui.setconfig('phases', 'publish', False, 'bundlerepo')
279
279
280 self.tempfile = None
280 self.tempfile = None
281 f = util.posixfile(bundlepath, "rb")
281 f = util.posixfile(bundlepath, "rb")
282 bundle = exchange.readbundle(self.ui, f, bundlepath)
282 bundle = exchange.readbundle(self.ui, f, bundlepath)
283
283
284 if isinstance(bundle, bundle2.unbundle20):
284 if isinstance(bundle, bundle2.unbundle20):
285 self._bundlefile = bundle
285 self._bundlefile = bundle
286 self._cgunpacker = None
286 self._cgunpacker = None
287
287
288 cgpart = None
288 cgpart = None
289 for part in bundle.iterparts(seekable=True):
289 for part in bundle.iterparts(seekable=True):
290 if part.type == 'changegroup':
290 if part.type == 'changegroup':
291 if cgpart:
291 if cgpart:
292 raise NotImplementedError("can't process "
292 raise NotImplementedError("can't process "
293 "multiple changegroups")
293 "multiple changegroups")
294 cgpart = part
294 cgpart = part
295
295
296 self._handlebundle2part(bundle, part)
296 self._handlebundle2part(bundle, part)
297
297
298 if not cgpart:
298 if not cgpart:
299 raise error.Abort(_("No changegroups found"))
299 raise error.Abort(_("No changegroups found"))
300
300
301 # This is required to placate a later consumer, which expects
301 # This is required to placate a later consumer, which expects
302 # the payload offset to be at the beginning of the changegroup.
302 # the payload offset to be at the beginning of the changegroup.
303 # We need to do this after the iterparts() generator advances
303 # We need to do this after the iterparts() generator advances
304 # because iterparts() will seek to end of payload after the
304 # because iterparts() will seek to end of payload after the
305 # generator returns control to iterparts().
305 # generator returns control to iterparts().
306 cgpart.seek(0, os.SEEK_SET)
306 cgpart.seek(0, os.SEEK_SET)
307
307
308 elif isinstance(bundle, changegroup.cg1unpacker):
308 elif isinstance(bundle, changegroup.cg1unpacker):
309 if bundle.compressed():
309 if bundle.compressed():
310 f = self._writetempbundle(bundle.read, '.hg10un',
310 f = self._writetempbundle(bundle.read, '.hg10un',
311 header='HG10UN')
311 header='HG10UN')
312 bundle = exchange.readbundle(self.ui, f, bundlepath, self.vfs)
312 bundle = exchange.readbundle(self.ui, f, bundlepath, self.vfs)
313
313
314 self._bundlefile = bundle
314 self._bundlefile = bundle
315 self._cgunpacker = bundle
315 self._cgunpacker = bundle
316 else:
316 else:
317 raise error.Abort(_('bundle type %s cannot be read') %
317 raise error.Abort(_('bundle type %s cannot be read') %
318 type(bundle))
318 type(bundle))
319
319
320 # dict with the mapping 'filename' -> position in the changegroup.
320 # dict with the mapping 'filename' -> position in the changegroup.
321 self._cgfilespos = {}
321 self._cgfilespos = {}
322
322
323 self.firstnewrev = self.changelog.repotiprev + 1
323 self.firstnewrev = self.changelog.repotiprev + 1
324 phases.retractboundary(self, None, phases.draft,
324 phases.retractboundary(self, None, phases.draft,
325 [ctx.node() for ctx in self[self.firstnewrev:]])
325 [ctx.node() for ctx in self[self.firstnewrev:]])
326
326
327 def _handlebundle2part(self, bundle, part):
327 def _handlebundle2part(self, bundle, part):
328 if part.type != 'changegroup':
328 if part.type != 'changegroup':
329 return
329 return
330
330
331 cgstream = part
331 cgstream = part
332 version = part.params.get('version', '01')
332 version = part.params.get('version', '01')
333 legalcgvers = changegroup.supportedincomingversions(self)
333 legalcgvers = changegroup.supportedincomingversions(self)
334 if version not in legalcgvers:
334 if version not in legalcgvers:
335 msg = _('Unsupported changegroup version: %s')
335 msg = _('Unsupported changegroup version: %s')
336 raise error.Abort(msg % version)
336 raise error.Abort(msg % version)
337 if bundle.compressed():
337 if bundle.compressed():
338 cgstream = self._writetempbundle(part.read, '.cg%sun' % version)
338 cgstream = self._writetempbundle(part.read, '.cg%sun' % version)
339
339
340 self._cgunpacker = changegroup.getunbundler(version, cgstream, 'UN')
340 self._cgunpacker = changegroup.getunbundler(version, cgstream, 'UN')
341
341
342 def _writetempbundle(self, readfn, suffix, header=''):
342 def _writetempbundle(self, readfn, suffix, header=''):
343 """Write a temporary file to disk
343 """Write a temporary file to disk
344 """
344 """
345 fdtemp, temp = self.vfs.mkstemp(prefix="hg-bundle-",
345 fdtemp, temp = self.vfs.mkstemp(prefix="hg-bundle-",
346 suffix=suffix)
346 suffix=suffix)
347 self.tempfile = temp
347 self.tempfile = temp
348
348
349 with os.fdopen(fdtemp, r'wb') as fptemp:
349 with os.fdopen(fdtemp, r'wb') as fptemp:
350 fptemp.write(header)
350 fptemp.write(header)
351 while True:
351 while True:
352 chunk = readfn(2**18)
352 chunk = readfn(2**18)
353 if not chunk:
353 if not chunk:
354 break
354 break
355 fptemp.write(chunk)
355 fptemp.write(chunk)
356
356
357 return self.vfs.open(self.tempfile, mode="rb")
357 return self.vfs.open(self.tempfile, mode="rb")
358
358
359 @localrepo.unfilteredpropertycache
359 @localrepo.unfilteredpropertycache
360 def _phasecache(self):
360 def _phasecache(self):
361 return bundlephasecache(self, self._phasedefaults)
361 return bundlephasecache(self, self._phasedefaults)
362
362
363 @localrepo.unfilteredpropertycache
363 @localrepo.unfilteredpropertycache
364 def changelog(self):
364 def changelog(self):
365 # consume the header if it exists
365 # consume the header if it exists
366 self._cgunpacker.changelogheader()
366 self._cgunpacker.changelogheader()
367 c = bundlechangelog(self.svfs, self._cgunpacker)
367 c = bundlechangelog(self.svfs, self._cgunpacker)
368 self.manstart = self._cgunpacker.tell()
368 self.manstart = self._cgunpacker.tell()
369 return c
369 return c
370
370
371 def _refreshchangelog(self):
371 def _refreshchangelog(self):
372 # changelog for bundle repo are not filecache, this method is not
372 # changelog for bundle repo are not filecache, this method is not
373 # applicable.
373 # applicable.
374 pass
374 pass
375
375
376 @localrepo.unfilteredpropertycache
376 @localrepo.unfilteredpropertycache
377 def manifestlog(self):
377 def manifestlog(self):
378 self._cgunpacker.seek(self.manstart)
378 self._cgunpacker.seek(self.manstart)
379 # consume the header if it exists
379 # consume the header if it exists
380 self._cgunpacker.manifestheader()
380 self._cgunpacker.manifestheader()
381 linkmapper = self.unfiltered().changelog.rev
381 linkmapper = self.unfiltered().changelog.rev
382 rootstore = bundlemanifest(self.svfs, self._cgunpacker, linkmapper)
382 rootstore = bundlemanifest(self.svfs, self._cgunpacker, linkmapper)
383 self.filestart = self._cgunpacker.tell()
383 self.filestart = self._cgunpacker.tell()
384
384
385 return manifest.manifestlog(self.svfs, self, rootstore,
385 return manifest.manifestlog(self.svfs, self, rootstore,
386 self.narrowmatch())
386 self.narrowmatch())
387
387
388 def _consumemanifest(self):
388 def _consumemanifest(self):
389 """Consumes the manifest portion of the bundle, setting filestart so the
389 """Consumes the manifest portion of the bundle, setting filestart so the
390 file portion can be read."""
390 file portion can be read."""
391 self._cgunpacker.seek(self.manstart)
391 self._cgunpacker.seek(self.manstart)
392 self._cgunpacker.manifestheader()
392 self._cgunpacker.manifestheader()
393 for delta in self._cgunpacker.deltaiter():
393 for delta in self._cgunpacker.deltaiter():
394 pass
394 pass
395 self.filestart = self._cgunpacker.tell()
395 self.filestart = self._cgunpacker.tell()
396
396
397 @localrepo.unfilteredpropertycache
397 @localrepo.unfilteredpropertycache
398 def manstart(self):
398 def manstart(self):
399 self.changelog
399 self.changelog
400 return self.manstart
400 return self.manstart
401
401
402 @localrepo.unfilteredpropertycache
402 @localrepo.unfilteredpropertycache
403 def filestart(self):
403 def filestart(self):
404 self.manifestlog
404 self.manifestlog
405
405
406 # If filestart was not set by self.manifestlog, that means the
406 # If filestart was not set by self.manifestlog, that means the
407 # manifestlog implementation did not consume the manifests from the
407 # manifestlog implementation did not consume the manifests from the
408 # changegroup (ex: it might be consuming trees from a separate bundle2
408 # changegroup (ex: it might be consuming trees from a separate bundle2
409 # part instead). So we need to manually consume it.
409 # part instead). So we need to manually consume it.
410 if r'filestart' not in self.__dict__:
410 if r'filestart' not in self.__dict__:
411 self._consumemanifest()
411 self._consumemanifest()
412
412
413 return self.filestart
413 return self.filestart
414
414
415 def url(self):
415 def url(self):
416 return self._url
416 return self._url
417
417
418 def file(self, f):
418 def file(self, f):
419 if not self._cgfilespos:
419 if not self._cgfilespos:
420 self._cgunpacker.seek(self.filestart)
420 self._cgunpacker.seek(self.filestart)
421 self._cgfilespos = _getfilestarts(self._cgunpacker)
421 self._cgfilespos = _getfilestarts(self._cgunpacker)
422
422
423 if f in self._cgfilespos:
423 if f in self._cgfilespos:
424 self._cgunpacker.seek(self._cgfilespos[f])
424 self._cgunpacker.seek(self._cgfilespos[f])
425 linkmapper = self.unfiltered().changelog.rev
425 linkmapper = self.unfiltered().changelog.rev
426 return bundlefilelog(self.svfs, f, self._cgunpacker, linkmapper)
426 return bundlefilelog(self.svfs, f, self._cgunpacker, linkmapper)
427 else:
427 else:
428 return super(bundlerepository, self).file(f)
428 return super(bundlerepository, self).file(f)
429
429
430 def close(self):
430 def close(self):
431 """Close assigned bundle file immediately."""
431 """Close assigned bundle file immediately."""
432 self._bundlefile.close()
432 self._bundlefile.close()
433 if self.tempfile is not None:
433 if self.tempfile is not None:
434 self.vfs.unlink(self.tempfile)
434 self.vfs.unlink(self.tempfile)
435 if self._tempparent:
435 if self._tempparent:
436 shutil.rmtree(self._tempparent, True)
436 shutil.rmtree(self._tempparent, True)
437
437
438 def cancopy(self):
438 def cancopy(self):
439 return False
439 return False
440
440
441 def peer(self):
441 def peer(self):
442 return bundlepeer(self)
442 return bundlepeer(self)
443
443
444 def getcwd(self):
444 def getcwd(self):
445 return encoding.getcwd() # always outside the repo
445 return encoding.getcwd() # always outside the repo
446
446
447 # Check if parents exist in localrepo before setting
447 # Check if parents exist in localrepo before setting
448 def setparents(self, p1, p2=nullid):
448 def setparents(self, p1, p2=nullid):
449 p1rev = self.changelog.rev(p1)
449 p1rev = self.changelog.rev(p1)
450 p2rev = self.changelog.rev(p2)
450 p2rev = self.changelog.rev(p2)
451 msg = _("setting parent to node %s that only exists in the bundle\n")
451 msg = _("setting parent to node %s that only exists in the bundle\n")
452 if self.changelog.repotiprev < p1rev:
452 if self.changelog.repotiprev < p1rev:
453 self.ui.warn(msg % nodemod.hex(p1))
453 self.ui.warn(msg % nodemod.hex(p1))
454 if self.changelog.repotiprev < p2rev:
454 if self.changelog.repotiprev < p2rev:
455 self.ui.warn(msg % nodemod.hex(p2))
455 self.ui.warn(msg % nodemod.hex(p2))
456 return super(bundlerepository, self).setparents(p1, p2)
456 return super(bundlerepository, self).setparents(p1, p2)
457
457
458 def instance(ui, path, create, intents=None, createopts=None):
458 def instance(ui, path, create, intents=None, createopts=None):
    if create:
        raise error.Abort(_('cannot create new bundle repository'))
    # internal config: bundle.mainreporoot
    parentpath = ui.config("bundle", "mainreporoot")
    if not parentpath:
        # try to find the correct path to the working directory repo
        parentpath = cmdutil.findrepo(encoding.getcwd())
        if parentpath is None:
            parentpath = ''
    if parentpath:
        # Try to make the full path relative so we get a nice, short URL.
        # In particular, we don't want temp dir names in test outputs.
        cwd = encoding.getcwd()
        if parentpath == cwd:
            parentpath = ''
        else:
            cwd = pathutil.normasprefix(cwd)
            if parentpath.startswith(cwd):
                parentpath = parentpath[len(cwd):]
    u = util.url(path)
    path = u.localpath()
    if u.scheme == 'bundle':
        s = path.split("+", 1)
        if len(s) == 1:
            repopath, bundlename = parentpath, s[0]
        else:
            repopath, bundlename = s
    else:
        repopath, bundlename = parentpath, path

    return makebundlerepository(ui, repopath, bundlename)

def makebundlerepository(ui, repopath, bundlepath):
    """Make a bundle repository object based on repo and bundle paths."""
    if repopath:
        url = 'bundle:%s+%s' % (util.expandpath(repopath), bundlepath)
    else:
        url = 'bundle:%s' % bundlepath

    # Because we can't make any guarantees about the type of the base
    # repository, we can't have a static class representing the bundle
    # repository. We also can't make any guarantees about how to even
    # call the base repository's constructor!
    #
    # So, our strategy is to go through ``localrepo.instance()`` to construct
    # a repo instance. Then, we dynamically create a new type derived from
    # both it and our ``bundlerepository`` class which overrides some
    # functionality. We then change the type of the constructed repository
    # to this new type and initialize the bundle-specific bits of it.

    try:
        repo = localrepo.instance(ui, repopath, create=False)
        tempparent = None
    except error.RepoError:
        tempparent = pycompat.mkdtemp()
        try:
            repo = localrepo.instance(ui, tempparent, create=True)
        except Exception:
            shutil.rmtree(tempparent)
            raise

    class derivedbundlerepository(bundlerepository, repo.__class__):
        pass

    repo.__class__ = derivedbundlerepository
    bundlerepository.__init__(repo, bundlepath, url, tempparent)

    return repo

class bundletransactionmanager(object):
    def transaction(self):
        return None

    def close(self):
        raise NotImplementedError

    def release(self):
        raise NotImplementedError

def getremotechanges(ui, repo, peer, onlyheads=None, bundlename=None,
                     force=False):
    '''obtains a bundle of changes incoming from peer

    "onlyheads" restricts the returned changes to those reachable from the
    specified heads.
    "bundlename", if given, stores the bundle to this file path permanently;
    otherwise it's stored to a temp file and gets deleted again when you call
    the returned "cleanupfn".
    "force" indicates whether to proceed on unrelated repos.

    Returns a tuple (local, csets, cleanupfn):

    "local" is a local repo from which to obtain the actual incoming
      changesets; it is a bundlerepo for the obtained bundle when the
      original "peer" is remote.
    "csets" lists the incoming changeset node ids.
    "cleanupfn" must be called without arguments when you're done processing
      the changes; it closes both the original "peer" and the one returned
      here.
    '''
    tmp = discovery.findcommonincoming(repo, peer, heads=onlyheads,
                                       force=force)
    common, incoming, rheads = tmp
    if not incoming:
        try:
            if bundlename:
                os.unlink(bundlename)
        except OSError:
            pass
        return repo, [], peer.close

    commonset = set(common)
    rheads = [x for x in rheads if x not in commonset]

    bundle = None
    bundlerepo = None
    localrepo = peer.local()
    if bundlename or not localrepo:
        # create a bundle (uncompressed if peer repo is not local)

        # developer config: devel.legacy.exchange
        legexc = ui.configlist('devel', 'legacy.exchange')
        forcebundle1 = 'bundle2' not in legexc and 'bundle1' in legexc
        canbundle2 = (not forcebundle1
                      and peer.capable('getbundle')
                      and peer.capable('bundle2'))
        if canbundle2:
            with peer.commandexecutor() as e:
                b2 = e.callcommand('getbundle', {
                    'source': 'incoming',
                    'common': common,
                    'heads': rheads,
                    'bundlecaps': exchange.caps20to10(repo, role='client'),
                    'cg': True,
                }).result()

                fname = bundle = changegroup.writechunks(ui,
                                                         b2._forwardchunks(),
                                                         bundlename)
        else:
            if peer.capable('getbundle'):
                with peer.commandexecutor() as e:
                    cg = e.callcommand('getbundle', {
                        'source': 'incoming',
                        'common': common,
                        'heads': rheads,
                    }).result()
            elif onlyheads is None and not peer.capable('changegroupsubset'):
                # compat with older servers when pulling all remote heads

                with peer.commandexecutor() as e:
                    cg = e.callcommand('changegroup', {
                        'nodes': incoming,
                        'source': 'incoming',
                    }).result()

                rheads = None
            else:
                with peer.commandexecutor() as e:
                    cg = e.callcommand('changegroupsubset', {
                        'bases': incoming,
                        'heads': rheads,
                        'source': 'incoming',
                    }).result()

            if localrepo:
                bundletype = "HG10BZ"
            else:
                bundletype = "HG10UN"
            fname = bundle = bundle2.writebundle(ui, cg, bundlename,
                                                 bundletype)
        # keep written bundle?
        if bundlename:
            bundle = None
        if not localrepo:
            # use the created uncompressed bundlerepo
            localrepo = bundlerepo = makebundlerepository(repo.baseui,
                                                          repo.root,
                                                          fname)

            # this repo contains local and peer now, so filter out local again
            common = repo.heads()
    if localrepo:
        # Part of common may be remotely filtered
        # So use an unfiltered version
        # The discovery process probably need cleanup to avoid that
        localrepo = localrepo.unfiltered()

    csets = localrepo.changelog.findmissing(common, rheads)

    if bundlerepo:
        reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev:]]

        with peer.commandexecutor() as e:
            remotephases = e.callcommand('listkeys', {
                'namespace': 'phases',
            }).result()

        pullop = exchange.pulloperation(bundlerepo, peer, heads=reponodes)
        pullop.trmanager = bundletransactionmanager()
        exchange._pullapplyphases(pullop, remotephases)

    def cleanup():
        if bundlerepo:
            bundlerepo.close()
        if bundle:
            os.unlink(bundle)
        peer.close()

    return (localrepo, csets, cleanup)