Show More
@@ -1,759 +1,766 | |||||
1 | # bundlerepo.py - repository class for viewing uncompressed bundles |
|
1 | # bundlerepo.py - repository class for viewing uncompressed bundles | |
2 | # |
|
2 | # | |
3 | # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com> |
|
3 | # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | """Repository class for viewing uncompressed bundles. |
|
8 | """Repository class for viewing uncompressed bundles. | |
9 |
|
9 | |||
10 | This provides a read-only repository interface to bundles as if they |
|
10 | This provides a read-only repository interface to bundles as if they | |
11 | were part of the actual repository. |
|
11 | were part of the actual repository. | |
12 | """ |
|
12 | """ | |
13 |
|
13 | |||
14 | from __future__ import annotations |
|
14 | from __future__ import annotations | |
15 |
|
15 | |||
16 | import contextlib |
|
16 | import contextlib | |
17 | import os |
|
17 | import os | |
18 | import shutil |
|
18 | import shutil | |
|
19 | import typing | |||
19 |
|
20 | |||
20 | from .i18n import _ |
|
21 | from .i18n import _ | |
21 | from .node import ( |
|
22 | from .node import ( | |
22 | hex, |
|
23 | hex, | |
23 | nullrev, |
|
24 | nullrev, | |
24 | ) |
|
25 | ) | |
25 |
|
26 | |||
26 | from . import ( |
|
27 | from . import ( | |
27 | bundle2, |
|
28 | bundle2, | |
28 | changegroup, |
|
29 | changegroup, | |
29 | changelog, |
|
30 | changelog, | |
30 | cmdutil, |
|
31 | cmdutil, | |
31 | discovery, |
|
32 | discovery, | |
32 | encoding, |
|
33 | encoding, | |
33 | error, |
|
34 | error, | |
34 | exchange, |
|
35 | exchange, | |
35 | filelog, |
|
36 | filelog, | |
36 | localrepo, |
|
37 | localrepo, | |
37 | manifest, |
|
38 | manifest, | |
38 | mdiff, |
|
39 | mdiff, | |
39 | pathutil, |
|
40 | pathutil, | |
40 | phases, |
|
41 | phases, | |
41 | pycompat, |
|
42 | pycompat, | |
42 | revlog, |
|
43 | revlog, | |
43 | revlogutils, |
|
44 | revlogutils, | |
44 | util, |
|
45 | util, | |
45 | vfs as vfsmod, |
|
46 | vfs as vfsmod, | |
46 | ) |
|
47 | ) | |
47 | from .utils import ( |
|
48 | from .utils import ( | |
48 | urlutil, |
|
49 | urlutil, | |
49 | ) |
|
50 | ) | |
50 |
|
51 | |||
51 | from .revlogutils import ( |
|
52 | from .revlogutils import ( | |
52 | constants as revlog_constants, |
|
53 | constants as revlog_constants, | |
53 | ) |
|
54 | ) | |
54 |
|
55 | |||
55 |
|
56 | |||
class bundlerevlog(revlog.revlog):
    """A revlog overlaid with revisions taken from a changegroup bundle.

    Revisions already present in the on-disk revlog are served normally;
    revisions that only exist in the bundle are reconstructed on demand
    from the deltas stored in the (uncompressed) changegroup stream.
    """

    def __init__(self, opener, target, radix, cgunpacker, linkmapper):
        # How it works:
        # To retrieve a revision, we need to know the offset of the revision in
        # the bundle (an unbundle object). We store this offset in the index
        # (start). The base of the delta is stored in the base field.
        #
        # To differentiate a rev in the bundle from a rev in the revlog, we
        # check revision against repotiprev.
        opener = vfsmod.readonlyvfs(opener)
        revlog.revlog.__init__(self, opener, target=target, radix=radix)
        self.bundle = cgunpacker
        n = len(self)
        # last revision that lives in the underlying repository; anything
        # above this index comes from the bundle
        self.repotiprev = n - 1
        self.bundlerevs = set()  # used by 'bundle()' revset expression
        for deltadata in cgunpacker.deltaiter():
            node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata

            size = len(delta)
            # the unpacker has already consumed this delta, so the payload
            # starts `size` bytes before the current stream position
            start = cgunpacker.tell() - size

            if self.index.has_node(node):
                # this can happen if two branches make the same change
                self.bundlerevs.add(self.index.rev(node))
                continue
            if cs == node:
                linkrev = nullrev
            else:
                linkrev = linkmapper(cs)

            for p in (p1, p2):
                if not self.index.has_node(p):
                    raise error.LookupError(
                        p, self.display_id, _(b"unknown parent")
                    )

            if not self.index.has_node(deltabase):
                raise error.LookupError(
                    deltabase, self.display_id, _(b'unknown delta base')
                )

            baserev = self.rev(deltabase)
            # start, size, full unc. size, base (unused), link, p1, p2, node, sidedata_offset (unused), sidedata_size (unused)
            e = revlogutils.entry(
                flags=flags,
                data_offset=start,
                data_compressed_length=size,
                data_delta_base=baserev,
                link_rev=linkrev,
                parent_rev_1=self.rev(p1),
                parent_rev_2=self.rev(p2),
                node_id=node,
            )
            self.index.append(e)
            self.bundlerevs.add(n)
            n += 1

    @contextlib.contextmanager
    def reading(self):
        """Context manager granting read access to revision data.

        When the on-disk revlog is empty (repotiprev < 0) there is no file
        to open, so this is a no-op; otherwise defer to the base class.
        """
        if self.repotiprev < 0:
            yield
        else:
            with super().reading() as x:
                yield x

    def _chunk(self, rev):
        # Warning: in case of bundle, the diff is against what we stored as
        # delta base, not against rev - 1
        # XXX: could use some caching
        if rev <= self.repotiprev:
            return revlog.revlog._chunk(self, rev)
        self.bundle.seek(self.start(rev))
        return self.bundle.read(self.length(rev))

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if rev1 > self.repotiprev and rev2 > self.repotiprev:
            # hot path for bundle
            revb = self.index[rev2][3]
            if revb == rev1:
                return self._chunk(rev2)
        elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
            return revlog.revlog.revdiff(self, rev1, rev2)

        # mixed repo/bundle case (or non-adjacent bundle revs): diff the
        # reconstructed full texts
        return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))

    def _rawtext(self, node, rev):
        """Return ``(rev, rawtext, validated)``, rebuilding bundle revisions.

        Walks the delta chain backwards until it reaches a cached text, a
        revision stored in the underlying revlog, or nullrev, then replays
        the collected bundle deltas in order.
        """
        if rev is None:
            rev = self.rev(node)
        validated = False
        rawtext = None
        chain = []
        iterrev = rev
        # reconstruct the revision if it is from a changegroup
        while iterrev > self.repotiprev:
            if (
                self._inner._revisioncache
                and self._inner._revisioncache[1] == iterrev
            ):
                rawtext = self._inner._revisioncache[2]
                break
            chain.append(iterrev)
            # index field 3 holds the delta base of this revision
            iterrev = self.index[iterrev][3]
        if iterrev == nullrev:
            rawtext = b''
        elif rawtext is None:
            r = super(bundlerevlog, self)._rawtext(
                self.node(iterrev),
                iterrev,
            )
            __, rawtext, validated = r
        if chain:
            # text will be patched from deltas below; its hash has not
            # been checked yet
            validated = False
        while chain:
            delta = self._chunk(chain.pop())
            rawtext = mdiff.patches(rawtext, [delta])
        return rev, rawtext, validated

    def addrevision(self, *args, **kwargs):
        # bundle repositories are read-only
        raise NotImplementedError

    def addgroup(self, *args, **kwargs):
        # bundle repositories are read-only
        raise NotImplementedError

    def strip(self, *args, **kwargs):
        # bundle repositories are read-only
        raise NotImplementedError

    def checksize(self):
        # size checking is meaningless for a revlog overlaid on a stream
        raise NotImplementedError
185 |
|
186 | |||
186 |
|
187 | |||
class bundlechangelog(bundlerevlog, changelog.changelog):
    """Changelog combining the local repository with a changegroup bundle."""

    def __init__(self, opener, cgunpacker):
        changelog.changelog.__init__(self, opener)

        # changelog entries link to themselves, so the link mapper is the
        # identity function
        def identity(x):
            return x

        bundlerevlog.__init__(
            self,
            opener,
            (revlog_constants.KIND_CHANGELOG, None),
            self.radix,
            cgunpacker,
            identity,
        )
199 |
|
200 | |||
200 |
|
201 | |||
class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
    """Manifest revlog overlaid with manifest entries from a bundle.

    For changegroup version 03 streams, per-directory (tree) manifest
    sections are indexed so dirlog() can seek straight to them.
    """

    def __init__(
        self,
        nodeconstants,
        opener,
        cgunpacker,
        linkmapper,
        dirlogstarts=None,
        dir=b'',
    ):
        # XXX manifestrevlog is not actually a revlog , so mixing it with
        # bundlerevlog is not a good idea.
        manifest.manifestrevlog.__init__(self, nodeconstants, opener, tree=dir)
        bundlerevlog.__init__(
            self,
            opener,
            (revlog_constants.KIND_MANIFESTLOG, dir),
            self._revlog.radix,
            cgunpacker,
            linkmapper,
        )
        if dirlogstarts is None:
            dirlogstarts = {}
            # cg3 streams may carry tree manifests; record each
            # directory's offset in the stream so dirlog() can seek to it
            if self.bundle.version == b"03":
                dirlogstarts = _getfilestarts(self.bundle)
        self._dirlogstarts = dirlogstarts
        self._linkmapper = linkmapper

    def dirlog(self, d):
        """Return the manifest log for directory ``d``.

        Directories present in the bundle get a bundlemanifest positioned
        at their section of the stream; anything else falls back to the
        on-disk implementation.
        """
        if d in self._dirlogstarts:
            self.bundle.seek(self._dirlogstarts[d])
            return bundlemanifest(
                self.nodeconstants,
                self.opener,
                self.bundle,
                self._linkmapper,
                self._dirlogstarts,
                dir=d,
            )
        return super(bundlemanifest, self).dirlog(d)
241 |
|
242 | |||
242 |
|
243 | |||
class bundlefilelog(filelog.filelog):
    """Filelog whose revision data may come from a changegroup bundle."""

    def __init__(self, opener, path, cgunpacker, linkmapper):
        filelog.filelog.__init__(self, opener, path)
        # XXX should use the unencoded path
        target = (revlog_constants.KIND_FILELOG, path)
        self._revlog = bundlerevlog(
            opener,
            target=target,
            radix=self._revlog.radix,
            cgunpacker=cgunpacker,
            linkmapper=linkmapper,
        )
254 |
|
255 | |||
255 |
|
256 | |||
class bundlepeer(localrepo.localpeer):
    """Peer for a bundle repository; pushing into a bundle is impossible."""

    def canpush(self):
        # bundle repositories are strictly read-only
        return False
259 |
|
260 | |||
260 |
|
261 | |||
class bundlephasecache(phases.phasecache):
    """Phase cache for bundle repositories.

    Phase data computed for the bundle overlay must never be written back
    to the underlying repository, so every persistence entry point is
    disabled.
    """

    def __init__(self, *args, **kwargs):
        super(bundlephasecache, self).__init__(*args, **kwargs)
        # make sure nothing can accidentally write through our opener
        if hasattr(self, 'opener'):
            self.opener = vfsmod.readonlyvfs(self.opener)

    def write(self):
        # phase data of a bundle repository is transient: never persist it
        raise NotImplementedError

    def _write(self, fp):
        # phase data of a bundle repository is transient: never persist it
        raise NotImplementedError

    def _updateroots(self, repo, phase, newroots, tr, invalidate=True):
        # update the in-memory roots only; the base class implementation
        # would also schedule an on-disk write
        self._phaseroots[phase] = newroots
        if invalidate:
            self.invalidate()
        self.dirty = True
278 |
|
279 | |||
279 |
|
280 | |||
280 | def _getfilestarts(cgunpacker): |
|
281 | def _getfilestarts(cgunpacker): | |
281 | filespos = {} |
|
282 | filespos = {} | |
282 | for chunkdata in iter(cgunpacker.filelogheader, {}): |
|
283 | for chunkdata in iter(cgunpacker.filelogheader, {}): | |
283 | fname = chunkdata[b'filename'] |
|
284 | fname = chunkdata[b'filename'] | |
284 | filespos[fname] = cgunpacker.tell() |
|
285 | filespos[fname] = cgunpacker.tell() | |
285 | for chunk in iter(lambda: cgunpacker.deltachunk(None), {}): |
|
286 | for chunk in iter(lambda: cgunpacker.deltachunk(None), {}): | |
286 | pass |
|
287 | pass | |
287 | return filespos |
|
288 | return filespos | |
288 |
|
289 | |||
289 |
|
290 | |||
290 | class bundlerepository: |
|
291 | _bundle_repo_baseclass = object | |
|
292 | ||||
|
293 | if typing.TYPE_CHECKING: | |||
|
294 | _bundle_repo_baseclass = localrepo.localrepository | |||
|
295 | ||||
|
296 | ||||
|
297 | class bundlerepository(_bundle_repo_baseclass): | |||
291 | """A repository instance that is a union of a local repo and a bundle. |
|
298 | """A repository instance that is a union of a local repo and a bundle. | |
292 |
|
299 | |||
293 | Instances represent a read-only repository composed of a local repository |
|
300 | Instances represent a read-only repository composed of a local repository | |
294 | with the contents of a bundle file applied. The repository instance is |
|
301 | with the contents of a bundle file applied. The repository instance is | |
295 | conceptually similar to the state of a repository after an |
|
302 | conceptually similar to the state of a repository after an | |
296 | ``hg unbundle`` operation. However, the contents of the bundle are never |
|
303 | ``hg unbundle`` operation. However, the contents of the bundle are never | |
297 | applied to the actual base repository. |
|
304 | applied to the actual base repository. | |
298 |
|
305 | |||
299 | Instances constructed directly are not usable as repository objects. |
|
306 | Instances constructed directly are not usable as repository objects. | |
300 | Use instance() or makebundlerepository() to create instances. |
|
307 | Use instance() or makebundlerepository() to create instances. | |
301 | """ |
|
308 | """ | |
302 |
|
309 | |||
    def __init__(self, bundlepath, url, tempparent):
        """Prepare the bundle overlay state.

        ``bundlepath`` is the path of the bundle file to read, ``url`` the
        user-visible URL for this repository, and ``tempparent`` an
        optional temporary directory (cleaned up elsewhere).

        NOTE(review): this does not invoke the base repository class's
        ``__init__``; per the class docstring, instances are assembled by
        instance()/makebundlerepository().
        """
        self._tempparent = tempparent
        self._url = url

        # a bundle repository must never publish changesets
        self.ui.setconfig(b'phases', b'publish', False, b'bundlerepo')

        # dict with the mapping 'filename' -> position in the changegroup.
        self._cgfilespos = {}
        self._bundlefile = None
        self._cgunpacker = None
        self.tempfile = None
        f = util.posixfile(bundlepath, b"rb")
        bundle = exchange.readbundle(self.ui, f, bundlepath)

        if isinstance(bundle, bundle2.unbundle20):
            self._bundlefile = bundle

            cgpart = None
            for part in bundle.iterparts(seekable=True):
                if part.type == b'phase-heads':
                    self._handle_bundle2_phase_part(bundle, part)
                elif part.type == b'changegroup':
                    if cgpart:
                        raise NotImplementedError(
                            b"can't process multiple changegroups"
                        )
                    cgpart = part
                    self._handle_bundle2_cg_part(bundle, part)

            if not cgpart:
                raise error.Abort(_(b"No changegroups found"))

            # This is required to placate a later consumer, which expects
            # the payload offset to be at the beginning of the changegroup.
            # We need to do this after the iterparts() generator advances
            # because iterparts() will seek to end of payload after the
            # generator returns control to iterparts().
            cgpart.seek(0, os.SEEK_SET)

        elif isinstance(bundle, changegroup.cg1unpacker):
            self._handle_bundle1(bundle, bundlepath)
        else:
            raise error.Abort(
                _(b'bundle type %r cannot be read') % type(bundle)
            )
348 |
|
355 | |||
    def _handle_bundle1(self, bundle, bundlepath):
        """Install a bundle1 (cg1) stream as this repo's changegroup source."""
        if bundle.compressed():
            # rewrite the stream uncompressed to a temp file so revision
            # data can be seeked within it
            f = self._writetempbundle(bundle.read, b'.hg10un', header=b'HG10UN')
            bundle = exchange.readbundle(self.ui, f, bundlepath, self.vfs)

        self._bundlefile = bundle
        self._cgunpacker = bundle

        self.firstnewrev = self.changelog.repotiprev + 1
        # bundle revisions are shown as draft, never published
        phases.retractboundary(
            self,
            None,
            phases.draft,
            [ctx.node() for ctx in self[self.firstnewrev :]],
        )
364 |
|
371 | |||
    def _handle_bundle2_cg_part(self, bundle, part):
        """Install a bundle2 'changegroup' part as the changegroup source.

        Validates the target phase and changegroup version advertised by
        the part, decompresses the payload to a temporary file when
        needed, and retracts the phase boundary so the new revisions show
        up with the requested phase.
        """
        assert part.type == b'changegroup'
        cgstream = part
        targetphase = part.params.get(b'targetphase')
        try:
            targetphase = int(targetphase)
        except TypeError:
            # parameter absent (None): handled by the draft fallback below
            pass
        if targetphase is None:
            targetphase = phases.draft
        if targetphase not in phases.allphases:
            m = _(b'unsupported targetphase: %d')
            m %= targetphase
            raise error.Abort(m)
        version = part.params.get(b'version', b'01')
        legalcgvers = changegroup.supportedincomingversions(self)
        if version not in legalcgvers:
            msg = _(b'Unsupported changegroup version: %s')
            raise error.Abort(msg % version)
        if bundle.compressed():
            # the unpacker needs to seek, which requires an uncompressed
            # copy of the payload on disk
            cgstream = self._writetempbundle(part.read, b'.cg%sun' % version)

        self._cgunpacker = changegroup.getunbundler(version, cgstream, b'UN')

        self.firstnewrev = self.changelog.repotiprev + 1
        phases.retractboundary(
            self,
            None,
            targetphase,
            [ctx.node() for ctx in self[self.firstnewrev :]],
        )
396 |
|
403 | |||
397 | def _handle_bundle2_phase_part(self, bundle, part): |
|
404 | def _handle_bundle2_phase_part(self, bundle, part): | |
398 | assert part.type == b'phase-heads' |
|
405 | assert part.type == b'phase-heads' | |
399 |
|
406 | |||
400 | unfi = self.unfiltered() |
|
407 | unfi = self.unfiltered() | |
401 | headsbyphase = phases.binarydecode(part) |
|
408 | headsbyphase = phases.binarydecode(part) | |
402 | phases.updatephases(unfi, lambda: None, headsbyphase) |
|
409 | phases.updatephases(unfi, lambda: None, headsbyphase) | |
403 |
|
410 | |||
    def _writetempbundle(self, readfn, suffix, header=b''):
        """Write a temporary file to disk

        ``readfn`` is a callable returning successive chunks of payload
        (empty when exhausted).  Returns a read handle positioned at the
        start of the new file.
        """
        fdtemp, temp = self.vfs.mkstemp(prefix=b"hg-bundle-", suffix=suffix)
        # remember the path so it can be cleaned up when the repo closes
        self.tempfile = temp

        with os.fdopen(fdtemp, 'wb') as fptemp:
            fptemp.write(header)
            while True:
                # copy in 256 KiB chunks until the source is exhausted
                chunk = readfn(2**18)
                if not chunk:
                    break
                fptemp.write(chunk)

        return self.vfs.open(self.tempfile, mode=b"rb")
418 |
|
425 | |||
    @localrepo.unfilteredpropertycache
    def _phasecache(self):
        # use the read-only, non-persisting bundle variant of the phase
        # cache (see bundlephasecache)
        return bundlephasecache(self, self._phasedefaults)
422 |
|
429 | |||
    @localrepo.unfilteredpropertycache
    def changelog(self):
        """The changelog, lazily built from the changegroup stream."""
        # consume the header if it exists
        self._cgunpacker.changelogheader()
        c = bundlechangelog(self.svfs, self._cgunpacker)
        # bundlechangelog consumed the changelog entries, so the stream
        # now sits at the start of the manifest section; record it
        self.manstart = self._cgunpacker.tell()
        return c
430 |
|
437 | |||
    def _refreshchangelog(self):
        # changelog for bundle repo are not filecache, this method is not
        # applicable.
        pass
435 |
|
442 | |||
    @localrepo.unfilteredpropertycache
    def manifestlog(self):
        """The manifest log, lazily built from the changegroup stream."""
        self._cgunpacker.seek(self.manstart)
        # consume the header if it exists
        self._cgunpacker.manifestheader()
        linkmapper = self.unfiltered().changelog.rev
        rootstore = bundlemanifest(
            self.nodeconstants, self.svfs, self._cgunpacker, linkmapper
        )
        # bundlemanifest consumed the manifest entries; record where the
        # file section of the stream begins
        self.filestart = self._cgunpacker.tell()

        return manifest.manifestlog(
            self.svfs, self, rootstore, self.narrowmatch()
        )
450 |
|
457 | |||
    def _consumemanifest(self):
        """Consumes the manifest portion of the bundle, setting filestart so the
        file portion can be read."""
        self._cgunpacker.seek(self.manstart)
        self._cgunpacker.manifestheader()
        # skip over every manifest delta without using it
        for delta in self._cgunpacker.deltaiter():
            pass
        self.filestart = self._cgunpacker.tell()
459 |
|
466 | |||
    @localrepo.unfilteredpropertycache
    def manstart(self):
        # accessing self.changelog sets the real manstart attribute on the
        # instance, which shadows this property on subsequent lookups
        self.changelog
        return self.manstart
464 |
|
471 | |||
@localrepo.unfilteredpropertycache
def filestart(self):
    """Offset of the file section within the changegroup stream."""
    # Building the manifestlog normally consumes the manifest part of
    # the changegroup and records where the file data begins.
    self.manifestlog

    # If filestart was not set by self.manifestlog, that means the
    # manifestlog implementation did not consume the manifests from the
    # changegroup (ex: it might be consuming trees from a separate
    # bundle2 part instead). So we need to manually consume it.
    if 'filestart' not in vars(self):
        self._consumemanifest()

    return self.filestart
477 |
|
484 | |||
def url(self):
    """Return the ``bundle:`` URL this repository was opened from."""
    bundle_url = self._url
    return bundle_url
480 |
|
487 | |||
def file(self, f):
    """Return a filelog for ``f``, served from the bundle when present."""
    if not self._cgfilespos:
        # First access: index the start offset of every file's data in
        # the changegroup.
        self._cgunpacker.seek(self.filestart)
        self._cgfilespos = _getfilestarts(self._cgunpacker)

    if f not in self._cgfilespos:
        # Not part of the bundle; defer to the backing repository.
        return super(bundlerepository, self).file(f)

    self._cgunpacker.seek(self._cgfilespos[f])
    linkmapper = self.unfiltered().changelog.rev
    return bundlefilelog(self.svfs, f, self._cgunpacker, linkmapper)
492 |
|
499 | |||
def close(self):
    """Close assigned bundle file immediately.

    Also removes the temporary on-disk copy of the bundle (if one was
    made) and the throwaway parent repository (if one was created).
    """
    self._bundlefile.close()
    tmp = self.tempfile
    if tmp is not None:
        self.vfs.unlink(tmp)
    if self._tempparent:
        # ignore_errors=True: best-effort cleanup of the temp repo dir.
        shutil.rmtree(self._tempparent, True)
500 |
|
507 | |||
def cancopy(self):
    """Bundle repositories can never be cloned by copying files."""
    return False
503 |
|
510 | |||
def peer(self, path=None, remotehidden=False):
    """Return an in-process peer wrapping this bundle repository."""
    wrapped = bundlepeer(self, path=path, remotehidden=remotehidden)
    return wrapped
506 |
|
513 | |||
def getcwd(self):
    """Return the process working directory (always outside the repo)."""
    cwd = encoding.getcwd()
    return cwd
509 |
|
516 | |||
# Check if parents exist in localrepo before setting
def setparents(self, p1, p2=None):
    """Set working-directory parents, warning about bundle-only nodes.

    A parent whose revision lies beyond the underlying repository's tip
    exists only inside the bundle, so setting it would dangle once the
    bundle goes away — warn the user about each such parent.
    """
    if p2 is None:
        p2 = self.nullid
    # Resolve both parents up front so lookup errors surface before
    # any warning is printed (matching the original evaluation order).
    p1rev = self.changelog.rev(p1)
    p2rev = self.changelog.rev(p2)
    msg = _(b"setting parent to node %s that only exists in the bundle\n")
    tiprev = self.changelog.repotiprev
    for node, rev in ((p1, p1rev), (p2, p2rev)):
        if tiprev < rev:
            self.ui.warn(msg % hex(node))
    return super(bundlerepository, self).setparents(p1, p2)
522 |
|
529 | |||
523 |
|
530 | |||
def instance(ui, path, create, intents=None, createopts=None):
    """Open a bundle repository for ``path``.

    ``path`` is either a plain bundle file path or a
    ``bundle:<repo>+<bundle>`` URL. ``create`` is rejected: bundle
    repositories are read-only views. ``intents`` and ``createopts``
    are accepted for interface compatibility and unused.
    """
    if create:
        raise error.Abort(_(b'cannot create new bundle repository'))
    # internal config: bundle.mainreporoot
    parentpath = ui.config(b"bundle", b"mainreporoot")
    if not parentpath:
        # Fall back to whatever repository encloses the cwd, if any.
        found = cmdutil.findrepo(encoding.getcwd())
        parentpath = found if found is not None else b''
    if parentpath:
        # Try to make the full path relative so we get a nice, short URL.
        # In particular, we don't want temp dir names in test outputs.
        cwd = encoding.getcwd()
        if parentpath == cwd:
            parentpath = b''
        else:
            prefix = pathutil.normasprefix(cwd)
            if parentpath.startswith(prefix):
                parentpath = parentpath[len(prefix):]
    u = urlutil.url(path)
    path = u.localpath()
    if u.scheme == b'bundle':
        # "repo+bundle" form; without a '+' the whole path is the bundle.
        head, sep, tail = path.partition(b"+")
        if sep:
            repopath, bundlename = head, tail
        else:
            repopath, bundlename = parentpath, head
    else:
        repopath, bundlename = parentpath, path

    return makebundlerepository(ui, repopath, bundlename)
556 |
|
563 | |||
557 |
|
564 | |||
def makebundlerepository(ui, repopath, bundlepath):
    """Make a bundle repository object based on repo and bundle paths.

    ``repopath`` locates the repository backing the bundle; if it cannot
    be opened (other than for an unsupported-requirements error), a
    temporary empty repository is created instead so the bundle can still
    be inspected on its own. ``bundlepath`` is the bundle file itself.
    """
    if repopath:
        url = b'bundle:%s+%s' % (util.expandpath(repopath), bundlepath)
    else:
        url = b'bundle:%s' % bundlepath

    # Because we can't make any guarantees about the type of the base
    # repository, we can't have a static class representing the bundle
    # repository. We also can't make any guarantees about how to even
    # call the base repository's constructor!
    #
    # So, our strategy is to go through ``localrepo.instance()`` to construct
    # a repo instance. Then, we dynamically create a new type derived from
    # both it and our ``bundlerepository`` class which overrides some
    # functionality. We then change the type of the constructed repository
    # to this new type and initialize the bundle-specific bits of it.

    try:
        repo = localrepo.instance(ui, repopath, create=False)
        tempparent = None
    except error.RequirementError:
        raise  # no fallback if the backing repo is unsupported
    except error.RepoError:
        # No usable backing repo: make a throwaway one in a temp dir;
        # it is removed again in bundlerepository.close().
        tempparent = pycompat.mkdtemp()
        try:
            repo = localrepo.instance(ui, tempparent, create=True)
        except Exception:
            shutil.rmtree(tempparent)
            raise

    class derivedbundlerepository(bundlerepository, repo.__class__):
        pass

    repo.__class__ = derivedbundlerepository
    # Run bundlerepository's initializer on the already-constructed repo;
    # it wires up the bundle file and remembers the temp parent (if any).
    bundlerepository.__init__(repo, bundlepath, url, tempparent)

    return repo
596 |
|
603 | |||
597 |
|
604 | |||
class bundletransactionmanager:
    """No-op transaction manager used for pulls into bundle repos.

    Bundle repositories are read-only, so there is never a real
    transaction to open; committing or releasing one is a logic error.
    """

    def transaction(self):
        # There is never an active transaction.
        return None

    def close(self):
        raise NotImplementedError

    def release(self):
        raise NotImplementedError
607 |
|
614 | |||
608 |
|
615 | |||
def getremotechanges(
    ui, repo, peer, onlyheads=None, bundlename=None, force=False
):
    """obtains a bundle of changes incoming from peer

    "onlyheads" restricts the returned changes to those reachable from the
    specified heads.
    "bundlename", if given, stores the bundle to this file path permanently;
    otherwise it's stored to a temp file and gets deleted again when you call
    the returned "cleanupfn".
    "force" indicates whether to proceed on unrelated repos.

    Returns a tuple (local, csets, cleanupfn):

    "local" is a local repo from which to obtain the actual incoming
    changesets; it is a bundlerepo for the obtained bundle when the
    original "peer" is remote.
    "csets" lists the incoming changeset node ids.
    "cleanupfn" must be called without arguments when you're done processing
    the changes; it closes both the original "peer" and the one returned
    here.
    """
    tmp = discovery.findcommonincoming(repo, peer, heads=onlyheads, force=force)
    common, incoming, rheads = tmp
    if not incoming:
        # Nothing to pull; a stale bundle file from an earlier run would
        # be misleading, so remove it best-effort.
        try:
            if bundlename:
                os.unlink(bundlename)
        except OSError:
            pass
        return repo, [], peer.close

    # Only keep remote heads we don't already have.
    commonset = set(common)
    rheads = [x for x in rheads if x not in commonset]

    bundle = None
    bundlerepo = None
    localrepo = peer.local()
    if bundlename or not localrepo:
        # create a bundle (uncompressed if peer repo is not local)

        # developer config: devel.legacy.exchange
        legexc = ui.configlist(b'devel', b'legacy.exchange')
        forcebundle1 = b'bundle2' not in legexc and b'bundle1' in legexc
        canbundle2 = (
            not forcebundle1
            and peer.capable(b'getbundle')
            and peer.capable(b'bundle2')
        )
        if canbundle2:
            with peer.commandexecutor() as e:
                b2 = e.callcommand(
                    b'getbundle',
                    {
                        b'source': b'incoming',
                        b'common': common,
                        b'heads': rheads,
                        b'bundlecaps': exchange.caps20to10(
                            repo, role=b'client'
                        ),
                        b'cg': True,
                    },
                ).result()

                # Stream the raw bundle2 chunks straight to disk.
                fname = bundle = changegroup.writechunks(
                    ui, b2._forwardchunks(), bundlename
                )
        else:
            if peer.capable(b'getbundle'):
                with peer.commandexecutor() as e:
                    cg = e.callcommand(
                        b'getbundle',
                        {
                            b'source': b'incoming',
                            b'common': common,
                            b'heads': rheads,
                        },
                    ).result()
            elif onlyheads is None and not peer.capable(b'changegroupsubset'):
                # compat with older servers when pulling all remote heads

                with peer.commandexecutor() as e:
                    cg = e.callcommand(
                        b'changegroup',
                        {
                            b'nodes': incoming,
                            b'source': b'incoming',
                        },
                    ).result()

                # 'changegroup' always returns everything reachable, so no
                # head restriction applies below.
                rheads = None
            else:
                with peer.commandexecutor() as e:
                    cg = e.callcommand(
                        b'changegroupsubset',
                        {
                            b'bases': incoming,
                            b'heads': rheads,
                            b'source': b'incoming',
                        },
                    ).result()

            # Compressed bundle if the peer is local (cheap to read back),
            # uncompressed otherwise.
            if localrepo:
                bundletype = b"HG10BZ"
            else:
                bundletype = b"HG10UN"
            fname = bundle = bundle2.writebundle(ui, cg, bundlename, bundletype)
        # keep written bundle?
        if bundlename:
            bundle = None
        if not localrepo:
            # use the created uncompressed bundlerepo
            localrepo = bundlerepo = makebundlerepository(
                repo.baseui, repo.root, fname
            )

            # this repo contains local and peer now, so filter out local again
            common = repo.heads()
    if localrepo:
        # Part of common may be remotely filtered
        # So use an unfiltered version
        # The discovery process probably need cleanup to avoid that
        localrepo = localrepo.unfiltered()

    csets = localrepo.changelog.findmissing(common, rheads)

    if bundlerepo:
        # Pull phase information for the nodes that only exist in the
        # bundle so they are displayed with the right phase.
        reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev :]]

        with peer.commandexecutor() as e:
            remotephases = e.callcommand(
                b'listkeys',
                {
                    b'namespace': b'phases',
                },
            ).result()

        pullop = exchange.pulloperation(
            bundlerepo, peer, path=None, heads=reponodes
        )
        # Phase application expects a transaction manager; supply the
        # read-only no-op one since nothing is written to the bundle repo.
        pullop.trmanager = bundletransactionmanager()
        exchange._pullapplyphases(pullop, remotephases)

    def cleanup():
        # Close the bundle repo (if created), delete the temp bundle file
        # (if not kept permanently), and close the peer.
        if bundlerepo:
            bundlerepo.close()
        if bundle:
            os.unlink(bundle)
        peer.close()

    return (localrepo, csets, cleanup)
General Comments 0
You need to be logged in to leave comments.
Login now