##// END OF EJS Templates
narrow: move changegroup.supportedoutgoingversions() override to core...
Martin von Zweigbergk -
r36483:94709406 default
parent child Browse files
Show More
@@ -1,376 +1,366 b''
1 # narrowchangegroup.py - narrow clone changegroup creation and consumption
1 # narrowchangegroup.py - narrow clone changegroup creation and consumption
2 #
2 #
3 # Copyright 2017 Google, Inc.
3 # Copyright 2017 Google, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from mercurial.i18n import _
10 from mercurial.i18n import _
11 from mercurial import (
11 from mercurial import (
12 changegroup,
12 changegroup,
13 error,
13 error,
14 extensions,
14 extensions,
15 manifest,
15 manifest,
16 mdiff,
16 mdiff,
17 node,
17 node,
18 revlog,
18 revlog,
19 util,
19 util,
20 )
20 )
21
21
22 def setup():
22 def setup():
23
23
24 def supportedoutgoingversions(orig, repo):
25 versions = orig(repo)
26 if changegroup.NARROW_REQUIREMENT in repo.requirements:
27 versions.discard('01')
28 versions.discard('02')
29 return versions
30
31 extensions.wrapfunction(changegroup, 'supportedoutgoingversions',
32 supportedoutgoingversions)
33
34 def prune(orig, self, revlog, missing, commonrevs):
24 def prune(orig, self, revlog, missing, commonrevs):
35 if isinstance(revlog, manifest.manifestrevlog):
25 if isinstance(revlog, manifest.manifestrevlog):
36 matcher = getattr(self._repo, 'narrowmatch',
26 matcher = getattr(self._repo, 'narrowmatch',
37 getattr(self, '_narrow_matcher', None))
27 getattr(self, '_narrow_matcher', None))
38 if (matcher is not None and
28 if (matcher is not None and
39 not matcher().visitdir(revlog._dir[:-1] or '.')):
29 not matcher().visitdir(revlog._dir[:-1] or '.')):
40 return []
30 return []
41 return orig(self, revlog, missing, commonrevs)
31 return orig(self, revlog, missing, commonrevs)
42
32
43 extensions.wrapfunction(changegroup.cg1packer, 'prune', prune)
33 extensions.wrapfunction(changegroup.cg1packer, 'prune', prune)
44
34
45 def generatefiles(orig, self, changedfiles, linknodes, commonrevs,
35 def generatefiles(orig, self, changedfiles, linknodes, commonrevs,
46 source):
36 source):
47 matcher = getattr(self._repo, 'narrowmatch',
37 matcher = getattr(self._repo, 'narrowmatch',
48 getattr(self, '_narrow_matcher', None))
38 getattr(self, '_narrow_matcher', None))
49 if matcher is not None:
39 if matcher is not None:
50 narrowmatch = matcher()
40 narrowmatch = matcher()
51 changedfiles = [f for f in changedfiles if narrowmatch(f)]
41 changedfiles = [f for f in changedfiles if narrowmatch(f)]
52 if getattr(self, 'is_shallow', False):
42 if getattr(self, 'is_shallow', False):
53 # See comment in generate() for why this sadness is a thing.
43 # See comment in generate() for why this sadness is a thing.
54 mfdicts = self._mfdicts
44 mfdicts = self._mfdicts
55 del self._mfdicts
45 del self._mfdicts
56 # In a shallow clone, the linknodes callback needs to also include
46 # In a shallow clone, the linknodes callback needs to also include
57 # those file nodes that are in the manifests we sent but weren't
47 # those file nodes that are in the manifests we sent but weren't
58 # introduced by those manifests.
48 # introduced by those manifests.
59 commonctxs = [self._repo[c] for c in commonrevs]
49 commonctxs = [self._repo[c] for c in commonrevs]
60 oldlinknodes = linknodes
50 oldlinknodes = linknodes
61 clrev = self._repo.changelog.rev
51 clrev = self._repo.changelog.rev
62 def linknodes(flog, fname):
52 def linknodes(flog, fname):
63 for c in commonctxs:
53 for c in commonctxs:
64 try:
54 try:
65 fnode = c.filenode(fname)
55 fnode = c.filenode(fname)
66 self.clrev_to_localrev[c.rev()] = flog.rev(fnode)
56 self.clrev_to_localrev[c.rev()] = flog.rev(fnode)
67 except error.ManifestLookupError:
57 except error.ManifestLookupError:
68 pass
58 pass
69 links = oldlinknodes(flog, fname)
59 links = oldlinknodes(flog, fname)
70 if len(links) != len(mfdicts):
60 if len(links) != len(mfdicts):
71 for mf, lr in mfdicts:
61 for mf, lr in mfdicts:
72 fnode = mf.get(fname, None)
62 fnode = mf.get(fname, None)
73 if fnode in links:
63 if fnode in links:
74 links[fnode] = min(links[fnode], lr, key=clrev)
64 links[fnode] = min(links[fnode], lr, key=clrev)
75 elif fnode:
65 elif fnode:
76 links[fnode] = lr
66 links[fnode] = lr
77 return links
67 return links
78 return orig(self, changedfiles, linknodes, commonrevs, source)
68 return orig(self, changedfiles, linknodes, commonrevs, source)
79 extensions.wrapfunction(
69 extensions.wrapfunction(
80 changegroup.cg1packer, 'generatefiles', generatefiles)
70 changegroup.cg1packer, 'generatefiles', generatefiles)
81
71
82 def ellipsisdata(packer, rev, revlog_, p1, p2, data, linknode):
72 def ellipsisdata(packer, rev, revlog_, p1, p2, data, linknode):
83 n = revlog_.node(rev)
73 n = revlog_.node(rev)
84 p1n, p2n = revlog_.node(p1), revlog_.node(p2)
74 p1n, p2n = revlog_.node(p1), revlog_.node(p2)
85 flags = revlog_.flags(rev)
75 flags = revlog_.flags(rev)
86 flags |= revlog.REVIDX_ELLIPSIS
76 flags |= revlog.REVIDX_ELLIPSIS
87 meta = packer.builddeltaheader(
77 meta = packer.builddeltaheader(
88 n, p1n, p2n, node.nullid, linknode, flags)
78 n, p1n, p2n, node.nullid, linknode, flags)
89 # TODO: try and actually send deltas for ellipsis data blocks
79 # TODO: try and actually send deltas for ellipsis data blocks
90 diffheader = mdiff.trivialdiffheader(len(data))
80 diffheader = mdiff.trivialdiffheader(len(data))
91 l = len(meta) + len(diffheader) + len(data)
81 l = len(meta) + len(diffheader) + len(data)
92 return ''.join((changegroup.chunkheader(l),
82 return ''.join((changegroup.chunkheader(l),
93 meta,
83 meta,
94 diffheader,
84 diffheader,
95 data))
85 data))
96
86
97 def close(orig, self):
87 def close(orig, self):
98 getattr(self, 'clrev_to_localrev', {}).clear()
88 getattr(self, 'clrev_to_localrev', {}).clear()
99 if getattr(self, 'next_clrev_to_localrev', {}):
89 if getattr(self, 'next_clrev_to_localrev', {}):
100 self.clrev_to_localrev = self.next_clrev_to_localrev
90 self.clrev_to_localrev = self.next_clrev_to_localrev
101 del self.next_clrev_to_localrev
91 del self.next_clrev_to_localrev
102 self.changelog_done = True
92 self.changelog_done = True
103 return orig(self)
93 return orig(self)
104 extensions.wrapfunction(changegroup.cg1packer, 'close', close)
94 extensions.wrapfunction(changegroup.cg1packer, 'close', close)
105
95
106 # In a perfect world, we'd generate better ellipsis-ified graphs
96 # In a perfect world, we'd generate better ellipsis-ified graphs
107 # for non-changelog revlogs. In practice, we haven't started doing
97 # for non-changelog revlogs. In practice, we haven't started doing
108 # that yet, so the resulting DAGs for the manifestlog and filelogs
98 # that yet, so the resulting DAGs for the manifestlog and filelogs
109 # are actually full of bogus parentage on all the ellipsis
99 # are actually full of bogus parentage on all the ellipsis
110 # nodes. This has the side effect that, while the contents are
100 # nodes. This has the side effect that, while the contents are
111 # correct, the individual DAGs might be completely out of whack in
101 # correct, the individual DAGs might be completely out of whack in
112 # a case like 882681bc3166 and its ancestors (back about 10
102 # a case like 882681bc3166 and its ancestors (back about 10
113 # revisions or so) in the main hg repo.
103 # revisions or so) in the main hg repo.
114 #
104 #
115 # The one invariant we *know* holds is that the new (potentially
105 # The one invariant we *know* holds is that the new (potentially
116 # bogus) DAG shape will be valid if we order the nodes in the
106 # bogus) DAG shape will be valid if we order the nodes in the
117 # order that they're introduced in dramatis personae by the
107 # order that they're introduced in dramatis personae by the
118 # changelog, so what we do is we sort the non-changelog histories
108 # changelog, so what we do is we sort the non-changelog histories
119 # by the order in which they are used by the changelog.
109 # by the order in which they are used by the changelog.
120 def _sortgroup(orig, self, revlog, nodelist, lookup):
110 def _sortgroup(orig, self, revlog, nodelist, lookup):
121 if not util.safehasattr(self, 'full_nodes') or not self.clnode_to_rev:
111 if not util.safehasattr(self, 'full_nodes') or not self.clnode_to_rev:
122 return orig(self, revlog, nodelist, lookup)
112 return orig(self, revlog, nodelist, lookup)
123 key = lambda n: self.clnode_to_rev[lookup(n)]
113 key = lambda n: self.clnode_to_rev[lookup(n)]
124 return [revlog.rev(n) for n in sorted(nodelist, key=key)]
114 return [revlog.rev(n) for n in sorted(nodelist, key=key)]
125
115
126 extensions.wrapfunction(changegroup.cg1packer, '_sortgroup', _sortgroup)
116 extensions.wrapfunction(changegroup.cg1packer, '_sortgroup', _sortgroup)
127
117
128 def generate(orig, self, commonrevs, clnodes, fastpathlinkrev, source):
118 def generate(orig, self, commonrevs, clnodes, fastpathlinkrev, source):
129 '''yield a sequence of changegroup chunks (strings)'''
119 '''yield a sequence of changegroup chunks (strings)'''
130 # Note: other than delegating to orig, the only deviation in
120 # Note: other than delegating to orig, the only deviation in
131 # logic from normal hg's generate is marked with BEGIN/END
121 # logic from normal hg's generate is marked with BEGIN/END
132 # NARROW HACK.
122 # NARROW HACK.
133 if not util.safehasattr(self, 'full_nodes'):
123 if not util.safehasattr(self, 'full_nodes'):
134 # not sending a narrow bundle
124 # not sending a narrow bundle
135 for x in orig(self, commonrevs, clnodes, fastpathlinkrev, source):
125 for x in orig(self, commonrevs, clnodes, fastpathlinkrev, source):
136 yield x
126 yield x
137 return
127 return
138
128
139 repo = self._repo
129 repo = self._repo
140 cl = repo.changelog
130 cl = repo.changelog
141 mfl = repo.manifestlog
131 mfl = repo.manifestlog
142 mfrevlog = mfl._revlog
132 mfrevlog = mfl._revlog
143
133
144 clrevorder = {}
134 clrevorder = {}
145 mfs = {} # needed manifests
135 mfs = {} # needed manifests
146 fnodes = {} # needed file nodes
136 fnodes = {} # needed file nodes
147 changedfiles = set()
137 changedfiles = set()
148
138
149 # Callback for the changelog, used to collect changed files and manifest
139 # Callback for the changelog, used to collect changed files and manifest
150 # nodes.
140 # nodes.
151 # Returns the linkrev node (identity in the changelog case).
141 # Returns the linkrev node (identity in the changelog case).
152 def lookupcl(x):
142 def lookupcl(x):
153 c = cl.read(x)
143 c = cl.read(x)
154 clrevorder[x] = len(clrevorder)
144 clrevorder[x] = len(clrevorder)
155 # BEGIN NARROW HACK
145 # BEGIN NARROW HACK
156 #
146 #
157 # Only update mfs if x is going to be sent. Otherwise we
147 # Only update mfs if x is going to be sent. Otherwise we
158 # end up with bogus linkrevs specified for manifests and
148 # end up with bogus linkrevs specified for manifests and
159 # we skip some manifest nodes that we should otherwise
149 # we skip some manifest nodes that we should otherwise
160 # have sent.
150 # have sent.
161 if x in self.full_nodes or cl.rev(x) in self.precomputed_ellipsis:
151 if x in self.full_nodes or cl.rev(x) in self.precomputed_ellipsis:
162 n = c[0]
152 n = c[0]
163 # record the first changeset introducing this manifest version
153 # record the first changeset introducing this manifest version
164 mfs.setdefault(n, x)
154 mfs.setdefault(n, x)
165 # Set this narrow-specific dict so we have the lowest manifest
155 # Set this narrow-specific dict so we have the lowest manifest
166 # revnum to look up for this cl revnum. (Part of mapping
156 # revnum to look up for this cl revnum. (Part of mapping
167 # changelog ellipsis parents to manifest ellipsis parents)
157 # changelog ellipsis parents to manifest ellipsis parents)
168 self.next_clrev_to_localrev.setdefault(cl.rev(x),
158 self.next_clrev_to_localrev.setdefault(cl.rev(x),
169 mfrevlog.rev(n))
159 mfrevlog.rev(n))
170 # We can't trust the changed files list in the changeset if the
160 # We can't trust the changed files list in the changeset if the
171 # client requested a shallow clone.
161 # client requested a shallow clone.
172 if self.is_shallow:
162 if self.is_shallow:
173 changedfiles.update(mfl[c[0]].read().keys())
163 changedfiles.update(mfl[c[0]].read().keys())
174 else:
164 else:
175 changedfiles.update(c[3])
165 changedfiles.update(c[3])
176 # END NARROW HACK
166 # END NARROW HACK
177 # Record a complete list of potentially-changed files in
167 # Record a complete list of potentially-changed files in
178 # this manifest.
168 # this manifest.
179 return x
169 return x
180
170
181 self._verbosenote(_('uncompressed size of bundle content:\n'))
171 self._verbosenote(_('uncompressed size of bundle content:\n'))
182 size = 0
172 size = 0
183 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
173 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
184 size += len(chunk)
174 size += len(chunk)
185 yield chunk
175 yield chunk
186 self._verbosenote(_('%8.i (changelog)\n') % size)
176 self._verbosenote(_('%8.i (changelog)\n') % size)
187
177
188 # We need to make sure that the linkrev in the changegroup refers to
178 # We need to make sure that the linkrev in the changegroup refers to
189 # the first changeset that introduced the manifest or file revision.
179 # the first changeset that introduced the manifest or file revision.
190 # The fastpath is usually safer than the slowpath, because the filelogs
180 # The fastpath is usually safer than the slowpath, because the filelogs
191 # are walked in revlog order.
181 # are walked in revlog order.
192 #
182 #
193 # When taking the slowpath with reorder=None and the manifest revlog
183 # When taking the slowpath with reorder=None and the manifest revlog
194 # uses generaldelta, the manifest may be walked in the "wrong" order.
184 # uses generaldelta, the manifest may be walked in the "wrong" order.
195 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
185 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
196 # cc0ff93d0c0c).
186 # cc0ff93d0c0c).
197 #
187 #
198 # When taking the fastpath, we are only vulnerable to reordering
188 # When taking the fastpath, we are only vulnerable to reordering
199 # of the changelog itself. The changelog never uses generaldelta, so
189 # of the changelog itself. The changelog never uses generaldelta, so
200 # it is only reordered when reorder=True. To handle this case, we
190 # it is only reordered when reorder=True. To handle this case, we
201 # simply take the slowpath, which already has the 'clrevorder' logic.
191 # simply take the slowpath, which already has the 'clrevorder' logic.
202 # This was also fixed in cc0ff93d0c0c.
192 # This was also fixed in cc0ff93d0c0c.
203 fastpathlinkrev = fastpathlinkrev and not self._reorder
193 fastpathlinkrev = fastpathlinkrev and not self._reorder
204 # Treemanifests don't work correctly with fastpathlinkrev
194 # Treemanifests don't work correctly with fastpathlinkrev
205 # either, because we don't discover which directory nodes to
195 # either, because we don't discover which directory nodes to
206 # send along with files. This could probably be fixed.
196 # send along with files. This could probably be fixed.
207 fastpathlinkrev = fastpathlinkrev and (
197 fastpathlinkrev = fastpathlinkrev and (
208 'treemanifest' not in repo.requirements)
198 'treemanifest' not in repo.requirements)
209 # Shallow clones also don't work correctly with fastpathlinkrev
199 # Shallow clones also don't work correctly with fastpathlinkrev
210 # because file nodes may need to be sent for a manifest even if they
200 # because file nodes may need to be sent for a manifest even if they
211 # weren't introduced by that manifest.
201 # weren't introduced by that manifest.
212 fastpathlinkrev = fastpathlinkrev and not self.is_shallow
202 fastpathlinkrev = fastpathlinkrev and not self.is_shallow
213
203
214 for chunk in self.generatemanifests(commonrevs, clrevorder,
204 for chunk in self.generatemanifests(commonrevs, clrevorder,
215 fastpathlinkrev, mfs, fnodes, source):
205 fastpathlinkrev, mfs, fnodes, source):
216 yield chunk
206 yield chunk
217 # BEGIN NARROW HACK
207 # BEGIN NARROW HACK
218 mfdicts = None
208 mfdicts = None
219 if self.is_shallow:
209 if self.is_shallow:
220 mfdicts = [(self._repo.manifestlog[n].read(), lr)
210 mfdicts = [(self._repo.manifestlog[n].read(), lr)
221 for (n, lr) in mfs.iteritems()]
211 for (n, lr) in mfs.iteritems()]
222 # END NARROW HACK
212 # END NARROW HACK
223 mfs.clear()
213 mfs.clear()
224 clrevs = set(cl.rev(x) for x in clnodes)
214 clrevs = set(cl.rev(x) for x in clnodes)
225
215
226 if not fastpathlinkrev:
216 if not fastpathlinkrev:
227 def linknodes(unused, fname):
217 def linknodes(unused, fname):
228 return fnodes.get(fname, {})
218 return fnodes.get(fname, {})
229 else:
219 else:
230 cln = cl.node
220 cln = cl.node
231 def linknodes(filerevlog, fname):
221 def linknodes(filerevlog, fname):
232 llr = filerevlog.linkrev
222 llr = filerevlog.linkrev
233 fln = filerevlog.node
223 fln = filerevlog.node
234 revs = ((r, llr(r)) for r in filerevlog)
224 revs = ((r, llr(r)) for r in filerevlog)
235 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
225 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
236
226
237 # BEGIN NARROW HACK
227 # BEGIN NARROW HACK
238 #
228 #
239 # We need to pass the mfdicts variable down into
229 # We need to pass the mfdicts variable down into
240 # generatefiles(), but more than one command might have
230 # generatefiles(), but more than one command might have
241 # wrapped generatefiles so we can't modify the function
231 # wrapped generatefiles so we can't modify the function
242 # signature. Instead, we pass the data to ourselves using an
232 # signature. Instead, we pass the data to ourselves using an
243 # instance attribute. I'm sorry.
233 # instance attribute. I'm sorry.
244 self._mfdicts = mfdicts
234 self._mfdicts = mfdicts
245 # END NARROW HACK
235 # END NARROW HACK
246 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
236 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
247 source):
237 source):
248 yield chunk
238 yield chunk
249
239
250 yield self.close()
240 yield self.close()
251
241
252 if clnodes:
242 if clnodes:
253 repo.hook('outgoing', node=node.hex(clnodes[0]), source=source)
243 repo.hook('outgoing', node=node.hex(clnodes[0]), source=source)
254 extensions.wrapfunction(changegroup.cg1packer, 'generate', generate)
244 extensions.wrapfunction(changegroup.cg1packer, 'generate', generate)
255
245
256 def revchunk(orig, self, revlog, rev, prev, linknode):
246 def revchunk(orig, self, revlog, rev, prev, linknode):
257 if not util.safehasattr(self, 'full_nodes'):
247 if not util.safehasattr(self, 'full_nodes'):
258 # not sending a narrow changegroup
248 # not sending a narrow changegroup
259 for x in orig(self, revlog, rev, prev, linknode):
249 for x in orig(self, revlog, rev, prev, linknode):
260 yield x
250 yield x
261 return
251 return
262 # build up some mapping information that's useful later. See
252 # build up some mapping information that's useful later. See
263 # the local() nested function below.
253 # the local() nested function below.
264 if not self.changelog_done:
254 if not self.changelog_done:
265 self.clnode_to_rev[linknode] = rev
255 self.clnode_to_rev[linknode] = rev
266 linkrev = rev
256 linkrev = rev
267 self.clrev_to_localrev[linkrev] = rev
257 self.clrev_to_localrev[linkrev] = rev
268 else:
258 else:
269 linkrev = self.clnode_to_rev[linknode]
259 linkrev = self.clnode_to_rev[linknode]
270 self.clrev_to_localrev[linkrev] = rev
260 self.clrev_to_localrev[linkrev] = rev
271 # This is a node to send in full, because the changeset it
261 # This is a node to send in full, because the changeset it
272 # corresponds to was a full changeset.
262 # corresponds to was a full changeset.
273 if linknode in self.full_nodes:
263 if linknode in self.full_nodes:
274 for x in orig(self, revlog, rev, prev, linknode):
264 for x in orig(self, revlog, rev, prev, linknode):
275 yield x
265 yield x
276 return
266 return
277 # At this point, a node can either be one we should skip or an
267 # At this point, a node can either be one we should skip or an
278 # ellipsis. If it's not an ellipsis, bail immediately.
268 # ellipsis. If it's not an ellipsis, bail immediately.
279 if linkrev not in self.precomputed_ellipsis:
269 if linkrev not in self.precomputed_ellipsis:
280 return
270 return
281 linkparents = self.precomputed_ellipsis[linkrev]
271 linkparents = self.precomputed_ellipsis[linkrev]
282 def local(clrev):
272 def local(clrev):
283 """Turn a changelog revnum into a local revnum.
273 """Turn a changelog revnum into a local revnum.
284
274
285 The ellipsis dag is stored as revnums on the changelog,
275 The ellipsis dag is stored as revnums on the changelog,
286 but when we're producing ellipsis entries for
276 but when we're producing ellipsis entries for
287 non-changelog revlogs, we need to turn those numbers into
277 non-changelog revlogs, we need to turn those numbers into
288 something local. This does that for us, and during the
278 something local. This does that for us, and during the
289 changelog sending phase will also expand the stored
279 changelog sending phase will also expand the stored
290 mappings as needed.
280 mappings as needed.
291 """
281 """
292 if clrev == node.nullrev:
282 if clrev == node.nullrev:
293 return node.nullrev
283 return node.nullrev
294 if not self.changelog_done:
284 if not self.changelog_done:
295 # If we're doing the changelog, it's possible that we
285 # If we're doing the changelog, it's possible that we
296 # have a parent that is already on the client, and we
286 # have a parent that is already on the client, and we
297 # need to store some extra mapping information so that
287 # need to store some extra mapping information so that
298 # our contained ellipsis nodes will be able to resolve
288 # our contained ellipsis nodes will be able to resolve
299 # their parents.
289 # their parents.
300 if clrev not in self.clrev_to_localrev:
290 if clrev not in self.clrev_to_localrev:
301 clnode = revlog.node(clrev)
291 clnode = revlog.node(clrev)
302 self.clnode_to_rev[clnode] = clrev
292 self.clnode_to_rev[clnode] = clrev
303 return clrev
293 return clrev
304 # Walk the ellipsis-ized changelog breadth-first looking for a
294 # Walk the ellipsis-ized changelog breadth-first looking for a
305 # change that has been linked from the current revlog.
295 # change that has been linked from the current revlog.
306 #
296 #
307 # For a flat manifest revlog only a single step should be necessary
297 # For a flat manifest revlog only a single step should be necessary
308 # as all relevant changelog entries are relevant to the flat
298 # as all relevant changelog entries are relevant to the flat
309 # manifest.
299 # manifest.
310 #
300 #
311 # For a filelog or tree manifest dirlog however not every changelog
301 # For a filelog or tree manifest dirlog however not every changelog
312 # entry will have been relevant, so we need to skip some changelog
302 # entry will have been relevant, so we need to skip some changelog
313 # nodes even after ellipsis-izing.
303 # nodes even after ellipsis-izing.
314 walk = [clrev]
304 walk = [clrev]
315 while walk:
305 while walk:
316 p = walk[0]
306 p = walk[0]
317 walk = walk[1:]
307 walk = walk[1:]
318 if p in self.clrev_to_localrev:
308 if p in self.clrev_to_localrev:
319 return self.clrev_to_localrev[p]
309 return self.clrev_to_localrev[p]
320 elif p in self.full_nodes:
310 elif p in self.full_nodes:
321 walk.extend([pp for pp in self._repo.changelog.parentrevs(p)
311 walk.extend([pp for pp in self._repo.changelog.parentrevs(p)
322 if pp != node.nullrev])
312 if pp != node.nullrev])
323 elif p in self.precomputed_ellipsis:
313 elif p in self.precomputed_ellipsis:
324 walk.extend([pp for pp in self.precomputed_ellipsis[p]
314 walk.extend([pp for pp in self.precomputed_ellipsis[p]
325 if pp != node.nullrev])
315 if pp != node.nullrev])
326 else:
316 else:
327 # In this case, we've got an ellipsis with parents
317 # In this case, we've got an ellipsis with parents
328 # outside the current bundle (likely an
318 # outside the current bundle (likely an
329 # incremental pull). We "know" that we can use the
319 # incremental pull). We "know" that we can use the
330 # value of this same revlog at whatever revision
320 # value of this same revlog at whatever revision
331 # is pointed to by linknode. "Know" is in scare
321 # is pointed to by linknode. "Know" is in scare
332 # quotes because I haven't done enough examination
322 # quotes because I haven't done enough examination
333 # of edge cases to convince myself this is really
323 # of edge cases to convince myself this is really
334 # a fact - it works for all the (admittedly
324 # a fact - it works for all the (admittedly
335 # thorough) cases in our testsuite, but I would be
325 # thorough) cases in our testsuite, but I would be
336 # somewhat unsurprised to find a case in the wild
326 # somewhat unsurprised to find a case in the wild
337 # where this breaks down a bit. That said, I don't
327 # where this breaks down a bit. That said, I don't
338 # know if it would hurt anything.
328 # know if it would hurt anything.
339 for i in xrange(rev, 0, -1):
329 for i in xrange(rev, 0, -1):
340 if revlog.linkrev(i) == clrev:
330 if revlog.linkrev(i) == clrev:
341 return i
331 return i
342 # We failed to resolve a parent for this node, so
332 # We failed to resolve a parent for this node, so
343 # we crash the changegroup construction.
333 # we crash the changegroup construction.
344 raise error.Abort(
334 raise error.Abort(
345 'unable to resolve parent while packing %r %r'
335 'unable to resolve parent while packing %r %r'
346 ' for changeset %r' % (revlog.indexfile, rev, clrev))
336 ' for changeset %r' % (revlog.indexfile, rev, clrev))
347 return node.nullrev
337 return node.nullrev
348
338
349 if not linkparents or (
339 if not linkparents or (
350 revlog.parentrevs(rev) == (node.nullrev, node.nullrev)):
340 revlog.parentrevs(rev) == (node.nullrev, node.nullrev)):
351 p1, p2 = node.nullrev, node.nullrev
341 p1, p2 = node.nullrev, node.nullrev
352 elif len(linkparents) == 1:
342 elif len(linkparents) == 1:
353 p1, = sorted(local(p) for p in linkparents)
343 p1, = sorted(local(p) for p in linkparents)
354 p2 = node.nullrev
344 p2 = node.nullrev
355 else:
345 else:
356 p1, p2 = sorted(local(p) for p in linkparents)
346 p1, p2 = sorted(local(p) for p in linkparents)
357 yield ellipsisdata(
347 yield ellipsisdata(
358 self, rev, revlog, p1, p2, revlog.revision(rev), linknode)
348 self, rev, revlog, p1, p2, revlog.revision(rev), linknode)
359 extensions.wrapfunction(changegroup.cg1packer, 'revchunk', revchunk)
349 extensions.wrapfunction(changegroup.cg1packer, 'revchunk', revchunk)
360
350
361 def deltaparent(orig, self, revlog, rev, p1, p2, prev):
351 def deltaparent(orig, self, revlog, rev, p1, p2, prev):
362 if util.safehasattr(self, 'full_nodes'):
352 if util.safehasattr(self, 'full_nodes'):
363 # TODO: send better deltas when in narrow mode.
353 # TODO: send better deltas when in narrow mode.
364 #
354 #
365 # changegroup.group() loops over revisions to send,
355 # changegroup.group() loops over revisions to send,
366 # including revisions we'll skip. What this means is that
356 # including revisions we'll skip. What this means is that
367 # `prev` will be a potentially useless delta base for all
357 # `prev` will be a potentially useless delta base for all
368 # ellipsis nodes, as the client likely won't have it. In
358 # ellipsis nodes, as the client likely won't have it. In
369 # the future we should do bookkeeping about which nodes
359 # the future we should do bookkeeping about which nodes
370 # have been sent to the client, and try to be
360 # have been sent to the client, and try to be
371 # significantly smarter about delta bases. This is
361 # significantly smarter about delta bases. This is
372 # slightly tricky because this same code has to work for
362 # slightly tricky because this same code has to work for
373 # all revlogs, and we don't have the linkrev/linknode here.
363 # all revlogs, and we don't have the linkrev/linknode here.
374 return p1
364 return p1
375 return orig(self, revlog, rev, p1, p2, prev)
365 return orig(self, revlog, rev, p1, p2, prev)
376 extensions.wrapfunction(changegroup.cg2packer, 'deltaparent', deltaparent)
366 extensions.wrapfunction(changegroup.cg2packer, 'deltaparent', deltaparent)
@@ -1,1000 +1,1005 b''
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import os
10 import os
11 import struct
11 import struct
12 import tempfile
12 import tempfile
13 import weakref
13 import weakref
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 hex,
17 hex,
18 nullrev,
18 nullrev,
19 short,
19 short,
20 )
20 )
21
21
22 from . import (
22 from . import (
23 dagutil,
23 dagutil,
24 error,
24 error,
25 mdiff,
25 mdiff,
26 phases,
26 phases,
27 pycompat,
27 pycompat,
28 util,
28 util,
29 )
29 )
30
30
31 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
31 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
32 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
32 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
33 _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
33 _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
34
34
35 # When narrowing is finalized and no longer subject to format changes,
35 # When narrowing is finalized and no longer subject to format changes,
36 # we should move this to just "narrow" or similar.
36 # we should move this to just "narrow" or similar.
37 NARROW_REQUIREMENT = 'narrowhg-experimental'
37 NARROW_REQUIREMENT = 'narrowhg-experimental'
38
38
39 readexactly = util.readexactly
39 readexactly = util.readexactly
40
40
41 def getchunk(stream):
41 def getchunk(stream):
42 """return the next chunk from stream as a string"""
42 """return the next chunk from stream as a string"""
43 d = readexactly(stream, 4)
43 d = readexactly(stream, 4)
44 l = struct.unpack(">l", d)[0]
44 l = struct.unpack(">l", d)[0]
45 if l <= 4:
45 if l <= 4:
46 if l:
46 if l:
47 raise error.Abort(_("invalid chunk length %d") % l)
47 raise error.Abort(_("invalid chunk length %d") % l)
48 return ""
48 return ""
49 return readexactly(stream, l - 4)
49 return readexactly(stream, l - 4)
50
50
51 def chunkheader(length):
51 def chunkheader(length):
52 """return a changegroup chunk header (string)"""
52 """return a changegroup chunk header (string)"""
53 return struct.pack(">l", length + 4)
53 return struct.pack(">l", length + 4)
54
54
55 def closechunk():
55 def closechunk():
56 """return a changegroup chunk header (string) for a zero-length chunk"""
56 """return a changegroup chunk header (string) for a zero-length chunk"""
57 return struct.pack(">l", 0)
57 return struct.pack(">l", 0)
58
58
59 def writechunks(ui, chunks, filename, vfs=None):
59 def writechunks(ui, chunks, filename, vfs=None):
60 """Write chunks to a file and return its filename.
60 """Write chunks to a file and return its filename.
61
61
62 The stream is assumed to be a bundle file.
62 The stream is assumed to be a bundle file.
63 Existing files will not be overwritten.
63 Existing files will not be overwritten.
64 If no filename is specified, a temporary file is created.
64 If no filename is specified, a temporary file is created.
65 """
65 """
66 fh = None
66 fh = None
67 cleanup = None
67 cleanup = None
68 try:
68 try:
69 if filename:
69 if filename:
70 if vfs:
70 if vfs:
71 fh = vfs.open(filename, "wb")
71 fh = vfs.open(filename, "wb")
72 else:
72 else:
73 # Increase default buffer size because default is usually
73 # Increase default buffer size because default is usually
74 # small (4k is common on Linux).
74 # small (4k is common on Linux).
75 fh = open(filename, "wb", 131072)
75 fh = open(filename, "wb", 131072)
76 else:
76 else:
77 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
77 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
78 fh = os.fdopen(fd, pycompat.sysstr("wb"))
78 fh = os.fdopen(fd, pycompat.sysstr("wb"))
79 cleanup = filename
79 cleanup = filename
80 for c in chunks:
80 for c in chunks:
81 fh.write(c)
81 fh.write(c)
82 cleanup = None
82 cleanup = None
83 return filename
83 return filename
84 finally:
84 finally:
85 if fh is not None:
85 if fh is not None:
86 fh.close()
86 fh.close()
87 if cleanup is not None:
87 if cleanup is not None:
88 if filename and vfs:
88 if filename and vfs:
89 vfs.unlink(cleanup)
89 vfs.unlink(cleanup)
90 else:
90 else:
91 os.unlink(cleanup)
91 os.unlink(cleanup)
92
92
93 class cg1unpacker(object):
93 class cg1unpacker(object):
94 """Unpacker for cg1 changegroup streams.
94 """Unpacker for cg1 changegroup streams.
95
95
96 A changegroup unpacker handles the framing of the revision data in
96 A changegroup unpacker handles the framing of the revision data in
97 the wire format. Most consumers will want to use the apply()
97 the wire format. Most consumers will want to use the apply()
98 method to add the changes from the changegroup to a repository.
98 method to add the changes from the changegroup to a repository.
99
99
100 If you're forwarding a changegroup unmodified to another consumer,
100 If you're forwarding a changegroup unmodified to another consumer,
101 use getchunks(), which returns an iterator of changegroup
101 use getchunks(), which returns an iterator of changegroup
102 chunks. This is mostly useful for cases where you need to know the
102 chunks. This is mostly useful for cases where you need to know the
103 data stream has ended by observing the end of the changegroup.
103 data stream has ended by observing the end of the changegroup.
104
104
105 deltachunk() is useful only if you're applying delta data. Most
105 deltachunk() is useful only if you're applying delta data. Most
106 consumers should prefer apply() instead.
106 consumers should prefer apply() instead.
107
107
108 A few other public methods exist. Those are used only for
108 A few other public methods exist. Those are used only for
109 bundlerepo and some debug commands - their use is discouraged.
109 bundlerepo and some debug commands - their use is discouraged.
110 """
110 """
111 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
111 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
112 deltaheadersize = struct.calcsize(deltaheader)
112 deltaheadersize = struct.calcsize(deltaheader)
113 version = '01'
113 version = '01'
114 _grouplistcount = 1 # One list of files after the manifests
114 _grouplistcount = 1 # One list of files after the manifests
115
115
116 def __init__(self, fh, alg, extras=None):
116 def __init__(self, fh, alg, extras=None):
117 if alg is None:
117 if alg is None:
118 alg = 'UN'
118 alg = 'UN'
119 if alg not in util.compengines.supportedbundletypes:
119 if alg not in util.compengines.supportedbundletypes:
120 raise error.Abort(_('unknown stream compression type: %s')
120 raise error.Abort(_('unknown stream compression type: %s')
121 % alg)
121 % alg)
122 if alg == 'BZ':
122 if alg == 'BZ':
123 alg = '_truncatedBZ'
123 alg = '_truncatedBZ'
124
124
125 compengine = util.compengines.forbundletype(alg)
125 compengine = util.compengines.forbundletype(alg)
126 self._stream = compengine.decompressorreader(fh)
126 self._stream = compengine.decompressorreader(fh)
127 self._type = alg
127 self._type = alg
128 self.extras = extras or {}
128 self.extras = extras or {}
129 self.callback = None
129 self.callback = None
130
130
131 # These methods (compressed, read, seek, tell) all appear to only
131 # These methods (compressed, read, seek, tell) all appear to only
132 # be used by bundlerepo, but it's a little hard to tell.
132 # be used by bundlerepo, but it's a little hard to tell.
133 def compressed(self):
133 def compressed(self):
134 return self._type is not None and self._type != 'UN'
134 return self._type is not None and self._type != 'UN'
135 def read(self, l):
135 def read(self, l):
136 return self._stream.read(l)
136 return self._stream.read(l)
137 def seek(self, pos):
137 def seek(self, pos):
138 return self._stream.seek(pos)
138 return self._stream.seek(pos)
139 def tell(self):
139 def tell(self):
140 return self._stream.tell()
140 return self._stream.tell()
141 def close(self):
141 def close(self):
142 return self._stream.close()
142 return self._stream.close()
143
143
144 def _chunklength(self):
144 def _chunklength(self):
145 d = readexactly(self._stream, 4)
145 d = readexactly(self._stream, 4)
146 l = struct.unpack(">l", d)[0]
146 l = struct.unpack(">l", d)[0]
147 if l <= 4:
147 if l <= 4:
148 if l:
148 if l:
149 raise error.Abort(_("invalid chunk length %d") % l)
149 raise error.Abort(_("invalid chunk length %d") % l)
150 return 0
150 return 0
151 if self.callback:
151 if self.callback:
152 self.callback()
152 self.callback()
153 return l - 4
153 return l - 4
154
154
155 def changelogheader(self):
155 def changelogheader(self):
156 """v10 does not have a changelog header chunk"""
156 """v10 does not have a changelog header chunk"""
157 return {}
157 return {}
158
158
159 def manifestheader(self):
159 def manifestheader(self):
160 """v10 does not have a manifest header chunk"""
160 """v10 does not have a manifest header chunk"""
161 return {}
161 return {}
162
162
163 def filelogheader(self):
163 def filelogheader(self):
164 """return the header of the filelogs chunk, v10 only has the filename"""
164 """return the header of the filelogs chunk, v10 only has the filename"""
165 l = self._chunklength()
165 l = self._chunklength()
166 if not l:
166 if not l:
167 return {}
167 return {}
168 fname = readexactly(self._stream, l)
168 fname = readexactly(self._stream, l)
169 return {'filename': fname}
169 return {'filename': fname}
170
170
171 def _deltaheader(self, headertuple, prevnode):
171 def _deltaheader(self, headertuple, prevnode):
172 node, p1, p2, cs = headertuple
172 node, p1, p2, cs = headertuple
173 if prevnode is None:
173 if prevnode is None:
174 deltabase = p1
174 deltabase = p1
175 else:
175 else:
176 deltabase = prevnode
176 deltabase = prevnode
177 flags = 0
177 flags = 0
178 return node, p1, p2, deltabase, cs, flags
178 return node, p1, p2, deltabase, cs, flags
179
179
180 def deltachunk(self, prevnode):
180 def deltachunk(self, prevnode):
181 l = self._chunklength()
181 l = self._chunklength()
182 if not l:
182 if not l:
183 return {}
183 return {}
184 headerdata = readexactly(self._stream, self.deltaheadersize)
184 headerdata = readexactly(self._stream, self.deltaheadersize)
185 header = struct.unpack(self.deltaheader, headerdata)
185 header = struct.unpack(self.deltaheader, headerdata)
186 delta = readexactly(self._stream, l - self.deltaheadersize)
186 delta = readexactly(self._stream, l - self.deltaheadersize)
187 node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
187 node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
188 return (node, p1, p2, cs, deltabase, delta, flags)
188 return (node, p1, p2, cs, deltabase, delta, flags)
189
189
190 def getchunks(self):
190 def getchunks(self):
191 """returns all the chunks contains in the bundle
191 """returns all the chunks contains in the bundle
192
192
193 Used when you need to forward the binary stream to a file or another
193 Used when you need to forward the binary stream to a file or another
194 network API. To do so, it parse the changegroup data, otherwise it will
194 network API. To do so, it parse the changegroup data, otherwise it will
195 block in case of sshrepo because it don't know the end of the stream.
195 block in case of sshrepo because it don't know the end of the stream.
196 """
196 """
197 # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
197 # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
198 # and a list of filelogs. For changegroup 3, we expect 4 parts:
198 # and a list of filelogs. For changegroup 3, we expect 4 parts:
199 # changelog, manifestlog, a list of tree manifestlogs, and a list of
199 # changelog, manifestlog, a list of tree manifestlogs, and a list of
200 # filelogs.
200 # filelogs.
201 #
201 #
202 # Changelog and manifestlog parts are terminated with empty chunks. The
202 # Changelog and manifestlog parts are terminated with empty chunks. The
203 # tree and file parts are a list of entry sections. Each entry section
203 # tree and file parts are a list of entry sections. Each entry section
204 # is a series of chunks terminating in an empty chunk. The list of these
204 # is a series of chunks terminating in an empty chunk. The list of these
205 # entry sections is terminated in yet another empty chunk, so we know
205 # entry sections is terminated in yet another empty chunk, so we know
206 # we've reached the end of the tree/file list when we reach an empty
206 # we've reached the end of the tree/file list when we reach an empty
207 # chunk that was proceeded by no non-empty chunks.
207 # chunk that was proceeded by no non-empty chunks.
208
208
209 parts = 0
209 parts = 0
210 while parts < 2 + self._grouplistcount:
210 while parts < 2 + self._grouplistcount:
211 noentries = True
211 noentries = True
212 while True:
212 while True:
213 chunk = getchunk(self)
213 chunk = getchunk(self)
214 if not chunk:
214 if not chunk:
215 # The first two empty chunks represent the end of the
215 # The first two empty chunks represent the end of the
216 # changelog and the manifestlog portions. The remaining
216 # changelog and the manifestlog portions. The remaining
217 # empty chunks represent either A) the end of individual
217 # empty chunks represent either A) the end of individual
218 # tree or file entries in the file list, or B) the end of
218 # tree or file entries in the file list, or B) the end of
219 # the entire list. It's the end of the entire list if there
219 # the entire list. It's the end of the entire list if there
220 # were no entries (i.e. noentries is True).
220 # were no entries (i.e. noentries is True).
221 if parts < 2:
221 if parts < 2:
222 parts += 1
222 parts += 1
223 elif noentries:
223 elif noentries:
224 parts += 1
224 parts += 1
225 break
225 break
226 noentries = False
226 noentries = False
227 yield chunkheader(len(chunk))
227 yield chunkheader(len(chunk))
228 pos = 0
228 pos = 0
229 while pos < len(chunk):
229 while pos < len(chunk):
230 next = pos + 2**20
230 next = pos + 2**20
231 yield chunk[pos:next]
231 yield chunk[pos:next]
232 pos = next
232 pos = next
233 yield closechunk()
233 yield closechunk()
234
234
235 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
235 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
236 # We know that we'll never have more manifests than we had
236 # We know that we'll never have more manifests than we had
237 # changesets.
237 # changesets.
238 self.callback = prog(_('manifests'), numchanges)
238 self.callback = prog(_('manifests'), numchanges)
239 # no need to check for empty manifest group here:
239 # no need to check for empty manifest group here:
240 # if the result of the merge of 1 and 2 is the same in 3 and 4,
240 # if the result of the merge of 1 and 2 is the same in 3 and 4,
241 # no new manifest will be created and the manifest group will
241 # no new manifest will be created and the manifest group will
242 # be empty during the pull
242 # be empty during the pull
243 self.manifestheader()
243 self.manifestheader()
244 deltas = self.deltaiter()
244 deltas = self.deltaiter()
245 repo.manifestlog._revlog.addgroup(deltas, revmap, trp)
245 repo.manifestlog._revlog.addgroup(deltas, revmap, trp)
246 repo.ui.progress(_('manifests'), None)
246 repo.ui.progress(_('manifests'), None)
247 self.callback = None
247 self.callback = None
248
248
249 def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
249 def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
250 expectedtotal=None):
250 expectedtotal=None):
251 """Add the changegroup returned by source.read() to this repo.
251 """Add the changegroup returned by source.read() to this repo.
252 srctype is a string like 'push', 'pull', or 'unbundle'. url is
252 srctype is a string like 'push', 'pull', or 'unbundle'. url is
253 the URL of the repo where this changegroup is coming from.
253 the URL of the repo where this changegroup is coming from.
254
254
255 Return an integer summarizing the change to this repo:
255 Return an integer summarizing the change to this repo:
256 - nothing changed or no source: 0
256 - nothing changed or no source: 0
257 - more heads than before: 1+added heads (2..n)
257 - more heads than before: 1+added heads (2..n)
258 - fewer heads than before: -1-removed heads (-2..-n)
258 - fewer heads than before: -1-removed heads (-2..-n)
259 - number of heads stays the same: 1
259 - number of heads stays the same: 1
260 """
260 """
261 repo = repo.unfiltered()
261 repo = repo.unfiltered()
262 def csmap(x):
262 def csmap(x):
263 repo.ui.debug("add changeset %s\n" % short(x))
263 repo.ui.debug("add changeset %s\n" % short(x))
264 return len(cl)
264 return len(cl)
265
265
266 def revmap(x):
266 def revmap(x):
267 return cl.rev(x)
267 return cl.rev(x)
268
268
269 changesets = files = revisions = 0
269 changesets = files = revisions = 0
270
270
271 try:
271 try:
272 # The transaction may already carry source information. In this
272 # The transaction may already carry source information. In this
273 # case we use the top level data. We overwrite the argument
273 # case we use the top level data. We overwrite the argument
274 # because we need to use the top level value (if they exist)
274 # because we need to use the top level value (if they exist)
275 # in this function.
275 # in this function.
276 srctype = tr.hookargs.setdefault('source', srctype)
276 srctype = tr.hookargs.setdefault('source', srctype)
277 url = tr.hookargs.setdefault('url', url)
277 url = tr.hookargs.setdefault('url', url)
278 repo.hook('prechangegroup',
278 repo.hook('prechangegroup',
279 throw=True, **pycompat.strkwargs(tr.hookargs))
279 throw=True, **pycompat.strkwargs(tr.hookargs))
280
280
281 # write changelog data to temp files so concurrent readers
281 # write changelog data to temp files so concurrent readers
282 # will not see an inconsistent view
282 # will not see an inconsistent view
283 cl = repo.changelog
283 cl = repo.changelog
284 cl.delayupdate(tr)
284 cl.delayupdate(tr)
285 oldheads = set(cl.heads())
285 oldheads = set(cl.heads())
286
286
287 trp = weakref.proxy(tr)
287 trp = weakref.proxy(tr)
288 # pull off the changeset group
288 # pull off the changeset group
289 repo.ui.status(_("adding changesets\n"))
289 repo.ui.status(_("adding changesets\n"))
290 clstart = len(cl)
290 clstart = len(cl)
291 class prog(object):
291 class prog(object):
292 def __init__(self, step, total):
292 def __init__(self, step, total):
293 self._step = step
293 self._step = step
294 self._total = total
294 self._total = total
295 self._count = 1
295 self._count = 1
296 def __call__(self):
296 def __call__(self):
297 repo.ui.progress(self._step, self._count, unit=_('chunks'),
297 repo.ui.progress(self._step, self._count, unit=_('chunks'),
298 total=self._total)
298 total=self._total)
299 self._count += 1
299 self._count += 1
300 self.callback = prog(_('changesets'), expectedtotal)
300 self.callback = prog(_('changesets'), expectedtotal)
301
301
302 efiles = set()
302 efiles = set()
303 def onchangelog(cl, node):
303 def onchangelog(cl, node):
304 efiles.update(cl.readfiles(node))
304 efiles.update(cl.readfiles(node))
305
305
306 self.changelogheader()
306 self.changelogheader()
307 deltas = self.deltaiter()
307 deltas = self.deltaiter()
308 cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog)
308 cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog)
309 efiles = len(efiles)
309 efiles = len(efiles)
310
310
311 if not cgnodes:
311 if not cgnodes:
312 repo.ui.develwarn('applied empty changegroup',
312 repo.ui.develwarn('applied empty changegroup',
313 config='warn-empty-changegroup')
313 config='warn-empty-changegroup')
314 clend = len(cl)
314 clend = len(cl)
315 changesets = clend - clstart
315 changesets = clend - clstart
316 repo.ui.progress(_('changesets'), None)
316 repo.ui.progress(_('changesets'), None)
317 self.callback = None
317 self.callback = None
318
318
319 # pull off the manifest group
319 # pull off the manifest group
320 repo.ui.status(_("adding manifests\n"))
320 repo.ui.status(_("adding manifests\n"))
321 self._unpackmanifests(repo, revmap, trp, prog, changesets)
321 self._unpackmanifests(repo, revmap, trp, prog, changesets)
322
322
323 needfiles = {}
323 needfiles = {}
324 if repo.ui.configbool('server', 'validate'):
324 if repo.ui.configbool('server', 'validate'):
325 cl = repo.changelog
325 cl = repo.changelog
326 ml = repo.manifestlog
326 ml = repo.manifestlog
327 # validate incoming csets have their manifests
327 # validate incoming csets have their manifests
328 for cset in xrange(clstart, clend):
328 for cset in xrange(clstart, clend):
329 mfnode = cl.changelogrevision(cset).manifest
329 mfnode = cl.changelogrevision(cset).manifest
330 mfest = ml[mfnode].readdelta()
330 mfest = ml[mfnode].readdelta()
331 # store file cgnodes we must see
331 # store file cgnodes we must see
332 for f, n in mfest.iteritems():
332 for f, n in mfest.iteritems():
333 needfiles.setdefault(f, set()).add(n)
333 needfiles.setdefault(f, set()).add(n)
334
334
335 # process the files
335 # process the files
336 repo.ui.status(_("adding file changes\n"))
336 repo.ui.status(_("adding file changes\n"))
337 newrevs, newfiles = _addchangegroupfiles(
337 newrevs, newfiles = _addchangegroupfiles(
338 repo, self, revmap, trp, efiles, needfiles)
338 repo, self, revmap, trp, efiles, needfiles)
339 revisions += newrevs
339 revisions += newrevs
340 files += newfiles
340 files += newfiles
341
341
342 deltaheads = 0
342 deltaheads = 0
343 if oldheads:
343 if oldheads:
344 heads = cl.heads()
344 heads = cl.heads()
345 deltaheads = len(heads) - len(oldheads)
345 deltaheads = len(heads) - len(oldheads)
346 for h in heads:
346 for h in heads:
347 if h not in oldheads and repo[h].closesbranch():
347 if h not in oldheads and repo[h].closesbranch():
348 deltaheads -= 1
348 deltaheads -= 1
349 htext = ""
349 htext = ""
350 if deltaheads:
350 if deltaheads:
351 htext = _(" (%+d heads)") % deltaheads
351 htext = _(" (%+d heads)") % deltaheads
352
352
353 repo.ui.status(_("added %d changesets"
353 repo.ui.status(_("added %d changesets"
354 " with %d changes to %d files%s\n")
354 " with %d changes to %d files%s\n")
355 % (changesets, revisions, files, htext))
355 % (changesets, revisions, files, htext))
356 repo.invalidatevolatilesets()
356 repo.invalidatevolatilesets()
357
357
358 if changesets > 0:
358 if changesets > 0:
359 if 'node' not in tr.hookargs:
359 if 'node' not in tr.hookargs:
360 tr.hookargs['node'] = hex(cl.node(clstart))
360 tr.hookargs['node'] = hex(cl.node(clstart))
361 tr.hookargs['node_last'] = hex(cl.node(clend - 1))
361 tr.hookargs['node_last'] = hex(cl.node(clend - 1))
362 hookargs = dict(tr.hookargs)
362 hookargs = dict(tr.hookargs)
363 else:
363 else:
364 hookargs = dict(tr.hookargs)
364 hookargs = dict(tr.hookargs)
365 hookargs['node'] = hex(cl.node(clstart))
365 hookargs['node'] = hex(cl.node(clstart))
366 hookargs['node_last'] = hex(cl.node(clend - 1))
366 hookargs['node_last'] = hex(cl.node(clend - 1))
367 repo.hook('pretxnchangegroup',
367 repo.hook('pretxnchangegroup',
368 throw=True, **pycompat.strkwargs(hookargs))
368 throw=True, **pycompat.strkwargs(hookargs))
369
369
370 added = [cl.node(r) for r in xrange(clstart, clend)]
370 added = [cl.node(r) for r in xrange(clstart, clend)]
371 phaseall = None
371 phaseall = None
372 if srctype in ('push', 'serve'):
372 if srctype in ('push', 'serve'):
373 # Old servers can not push the boundary themselves.
373 # Old servers can not push the boundary themselves.
374 # New servers won't push the boundary if changeset already
374 # New servers won't push the boundary if changeset already
375 # exists locally as secret
375 # exists locally as secret
376 #
376 #
377 # We should not use added here but the list of all change in
377 # We should not use added here but the list of all change in
378 # the bundle
378 # the bundle
379 if repo.publishing():
379 if repo.publishing():
380 targetphase = phaseall = phases.public
380 targetphase = phaseall = phases.public
381 else:
381 else:
382 # closer target phase computation
382 # closer target phase computation
383
383
384 # Those changesets have been pushed from the
384 # Those changesets have been pushed from the
385 # outside, their phases are going to be pushed
385 # outside, their phases are going to be pushed
386 # alongside. Therefor `targetphase` is
386 # alongside. Therefor `targetphase` is
387 # ignored.
387 # ignored.
388 targetphase = phaseall = phases.draft
388 targetphase = phaseall = phases.draft
389 if added:
389 if added:
390 phases.registernew(repo, tr, targetphase, added)
390 phases.registernew(repo, tr, targetphase, added)
391 if phaseall is not None:
391 if phaseall is not None:
392 phases.advanceboundary(repo, tr, phaseall, cgnodes)
392 phases.advanceboundary(repo, tr, phaseall, cgnodes)
393
393
394 if changesets > 0:
394 if changesets > 0:
395
395
396 def runhooks():
396 def runhooks():
397 # These hooks run when the lock releases, not when the
397 # These hooks run when the lock releases, not when the
398 # transaction closes. So it's possible for the changelog
398 # transaction closes. So it's possible for the changelog
399 # to have changed since we last saw it.
399 # to have changed since we last saw it.
400 if clstart >= len(repo):
400 if clstart >= len(repo):
401 return
401 return
402
402
403 repo.hook("changegroup", **pycompat.strkwargs(hookargs))
403 repo.hook("changegroup", **pycompat.strkwargs(hookargs))
404
404
405 for n in added:
405 for n in added:
406 args = hookargs.copy()
406 args = hookargs.copy()
407 args['node'] = hex(n)
407 args['node'] = hex(n)
408 del args['node_last']
408 del args['node_last']
409 repo.hook("incoming", **pycompat.strkwargs(args))
409 repo.hook("incoming", **pycompat.strkwargs(args))
410
410
411 newheads = [h for h in repo.heads()
411 newheads = [h for h in repo.heads()
412 if h not in oldheads]
412 if h not in oldheads]
413 repo.ui.log("incoming",
413 repo.ui.log("incoming",
414 "%s incoming changes - new heads: %s\n",
414 "%s incoming changes - new heads: %s\n",
415 len(added),
415 len(added),
416 ', '.join([hex(c[:6]) for c in newheads]))
416 ', '.join([hex(c[:6]) for c in newheads]))
417
417
418 tr.addpostclose('changegroup-runhooks-%020i' % clstart,
418 tr.addpostclose('changegroup-runhooks-%020i' % clstart,
419 lambda tr: repo._afterlock(runhooks))
419 lambda tr: repo._afterlock(runhooks))
420 finally:
420 finally:
421 repo.ui.flush()
421 repo.ui.flush()
422 # never return 0 here:
422 # never return 0 here:
423 if deltaheads < 0:
423 if deltaheads < 0:
424 ret = deltaheads - 1
424 ret = deltaheads - 1
425 else:
425 else:
426 ret = deltaheads + 1
426 ret = deltaheads + 1
427 return ret
427 return ret
428
428
429 def deltaiter(self):
429 def deltaiter(self):
430 """
430 """
431 returns an iterator of the deltas in this changegroup
431 returns an iterator of the deltas in this changegroup
432
432
433 Useful for passing to the underlying storage system to be stored.
433 Useful for passing to the underlying storage system to be stored.
434 """
434 """
435 chain = None
435 chain = None
436 for chunkdata in iter(lambda: self.deltachunk(chain), {}):
436 for chunkdata in iter(lambda: self.deltachunk(chain), {}):
437 # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
437 # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
438 yield chunkdata
438 yield chunkdata
439 chain = chunkdata[0]
439 chain = chunkdata[0]
440
440
441 class cg2unpacker(cg1unpacker):
441 class cg2unpacker(cg1unpacker):
442 """Unpacker for cg2 streams.
442 """Unpacker for cg2 streams.
443
443
444 cg2 streams add support for generaldelta, so the delta header
444 cg2 streams add support for generaldelta, so the delta header
445 format is slightly different. All other features about the data
445 format is slightly different. All other features about the data
446 remain the same.
446 remain the same.
447 """
447 """
448 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
448 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
449 deltaheadersize = struct.calcsize(deltaheader)
449 deltaheadersize = struct.calcsize(deltaheader)
450 version = '02'
450 version = '02'
451
451
452 def _deltaheader(self, headertuple, prevnode):
452 def _deltaheader(self, headertuple, prevnode):
453 node, p1, p2, deltabase, cs = headertuple
453 node, p1, p2, deltabase, cs = headertuple
454 flags = 0
454 flags = 0
455 return node, p1, p2, deltabase, cs, flags
455 return node, p1, p2, deltabase, cs, flags
456
456
457 class cg3unpacker(cg2unpacker):
457 class cg3unpacker(cg2unpacker):
458 """Unpacker for cg3 streams.
458 """Unpacker for cg3 streams.
459
459
460 cg3 streams add support for exchanging treemanifests and revlog
460 cg3 streams add support for exchanging treemanifests and revlog
461 flags. It adds the revlog flags to the delta header and an empty chunk
461 flags. It adds the revlog flags to the delta header and an empty chunk
462 separating manifests and files.
462 separating manifests and files.
463 """
463 """
464 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
464 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
465 deltaheadersize = struct.calcsize(deltaheader)
465 deltaheadersize = struct.calcsize(deltaheader)
466 version = '03'
466 version = '03'
467 _grouplistcount = 2 # One list of manifests and one list of files
467 _grouplistcount = 2 # One list of manifests and one list of files
468
468
469 def _deltaheader(self, headertuple, prevnode):
469 def _deltaheader(self, headertuple, prevnode):
470 node, p1, p2, deltabase, cs, flags = headertuple
470 node, p1, p2, deltabase, cs, flags = headertuple
471 return node, p1, p2, deltabase, cs, flags
471 return node, p1, p2, deltabase, cs, flags
472
472
473 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
473 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
474 super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
474 super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
475 numchanges)
475 numchanges)
476 for chunkdata in iter(self.filelogheader, {}):
476 for chunkdata in iter(self.filelogheader, {}):
477 # If we get here, there are directory manifests in the changegroup
477 # If we get here, there are directory manifests in the changegroup
478 d = chunkdata["filename"]
478 d = chunkdata["filename"]
479 repo.ui.debug("adding %s revisions\n" % d)
479 repo.ui.debug("adding %s revisions\n" % d)
480 dirlog = repo.manifestlog._revlog.dirlog(d)
480 dirlog = repo.manifestlog._revlog.dirlog(d)
481 deltas = self.deltaiter()
481 deltas = self.deltaiter()
482 if not dirlog.addgroup(deltas, revmap, trp):
482 if not dirlog.addgroup(deltas, revmap, trp):
483 raise error.Abort(_("received dir revlog group is empty"))
483 raise error.Abort(_("received dir revlog group is empty"))
484
484
485 class headerlessfixup(object):
485 class headerlessfixup(object):
486 def __init__(self, fh, h):
486 def __init__(self, fh, h):
487 self._h = h
487 self._h = h
488 self._fh = fh
488 self._fh = fh
489 def read(self, n):
489 def read(self, n):
490 if self._h:
490 if self._h:
491 d, self._h = self._h[:n], self._h[n:]
491 d, self._h = self._h[:n], self._h[n:]
492 if len(d) < n:
492 if len(d) < n:
493 d += readexactly(self._fh, n - len(d))
493 d += readexactly(self._fh, n - len(d))
494 return d
494 return d
495 return readexactly(self._fh, n)
495 return readexactly(self._fh, n)
496
496
class cg1packer(object):
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    version = '01'

    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle. While bundlecaps is
        unused in core Mercurial, extensions rely on this feature to communicate
        capabilities to customize the changegroup packer.
        """
        # Set of capabilities we can use to build the bundle.
        self._bundlecaps = set() if bundlecaps is None else bundlecaps
        # experimental config: bundle.reorder
        reorder = repo.ui.config('bundle', 'reorder')
        # 'auto' means "let the revlog layout decide" (None); anything else
        # is parsed as an explicit boolean.
        self._reorder = None if reorder == 'auto' else util.parsebool(reorder)
        self._repo = repo
        self._progress = repo.ui.progress
        # Per-part size notes are only interesting at -v (not --debug,
        # which already produces plenty of output).
        if repo.ui.verbose and not repo.ui.debugflag:
            self._verbosenote = repo.ui.note
        else:
            self._verbosenote = lambda s: None
525
525
526 def close(self):
526 def close(self):
527 return closechunk()
527 return closechunk()
528
528
529 def fileheader(self, fname):
529 def fileheader(self, fname):
530 return chunkheader(len(fname)) + fname
530 return chunkheader(len(fname)) + fname
531
531
532 # Extracted both for clarity and for overriding in extensions.
532 # Extracted both for clarity and for overriding in extensions.
533 def _sortgroup(self, revlog, nodelist, lookup):
533 def _sortgroup(self, revlog, nodelist, lookup):
534 """Sort nodes for change group and turn them into revnums."""
534 """Sort nodes for change group and turn them into revnums."""
535 # for generaldelta revlogs, we linearize the revs; this will both be
535 # for generaldelta revlogs, we linearize the revs; this will both be
536 # much quicker and generate a much smaller bundle
536 # much quicker and generate a much smaller bundle
537 if (revlog._generaldelta and self._reorder is None) or self._reorder:
537 if (revlog._generaldelta and self._reorder is None) or self._reorder:
538 dag = dagutil.revlogdag(revlog)
538 dag = dagutil.revlogdag(revlog)
539 return dag.linearize(set(revlog.rev(n) for n in nodelist))
539 return dag.linearize(set(revlog.rev(n) for n in nodelist))
540 else:
540 else:
541 return sorted([revlog.rev(n) for n in nodelist])
541 return sorted([revlog.rev(n) for n in nodelist])
542
542
543 def group(self, nodelist, revlog, lookup, units=None):
543 def group(self, nodelist, revlog, lookup, units=None):
544 """Calculate a delta group, yielding a sequence of changegroup chunks
544 """Calculate a delta group, yielding a sequence of changegroup chunks
545 (strings).
545 (strings).
546
546
547 Given a list of changeset revs, return a set of deltas and
547 Given a list of changeset revs, return a set of deltas and
548 metadata corresponding to nodes. The first delta is
548 metadata corresponding to nodes. The first delta is
549 first parent(nodelist[0]) -> nodelist[0], the receiver is
549 first parent(nodelist[0]) -> nodelist[0], the receiver is
550 guaranteed to have this parent as it has all history before
550 guaranteed to have this parent as it has all history before
551 these changesets. In the case firstparent is nullrev the
551 these changesets. In the case firstparent is nullrev the
552 changegroup starts with a full revision.
552 changegroup starts with a full revision.
553
553
554 If units is not None, progress detail will be generated, units specifies
554 If units is not None, progress detail will be generated, units specifies
555 the type of revlog that is touched (changelog, manifest, etc.).
555 the type of revlog that is touched (changelog, manifest, etc.).
556 """
556 """
557 # if we don't have any revisions touched by these changesets, bail
557 # if we don't have any revisions touched by these changesets, bail
558 if len(nodelist) == 0:
558 if len(nodelist) == 0:
559 yield self.close()
559 yield self.close()
560 return
560 return
561
561
562 revs = self._sortgroup(revlog, nodelist, lookup)
562 revs = self._sortgroup(revlog, nodelist, lookup)
563
563
564 # add the parent of the first rev
564 # add the parent of the first rev
565 p = revlog.parentrevs(revs[0])[0]
565 p = revlog.parentrevs(revs[0])[0]
566 revs.insert(0, p)
566 revs.insert(0, p)
567
567
568 # build deltas
568 # build deltas
569 total = len(revs) - 1
569 total = len(revs) - 1
570 msgbundling = _('bundling')
570 msgbundling = _('bundling')
571 for r in xrange(len(revs) - 1):
571 for r in xrange(len(revs) - 1):
572 if units is not None:
572 if units is not None:
573 self._progress(msgbundling, r + 1, unit=units, total=total)
573 self._progress(msgbundling, r + 1, unit=units, total=total)
574 prev, curr = revs[r], revs[r + 1]
574 prev, curr = revs[r], revs[r + 1]
575 linknode = lookup(revlog.node(curr))
575 linknode = lookup(revlog.node(curr))
576 for c in self.revchunk(revlog, curr, prev, linknode):
576 for c in self.revchunk(revlog, curr, prev, linknode):
577 yield c
577 yield c
578
578
579 if units is not None:
579 if units is not None:
580 self._progress(msgbundling, None)
580 self._progress(msgbundling, None)
581 yield self.close()
581 yield self.close()
582
582
583 # filter any nodes that claim to be part of the known set
583 # filter any nodes that claim to be part of the known set
584 def prune(self, revlog, missing, commonrevs):
584 def prune(self, revlog, missing, commonrevs):
585 rr, rl = revlog.rev, revlog.linkrev
585 rr, rl = revlog.rev, revlog.linkrev
586 return [n for n in missing if rl(rr(n)) not in commonrevs]
586 return [n for n in missing if rl(rr(n)) not in commonrevs]
587
587
588 def _packmanifests(self, dir, mfnodes, lookuplinknode):
588 def _packmanifests(self, dir, mfnodes, lookuplinknode):
589 """Pack flat manifests into a changegroup stream."""
589 """Pack flat manifests into a changegroup stream."""
590 assert not dir
590 assert not dir
591 for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
591 for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
592 lookuplinknode, units=_('manifests')):
592 lookuplinknode, units=_('manifests')):
593 yield chunk
593 yield chunk
594
594
595 def _manifestsdone(self):
595 def _manifestsdone(self):
596 return ''
596 return ''
597
597
598 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
598 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
599 '''yield a sequence of changegroup chunks (strings)'''
599 '''yield a sequence of changegroup chunks (strings)'''
600 repo = self._repo
600 repo = self._repo
601 cl = repo.changelog
601 cl = repo.changelog
602
602
603 clrevorder = {}
603 clrevorder = {}
604 mfs = {} # needed manifests
604 mfs = {} # needed manifests
605 fnodes = {} # needed file nodes
605 fnodes = {} # needed file nodes
606 changedfiles = set()
606 changedfiles = set()
607
607
608 # Callback for the changelog, used to collect changed files and manifest
608 # Callback for the changelog, used to collect changed files and manifest
609 # nodes.
609 # nodes.
610 # Returns the linkrev node (identity in the changelog case).
610 # Returns the linkrev node (identity in the changelog case).
611 def lookupcl(x):
611 def lookupcl(x):
612 c = cl.read(x)
612 c = cl.read(x)
613 clrevorder[x] = len(clrevorder)
613 clrevorder[x] = len(clrevorder)
614 n = c[0]
614 n = c[0]
615 # record the first changeset introducing this manifest version
615 # record the first changeset introducing this manifest version
616 mfs.setdefault(n, x)
616 mfs.setdefault(n, x)
617 # Record a complete list of potentially-changed files in
617 # Record a complete list of potentially-changed files in
618 # this manifest.
618 # this manifest.
619 changedfiles.update(c[3])
619 changedfiles.update(c[3])
620 return x
620 return x
621
621
622 self._verbosenote(_('uncompressed size of bundle content:\n'))
622 self._verbosenote(_('uncompressed size of bundle content:\n'))
623 size = 0
623 size = 0
624 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
624 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
625 size += len(chunk)
625 size += len(chunk)
626 yield chunk
626 yield chunk
627 self._verbosenote(_('%8.i (changelog)\n') % size)
627 self._verbosenote(_('%8.i (changelog)\n') % size)
628
628
629 # We need to make sure that the linkrev in the changegroup refers to
629 # We need to make sure that the linkrev in the changegroup refers to
630 # the first changeset that introduced the manifest or file revision.
630 # the first changeset that introduced the manifest or file revision.
631 # The fastpath is usually safer than the slowpath, because the filelogs
631 # The fastpath is usually safer than the slowpath, because the filelogs
632 # are walked in revlog order.
632 # are walked in revlog order.
633 #
633 #
634 # When taking the slowpath with reorder=None and the manifest revlog
634 # When taking the slowpath with reorder=None and the manifest revlog
635 # uses generaldelta, the manifest may be walked in the "wrong" order.
635 # uses generaldelta, the manifest may be walked in the "wrong" order.
636 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
636 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
637 # cc0ff93d0c0c).
637 # cc0ff93d0c0c).
638 #
638 #
639 # When taking the fastpath, we are only vulnerable to reordering
639 # When taking the fastpath, we are only vulnerable to reordering
640 # of the changelog itself. The changelog never uses generaldelta, so
640 # of the changelog itself. The changelog never uses generaldelta, so
641 # it is only reordered when reorder=True. To handle this case, we
641 # it is only reordered when reorder=True. To handle this case, we
642 # simply take the slowpath, which already has the 'clrevorder' logic.
642 # simply take the slowpath, which already has the 'clrevorder' logic.
643 # This was also fixed in cc0ff93d0c0c.
643 # This was also fixed in cc0ff93d0c0c.
644 fastpathlinkrev = fastpathlinkrev and not self._reorder
644 fastpathlinkrev = fastpathlinkrev and not self._reorder
645 # Treemanifests don't work correctly with fastpathlinkrev
645 # Treemanifests don't work correctly with fastpathlinkrev
646 # either, because we don't discover which directory nodes to
646 # either, because we don't discover which directory nodes to
647 # send along with files. This could probably be fixed.
647 # send along with files. This could probably be fixed.
648 fastpathlinkrev = fastpathlinkrev and (
648 fastpathlinkrev = fastpathlinkrev and (
649 'treemanifest' not in repo.requirements)
649 'treemanifest' not in repo.requirements)
650
650
651 for chunk in self.generatemanifests(commonrevs, clrevorder,
651 for chunk in self.generatemanifests(commonrevs, clrevorder,
652 fastpathlinkrev, mfs, fnodes, source):
652 fastpathlinkrev, mfs, fnodes, source):
653 yield chunk
653 yield chunk
654 mfs.clear()
654 mfs.clear()
655 clrevs = set(cl.rev(x) for x in clnodes)
655 clrevs = set(cl.rev(x) for x in clnodes)
656
656
657 if not fastpathlinkrev:
657 if not fastpathlinkrev:
658 def linknodes(unused, fname):
658 def linknodes(unused, fname):
659 return fnodes.get(fname, {})
659 return fnodes.get(fname, {})
660 else:
660 else:
661 cln = cl.node
661 cln = cl.node
662 def linknodes(filerevlog, fname):
662 def linknodes(filerevlog, fname):
663 llr = filerevlog.linkrev
663 llr = filerevlog.linkrev
664 fln = filerevlog.node
664 fln = filerevlog.node
665 revs = ((r, llr(r)) for r in filerevlog)
665 revs = ((r, llr(r)) for r in filerevlog)
666 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
666 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
667
667
668 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
668 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
669 source):
669 source):
670 yield chunk
670 yield chunk
671
671
672 yield self.close()
672 yield self.close()
673
673
674 if clnodes:
674 if clnodes:
675 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
675 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
676
676
677 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
677 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
678 fnodes, source):
678 fnodes, source):
679 """Returns an iterator of changegroup chunks containing manifests.
679 """Returns an iterator of changegroup chunks containing manifests.
680
680
681 `source` is unused here, but is used by extensions like remotefilelog to
681 `source` is unused here, but is used by extensions like remotefilelog to
682 change what is sent based in pulls vs pushes, etc.
682 change what is sent based in pulls vs pushes, etc.
683 """
683 """
684 repo = self._repo
684 repo = self._repo
685 mfl = repo.manifestlog
685 mfl = repo.manifestlog
686 dirlog = mfl._revlog.dirlog
686 dirlog = mfl._revlog.dirlog
687 tmfnodes = {'': mfs}
687 tmfnodes = {'': mfs}
688
688
689 # Callback for the manifest, used to collect linkrevs for filelog
689 # Callback for the manifest, used to collect linkrevs for filelog
690 # revisions.
690 # revisions.
691 # Returns the linkrev node (collected in lookupcl).
691 # Returns the linkrev node (collected in lookupcl).
692 def makelookupmflinknode(dir, nodes):
692 def makelookupmflinknode(dir, nodes):
693 if fastpathlinkrev:
693 if fastpathlinkrev:
694 assert not dir
694 assert not dir
695 return mfs.__getitem__
695 return mfs.__getitem__
696
696
697 def lookupmflinknode(x):
697 def lookupmflinknode(x):
698 """Callback for looking up the linknode for manifests.
698 """Callback for looking up the linknode for manifests.
699
699
700 Returns the linkrev node for the specified manifest.
700 Returns the linkrev node for the specified manifest.
701
701
702 SIDE EFFECT:
702 SIDE EFFECT:
703
703
704 1) fclnodes gets populated with the list of relevant
704 1) fclnodes gets populated with the list of relevant
705 file nodes if we're not using fastpathlinkrev
705 file nodes if we're not using fastpathlinkrev
706 2) When treemanifests are in use, collects treemanifest nodes
706 2) When treemanifests are in use, collects treemanifest nodes
707 to send
707 to send
708
708
709 Note that this means manifests must be completely sent to
709 Note that this means manifests must be completely sent to
710 the client before you can trust the list of files and
710 the client before you can trust the list of files and
711 treemanifests to send.
711 treemanifests to send.
712 """
712 """
713 clnode = nodes[x]
713 clnode = nodes[x]
714 mdata = mfl.get(dir, x).readfast(shallow=True)
714 mdata = mfl.get(dir, x).readfast(shallow=True)
715 for p, n, fl in mdata.iterentries():
715 for p, n, fl in mdata.iterentries():
716 if fl == 't': # subdirectory manifest
716 if fl == 't': # subdirectory manifest
717 subdir = dir + p + '/'
717 subdir = dir + p + '/'
718 tmfclnodes = tmfnodes.setdefault(subdir, {})
718 tmfclnodes = tmfnodes.setdefault(subdir, {})
719 tmfclnode = tmfclnodes.setdefault(n, clnode)
719 tmfclnode = tmfclnodes.setdefault(n, clnode)
720 if clrevorder[clnode] < clrevorder[tmfclnode]:
720 if clrevorder[clnode] < clrevorder[tmfclnode]:
721 tmfclnodes[n] = clnode
721 tmfclnodes[n] = clnode
722 else:
722 else:
723 f = dir + p
723 f = dir + p
724 fclnodes = fnodes.setdefault(f, {})
724 fclnodes = fnodes.setdefault(f, {})
725 fclnode = fclnodes.setdefault(n, clnode)
725 fclnode = fclnodes.setdefault(n, clnode)
726 if clrevorder[clnode] < clrevorder[fclnode]:
726 if clrevorder[clnode] < clrevorder[fclnode]:
727 fclnodes[n] = clnode
727 fclnodes[n] = clnode
728 return clnode
728 return clnode
729 return lookupmflinknode
729 return lookupmflinknode
730
730
731 size = 0
731 size = 0
732 while tmfnodes:
732 while tmfnodes:
733 dir, nodes = tmfnodes.popitem()
733 dir, nodes = tmfnodes.popitem()
734 prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
734 prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
735 if not dir or prunednodes:
735 if not dir or prunednodes:
736 for x in self._packmanifests(dir, prunednodes,
736 for x in self._packmanifests(dir, prunednodes,
737 makelookupmflinknode(dir, nodes)):
737 makelookupmflinknode(dir, nodes)):
738 size += len(x)
738 size += len(x)
739 yield x
739 yield x
740 self._verbosenote(_('%8.i (manifests)\n') % size)
740 self._verbosenote(_('%8.i (manifests)\n') % size)
741 yield self._manifestsdone()
741 yield self._manifestsdone()
742
742
743 # The 'source' parameter is useful for extensions
743 # The 'source' parameter is useful for extensions
744 def generatefiles(self, changedfiles, linknodes, commonrevs, source):
744 def generatefiles(self, changedfiles, linknodes, commonrevs, source):
745 repo = self._repo
745 repo = self._repo
746 progress = self._progress
746 progress = self._progress
747 msgbundling = _('bundling')
747 msgbundling = _('bundling')
748
748
749 total = len(changedfiles)
749 total = len(changedfiles)
750 # for progress output
750 # for progress output
751 msgfiles = _('files')
751 msgfiles = _('files')
752 for i, fname in enumerate(sorted(changedfiles)):
752 for i, fname in enumerate(sorted(changedfiles)):
753 filerevlog = repo.file(fname)
753 filerevlog = repo.file(fname)
754 if not filerevlog:
754 if not filerevlog:
755 raise error.Abort(_("empty or missing revlog for %s") % fname)
755 raise error.Abort(_("empty or missing revlog for %s") % fname)
756
756
757 linkrevnodes = linknodes(filerevlog, fname)
757 linkrevnodes = linknodes(filerevlog, fname)
758 # Lookup for filenodes, we collected the linkrev nodes above in the
758 # Lookup for filenodes, we collected the linkrev nodes above in the
759 # fastpath case and with lookupmf in the slowpath case.
759 # fastpath case and with lookupmf in the slowpath case.
760 def lookupfilelog(x):
760 def lookupfilelog(x):
761 return linkrevnodes[x]
761 return linkrevnodes[x]
762
762
763 filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
763 filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
764 if filenodes:
764 if filenodes:
765 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
765 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
766 total=total)
766 total=total)
767 h = self.fileheader(fname)
767 h = self.fileheader(fname)
768 size = len(h)
768 size = len(h)
769 yield h
769 yield h
770 for chunk in self.group(filenodes, filerevlog, lookupfilelog):
770 for chunk in self.group(filenodes, filerevlog, lookupfilelog):
771 size += len(chunk)
771 size += len(chunk)
772 yield chunk
772 yield chunk
773 self._verbosenote(_('%8.i %s\n') % (size, fname))
773 self._verbosenote(_('%8.i %s\n') % (size, fname))
774 progress(msgbundling, None)
774 progress(msgbundling, None)
775
775
776 def deltaparent(self, revlog, rev, p1, p2, prev):
776 def deltaparent(self, revlog, rev, p1, p2, prev):
777 return prev
777 return prev
778
778
779 def revchunk(self, revlog, rev, prev, linknode):
779 def revchunk(self, revlog, rev, prev, linknode):
780 node = revlog.node(rev)
780 node = revlog.node(rev)
781 p1, p2 = revlog.parentrevs(rev)
781 p1, p2 = revlog.parentrevs(rev)
782 base = self.deltaparent(revlog, rev, p1, p2, prev)
782 base = self.deltaparent(revlog, rev, p1, p2, prev)
783
783
784 prefix = ''
784 prefix = ''
785 if revlog.iscensored(base) or revlog.iscensored(rev):
785 if revlog.iscensored(base) or revlog.iscensored(rev):
786 try:
786 try:
787 delta = revlog.revision(node, raw=True)
787 delta = revlog.revision(node, raw=True)
788 except error.CensoredNodeError as e:
788 except error.CensoredNodeError as e:
789 delta = e.tombstone
789 delta = e.tombstone
790 if base == nullrev:
790 if base == nullrev:
791 prefix = mdiff.trivialdiffheader(len(delta))
791 prefix = mdiff.trivialdiffheader(len(delta))
792 else:
792 else:
793 baselen = revlog.rawsize(base)
793 baselen = revlog.rawsize(base)
794 prefix = mdiff.replacediffheader(baselen, len(delta))
794 prefix = mdiff.replacediffheader(baselen, len(delta))
795 elif base == nullrev:
795 elif base == nullrev:
796 delta = revlog.revision(node, raw=True)
796 delta = revlog.revision(node, raw=True)
797 prefix = mdiff.trivialdiffheader(len(delta))
797 prefix = mdiff.trivialdiffheader(len(delta))
798 else:
798 else:
799 delta = revlog.revdiff(base, rev)
799 delta = revlog.revdiff(base, rev)
800 p1n, p2n = revlog.parents(node)
800 p1n, p2n = revlog.parents(node)
801 basenode = revlog.node(base)
801 basenode = revlog.node(base)
802 flags = revlog.flags(rev)
802 flags = revlog.flags(rev)
803 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
803 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
804 meta += prefix
804 meta += prefix
805 l = len(meta) + len(delta)
805 l = len(meta) + len(delta)
806 yield chunkheader(l)
806 yield chunkheader(l)
807 yield meta
807 yield meta
808 yield delta
808 yield delta
809 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
809 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
810 # do nothing with basenode, it is implicitly the previous one in HG10
810 # do nothing with basenode, it is implicitly the previous one in HG10
811 # do nothing with flags, it is implicitly 0 for cg1 and cg2
811 # do nothing with flags, it is implicitly 0 for cg1 and cg2
812 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
812 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
813
813
class cg2packer(cg1packer):
    version = '02'
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER

    def __init__(self, repo, bundlecaps=None):
        super(cg2packer, self).__init__(repo, bundlecaps)
        if self._reorder is None:
            # Since generaldelta is directly supported by cg2, reordering
            # generally doesn't help, so we disable it by default (treating
            # bundle.reorder=auto just like bundle.reorder=False).
            self._reorder = False

    def deltaparent(self, revlog, rev, p1, p2, prev):
        dp = revlog.deltaparent(rev)
        if dp == nullrev and revlog.storedeltachains:
            # Avoid sending full revisions when delta parent is null. Pick prev
            # in that case. It's tempting to pick p1 in this case, as p1 will
            # be smaller in the common case. However, computing a delta against
            # p1 may require resolving the raw text of p1, which could be
            # expensive. The revlog caches should have prev cached, meaning
            # less CPU for changegroup generation. There is likely room to add
            # a flag and/or config option to control this behavior.
            return prev
        if dp == nullrev:
            # revlog is configured to use full snapshot for a reason,
            # stick to full snapshot.
            return nullrev
        if dp not in (p1, p2, prev):
            # Pick prev when we can't be sure remote has the base revision.
            return prev
        return dp

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # Do nothing with flags, it is implicitly 0 in cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
850
850
class cg3packer(cg2packer):
    version = '03'
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        # Unlike cg1/cg2, cg3 can carry per-directory manifest groups;
        # a non-root group is introduced by its directory name.
        if dir:
            yield self.fileheader(dir)

        dirlog = self._repo.manifestlog._revlog.dirlog(dir)
        for chunk in self.group(mfnodes, dirlog, lookuplinknode,
                                units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        # cg3 terminates the manifest section with an explicit empty chunk.
        return self.close()

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # cg3 transmits the revlog flags on the wire.
        return struct.pack(
            self.deltaheader, node, p1n, p2n, basenode, linknode, flags)
870
870
# Maps each changegroup version to its (packer, unpacker) pair.
_packermap = {'01': (cg1packer, cg1unpacker),
              # cg2 adds support for exchanging generaldelta
              '02': (cg2packer, cg2unpacker),
              # cg3 adds support for exchanging revlog flags and treemanifests
              '03': (cg3packer, cg3unpacker),
              }
877
877
def allsupportedversions(repo):
    """Return every changegroup version this Mercurial knows for repo."""
    versions = set(_packermap)
    # cg3 is only offered when explicitly enabled or required by the repo.
    wantcg3 = (repo.ui.configbool('experimental', 'changegroup3')
               or repo.ui.configbool('experimental', 'treemanifest')
               or 'treemanifest' in repo.requirements)
    if not wantcg3:
        versions.discard('03')
    return versions
885
885
# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    # Any version we know about can be unbundled.
    return allsupportedversions(repo)
889
889
# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    versions = allsupportedversions(repo)
    if 'treemanifest' in repo.requirements:
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        versions.discard('01')
        versions.discard('02')
    if NARROW_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # support that for stripping and unbundling to work.
        versions.discard('01')
        versions.discard('02')
    return versions
902
907
def localversion(repo):
    """Pick the best version for bundles meant to be used locally.

    Covers strip/shelve backups and temporary bundles, where we control
    both ends and can use the newest supported format.
    """
    return max(supportedoutgoingversions(repo))
907
912
def safeversion(repo):
    """Smallest version it's safe to assume clients of the repo support.

    For example, all hg versions that support generaldelta also support
    changegroup 02.
    """
    versions = supportedoutgoingversions(repo)
    if 'generaldelta' in repo.requirements:
        versions.discard('01')
    assert versions
    return min(versions)
917
922
def getbundler(version, repo, bundlecaps=None):
    """Instantiate the packer class for the given changegroup version."""
    assert version in supportedoutgoingversions(repo)
    packercls = _packermap[version][0]
    return packercls(repo, bundlecaps)
921
926
def getunbundler(version, fh, alg, extras=None):
    """Instantiate the unpacker class for the given changegroup version."""
    unpackercls = _packermap[version][1]
    return unpackercls(fh, alg, extras=extras)
924
929
def _changegroupinfo(repo, nodes, source):
    """Report the number (and at --debug, the list) of outgoing changesets."""
    if repo.ui.verbose or source == 'bundle':
        repo.ui.status(_("%d changesets found\n") % len(nodes))
    if repo.ui.debugflag:
        repo.ui.debug("list of changesets:\n")
        for node in nodes:
            repo.ui.debug("%s\n" % hex(node))
932
937
def makechangegroup(repo, outgoing, version, source, fastpath=False,
                    bundlecaps=None):
    """Build a changegroup and wrap it in the matching unbundler object."""
    cgstream = makestream(repo, outgoing, version, source,
                          fastpath=fastpath, bundlecaps=bundlecaps)
    return getunbundler(version, util.chunkbuffer(cgstream), None,
                        {'clcount': len(outgoing.missing) })
939
944
def makestream(repo, outgoing, version, source, fastpath=False,
               bundlecaps=None):
    """Generate the raw changegroup data stream for ``outgoing``.

    Runs the 'preoutgoing' hook (which may abort the operation) and
    reports what was found before delegating generation to the
    version-specific bundler.
    """
    packer = getbundler(version, repo, bundlecaps=bundlecaps)

    repo = repo.unfiltered()
    missing = outgoing.missing
    heads = outgoing.missingheads
    heads.sort()
    # Take the linkrev fast path when explicitly told to, or when every
    # unfiltered head was requested - then we know the client will pull
    # all linkrevs anyway.
    uselinkrevfastpath = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, missing, source)
    return packer.generate(outgoing.common, missing, uselinkrevfastpath,
                           source)
958
963
def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    """Apply the filelog part of an incoming changegroup.

    Consumes filelog chunks from ``source``, adds their deltas to the
    matching revlogs, and checks off every node listed in ``needfiles``.
    Returns a ``(revisions, files)`` tuple of the totals added.
    """
    newrevs = 0
    filecount = 0
    for chunkdata in iter(source.filelogheader, {}):
        filecount += 1
        fname = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % fname)
        repo.ui.progress(_('files'), filecount, unit=_('files'),
                         total=expectedfiles)
        fl = repo.file(fname)
        oldlen = len(fl)
        try:
            if not fl.addgroup(source.deltaiter(), revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        newrevs += len(fl) - oldlen
        if fname in needfiles:
            wanted = needfiles[fname]
            # Every node we just added must have been expected; anything
            # else indicates a corrupt changegroup.
            for rev in xrange(oldlen, len(fl)):
                n = fl.node(rev)
                if n not in wanted:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
                wanted.remove(n)
            if not wanted:
                del needfiles[fname]
    repo.ui.progress(_('files'), None)

    # Anything still listed in needfiles must already exist locally.
    for fname, wanted in needfiles.iteritems():
        fl = repo.file(fname)
        for n in wanted:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (fname, hex(n)))

    return newrevs, filecount
General Comments 0
You need to be logged in to leave comments. Login now