@@ -1,1017 +1,1013 b''
1 | # changegroup.py - Mercurial changegroup manipulation functions |
|
1 | # changegroup.py - Mercurial changegroup manipulation functions | |
2 | # |
|
2 | # | |
3 | # Copyright 2006 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2006 Matt Mackall <mpm@selenic.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | from __future__ import absolute_import |
|
8 | from __future__ import absolute_import | |
9 |
|
9 | |||
10 | import os |
|
10 | import os | |
11 | import struct |
|
11 | import struct | |
12 | import weakref |
|
12 | import weakref | |
13 |
|
13 | |||
14 | from .i18n import _ |
|
14 | from .i18n import _ | |
15 | from .node import ( |
|
15 | from .node import ( | |
16 | hex, |
|
16 | hex, | |
17 | nullrev, |
|
17 | nullrev, | |
18 | short, |
|
18 | short, | |
19 | ) |
|
19 | ) | |
20 |
|
20 | |||
21 | from . import ( |
|
21 | from . import ( | |
22 | dagutil, |
|
22 | dagutil, | |
23 | error, |
|
23 | error, | |
24 | mdiff, |
|
24 | mdiff, | |
25 | phases, |
|
25 | phases, | |
26 | pycompat, |
|
26 | pycompat, | |
27 | util, |
|
27 | util, | |
28 | ) |
|
28 | ) | |
29 |
|
29 | |||
30 | from .utils import ( |
|
30 | from .utils import ( | |
31 | stringutil, |
|
31 | stringutil, | |
32 | ) |
|
32 | ) | |
33 |
|
33 | |||
34 | _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s" |
|
34 | _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s" | |
35 | _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s" |
|
35 | _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s" | |
36 | _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH" |
|
36 | _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH" | |
37 |
|
37 | |||
38 | LFS_REQUIREMENT = 'lfs' |
|
38 | LFS_REQUIREMENT = 'lfs' | |
39 |
|
39 | |||
40 | # When narrowing is finalized and no longer subject to format changes, |
|
40 | # When narrowing is finalized and no longer subject to format changes, | |
41 | # we should move this to just "narrow" or similar. |
|
41 | # we should move this to just "narrow" or similar. | |
42 | NARROW_REQUIREMENT = 'narrowhg-experimental' |
|
42 | NARROW_REQUIREMENT = 'narrowhg-experimental' | |
43 |
|
43 | |||
44 | readexactly = util.readexactly |
|
44 | readexactly = util.readexactly | |
45 |
|
45 | |||
46 | def getchunk(stream): |
|
46 | def getchunk(stream): | |
47 | """return the next chunk from stream as a string""" |
|
47 | """return the next chunk from stream as a string""" | |
48 | d = readexactly(stream, 4) |
|
48 | d = readexactly(stream, 4) | |
49 | l = struct.unpack(">l", d)[0] |
|
49 | l = struct.unpack(">l", d)[0] | |
50 | if l <= 4: |
|
50 | if l <= 4: | |
51 | if l: |
|
51 | if l: | |
52 | raise error.Abort(_("invalid chunk length %d") % l) |
|
52 | raise error.Abort(_("invalid chunk length %d") % l) | |
53 | return "" |
|
53 | return "" | |
54 | return readexactly(stream, l - 4) |
|
54 | return readexactly(stream, l - 4) | |
55 |
|
55 | |||
56 | def chunkheader(length): |
|
56 | def chunkheader(length): | |
57 | """return a changegroup chunk header (string)""" |
|
57 | """return a changegroup chunk header (string)""" | |
58 | return struct.pack(">l", length + 4) |
|
58 | return struct.pack(">l", length + 4) | |
59 |
|
59 | |||
60 | def closechunk(): |
|
60 | def closechunk(): | |
61 | """return a changegroup chunk header (string) for a zero-length chunk""" |
|
61 | """return a changegroup chunk header (string) for a zero-length chunk""" | |
62 | return struct.pack(">l", 0) |
|
62 | return struct.pack(">l", 0) | |
63 |
|
63 | |||
64 | def writechunks(ui, chunks, filename, vfs=None): |
|
64 | def writechunks(ui, chunks, filename, vfs=None): | |
65 | """Write chunks to a file and return its filename. |
|
65 | """Write chunks to a file and return its filename. | |
66 |
|
66 | |||
67 | The stream is assumed to be a bundle file. |
|
67 | The stream is assumed to be a bundle file. | |
68 | Existing files will not be overwritten. |
|
68 | Existing files will not be overwritten. | |
69 | If no filename is specified, a temporary file is created. |
|
69 | If no filename is specified, a temporary file is created. | |
70 | """ |
|
70 | """ | |
71 | fh = None |
|
71 | fh = None | |
72 | cleanup = None |
|
72 | cleanup = None | |
73 | try: |
|
73 | try: | |
74 | if filename: |
|
74 | if filename: | |
75 | if vfs: |
|
75 | if vfs: | |
76 | fh = vfs.open(filename, "wb") |
|
76 | fh = vfs.open(filename, "wb") | |
77 | else: |
|
77 | else: | |
78 | # Increase default buffer size because default is usually |
|
78 | # Increase default buffer size because default is usually | |
79 | # small (4k is common on Linux). |
|
79 | # small (4k is common on Linux). | |
80 | fh = open(filename, "wb", 131072) |
|
80 | fh = open(filename, "wb", 131072) | |
81 | else: |
|
81 | else: | |
82 | fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg") |
|
82 | fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg") | |
83 | fh = os.fdopen(fd, r"wb") |
|
83 | fh = os.fdopen(fd, r"wb") | |
84 | cleanup = filename |
|
84 | cleanup = filename | |
85 | for c in chunks: |
|
85 | for c in chunks: | |
86 | fh.write(c) |
|
86 | fh.write(c) | |
87 | cleanup = None |
|
87 | cleanup = None | |
88 | return filename |
|
88 | return filename | |
89 | finally: |
|
89 | finally: | |
90 | if fh is not None: |
|
90 | if fh is not None: | |
91 | fh.close() |
|
91 | fh.close() | |
92 | if cleanup is not None: |
|
92 | if cleanup is not None: | |
93 | if filename and vfs: |
|
93 | if filename and vfs: | |
94 | vfs.unlink(cleanup) |
|
94 | vfs.unlink(cleanup) | |
95 | else: |
|
95 | else: | |
96 | os.unlink(cleanup) |
|
96 | os.unlink(cleanup) | |
97 |
|
97 | |||
98 | class cg1unpacker(object): |
|
98 | class cg1unpacker(object): | |
99 | """Unpacker for cg1 changegroup streams. |
|
99 | """Unpacker for cg1 changegroup streams. | |
100 |
|
100 | |||
101 | A changegroup unpacker handles the framing of the revision data in |
|
101 | A changegroup unpacker handles the framing of the revision data in | |
102 | the wire format. Most consumers will want to use the apply() |
|
102 | the wire format. Most consumers will want to use the apply() | |
103 | method to add the changes from the changegroup to a repository. |
|
103 | method to add the changes from the changegroup to a repository. | |
104 |
|
104 | |||
105 | If you're forwarding a changegroup unmodified to another consumer, |
|
105 | If you're forwarding a changegroup unmodified to another consumer, | |
106 | use getchunks(), which returns an iterator of changegroup |
|
106 | use getchunks(), which returns an iterator of changegroup | |
107 | chunks. This is mostly useful for cases where you need to know the |
|
107 | chunks. This is mostly useful for cases where you need to know the | |
108 | data stream has ended by observing the end of the changegroup. |
|
108 | data stream has ended by observing the end of the changegroup. | |
109 |
|
109 | |||
110 | deltachunk() is useful only if you're applying delta data. Most |
|
110 | deltachunk() is useful only if you're applying delta data. Most | |
111 | consumers should prefer apply() instead. |
|
111 | consumers should prefer apply() instead. | |
112 |
|
112 | |||
113 | A few other public methods exist. Those are used only for |
|
113 | A few other public methods exist. Those are used only for | |
114 | bundlerepo and some debug commands - their use is discouraged. |
|
114 | bundlerepo and some debug commands - their use is discouraged. | |
115 | """ |
|
115 | """ | |
116 | deltaheader = _CHANGEGROUPV1_DELTA_HEADER |
|
116 | deltaheader = _CHANGEGROUPV1_DELTA_HEADER | |
117 | deltaheadersize = struct.calcsize(deltaheader) |
|
117 | deltaheadersize = struct.calcsize(deltaheader) | |
118 | version = '01' |
|
118 | version = '01' | |
119 | _grouplistcount = 1 # One list of files after the manifests |
|
119 | _grouplistcount = 1 # One list of files after the manifests | |
120 |
|
120 | |||
121 | def __init__(self, fh, alg, extras=None): |
|
121 | def __init__(self, fh, alg, extras=None): | |
122 | if alg is None: |
|
122 | if alg is None: | |
123 | alg = 'UN' |
|
123 | alg = 'UN' | |
124 | if alg not in util.compengines.supportedbundletypes: |
|
124 | if alg not in util.compengines.supportedbundletypes: | |
125 | raise error.Abort(_('unknown stream compression type: %s') |
|
125 | raise error.Abort(_('unknown stream compression type: %s') | |
126 | % alg) |
|
126 | % alg) | |
127 | if alg == 'BZ': |
|
127 | if alg == 'BZ': | |
128 | alg = '_truncatedBZ' |
|
128 | alg = '_truncatedBZ' | |
129 |
|
129 | |||
130 | compengine = util.compengines.forbundletype(alg) |
|
130 | compengine = util.compengines.forbundletype(alg) | |
131 | self._stream = compengine.decompressorreader(fh) |
|
131 | self._stream = compengine.decompressorreader(fh) | |
132 | self._type = alg |
|
132 | self._type = alg | |
133 | self.extras = extras or {} |
|
133 | self.extras = extras or {} | |
134 | self.callback = None |
|
134 | self.callback = None | |
135 |
|
135 | |||
136 | # These methods (compressed, read, seek, tell) all appear to only |
|
136 | # These methods (compressed, read, seek, tell) all appear to only | |
137 | # be used by bundlerepo, but it's a little hard to tell. |
|
137 | # be used by bundlerepo, but it's a little hard to tell. | |
138 | def compressed(self): |
|
138 | def compressed(self): | |
139 | return self._type is not None and self._type != 'UN' |
|
139 | return self._type is not None and self._type != 'UN' | |
140 | def read(self, l): |
|
140 | def read(self, l): | |
141 | return self._stream.read(l) |
|
141 | return self._stream.read(l) | |
142 | def seek(self, pos): |
|
142 | def seek(self, pos): | |
143 | return self._stream.seek(pos) |
|
143 | return self._stream.seek(pos) | |
144 | def tell(self): |
|
144 | def tell(self): | |
145 | return self._stream.tell() |
|
145 | return self._stream.tell() | |
146 | def close(self): |
|
146 | def close(self): | |
147 | return self._stream.close() |
|
147 | return self._stream.close() | |
148 |
|
148 | |||
149 | def _chunklength(self): |
|
149 | def _chunklength(self): | |
150 | d = readexactly(self._stream, 4) |
|
150 | d = readexactly(self._stream, 4) | |
151 | l = struct.unpack(">l", d)[0] |
|
151 | l = struct.unpack(">l", d)[0] | |
152 | if l <= 4: |
|
152 | if l <= 4: | |
153 | if l: |
|
153 | if l: | |
154 | raise error.Abort(_("invalid chunk length %d") % l) |
|
154 | raise error.Abort(_("invalid chunk length %d") % l) | |
155 | return 0 |
|
155 | return 0 | |
156 | if self.callback: |
|
156 | if self.callback: | |
157 | self.callback() |
|
157 | self.callback() | |
158 | return l - 4 |
|
158 | return l - 4 | |
159 |
|
159 | |||
160 | def changelogheader(self): |
|
160 | def changelogheader(self): | |
161 | """v10 does not have a changelog header chunk""" |
|
161 | """v10 does not have a changelog header chunk""" | |
162 | return {} |
|
162 | return {} | |
163 |
|
163 | |||
164 | def manifestheader(self): |
|
164 | def manifestheader(self): | |
165 | """v10 does not have a manifest header chunk""" |
|
165 | """v10 does not have a manifest header chunk""" | |
166 | return {} |
|
166 | return {} | |
167 |
|
167 | |||
168 | def filelogheader(self): |
|
168 | def filelogheader(self): | |
169 | """return the header of the filelogs chunk, v10 only has the filename""" |
|
169 | """return the header of the filelogs chunk, v10 only has the filename""" | |
170 | l = self._chunklength() |
|
170 | l = self._chunklength() | |
171 | if not l: |
|
171 | if not l: | |
172 | return {} |
|
172 | return {} | |
173 | fname = readexactly(self._stream, l) |
|
173 | fname = readexactly(self._stream, l) | |
174 | return {'filename': fname} |
|
174 | return {'filename': fname} | |
175 |
|
175 | |||
176 | def _deltaheader(self, headertuple, prevnode): |
|
176 | def _deltaheader(self, headertuple, prevnode): | |
177 | node, p1, p2, cs = headertuple |
|
177 | node, p1, p2, cs = headertuple | |
178 | if prevnode is None: |
|
178 | if prevnode is None: | |
179 | deltabase = p1 |
|
179 | deltabase = p1 | |
180 | else: |
|
180 | else: | |
181 | deltabase = prevnode |
|
181 | deltabase = prevnode | |
182 | flags = 0 |
|
182 | flags = 0 | |
183 | return node, p1, p2, deltabase, cs, flags |
|
183 | return node, p1, p2, deltabase, cs, flags | |
184 |
|
184 | |||
185 | def deltachunk(self, prevnode): |
|
185 | def deltachunk(self, prevnode): | |
186 | l = self._chunklength() |
|
186 | l = self._chunklength() | |
187 | if not l: |
|
187 | if not l: | |
188 | return {} |
|
188 | return {} | |
189 | headerdata = readexactly(self._stream, self.deltaheadersize) |
|
189 | headerdata = readexactly(self._stream, self.deltaheadersize) | |
190 | header = struct.unpack(self.deltaheader, headerdata) |
|
190 | header = struct.unpack(self.deltaheader, headerdata) | |
191 | delta = readexactly(self._stream, l - self.deltaheadersize) |
|
191 | delta = readexactly(self._stream, l - self.deltaheadersize) | |
192 | node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode) |
|
192 | node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode) | |
193 | return (node, p1, p2, cs, deltabase, delta, flags) |
|
193 | return (node, p1, p2, cs, deltabase, delta, flags) | |
194 |
|
194 | |||
195 | def getchunks(self): |
|
195 | def getchunks(self): | |
196 | """returns all the chunks contains in the bundle |
|
196 | """returns all the chunks contains in the bundle | |
197 |
|
197 | |||
198 | Used when you need to forward the binary stream to a file or another |
|
198 | Used when you need to forward the binary stream to a file or another | |
199 | network API. To do so, it parse the changegroup data, otherwise it will |
|
199 | network API. To do so, it parse the changegroup data, otherwise it will | |
200 | block in case of sshrepo because it don't know the end of the stream. |
|
200 | block in case of sshrepo because it don't know the end of the stream. | |
201 | """ |
|
201 | """ | |
202 | # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog, |
|
202 | # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog, | |
203 | # and a list of filelogs. For changegroup 3, we expect 4 parts: |
|
203 | # and a list of filelogs. For changegroup 3, we expect 4 parts: | |
204 | # changelog, manifestlog, a list of tree manifestlogs, and a list of |
|
204 | # changelog, manifestlog, a list of tree manifestlogs, and a list of | |
205 | # filelogs. |
|
205 | # filelogs. | |
206 | # |
|
206 | # | |
207 | # Changelog and manifestlog parts are terminated with empty chunks. The |
|
207 | # Changelog and manifestlog parts are terminated with empty chunks. The | |
208 | # tree and file parts are a list of entry sections. Each entry section |
|
208 | # tree and file parts are a list of entry sections. Each entry section | |
209 | # is a series of chunks terminating in an empty chunk. The list of these |
|
209 | # is a series of chunks terminating in an empty chunk. The list of these | |
210 | # entry sections is terminated in yet another empty chunk, so we know |
|
210 | # entry sections is terminated in yet another empty chunk, so we know | |
211 | # we've reached the end of the tree/file list when we reach an empty |
|
211 | # we've reached the end of the tree/file list when we reach an empty | |
212 | # chunk that was proceeded by no non-empty chunks. |
|
212 | # chunk that was proceeded by no non-empty chunks. | |
213 |
|
213 | |||
214 | parts = 0 |
|
214 | parts = 0 | |
215 | while parts < 2 + self._grouplistcount: |
|
215 | while parts < 2 + self._grouplistcount: | |
216 | noentries = True |
|
216 | noentries = True | |
217 | while True: |
|
217 | while True: | |
218 | chunk = getchunk(self) |
|
218 | chunk = getchunk(self) | |
219 | if not chunk: |
|
219 | if not chunk: | |
220 | # The first two empty chunks represent the end of the |
|
220 | # The first two empty chunks represent the end of the | |
221 | # changelog and the manifestlog portions. The remaining |
|
221 | # changelog and the manifestlog portions. The remaining | |
222 | # empty chunks represent either A) the end of individual |
|
222 | # empty chunks represent either A) the end of individual | |
223 | # tree or file entries in the file list, or B) the end of |
|
223 | # tree or file entries in the file list, or B) the end of | |
224 | # the entire list. It's the end of the entire list if there |
|
224 | # the entire list. It's the end of the entire list if there | |
225 | # were no entries (i.e. noentries is True). |
|
225 | # were no entries (i.e. noentries is True). | |
226 | if parts < 2: |
|
226 | if parts < 2: | |
227 | parts += 1 |
|
227 | parts += 1 | |
228 | elif noentries: |
|
228 | elif noentries: | |
229 | parts += 1 |
|
229 | parts += 1 | |
230 | break |
|
230 | break | |
231 | noentries = False |
|
231 | noentries = False | |
232 | yield chunkheader(len(chunk)) |
|
232 | yield chunkheader(len(chunk)) | |
233 | pos = 0 |
|
233 | pos = 0 | |
234 | while pos < len(chunk): |
|
234 | while pos < len(chunk): | |
235 | next = pos + 2**20 |
|
235 | next = pos + 2**20 | |
236 | yield chunk[pos:next] |
|
236 | yield chunk[pos:next] | |
237 | pos = next |
|
237 | pos = next | |
238 | yield closechunk() |
|
238 | yield closechunk() | |
239 |
|
239 | |||
240 | def _unpackmanifests(self, repo, revmap, trp, prog): |
|
240 | def _unpackmanifests(self, repo, revmap, trp, prog): | |
241 | self.callback = prog.increment |
|
241 | self.callback = prog.increment | |
242 | # no need to check for empty manifest group here: |
|
242 | # no need to check for empty manifest group here: | |
243 | # if the result of the merge of 1 and 2 is the same in 3 and 4, |
|
243 | # if the result of the merge of 1 and 2 is the same in 3 and 4, | |
244 | # no new manifest will be created and the manifest group will |
|
244 | # no new manifest will be created and the manifest group will | |
245 | # be empty during the pull |
|
245 | # be empty during the pull | |
246 | self.manifestheader() |
|
246 | self.manifestheader() | |
247 | deltas = self.deltaiter() |
|
247 | deltas = self.deltaiter() | |
248 | repo.manifestlog._revlog.addgroup(deltas, revmap, trp) |
|
248 | repo.manifestlog._revlog.addgroup(deltas, revmap, trp) | |
249 | prog.complete() |
|
249 | prog.complete() | |
250 | self.callback = None |
|
250 | self.callback = None | |
251 |
|
251 | |||
252 | def apply(self, repo, tr, srctype, url, targetphase=phases.draft, |
|
252 | def apply(self, repo, tr, srctype, url, targetphase=phases.draft, | |
253 | expectedtotal=None): |
|
253 | expectedtotal=None): | |
254 | """Add the changegroup returned by source.read() to this repo. |
|
254 | """Add the changegroup returned by source.read() to this repo. | |
255 | srctype is a string like 'push', 'pull', or 'unbundle'. url is |
|
255 | srctype is a string like 'push', 'pull', or 'unbundle'. url is | |
256 | the URL of the repo where this changegroup is coming from. |
|
256 | the URL of the repo where this changegroup is coming from. | |
257 |
|
257 | |||
258 | Return an integer summarizing the change to this repo: |
|
258 | Return an integer summarizing the change to this repo: | |
259 | - nothing changed or no source: 0 |
|
259 | - nothing changed or no source: 0 | |
260 | - more heads than before: 1+added heads (2..n) |
|
260 | - more heads than before: 1+added heads (2..n) | |
261 | - fewer heads than before: -1-removed heads (-2..-n) |
|
261 | - fewer heads than before: -1-removed heads (-2..-n) | |
262 | - number of heads stays the same: 1 |
|
262 | - number of heads stays the same: 1 | |
263 | """ |
|
263 | """ | |
264 | repo = repo.unfiltered() |
|
264 | repo = repo.unfiltered() | |
265 | def csmap(x): |
|
265 | def csmap(x): | |
266 | repo.ui.debug("add changeset %s\n" % short(x)) |
|
266 | repo.ui.debug("add changeset %s\n" % short(x)) | |
267 | return len(cl) |
|
267 | return len(cl) | |
268 |
|
268 | |||
269 | def revmap(x): |
|
269 | def revmap(x): | |
270 | return cl.rev(x) |
|
270 | return cl.rev(x) | |
271 |
|
271 | |||
272 | changesets = files = revisions = 0 |
|
272 | changesets = files = revisions = 0 | |
273 |
|
273 | |||
274 | try: |
|
274 | try: | |
275 | # The transaction may already carry source information. In this |
|
275 | # The transaction may already carry source information. In this | |
276 | # case we use the top level data. We overwrite the argument |
|
276 | # case we use the top level data. We overwrite the argument | |
277 | # because we need to use the top level value (if they exist) |
|
277 | # because we need to use the top level value (if they exist) | |
278 | # in this function. |
|
278 | # in this function. | |
279 | srctype = tr.hookargs.setdefault('source', srctype) |
|
279 | srctype = tr.hookargs.setdefault('source', srctype) | |
280 | url = tr.hookargs.setdefault('url', url) |
|
280 | url = tr.hookargs.setdefault('url', url) | |
281 | repo.hook('prechangegroup', |
|
281 | repo.hook('prechangegroup', | |
282 | throw=True, **pycompat.strkwargs(tr.hookargs)) |
|
282 | throw=True, **pycompat.strkwargs(tr.hookargs)) | |
283 |
|
283 | |||
284 | # write changelog data to temp files so concurrent readers |
|
284 | # write changelog data to temp files so concurrent readers | |
285 | # will not see an inconsistent view |
|
285 | # will not see an inconsistent view | |
286 | cl = repo.changelog |
|
286 | cl = repo.changelog | |
287 | cl.delayupdate(tr) |
|
287 | cl.delayupdate(tr) | |
288 | oldheads = set(cl.heads()) |
|
288 | oldheads = set(cl.heads()) | |
289 |
|
289 | |||
290 | trp = weakref.proxy(tr) |
|
290 | trp = weakref.proxy(tr) | |
291 | # pull off the changeset group |
|
291 | # pull off the changeset group | |
292 | repo.ui.status(_("adding changesets\n")) |
|
292 | repo.ui.status(_("adding changesets\n")) | |
293 | clstart = len(cl) |
|
293 | clstart = len(cl) | |
294 | progress = repo.ui.makeprogress(_('changesets'), unit=_('chunks'), |
|
294 | progress = repo.ui.makeprogress(_('changesets'), unit=_('chunks'), | |
295 | total=expectedtotal) |
|
295 | total=expectedtotal) | |
296 | self.callback = progress.increment |
|
296 | self.callback = progress.increment | |
297 |
|
297 | |||
298 | efiles = set() |
|
298 | efiles = set() | |
299 | def onchangelog(cl, node): |
|
299 | def onchangelog(cl, node): | |
300 | efiles.update(cl.readfiles(node)) |
|
300 | efiles.update(cl.readfiles(node)) | |
301 |
|
301 | |||
302 | self.changelogheader() |
|
302 | self.changelogheader() | |
303 | deltas = self.deltaiter() |
|
303 | deltas = self.deltaiter() | |
304 | cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog) |
|
304 | cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog) | |
305 | efiles = len(efiles) |
|
305 | efiles = len(efiles) | |
306 |
|
306 | |||
307 | if not cgnodes: |
|
307 | if not cgnodes: | |
308 | repo.ui.develwarn('applied empty changegroup', |
|
308 | repo.ui.develwarn('applied empty changegroup', | |
309 | config='warn-empty-changegroup') |
|
309 | config='warn-empty-changegroup') | |
310 | clend = len(cl) |
|
310 | clend = len(cl) | |
311 | changesets = clend - clstart |
|
311 | changesets = clend - clstart | |
312 | progress.complete() |
|
312 | progress.complete() | |
313 | self.callback = None |
|
313 | self.callback = None | |
314 |
|
314 | |||
315 | # pull off the manifest group |
|
315 | # pull off the manifest group | |
316 | repo.ui.status(_("adding manifests\n")) |
|
316 | repo.ui.status(_("adding manifests\n")) | |
317 | # We know that we'll never have more manifests than we had |
|
317 | # We know that we'll never have more manifests than we had | |
318 | # changesets. |
|
318 | # changesets. | |
319 | progress = repo.ui.makeprogress(_('manifests'), unit=_('chunks'), |
|
319 | progress = repo.ui.makeprogress(_('manifests'), unit=_('chunks'), | |
320 | total=changesets) |
|
320 | total=changesets) | |
321 | self._unpackmanifests(repo, revmap, trp, progress) |
|
321 | self._unpackmanifests(repo, revmap, trp, progress) | |
322 |
|
322 | |||
323 | needfiles = {} |
|
323 | needfiles = {} | |
324 | if repo.ui.configbool('server', 'validate'): |
|
324 | if repo.ui.configbool('server', 'validate'): | |
325 | cl = repo.changelog |
|
325 | cl = repo.changelog | |
326 | ml = repo.manifestlog |
|
326 | ml = repo.manifestlog | |
327 | # validate incoming csets have their manifests |
|
327 | # validate incoming csets have their manifests | |
328 | for cset in xrange(clstart, clend): |
|
328 | for cset in xrange(clstart, clend): | |
329 | mfnode = cl.changelogrevision(cset).manifest |
|
329 | mfnode = cl.changelogrevision(cset).manifest | |
330 | mfest = ml[mfnode].readdelta() |
|
330 | mfest = ml[mfnode].readdelta() | |
331 | # store file cgnodes we must see |
|
331 | # store file cgnodes we must see | |
332 | for f, n in mfest.iteritems(): |
|
332 | for f, n in mfest.iteritems(): | |
333 | needfiles.setdefault(f, set()).add(n) |
|
333 | needfiles.setdefault(f, set()).add(n) | |
334 |
|
334 | |||
335 | # process the files |
|
335 | # process the files | |
336 | repo.ui.status(_("adding file changes\n")) |
|
336 | repo.ui.status(_("adding file changes\n")) | |
337 | newrevs, newfiles = _addchangegroupfiles( |
|
337 | newrevs, newfiles = _addchangegroupfiles( | |
338 | repo, self, revmap, trp, efiles, needfiles) |
|
338 | repo, self, revmap, trp, efiles, needfiles) | |
339 | revisions += newrevs |
|
339 | revisions += newrevs | |
340 | files += newfiles |
|
340 | files += newfiles | |
341 |
|
341 | |||
342 | deltaheads = 0 |
|
342 | deltaheads = 0 | |
343 | if oldheads: |
|
343 | if oldheads: | |
344 | heads = cl.heads() |
|
344 | heads = cl.heads() | |
345 | deltaheads = len(heads) - len(oldheads) |
|
345 | deltaheads = len(heads) - len(oldheads) | |
346 | for h in heads: |
|
346 | for h in heads: | |
347 | if h not in oldheads and repo[h].closesbranch(): |
|
347 | if h not in oldheads and repo[h].closesbranch(): | |
348 | deltaheads -= 1 |
|
348 | deltaheads -= 1 | |
349 | htext = "" |
|
349 | htext = "" | |
350 | if deltaheads: |
|
350 | if deltaheads: | |
351 | htext = _(" (%+d heads)") % deltaheads |
|
351 | htext = _(" (%+d heads)") % deltaheads | |
352 |
|
352 | |||
353 | repo.ui.status(_("added %d changesets" |
|
353 | repo.ui.status(_("added %d changesets" | |
354 | " with %d changes to %d files%s\n") |
|
354 | " with %d changes to %d files%s\n") | |
355 | % (changesets, revisions, files, htext)) |
|
355 | % (changesets, revisions, files, htext)) | |
356 | repo.invalidatevolatilesets() |
|
356 | repo.invalidatevolatilesets() | |
357 |
|
357 | |||
358 | if changesets > 0: |
|
358 | if changesets > 0: | |
359 | if 'node' not in tr.hookargs: |
|
359 | if 'node' not in tr.hookargs: | |
360 | tr.hookargs['node'] = hex(cl.node(clstart)) |
|
360 | tr.hookargs['node'] = hex(cl.node(clstart)) | |
361 | tr.hookargs['node_last'] = hex(cl.node(clend - 1)) |
|
361 | tr.hookargs['node_last'] = hex(cl.node(clend - 1)) | |
362 | hookargs = dict(tr.hookargs) |
|
362 | hookargs = dict(tr.hookargs) | |
363 | else: |
|
363 | else: | |
364 | hookargs = dict(tr.hookargs) |
|
364 | hookargs = dict(tr.hookargs) | |
365 | hookargs['node'] = hex(cl.node(clstart)) |
|
365 | hookargs['node'] = hex(cl.node(clstart)) | |
366 | hookargs['node_last'] = hex(cl.node(clend - 1)) |
|
366 | hookargs['node_last'] = hex(cl.node(clend - 1)) | |
367 | repo.hook('pretxnchangegroup', |
|
367 | repo.hook('pretxnchangegroup', | |
368 | throw=True, **pycompat.strkwargs(hookargs)) |
|
368 | throw=True, **pycompat.strkwargs(hookargs)) | |
369 |
|
369 | |||
370 | added = [cl.node(r) for r in xrange(clstart, clend)] |
|
370 | added = [cl.node(r) for r in xrange(clstart, clend)] | |
371 | phaseall = None |
|
371 | phaseall = None | |
372 | if srctype in ('push', 'serve'): |
|
372 | if srctype in ('push', 'serve'): | |
373 | # Old servers can not push the boundary themselves. |
|
373 | # Old servers can not push the boundary themselves. | |
374 | # New servers won't push the boundary if changeset already |
|
374 | # New servers won't push the boundary if changeset already | |
375 | # exists locally as secret |
|
375 | # exists locally as secret | |
376 | # |
|
376 | # | |
377 | # We should not use added here but the list of all change in |
|
377 | # We should not use added here but the list of all change in | |
378 | # the bundle |
|
378 | # the bundle | |
379 | if repo.publishing(): |
|
379 | if repo.publishing(): | |
380 | targetphase = phaseall = phases.public |
|
380 | targetphase = phaseall = phases.public | |
381 | else: |
|
381 | else: | |
382 | # closer target phase computation |
|
382 | # closer target phase computation | |
383 |
|
383 | |||
384 | # Those changesets have been pushed from the |
|
384 | # Those changesets have been pushed from the | |
385 | # outside, their phases are going to be pushed |
|
385 | # outside, their phases are going to be pushed | |
386 | # alongside. Therefor `targetphase` is |
|
386 | # alongside. Therefor `targetphase` is | |
387 | # ignored. |
|
387 | # ignored. | |
388 | targetphase = phaseall = phases.draft |
|
388 | targetphase = phaseall = phases.draft | |
389 | if added: |
|
389 | if added: | |
390 | phases.registernew(repo, tr, targetphase, added) |
|
390 | phases.registernew(repo, tr, targetphase, added) | |
391 | if phaseall is not None: |
|
391 | if phaseall is not None: | |
392 | phases.advanceboundary(repo, tr, phaseall, cgnodes) |
|
392 | phases.advanceboundary(repo, tr, phaseall, cgnodes) | |
393 |
|
393 | |||
394 | if changesets > 0: |
|
394 | if changesets > 0: | |
395 |
|
395 | |||
396 | def runhooks(): |
|
396 | def runhooks(): | |
397 | # These hooks run when the lock releases, not when the |
|
397 | # These hooks run when the lock releases, not when the | |
398 | # transaction closes. So it's possible for the changelog |
|
398 | # transaction closes. So it's possible for the changelog | |
399 | # to have changed since we last saw it. |
|
399 | # to have changed since we last saw it. | |
400 | if clstart >= len(repo): |
|
400 | if clstart >= len(repo): | |
401 | return |
|
401 | return | |
402 |
|
402 | |||
403 | repo.hook("changegroup", **pycompat.strkwargs(hookargs)) |
|
403 | repo.hook("changegroup", **pycompat.strkwargs(hookargs)) | |
404 |
|
404 | |||
405 | for n in added: |
|
405 | for n in added: | |
406 | args = hookargs.copy() |
|
406 | args = hookargs.copy() | |
407 | args['node'] = hex(n) |
|
407 | args['node'] = hex(n) | |
408 | del args['node_last'] |
|
408 | del args['node_last'] | |
409 | repo.hook("incoming", **pycompat.strkwargs(args)) |
|
409 | repo.hook("incoming", **pycompat.strkwargs(args)) | |
410 |
|
410 | |||
411 | newheads = [h for h in repo.heads() |
|
411 | newheads = [h for h in repo.heads() | |
412 | if h not in oldheads] |
|
412 | if h not in oldheads] | |
413 | repo.ui.log("incoming", |
|
413 | repo.ui.log("incoming", | |
414 | "%d incoming changes - new heads: %s\n", |
|
414 | "%d incoming changes - new heads: %s\n", | |
415 | len(added), |
|
415 | len(added), | |
416 | ', '.join([hex(c[:6]) for c in newheads])) |
|
416 | ', '.join([hex(c[:6]) for c in newheads])) | |
417 |
|
417 | |||
418 | tr.addpostclose('changegroup-runhooks-%020i' % clstart, |
|
418 | tr.addpostclose('changegroup-runhooks-%020i' % clstart, | |
419 | lambda tr: repo._afterlock(runhooks)) |
|
419 | lambda tr: repo._afterlock(runhooks)) | |
420 | finally: |
|
420 | finally: | |
421 | repo.ui.flush() |
|
421 | repo.ui.flush() | |
422 | # never return 0 here: |
|
422 | # never return 0 here: | |
423 | if deltaheads < 0: |
|
423 | if deltaheads < 0: | |
424 | ret = deltaheads - 1 |
|
424 | ret = deltaheads - 1 | |
425 | else: |
|
425 | else: | |
426 | ret = deltaheads + 1 |
|
426 | ret = deltaheads + 1 | |
427 | return ret |
|
427 | return ret | |
428 |
|
428 | |||
429 | def deltaiter(self): |
|
429 | def deltaiter(self): | |
430 | """ |
|
430 | """ | |
431 | returns an iterator of the deltas in this changegroup |
|
431 | returns an iterator of the deltas in this changegroup | |
432 |
|
432 | |||
433 | Useful for passing to the underlying storage system to be stored. |
|
433 | Useful for passing to the underlying storage system to be stored. | |
434 | """ |
|
434 | """ | |
435 | chain = None |
|
435 | chain = None | |
436 | for chunkdata in iter(lambda: self.deltachunk(chain), {}): |
|
436 | for chunkdata in iter(lambda: self.deltachunk(chain), {}): | |
437 | # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags) |
|
437 | # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags) | |
438 | yield chunkdata |
|
438 | yield chunkdata | |
439 | chain = chunkdata[0] |
|
439 | chain = chunkdata[0] | |
440 |
|
440 | |||
441 | class cg2unpacker(cg1unpacker): |
|
441 | class cg2unpacker(cg1unpacker): | |
442 | """Unpacker for cg2 streams. |
|
442 | """Unpacker for cg2 streams. | |
443 |
|
443 | |||
444 | cg2 streams add support for generaldelta, so the delta header |
|
444 | cg2 streams add support for generaldelta, so the delta header | |
445 | format is slightly different. All other features about the data |
|
445 | format is slightly different. All other features about the data | |
446 | remain the same. |
|
446 | remain the same. | |
447 | """ |
|
447 | """ | |
448 | deltaheader = _CHANGEGROUPV2_DELTA_HEADER |
|
448 | deltaheader = _CHANGEGROUPV2_DELTA_HEADER | |
449 | deltaheadersize = struct.calcsize(deltaheader) |
|
449 | deltaheadersize = struct.calcsize(deltaheader) | |
450 | version = '02' |
|
450 | version = '02' | |
451 |
|
451 | |||
452 | def _deltaheader(self, headertuple, prevnode): |
|
452 | def _deltaheader(self, headertuple, prevnode): | |
453 | node, p1, p2, deltabase, cs = headertuple |
|
453 | node, p1, p2, deltabase, cs = headertuple | |
454 | flags = 0 |
|
454 | flags = 0 | |
455 | return node, p1, p2, deltabase, cs, flags |
|
455 | return node, p1, p2, deltabase, cs, flags | |
456 |
|
456 | |||
457 | class cg3unpacker(cg2unpacker): |
|
457 | class cg3unpacker(cg2unpacker): | |
458 | """Unpacker for cg3 streams. |
|
458 | """Unpacker for cg3 streams. | |
459 |
|
459 | |||
460 | cg3 streams add support for exchanging treemanifests and revlog |
|
460 | cg3 streams add support for exchanging treemanifests and revlog | |
461 | flags. It adds the revlog flags to the delta header and an empty chunk |
|
461 | flags. It adds the revlog flags to the delta header and an empty chunk | |
462 | separating manifests and files. |
|
462 | separating manifests and files. | |
463 | """ |
|
463 | """ | |
464 | deltaheader = _CHANGEGROUPV3_DELTA_HEADER |
|
464 | deltaheader = _CHANGEGROUPV3_DELTA_HEADER | |
465 | deltaheadersize = struct.calcsize(deltaheader) |
|
465 | deltaheadersize = struct.calcsize(deltaheader) | |
466 | version = '03' |
|
466 | version = '03' | |
467 | _grouplistcount = 2 # One list of manifests and one list of files |
|
467 | _grouplistcount = 2 # One list of manifests and one list of files | |
468 |
|
468 | |||
469 | def _deltaheader(self, headertuple, prevnode): |
|
469 | def _deltaheader(self, headertuple, prevnode): | |
470 | node, p1, p2, deltabase, cs, flags = headertuple |
|
470 | node, p1, p2, deltabase, cs, flags = headertuple | |
471 | return node, p1, p2, deltabase, cs, flags |
|
471 | return node, p1, p2, deltabase, cs, flags | |
472 |
|
472 | |||
473 | def _unpackmanifests(self, repo, revmap, trp, prog): |
|
473 | def _unpackmanifests(self, repo, revmap, trp, prog): | |
474 | super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog) |
|
474 | super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog) | |
475 | for chunkdata in iter(self.filelogheader, {}): |
|
475 | for chunkdata in iter(self.filelogheader, {}): | |
476 | # If we get here, there are directory manifests in the changegroup |
|
476 | # If we get here, there are directory manifests in the changegroup | |
477 | d = chunkdata["filename"] |
|
477 | d = chunkdata["filename"] | |
478 | repo.ui.debug("adding %s revisions\n" % d) |
|
478 | repo.ui.debug("adding %s revisions\n" % d) | |
479 | dirlog = repo.manifestlog._revlog.dirlog(d) |
|
479 | dirlog = repo.manifestlog._revlog.dirlog(d) | |
480 | deltas = self.deltaiter() |
|
480 | deltas = self.deltaiter() | |
481 | if not dirlog.addgroup(deltas, revmap, trp): |
|
481 | if not dirlog.addgroup(deltas, revmap, trp): | |
482 | raise error.Abort(_("received dir revlog group is empty")) |
|
482 | raise error.Abort(_("received dir revlog group is empty")) | |
483 |
|
483 | |||
484 | class headerlessfixup(object): |
|
484 | class headerlessfixup(object): | |
485 | def __init__(self, fh, h): |
|
485 | def __init__(self, fh, h): | |
486 | self._h = h |
|
486 | self._h = h | |
487 | self._fh = fh |
|
487 | self._fh = fh | |
488 | def read(self, n): |
|
488 | def read(self, n): | |
489 | if self._h: |
|
489 | if self._h: | |
490 | d, self._h = self._h[:n], self._h[n:] |
|
490 | d, self._h = self._h[:n], self._h[n:] | |
491 | if len(d) < n: |
|
491 | if len(d) < n: | |
492 | d += readexactly(self._fh, n - len(d)) |
|
492 | d += readexactly(self._fh, n - len(d)) | |
493 | return d |
|
493 | return d | |
494 | return readexactly(self._fh, n) |
|
494 | return readexactly(self._fh, n) | |
495 |
|
495 | |||
496 | class cg1packer(object): |
|
496 | class cg1packer(object): | |
497 | deltaheader = _CHANGEGROUPV1_DELTA_HEADER |
|
497 | deltaheader = _CHANGEGROUPV1_DELTA_HEADER | |
498 | version = '01' |
|
498 | version = '01' | |
499 | def __init__(self, repo, bundlecaps=None): |
|
499 | def __init__(self, repo, bundlecaps=None): | |
500 | """Given a source repo, construct a bundler. |
|
500 | """Given a source repo, construct a bundler. | |
501 |
|
501 | |||
502 | bundlecaps is optional and can be used to specify the set of |
|
502 | bundlecaps is optional and can be used to specify the set of | |
503 | capabilities which can be used to build the bundle. While bundlecaps is |
|
503 | capabilities which can be used to build the bundle. While bundlecaps is | |
504 | unused in core Mercurial, extensions rely on this feature to communicate |
|
504 | unused in core Mercurial, extensions rely on this feature to communicate | |
505 | capabilities to customize the changegroup packer. |
|
505 | capabilities to customize the changegroup packer. | |
506 | """ |
|
506 | """ | |
507 | # Set of capabilities we can use to build the bundle. |
|
507 | # Set of capabilities we can use to build the bundle. | |
508 | if bundlecaps is None: |
|
508 | if bundlecaps is None: | |
509 | bundlecaps = set() |
|
509 | bundlecaps = set() | |
510 | self._bundlecaps = bundlecaps |
|
510 | self._bundlecaps = bundlecaps | |
511 | # experimental config: bundle.reorder |
|
511 | # experimental config: bundle.reorder | |
512 | reorder = repo.ui.config('bundle', 'reorder') |
|
512 | reorder = repo.ui.config('bundle', 'reorder') | |
513 | if reorder == 'auto': |
|
513 | if reorder == 'auto': | |
514 | reorder = None |
|
514 | reorder = None | |
515 | else: |
|
515 | else: | |
516 | reorder = stringutil.parsebool(reorder) |
|
516 | reorder = stringutil.parsebool(reorder) | |
517 | self._repo = repo |
|
517 | self._repo = repo | |
518 | self._reorder = reorder |
|
518 | self._reorder = reorder | |
519 | self._progress = repo.ui.progress |
|
|||
520 | if self._repo.ui.verbose and not self._repo.ui.debugflag: |
|
519 | if self._repo.ui.verbose and not self._repo.ui.debugflag: | |
521 | self._verbosenote = self._repo.ui.note |
|
520 | self._verbosenote = self._repo.ui.note | |
522 | else: |
|
521 | else: | |
523 | self._verbosenote = lambda s: None |
|
522 | self._verbosenote = lambda s: None | |
524 |
|
523 | |||
525 | def close(self): |
|
524 | def close(self): | |
526 | return closechunk() |
|
525 | return closechunk() | |
527 |
|
526 | |||
528 | def fileheader(self, fname): |
|
527 | def fileheader(self, fname): | |
529 | return chunkheader(len(fname)) + fname |
|
528 | return chunkheader(len(fname)) + fname | |
530 |
|
529 | |||
531 | # Extracted both for clarity and for overriding in extensions. |
|
530 | # Extracted both for clarity and for overriding in extensions. | |
532 | def _sortgroup(self, revlog, nodelist, lookup): |
|
531 | def _sortgroup(self, revlog, nodelist, lookup): | |
533 | """Sort nodes for change group and turn them into revnums.""" |
|
532 | """Sort nodes for change group and turn them into revnums.""" | |
534 | # for generaldelta revlogs, we linearize the revs; this will both be |
|
533 | # for generaldelta revlogs, we linearize the revs; this will both be | |
535 | # much quicker and generate a much smaller bundle |
|
534 | # much quicker and generate a much smaller bundle | |
536 | if (revlog._generaldelta and self._reorder is None) or self._reorder: |
|
535 | if (revlog._generaldelta and self._reorder is None) or self._reorder: | |
537 | dag = dagutil.revlogdag(revlog) |
|
536 | dag = dagutil.revlogdag(revlog) | |
538 | return dag.linearize(set(revlog.rev(n) for n in nodelist)) |
|
537 | return dag.linearize(set(revlog.rev(n) for n in nodelist)) | |
539 | else: |
|
538 | else: | |
540 | return sorted([revlog.rev(n) for n in nodelist]) |
|
539 | return sorted([revlog.rev(n) for n in nodelist]) | |
541 |
|
540 | |||
542 | def group(self, nodelist, revlog, lookup, units=None): |
|
541 | def group(self, nodelist, revlog, lookup, units=None): | |
543 | """Calculate a delta group, yielding a sequence of changegroup chunks |
|
542 | """Calculate a delta group, yielding a sequence of changegroup chunks | |
544 | (strings). |
|
543 | (strings). | |
545 |
|
544 | |||
546 | Given a list of changeset revs, return a set of deltas and |
|
545 | Given a list of changeset revs, return a set of deltas and | |
547 | metadata corresponding to nodes. The first delta is |
|
546 | metadata corresponding to nodes. The first delta is | |
548 | first parent(nodelist[0]) -> nodelist[0], the receiver is |
|
547 | first parent(nodelist[0]) -> nodelist[0], the receiver is | |
549 | guaranteed to have this parent as it has all history before |
|
548 | guaranteed to have this parent as it has all history before | |
550 | these changesets. In the case firstparent is nullrev the |
|
549 | these changesets. In the case firstparent is nullrev the | |
551 | changegroup starts with a full revision. |
|
550 | changegroup starts with a full revision. | |
552 |
|
551 | |||
553 | If units is not None, progress detail will be generated, units specifies |
|
552 | If units is not None, progress detail will be generated, units specifies | |
554 | the type of revlog that is touched (changelog, manifest, etc.). |
|
553 | the type of revlog that is touched (changelog, manifest, etc.). | |
555 | """ |
|
554 | """ | |
556 | # if we don't have any revisions touched by these changesets, bail |
|
555 | # if we don't have any revisions touched by these changesets, bail | |
557 | if len(nodelist) == 0: |
|
556 | if len(nodelist) == 0: | |
558 | yield self.close() |
|
557 | yield self.close() | |
559 | return |
|
558 | return | |
560 |
|
559 | |||
561 | revs = self._sortgroup(revlog, nodelist, lookup) |
|
560 | revs = self._sortgroup(revlog, nodelist, lookup) | |
562 |
|
561 | |||
563 | # add the parent of the first rev |
|
562 | # add the parent of the first rev | |
564 | p = revlog.parentrevs(revs[0])[0] |
|
563 | p = revlog.parentrevs(revs[0])[0] | |
565 | revs.insert(0, p) |
|
564 | revs.insert(0, p) | |
566 |
|
565 | |||
567 | # build deltas |
|
566 | # build deltas | |
568 | total = len(revs) - 1 |
|
567 | progress = None | |
569 | msgbundling = _('bundling') |
|
568 | if units is not None: | |
|
569 | progress = self._repo.ui.makeprogress(_('bundling'), unit=units, | |||
|
570 | total=(len(revs) - 1)) | |||
570 | for r in xrange(len(revs) - 1): |
|
571 | for r in xrange(len(revs) - 1): | |
571 | if units is not None: |
|
572 | if progress: | |
572 | self._progress(msgbundling, r + 1, unit=units, total=total) |
|
573 | progress.update(r + 1) | |
573 | prev, curr = revs[r], revs[r + 1] |
|
574 | prev, curr = revs[r], revs[r + 1] | |
574 | linknode = lookup(revlog.node(curr)) |
|
575 | linknode = lookup(revlog.node(curr)) | |
575 | for c in self.revchunk(revlog, curr, prev, linknode): |
|
576 | for c in self.revchunk(revlog, curr, prev, linknode): | |
576 | yield c |
|
577 | yield c | |
577 |
|
578 | |||
578 | if units is not None: |
|
579 | if progress: | |
579 | self._progress(msgbundling, None) |
|
580 | progress.complete() | |
580 | yield self.close() |
|
581 | yield self.close() | |
581 |
|
582 | |||
582 | # filter any nodes that claim to be part of the known set |
|
583 | # filter any nodes that claim to be part of the known set | |
583 | def prune(self, revlog, missing, commonrevs): |
|
584 | def prune(self, revlog, missing, commonrevs): | |
584 | rr, rl = revlog.rev, revlog.linkrev |
|
585 | rr, rl = revlog.rev, revlog.linkrev | |
585 | return [n for n in missing if rl(rr(n)) not in commonrevs] |
|
586 | return [n for n in missing if rl(rr(n)) not in commonrevs] | |
586 |
|
587 | |||
587 | def _packmanifests(self, dir, mfnodes, lookuplinknode): |
|
588 | def _packmanifests(self, dir, mfnodes, lookuplinknode): | |
588 | """Pack flat manifests into a changegroup stream.""" |
|
589 | """Pack flat manifests into a changegroup stream.""" | |
589 | assert not dir |
|
590 | assert not dir | |
590 | for chunk in self.group(mfnodes, self._repo.manifestlog._revlog, |
|
591 | for chunk in self.group(mfnodes, self._repo.manifestlog._revlog, | |
591 | lookuplinknode, units=_('manifests')): |
|
592 | lookuplinknode, units=_('manifests')): | |
592 | yield chunk |
|
593 | yield chunk | |
593 |
|
594 | |||
594 | def _manifestsdone(self): |
|
595 | def _manifestsdone(self): | |
595 | return '' |
|
596 | return '' | |
596 |
|
597 | |||
597 | def generate(self, commonrevs, clnodes, fastpathlinkrev, source): |
|
598 | def generate(self, commonrevs, clnodes, fastpathlinkrev, source): | |
598 | '''yield a sequence of changegroup chunks (strings)''' |
|
599 | '''yield a sequence of changegroup chunks (strings)''' | |
599 | repo = self._repo |
|
600 | repo = self._repo | |
600 | cl = repo.changelog |
|
601 | cl = repo.changelog | |
601 |
|
602 | |||
602 | clrevorder = {} |
|
603 | clrevorder = {} | |
603 | mfs = {} # needed manifests |
|
604 | mfs = {} # needed manifests | |
604 | fnodes = {} # needed file nodes |
|
605 | fnodes = {} # needed file nodes | |
605 | changedfiles = set() |
|
606 | changedfiles = set() | |
606 |
|
607 | |||
607 | # Callback for the changelog, used to collect changed files and manifest |
|
608 | # Callback for the changelog, used to collect changed files and manifest | |
608 | # nodes. |
|
609 | # nodes. | |
609 | # Returns the linkrev node (identity in the changelog case). |
|
610 | # Returns the linkrev node (identity in the changelog case). | |
610 | def lookupcl(x): |
|
611 | def lookupcl(x): | |
611 | c = cl.read(x) |
|
612 | c = cl.read(x) | |
612 | clrevorder[x] = len(clrevorder) |
|
613 | clrevorder[x] = len(clrevorder) | |
613 | n = c[0] |
|
614 | n = c[0] | |
614 | # record the first changeset introducing this manifest version |
|
615 | # record the first changeset introducing this manifest version | |
615 | mfs.setdefault(n, x) |
|
616 | mfs.setdefault(n, x) | |
616 | # Record a complete list of potentially-changed files in |
|
617 | # Record a complete list of potentially-changed files in | |
617 | # this manifest. |
|
618 | # this manifest. | |
618 | changedfiles.update(c[3]) |
|
619 | changedfiles.update(c[3]) | |
619 | return x |
|
620 | return x | |
620 |
|
621 | |||
621 | self._verbosenote(_('uncompressed size of bundle content:\n')) |
|
622 | self._verbosenote(_('uncompressed size of bundle content:\n')) | |
622 | size = 0 |
|
623 | size = 0 | |
623 | for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')): |
|
624 | for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')): | |
624 | size += len(chunk) |
|
625 | size += len(chunk) | |
625 | yield chunk |
|
626 | yield chunk | |
626 | self._verbosenote(_('%8.i (changelog)\n') % size) |
|
627 | self._verbosenote(_('%8.i (changelog)\n') % size) | |
627 |
|
628 | |||
628 | # We need to make sure that the linkrev in the changegroup refers to |
|
629 | # We need to make sure that the linkrev in the changegroup refers to | |
629 | # the first changeset that introduced the manifest or file revision. |
|
630 | # the first changeset that introduced the manifest or file revision. | |
630 | # The fastpath is usually safer than the slowpath, because the filelogs |
|
631 | # The fastpath is usually safer than the slowpath, because the filelogs | |
631 | # are walked in revlog order. |
|
632 | # are walked in revlog order. | |
632 | # |
|
633 | # | |
633 | # When taking the slowpath with reorder=None and the manifest revlog |
|
634 | # When taking the slowpath with reorder=None and the manifest revlog | |
634 | # uses generaldelta, the manifest may be walked in the "wrong" order. |
|
635 | # uses generaldelta, the manifest may be walked in the "wrong" order. | |
635 | # Without 'clrevorder', we would get an incorrect linkrev (see fix in |
|
636 | # Without 'clrevorder', we would get an incorrect linkrev (see fix in | |
636 | # cc0ff93d0c0c). |
|
637 | # cc0ff93d0c0c). | |
637 | # |
|
638 | # | |
638 | # When taking the fastpath, we are only vulnerable to reordering |
|
639 | # When taking the fastpath, we are only vulnerable to reordering | |
639 | # of the changelog itself. The changelog never uses generaldelta, so |
|
640 | # of the changelog itself. The changelog never uses generaldelta, so | |
640 | # it is only reordered when reorder=True. To handle this case, we |
|
641 | # it is only reordered when reorder=True. To handle this case, we | |
641 | # simply take the slowpath, which already has the 'clrevorder' logic. |
|
642 | # simply take the slowpath, which already has the 'clrevorder' logic. | |
642 | # This was also fixed in cc0ff93d0c0c. |
|
643 | # This was also fixed in cc0ff93d0c0c. | |
643 | fastpathlinkrev = fastpathlinkrev and not self._reorder |
|
644 | fastpathlinkrev = fastpathlinkrev and not self._reorder | |
644 | # Treemanifests don't work correctly with fastpathlinkrev |
|
645 | # Treemanifests don't work correctly with fastpathlinkrev | |
645 | # either, because we don't discover which directory nodes to |
|
646 | # either, because we don't discover which directory nodes to | |
646 | # send along with files. This could probably be fixed. |
|
647 | # send along with files. This could probably be fixed. | |
647 | fastpathlinkrev = fastpathlinkrev and ( |
|
648 | fastpathlinkrev = fastpathlinkrev and ( | |
648 | 'treemanifest' not in repo.requirements) |
|
649 | 'treemanifest' not in repo.requirements) | |
649 |
|
650 | |||
650 | for chunk in self.generatemanifests(commonrevs, clrevorder, |
|
651 | for chunk in self.generatemanifests(commonrevs, clrevorder, | |
651 | fastpathlinkrev, mfs, fnodes, source): |
|
652 | fastpathlinkrev, mfs, fnodes, source): | |
652 | yield chunk |
|
653 | yield chunk | |
653 | mfs.clear() |
|
654 | mfs.clear() | |
654 | clrevs = set(cl.rev(x) for x in clnodes) |
|
655 | clrevs = set(cl.rev(x) for x in clnodes) | |
655 |
|
656 | |||
656 | if not fastpathlinkrev: |
|
657 | if not fastpathlinkrev: | |
657 | def linknodes(unused, fname): |
|
658 | def linknodes(unused, fname): | |
658 | return fnodes.get(fname, {}) |
|
659 | return fnodes.get(fname, {}) | |
659 | else: |
|
660 | else: | |
660 | cln = cl.node |
|
661 | cln = cl.node | |
661 | def linknodes(filerevlog, fname): |
|
662 | def linknodes(filerevlog, fname): | |
662 | llr = filerevlog.linkrev |
|
663 | llr = filerevlog.linkrev | |
663 | fln = filerevlog.node |
|
664 | fln = filerevlog.node | |
664 | revs = ((r, llr(r)) for r in filerevlog) |
|
665 | revs = ((r, llr(r)) for r in filerevlog) | |
665 | return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs) |
|
666 | return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs) | |
666 |
|
667 | |||
667 | for chunk in self.generatefiles(changedfiles, linknodes, commonrevs, |
|
668 | for chunk in self.generatefiles(changedfiles, linknodes, commonrevs, | |
668 | source): |
|
669 | source): | |
669 | yield chunk |
|
670 | yield chunk | |
670 |
|
671 | |||
671 | yield self.close() |
|
672 | yield self.close() | |
672 |
|
673 | |||
673 | if clnodes: |
|
674 | if clnodes: | |
674 | repo.hook('outgoing', node=hex(clnodes[0]), source=source) |
|
675 | repo.hook('outgoing', node=hex(clnodes[0]), source=source) | |
675 |
|
676 | |||
676 | def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs, |
|
677 | def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs, | |
677 | fnodes, source): |
|
678 | fnodes, source): | |
678 | """Returns an iterator of changegroup chunks containing manifests. |
|
679 | """Returns an iterator of changegroup chunks containing manifests. | |
679 |
|
680 | |||
680 | `source` is unused here, but is used by extensions like remotefilelog to |
|
681 | `source` is unused here, but is used by extensions like remotefilelog to | |
681 | change what is sent based in pulls vs pushes, etc. |
|
        change what is sent based on pulls vs pushes, etc.
        """
        repo = self._repo
        mfl = repo.manifestlog
        dirlog = mfl._revlog.dirlog
        tmfnodes = {'': mfs}

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def makelookupmflinknode(dir, nodes):
            if fastpathlinkrev:
                assert not dir
                return mfs.__getitem__

            def lookupmflinknode(x):
                """Callback for looking up the linknode for manifests.

                Returns the linkrev node for the specified manifest.

                SIDE EFFECT:

                1) fclnodes gets populated with the list of relevant
                   file nodes if we're not using fastpathlinkrev
                2) When treemanifests are in use, collects treemanifest nodes
                   to send

                Note that this means manifests must be completely sent to
                the client before you can trust the list of files and
                treemanifests to send.
                """
                clnode = nodes[x]
                mdata = mfl.get(dir, x).readfast(shallow=True)
                for p, n, fl in mdata.iterentries():
                    if fl == 't': # subdirectory manifest
                        subdir = dir + p + '/'
                        tmfclnodes = tmfnodes.setdefault(subdir, {})
                        tmfclnode = tmfclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[tmfclnode]:
                            tmfclnodes[n] = clnode
                    else:
                        f = dir + p
                        fclnodes = fnodes.setdefault(f, {})
                        fclnode = fclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            fclnodes[n] = clnode
                return clnode
            return lookupmflinknode

        size = 0
        while tmfnodes:
            dir, nodes = tmfnodes.popitem()
            prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
            if not dir or prunednodes:
                for x in self._packmanifests(dir, prunednodes,
                                             makelookupmflinknode(dir, nodes)):
                    size += len(x)
                    yield x
        self._verbosenote(_('%8.i (manifests)\n') % size)
        yield self._manifestsdone()
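
# Editor's aside (illustrative, not part of changegroup.py): the
# setdefault/clrevorder checks in lookupmflinknode() above keep, for every
# collected manifest or file node, the linkrev node of the earliest changeset
# (in emission order) that introduced it. A minimal, self-contained sketch of
# that bookkeeping pattern, using made-up node names:
def _example_earliest_linkrev():
    clrevorder = {'clA': 0, 'clB': 1}      # changelog nodes in emission order
    fnodes = {}                            # path -> {filenode: linkrev node}
    seen = [('clB', 'n1'), ('clA', 'n1')]  # later changeset encountered first
    for clnode, filenode in seen:
        fclnodes = fnodes.setdefault('some/file', {})
        fclnode = fclnodes.setdefault(filenode, clnode)
        if clrevorder[clnode] < clrevorder[fclnode]:
            fclnodes[filenode] = clnode
    return fnodes  # {'some/file': {'n1': 'clA'}} -- the earlier changeset wins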

    # The 'source' parameter is useful for extensions
    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        repo = self._repo
-        progress = self._progress
-        msgbundling = _('bundling')
-
-        total = len(changedfiles)
-        # for progress output
-        msgfiles = _('files')
+        progress = repo.ui.makeprogress(_('bundling'), unit=_('files'),
+                                        total=len(changedfiles))
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise error.Abort(_("empty or missing file data for %s") %
                                  fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
            if filenodes:
-                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
-                         total=total)
+                progress.update(i + 1, item=fname)
                h = self.fileheader(fname)
                size = len(h)
                yield h
                for chunk in self.group(filenodes, filerevlog, lookupfilelog):
                    size += len(chunk)
                    yield chunk
                self._verbosenote(_('%8.i %s\n') % (size, fname))
-        progress(msgbundling, None)
+        progress.complete()
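
# Editor's aside (illustrative, not part of changegroup.py): the hunk above
# swaps the old positional progress() calls for a progress helper object that
# is created once and then driven with update()/complete() (increment() is
# used the same way in _addchangegroupfiles below). A hedged usage sketch,
# assuming only the makeprogress/update/complete API visible in this diff:
def _example_progress(ui, items):
    progress = ui.makeprogress(_('bundling'), unit=_('files'),
                               total=len(items))
    for i, item in enumerate(sorted(items)):
        progress.update(i + 1, item=item)  # report absolute position
        # ... per-item work would go here ...
    progress.complete()                    # finish and clear the topic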

    def deltaparent(self, revlog, rev, p1, p2, prev):
        if not revlog.candelta(prev, rev):
            raise error.ProgrammingError('cg1 should not be used in this case')
        return prev

    def revchunk(self, revlog, rev, prev, linknode):
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = self.deltaparent(revlog, rev, p1, p2, prev)

        prefix = ''
        if revlog.iscensored(base) or revlog.iscensored(rev):
            try:
                delta = revlog.revision(node, raw=True)
            except error.CensoredNodeError as e:
                delta = e.tombstone
            if base == nullrev:
                prefix = mdiff.trivialdiffheader(len(delta))
            else:
                baselen = revlog.rawsize(base)
                prefix = mdiff.replacediffheader(baselen, len(delta))
        elif base == nullrev:
            delta = revlog.revision(node, raw=True)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        flags = revlog.flags(rev)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta
    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # do nothing with basenode, it is implicitly the previous one in HG10
        # do nothing with flags, it is implicitly 0 for cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
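
# Editor's aside (illustrative, not part of changegroup.py): revchunk() above
# frames every delta as <4-byte big-endian length><delta header><optional
# diff-header prefix><delta>, where the length field counts itself (see
# chunkheader()). A rough sketch of assembling one cg1 chunk; the node values
# and delta payload are made up:
def _example_cg1_chunk():
    node = b'\x11' * 20
    p1n = b'\x22' * 20
    p2n = b'\x00' * 20       # null parent
    linknode = b'\x33' * 20
    delta = b'example delta payload'
    meta = struct.pack(_CHANGEGROUPV1_DELTA_HEADER, node, p1n, p2n, linknode)
    # on the wire: 4 (length) + 80 (header) + len(delta) bytes
    return chunkheader(len(meta) + len(delta)) + meta + delta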

class cg2packer(cg1packer):
    version = '02'
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER

    def __init__(self, repo, bundlecaps=None):
        super(cg2packer, self).__init__(repo, bundlecaps)
        if self._reorder is None:
            # Since generaldelta is directly supported by cg2, reordering
            # generally doesn't help, so we disable it by default (treating
            # bundle.reorder=auto just like bundle.reorder=False).
            self._reorder = False

    def deltaparent(self, revlog, rev, p1, p2, prev):
        dp = revlog.deltaparent(rev)
        if dp == nullrev and revlog.storedeltachains:
            # Avoid sending full revisions when delta parent is null. Pick prev
            # in that case. It's tempting to pick p1 in this case, as p1 will
            # be smaller in the common case. However, computing a delta against
            # p1 may require resolving the raw text of p1, which could be
            # expensive. The revlog caches should have prev cached, meaning
            # less CPU for changegroup generation. There is likely room to add
            # a flag and/or config option to control this behavior.
            base = prev
        elif dp == nullrev:
            # revlog is configured to use full snapshot for a reason,
            # stick to full snapshot.
            base = nullrev
        elif dp not in (p1, p2, prev):
            # Pick prev when we can't be sure remote has the base revision.
            return prev
        else:
            base = dp
        if base != nullrev and not revlog.candelta(base, rev):
            base = nullrev
        return base

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # Do nothing with flags, it is implicitly 0 in cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)

class cg3packer(cg2packer):
    version = '03'
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        if dir:
            yield self.fileheader(dir)

        dirlog = self._repo.manifestlog._revlog.dirlog(dir)
        for chunk in self.group(mfnodes, dirlog, lookuplinknode,
                                units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        return self.close()

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        return struct.pack(
            self.deltaheader, node, p1n, p2n, basenode, linknode, flags)
|
872 | _packermap = {'01': (cg1packer, cg1unpacker), | |
877 | # cg2 adds support for exchanging generaldelta |
|
873 | # cg2 adds support for exchanging generaldelta | |
878 | '02': (cg2packer, cg2unpacker), |
|
874 | '02': (cg2packer, cg2unpacker), | |
879 | # cg3 adds support for exchanging revlog flags and treemanifests |
|
875 | # cg3 adds support for exchanging revlog flags and treemanifests | |
880 | '03': (cg3packer, cg3unpacker), |
|
876 | '03': (cg3packer, cg3unpacker), | |
881 | } |
|
877 | } | |
882 |
|
878 | |||
883 | def allsupportedversions(repo): |
|
879 | def allsupportedversions(repo): | |
884 | versions = set(_packermap.keys()) |
|
880 | versions = set(_packermap.keys()) | |
885 | if not (repo.ui.configbool('experimental', 'changegroup3') or |
|
881 | if not (repo.ui.configbool('experimental', 'changegroup3') or | |
886 | repo.ui.configbool('experimental', 'treemanifest') or |
|
882 | repo.ui.configbool('experimental', 'treemanifest') or | |
887 | 'treemanifest' in repo.requirements): |
|
883 | 'treemanifest' in repo.requirements): | |
888 | versions.discard('03') |
|
884 | versions.discard('03') | |
889 | return versions |
|
885 | return versions | |
890 |
|
886 | |||
891 | # Changegroup versions that can be applied to the repo |
|
887 | # Changegroup versions that can be applied to the repo | |
892 | def supportedincomingversions(repo): |
|
888 | def supportedincomingversions(repo): | |
893 | return allsupportedversions(repo) |
|
889 | return allsupportedversions(repo) | |
894 |
|
890 | |||
895 | # Changegroup versions that can be created from the repo |
|
891 | # Changegroup versions that can be created from the repo | |
896 | def supportedoutgoingversions(repo): |
|
892 | def supportedoutgoingversions(repo): | |
897 | versions = allsupportedversions(repo) |
|
893 | versions = allsupportedversions(repo) | |
898 | if 'treemanifest' in repo.requirements: |
|
894 | if 'treemanifest' in repo.requirements: | |
899 | # Versions 01 and 02 support only flat manifests and it's just too |
|
895 | # Versions 01 and 02 support only flat manifests and it's just too | |
900 | # expensive to convert between the flat manifest and tree manifest on |
|
896 | # expensive to convert between the flat manifest and tree manifest on | |
901 | # the fly. Since tree manifests are hashed differently, all of history |
|
897 | # the fly. Since tree manifests are hashed differently, all of history | |
902 | # would have to be converted. Instead, we simply don't even pretend to |
|
898 | # would have to be converted. Instead, we simply don't even pretend to | |
903 | # support versions 01 and 02. |
|
899 | # support versions 01 and 02. | |
904 | versions.discard('01') |
|
900 | versions.discard('01') | |
905 | versions.discard('02') |
|
901 | versions.discard('02') | |
906 | if NARROW_REQUIREMENT in repo.requirements: |
|
902 | if NARROW_REQUIREMENT in repo.requirements: | |
907 | # Versions 01 and 02 don't support revlog flags, and we need to |
|
903 | # Versions 01 and 02 don't support revlog flags, and we need to | |
908 | # support that for stripping and unbundling to work. |
|
904 | # support that for stripping and unbundling to work. | |
909 | versions.discard('01') |
|
905 | versions.discard('01') | |
910 | versions.discard('02') |
|
906 | versions.discard('02') | |
911 | if LFS_REQUIREMENT in repo.requirements: |
|
907 | if LFS_REQUIREMENT in repo.requirements: | |
912 | # Versions 01 and 02 don't support revlog flags, and we need to |
|
908 | # Versions 01 and 02 don't support revlog flags, and we need to | |
913 | # mark LFS entries with REVIDX_EXTSTORED. |
|
909 | # mark LFS entries with REVIDX_EXTSTORED. | |
914 | versions.discard('01') |
|
910 | versions.discard('01') | |
915 | versions.discard('02') |
|
911 | versions.discard('02') | |
916 |
|
912 | |||
917 | return versions |
|
913 | return versions | |
918 |
|
914 | |||
919 | def localversion(repo): |
|
915 | def localversion(repo): | |
920 | # Finds the best version to use for bundles that are meant to be used |
|
916 | # Finds the best version to use for bundles that are meant to be used | |
921 | # locally, such as those from strip and shelve, and temporary bundles. |
|
917 | # locally, such as those from strip and shelve, and temporary bundles. | |
922 | return max(supportedoutgoingversions(repo)) |
|
918 | return max(supportedoutgoingversions(repo)) | |
923 |
|
919 | |||
924 | def safeversion(repo): |
|
920 | def safeversion(repo): | |
925 | # Finds the smallest version that it's safe to assume clients of the repo |
|
921 | # Finds the smallest version that it's safe to assume clients of the repo | |
926 | # will support. For example, all hg versions that support generaldelta also |
|
922 | # will support. For example, all hg versions that support generaldelta also | |
927 | # support changegroup 02. |
|
923 | # support changegroup 02. | |
928 | versions = supportedoutgoingversions(repo) |
|
924 | versions = supportedoutgoingversions(repo) | |
929 | if 'generaldelta' in repo.requirements: |
|
925 | if 'generaldelta' in repo.requirements: | |
930 | versions.discard('01') |
|
926 | versions.discard('01') | |
931 | assert versions |
|
927 | assert versions | |
932 | return min(versions) |
|
928 | return min(versions) | |
933 |
|
929 | |||
934 | def getbundler(version, repo, bundlecaps=None): |
|
930 | def getbundler(version, repo, bundlecaps=None): | |
935 | assert version in supportedoutgoingversions(repo) |
|
931 | assert version in supportedoutgoingversions(repo) | |
936 | return _packermap[version][0](repo, bundlecaps) |
|
932 | return _packermap[version][0](repo, bundlecaps) | |
937 |
|
933 | |||
938 | def getunbundler(version, fh, alg, extras=None): |
|
934 | def getunbundler(version, fh, alg, extras=None): | |
939 | return _packermap[version][1](fh, alg, extras=extras) |
|
935 | return _packermap[version][1](fh, alg, extras=extras) | |
940 |
|
936 | |||
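
# Editor's aside (illustrative, not part of changegroup.py): a hedged sketch
# of how the helpers above fit together when choosing a changegroup version
# for a repository. 'repo' is assumed to be a local repository object; which
# versions survive depends on its requirements and experimental config, as
# implemented in supportedoutgoingversions().
def _example_pick_bundler(repo):
    versions = supportedoutgoingversions(repo)
    version = max(versions)  # same "newest supported" policy as localversion()
    return version, getbundler(version, repo)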

def _changegroupinfo(repo, nodes, source):
    if repo.ui.verbose or source == 'bundle':
        repo.ui.status(_("%d changesets found\n") % len(nodes))
    if repo.ui.debugflag:
        repo.ui.debug("list of changesets:\n")
        for node in nodes:
            repo.ui.debug("%s\n" % hex(node))

def makechangegroup(repo, outgoing, version, source, fastpath=False,
                    bundlecaps=None):
    cgstream = makestream(repo, outgoing, version, source,
                          fastpath=fastpath, bundlecaps=bundlecaps)
    return getunbundler(version, util.chunkbuffer(cgstream), None,
                        {'clcount': len(outgoing.missing) })

def makestream(repo, outgoing, version, source, fastpath=False,
               bundlecaps=None):
    bundler = getbundler(version, repo, bundlecaps=bundlecaps)

    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know all linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
            repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
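
# Editor's aside (illustrative, not part of changegroup.py): makestream()
# yields the raw chunk stream, while makechangegroup() wraps that stream in an
# unbundler so it can be applied or written out. A hedged sketch of a typical
# call, assuming 'outgoing' is a discovery-style object with the 'common',
# 'missing' and 'missingheads' attributes used above, and that version '02'
# is in supportedoutgoingversions(repo):
def _example_makechangegroup(repo, outgoing):
    cg = makechangegroup(repo, outgoing, '02', 'push')
    return cg  # an unbundler carrying {'clcount': len(outgoing.missing)}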

def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    revisions = 0
    files = 0
    progress = repo.ui.makeprogress(_('files'), unit=_('files'),
                                    total=expectedfiles)
    for chunkdata in iter(source.filelogheader, {}):
        files += 1
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        progress.increment()
        fl = repo.file(f)
        o = len(fl)
        try:
            deltas = source.deltaiter()
            if not fl.addgroup(deltas, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        if f in needfiles:
            needs = needfiles[f]
            for new in xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    progress.complete()

    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files
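
# Editor's aside (illustrative, not part of changegroup.py): the main loop in
# _addchangegroupfiles() uses the two-argument iter() idiom --
# iter(callable, sentinel) keeps calling source.filelogheader() until it
# returns the empty-dict sentinel that marks the end of the file parts.
# A generic, self-contained illustration of the idiom:
def _example_iter_sentinel():
    parts = iter([{'filename': 'a'}, {'filename': 'b'}, {}])
    read = lambda: next(parts)
    return [h['filename'] for h in iter(read, {})]  # -> ['a', 'b']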