changegroup: move revision maps to cgpacker...
Gregory Szorc
r39179:0548f696 default
@@ -1,1411 +1,1413 @@
# changegroup.py - Mercurial changegroup manipulation functions
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import os
import struct
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    nullrev,
    short,
)

from .thirdparty import (
    attr,
)

from . import (
    dagutil,
    error,
    manifest,
    match as matchmod,
    mdiff,
    phases,
    pycompat,
    repository,
    revlog,
    util,
)

from .utils import (
    stringutil,
)

_CHANGEGROUPV1_DELTA_HEADER = struct.Struct("20s20s20s20s")
_CHANGEGROUPV2_DELTA_HEADER = struct.Struct("20s20s20s20s20s")
_CHANGEGROUPV3_DELTA_HEADER = struct.Struct(">20s20s20s20s20sH")

LFS_REQUIREMENT = 'lfs'

readexactly = util.readexactly

def getchunk(stream):
    """return the next chunk from stream as a string"""
    d = readexactly(stream, 4)
    l = struct.unpack(">l", d)[0]
    if l <= 4:
        if l:
            raise error.Abort(_("invalid chunk length %d") % l)
        return ""
    return readexactly(stream, l - 4)

def chunkheader(length):
    """return a changegroup chunk header (string)"""
    return struct.pack(">l", length + 4)

def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    return struct.pack(">l", 0)

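These three helpers define the entire framing: a big-endian 4-byte length that counts itself, followed by the payload, with a bare zero length acting as a terminator. A minimal, self-contained round-trip sketch (the stream and the _frame helper here are illustrative, not part of Mercurial's API):

import io
import struct

def _frame(payload):
    # length prefix includes its own 4 bytes, matching chunkheader()
    return struct.pack(">l", len(payload) + 4) + payload

stream = io.BytesIO(_frame(b"hello") + struct.pack(">l", 0))

l = struct.unpack(">l", stream.read(4))[0]            # 9 == 4 + len(b"hello")
assert stream.read(l - 4) == b"hello"
assert struct.unpack(">l", stream.read(4))[0] == 0    # closechunk marker
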
def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                # Increase default buffer size because default is usually
                # small (4k is common on Linux).
                fh = open(filename, "wb", 131072)
        else:
            fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, r"wb")
        cleanup = filename
        for c in chunks:
            fh.write(c)
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)

class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        if alg is None:
            alg = 'UN'
        if alg not in util.compengines.supportedbundletypes:
            raise error.Abort(_('unknown stream compression type: %s')
                              % alg)
        if alg == 'BZ':
            alg = '_truncatedBZ'

        compengine = util.compengines.forbundletype(alg)
        self._stream = compengine.decompressorreader(fh)
        self._type = alg
        self.extras = extras or {}
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None and self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = self.deltaheader.unpack(headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return (node, p1, p2, cs, deltabase, delta, flags)

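For reference, the cg1 delta header unpacked above is four raw 20-byte nodes (node, p1, p2, linknode) with no explicit delta base; the base is implied by the previous chunk, or p1 for the first chunk. A sketch with made-up node values:

import struct

header = struct.Struct("20s20s20s20s")   # same layout as cg1
node, p1, p2, linknode = (b"\x01" * 20, b"\x02" * 20,
                          b"\x03" * 20, b"\x04" * 20)
packed = header.pack(node, p1, p2, linknode)
assert header.size == 80
assert header.unpack(packed) == (node, p1, p2, linknode)
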
    def getchunks(self):
        """returns all the chunks contained in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parses the changegroup data; otherwise it
        would block reading from an sshrepo, because it doesn't know where
        the stream ends.
        """
        # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
        # and a list of filelogs. For changegroup 3, we expect 4 parts:
        # changelog, manifestlog, a list of tree manifestlogs, and a list of
        # filelogs.
        #
        # Changelog and manifestlog parts are terminated with empty chunks. The
        # tree and file parts are a list of entry sections. Each entry section
        # is a series of chunks terminating in an empty chunk. The list of these
        # entry sections is terminated in yet another empty chunk, so we know
        # we've reached the end of the tree/file list when we reach an empty
        # chunk that was preceded by no non-empty chunks.

        parts = 0
        while parts < 2 + self._grouplistcount:
            noentries = True
            while True:
                chunk = getchunk(self)
                if not chunk:
                    # The first two empty chunks represent the end of the
                    # changelog and the manifestlog portions. The remaining
                    # empty chunks represent either A) the end of individual
                    # tree or file entries in the file list, or B) the end of
                    # the entire list. It's the end of the entire list if there
                    # were no entries (i.e. noentries is True).
                    if parts < 2:
                        parts += 1
                    elif noentries:
                        parts += 1
                    break
                noentries = False
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

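The forwarding use case from the docstring might look like the following sketch, where unpacker is assumed to be any cg*unpacker and out any writable binary file:

def forward(unpacker, out):
    # copy the framed stream verbatim; getchunks() re-adds the chunk headers
    for chunk in unpacker.getchunks():
        out.write(chunk)
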
    def _unpackmanifests(self, repo, revmap, trp, prog):
        self.callback = prog.increment
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        deltas = self.deltaiter()
        repo.manifestlog.addgroup(deltas, revmap, trp)
        prog.complete()
        self.callback = None

    def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
              expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        try:
            # The transaction may already carry source information. In this
            # case we use the top level data. We overwrite the arguments
            # because we need to use the top level values (if they exist)
            # in this function.
            srctype = tr.hookargs.setdefault('source', srctype)
            url = tr.hookargs.setdefault('url', url)
            repo.hook('prechangegroup',
                      throw=True, **pycompat.strkwargs(tr.hookargs))

            # write changelog data to temp files so concurrent readers
            # will not see an inconsistent view
            cl = repo.changelog
            cl.delayupdate(tr)
            oldheads = set(cl.heads())

            trp = weakref.proxy(tr)
            # pull off the changeset group
            repo.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            progress = repo.ui.makeprogress(_('changesets'), unit=_('chunks'),
                                            total=expectedtotal)
            self.callback = progress.increment

            efiles = set()
            def onchangelog(cl, node):
                efiles.update(cl.readfiles(node))

            self.changelogheader()
            deltas = self.deltaiter()
            cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog)
            efiles = len(efiles)

            if not cgnodes:
                repo.ui.develwarn('applied empty changegroup',
                                  config='warn-empty-changegroup')
            clend = len(cl)
            changesets = clend - clstart
            progress.complete()
            self.callback = None

            # pull off the manifest group
            repo.ui.status(_("adding manifests\n"))
            # We know that we'll never have more manifests than we had
            # changesets.
            progress = repo.ui.makeprogress(_('manifests'), unit=_('chunks'),
                                            total=changesets)
            self._unpackmanifests(repo, revmap, trp, progress)

            needfiles = {}
            if repo.ui.configbool('server', 'validate'):
                cl = repo.changelog
                ml = repo.manifestlog
                # validate incoming csets have their manifests
                for cset in pycompat.xrange(clstart, clend):
                    mfnode = cl.changelogrevision(cset).manifest
                    mfest = ml[mfnode].readdelta()
                    # store file cgnodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            repo.ui.status(_("adding file changes\n"))
            newrevs, newfiles = _addchangegroupfiles(
                repo, self, revmap, trp, efiles, needfiles)
            revisions += newrevs
            files += newfiles

            deltaheads = 0
            if oldheads:
                heads = cl.heads()
                deltaheads = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and repo[h].closesbranch():
                        deltaheads -= 1
            htext = ""
            if deltaheads:
                htext = _(" (%+d heads)") % deltaheads

            repo.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            repo.invalidatevolatilesets()

            if changesets > 0:
                if 'node' not in tr.hookargs:
                    tr.hookargs['node'] = hex(cl.node(clstart))
                    tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                    hookargs = dict(tr.hookargs)
                else:
                    hookargs = dict(tr.hookargs)
                    hookargs['node'] = hex(cl.node(clstart))
                    hookargs['node_last'] = hex(cl.node(clend - 1))
                repo.hook('pretxnchangegroup',
                          throw=True, **pycompat.strkwargs(hookargs))

            added = [cl.node(r) for r in pycompat.xrange(clstart, clend)]
            phaseall = None
            if srctype in ('push', 'serve'):
                # Old servers cannot push the boundary themselves.
                # New servers won't push the boundary if changeset already
                # exists locally as secret
                #
                # We should not use added here but the list of all changes
                # in the bundle
                if repo.publishing():
                    targetphase = phaseall = phases.public
                else:
                    # closer target phase computation

                    # Those changesets have been pushed from the
                    # outside, their phases are going to be pushed
                    # alongside. Therefore `targetphase` is
                    # ignored.
                    targetphase = phaseall = phases.draft
            if added:
                phases.registernew(repo, tr, targetphase, added)
            if phaseall is not None:
                phases.advanceboundary(repo, tr, phaseall, cgnodes)

            if changesets > 0:

                def runhooks():
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(repo):
                        return

                    repo.hook("changegroup", **pycompat.strkwargs(hookargs))

                    for n in added:
                        args = hookargs.copy()
                        args['node'] = hex(n)
                        del args['node_last']
                        repo.hook("incoming", **pycompat.strkwargs(args))

                    newheads = [h for h in repo.heads()
                                if h not in oldheads]
                    repo.ui.log("incoming",
                                "%d incoming changes - new heads: %s\n",
                                len(added),
                                ', '.join([hex(c[:6]) for c in newheads]))

                tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        if deltaheads < 0:
            ret = deltaheads - 1
        else:
            ret = deltaheads + 1
        return ret

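The head-count encoding computed at the end of apply() can be restated on its own: zero is reserved for "nothing changed", so the head delta is shifted away from it (an illustrative restatement, not code from this module):

def _headreturn(deltaheads):
    return deltaheads - 1 if deltaheads < 0 else deltaheads + 1

assert _headreturn(0) == 1     # head count unchanged
assert _headreturn(2) == 3     # two heads added -> 1 + added heads
assert _headreturn(-1) == -2   # one head removed -> -1 - removed heads
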
    def deltaiter(self):
        """
        returns an iterator of the deltas in this changegroup

        Useful for passing to the underlying storage system to be stored.
        """
        chain = None
        for chunkdata in iter(lambda: self.deltachunk(chain), {}):
            # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
            yield chunkdata
            chain = chunkdata[0]

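A consumer would typically hand this iterator straight to a storage addgroup(); a minimal sketch that merely counts the deltas (assuming unpacker is a cg1unpacker positioned past the relevant header):

def countdeltas(unpacker):
    count = 0
    for node, p1, p2, cs, deltabase, delta, flags in unpacker.deltaiter():
        count += 1   # real consumers pass the tuples to storage
    return count
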
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs = headertuple
        flags = 0
        return node, p1, p2, deltabase, cs, flags

class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

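The three wire formats differ only in the delta header; their sizes follow directly from the struct definitions at the top of the module:

import struct

assert struct.Struct("20s20s20s20s").size == 80         # cg1: no deltabase
assert struct.Struct("20s20s20s20s20s").size == 100     # cg2: + deltabase
assert struct.Struct(">20s20s20s20s20sH").size == 102   # cg3: + 2-byte flags
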
    def _unpackmanifests(self, repo, revmap, trp, prog):
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % d)
            dirlog = repo.manifestlog._revlog.dirlog(d)
            deltas = self.deltaiter()
            if not dirlog.addgroup(deltas, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))

class headerlessfixup(object):
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        if self._h:
            d, self._h = self._h[:n], self._h[n:]
            if len(d) < n:
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)

@attr.s(slots=True, frozen=True)
class revisiondelta(object):
    """Describes a delta entry in a changegroup.

    Captured data is sufficient to serialize the delta into multiple
    formats.
    """
    # 20 byte node of this revision.
    node = attr.ib()
    # 20 byte nodes of parent revisions.
    p1node = attr.ib()
    p2node = attr.ib()
    # 20 byte node this delta is against.
    basenode = attr.ib()
    # 20 byte node of changeset revision this delta is associated with.
    linknode = attr.ib()
    # 2 bytes of flags to apply to revision data.
    flags = attr.ib()
    # Iterable of chunks holding raw delta data.
    deltachunks = attr.ib()

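Since revisiondelta is a plain attrs container, constructing one is direct; the node values below are placeholders, not real hashes (nullid is the import from .node above):

example = revisiondelta(
    node=b"\x01" * 20,
    p1node=b"\x02" * 20,
    p2node=nullid,
    basenode=b"\x02" * 20,
    linknode=b"\x03" * 20,
    flags=0,
    deltachunks=[b"raw delta bytes"],
)
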
class cgpacker(object):
    def __init__(self, repo, filematcher, version, allowreorder,
                 useprevdelta, builddeltaheader, manifestsend,
                 sendtreemanifests, bundlecaps=None, shallow=False):
        """Given a source repo, construct a bundler.

        filematcher is a matcher that matches on files to include in the
        changegroup. Used to facilitate sparse changegroups.

        allowreorder controls whether reordering of revisions is allowed.
        This value is used when ``bundle.reorder`` is ``auto`` or isn't
        set.

        useprevdelta controls whether revisions should always delta against
        the previous revision in the changegroup.

        builddeltaheader is a callable that constructs the header for a group
        delta.

        manifestsend is a chunk to send after manifests have been fully emitted.

        sendtreemanifests indicates whether tree manifests should be emitted.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle. While bundlecaps is
        unused in core Mercurial, extensions rely on this feature to communicate
        capabilities to customize the changegroup packer.

        shallow indicates whether shallow data might be sent. The packer may
        need to pack file contents not introduced by the changes being packed.
        """
        assert filematcher
        self._filematcher = filematcher

        self.version = version
        self._useprevdelta = useprevdelta
        self._builddeltaheader = builddeltaheader
        self._manifestsend = manifestsend
        self._sendtreemanifests = sendtreemanifests

        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        self._isshallow = shallow

        # experimental config: bundle.reorder
        reorder = repo.ui.config('bundle', 'reorder')
        if reorder == 'auto':
            self._reorder = allowreorder
        else:
            self._reorder = stringutil.parsebool(reorder)

        self._repo = repo

        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None

        # TODO the functionality keyed off of this should probably be
        # controlled via arguments to group() that influence behavior.
        self._changelogdone = False

+        # Maps CL revs to per-revlog revisions. Cleared in close() at
+        # the end of each group.
+        self._clrevtolocalrev = {}
+        self._nextclrevtolocalrev = {}
+
+        # Maps changelog nodes to changelog revs. Filled in once
+        # during changelog stage and then left unmodified.
+        self._clnodetorev = {}
+
    def _close(self):
        # Ellipses serving mode.
-        getattr(self, '_clrev_to_localrev', {}).clear()
-        if getattr(self, '_next_clrev_to_localrev', {}):
-            self._clrev_to_localrev = self._next_clrev_to_localrev
-            del self._next_clrev_to_localrev
+        self._clrevtolocalrev.clear()
+        if self._nextclrevtolocalrev:
+            self.clrevtolocalrev = self._nextclrevtolocalrev
+            self._nextclrevtolocalrev.clear()
        self._changelogdone = True

        return closechunk()

    def _fileheader(self, fname):
        return chunkheader(len(fname)) + fname

    # Extracted both for clarity and for overriding in extensions.
    def _sortgroup(self, store, nodelist, lookup):
        """Sort nodes for change group and turn them into revnums."""
        # Ellipses serving mode.
        #
        # In a perfect world, we'd generate better ellipsis-ified graphs
        # for non-changelog revlogs. In practice, we haven't started doing
        # that yet, so the resulting DAGs for the manifestlog and filelogs
        # are actually full of bogus parentage on all the ellipsis
        # nodes. This has the side effect that, while the contents are
        # correct, the individual DAGs might be completely out of whack in
        # a case like 882681bc3166 and its ancestors (back about 10
        # revisions or so) in the main hg repo.
        #
        # The one invariant we *know* holds is that the new (potentially
        # bogus) DAG shape will be valid if we order the nodes in the
        # order that they're introduced in dramatis personae by the
        # changelog, so what we do is we sort the non-changelog histories
        # by the order in which they are used by the changelog.
-        if util.safehasattr(self, '_full_nodes') and self._clnode_to_rev:
-            key = lambda n: self._clnode_to_rev[lookup(n)]
+        if util.safehasattr(self, '_full_nodes') and self._clnodetorev:
+            key = lambda n: self._clnodetorev[lookup(n)]
            return [store.rev(n) for n in sorted(nodelist, key=key)]

        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (store._generaldelta and self._reorder is None) or self._reorder:
            dag = dagutil.revlogdag(store)
            return dag.linearize(set(store.rev(n) for n in nodelist))
        else:
            return sorted([store.rev(n) for n in nodelist])

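The ellipsis-mode branch sorts by the changelog revision of each node's linknode rather than by local revlog order. With plain dictionaries standing in for the lookup callback and the _clnodetorev map (all values invented):

clnodetorev = {b"clA": 1, b"clB": 2}            # changelog node -> rev
linknode = {b"late": b"clB", b"early": b"clA"}  # stands in for lookup()
nodes = [b"late", b"early"]
assert sorted(nodes, key=lambda n: clnodetorev[linknode[n]]) == \
    [b"early", b"late"]
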
    def group(self, nodelist, store, lookup, units=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0]; the receiver is
        guaranteed to have this parent, as it has all history before
        these changesets. In the case where firstparent is nullrev, the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated, and units
        specifies the type of revlog that is touched (changelog, manifest,
        etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self._close()
            return

        revs = self._sortgroup(store, nodelist, lookup)

        # add the parent of the first rev
        p = store.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        progress = None
        if units is not None:
            progress = self._repo.ui.makeprogress(_('bundling'), unit=units,
                                                  total=(len(revs) - 1))
        for r in pycompat.xrange(len(revs) - 1):
            if progress:
                progress.update(r + 1)
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(store.node(curr))
            for c in self._revchunk(store, curr, prev, linknode):
                yield c

        if progress:
            progress.complete()
        yield self._close()

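The delta pairing in the loop above is strictly consecutive: once the first parent is prepended, each revision is delta'd against its predecessor in the sorted list. With made-up revnums:

revs = [4, 7, 8, 9]   # parent rev 4 prepended ahead of [7, 8, 9]
pairs = [(revs[r], revs[r + 1]) for r in range(len(revs) - 1)]
assert pairs == [(4, 7), (7, 8), (8, 9)]   # (prev, curr) per _revchunk call
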
    # filter any nodes that claim to be part of the known set
    def _prune(self, store, missing, commonrevs):
        # TODO this violates storage abstraction for manifests.
        if isinstance(store, manifest.manifestrevlog):
            if not self._filematcher.visitdir(store._dir[:-1] or '.'):
                return []

        rr, rl = store.rev, store.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

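The linkrev filter reduces to a set-membership test; with a dictionary standing in for the store (data invented):

commonrevs = {0, 1}
linkrev = {b"a": 1, b"b": 2, b"c": 3}   # node -> linkrev in the store
missing = [b"a", b"b", b"c"]
assert [n for n in missing if linkrev[n] not in commonrevs] == [b"b", b"c"]
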
    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        """Pack flat manifests into a changegroup stream."""
        assert not dir
        for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
                                lookuplinknode, units=_('manifests')):
            yield chunk

    def _packtreemanifests(self, dir, mfnodes, lookuplinknode):
        """Version of _packmanifests that operates on directory manifests.

        Encodes the directory name in the output so multiple manifests
        can be sent.
        """
        assert self.version == b'03'

        if dir:
            yield self._fileheader(dir)

        # TODO violates storage abstractions by assuming revlogs.
        dirlog = self._repo.manifestlog._revlog.dirlog(dir)
        for chunk in self.group(mfnodes, dirlog, lookuplinknode,
                                units=_('manifests')):
            yield chunk

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        repo = self._repo
        cl = repo.changelog

        clrevorder = {}
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        mfl = repo.manifestlog
        # TODO violates storage abstraction.
        mfrevlog = mfl._revlog
        changedfiles = set()

        ellipsesmode = util.safehasattr(self, '_full_nodes')

        # Callback for the changelog, used to collect changed files and
        # manifest nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            clrevorder[x] = len(clrevorder)

            if ellipsesmode:
                # Only update mfs if x is going to be sent. Otherwise we
                # end up with bogus linkrevs specified for manifests and
                # we skip some manifest nodes that we should otherwise
                # have sent.
                if (x in self._full_nodes
                    or cl.rev(x) in self._precomputed_ellipsis):
                    n = c[0]
                    # Record the first changeset introducing this manifest
                    # version.
                    mfs.setdefault(n, x)
                    # Set this narrow-specific dict so we have the lowest
                    # manifest revnum to look up for this cl revnum. (Part of
                    # mapping changelog ellipsis parents to manifest ellipsis
                    # parents)
-                    self._next_clrev_to_localrev.setdefault(cl.rev(x),
+                    self._nextclrevtolocalrev.setdefault(cl.rev(x),
                                                           mfrevlog.rev(n))
                # We can't trust the changed files list in the changeset if the
                # client requested a shallow clone.
                if self._isshallow:
                    changedfiles.update(mfl[c[0]].read().keys())
                else:
                    changedfiles.update(c[3])
            else:

                n = c[0]
                # record the first changeset introducing this manifest version
                mfs.setdefault(n, x)
                # Record a complete list of potentially-changed files in
                # this manifest.
                changedfiles.update(c[3])

            return x

        self._verbosenote(_('uncompressed size of bundle content:\n'))
        size = 0
        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (changelog)\n') % size)

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the filelogs
        # are walked in revlog order.
        #
        # When taking the slowpath with reorder=None and the manifest revlog
        # uses generaldelta, the manifest may be walked in the "wrong" order.
        # Without 'clrevorder', we would get an incorrect linkrev (see fix in
        # cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta, so
        # it is only reordered when reorder=True. To handle this case, we
        # simply take the slowpath, which already has the 'clrevorder' logic.
        # This was also fixed in cc0ff93d0c0c.
        fastpathlinkrev = fastpathlinkrev and not self._reorder
        # Treemanifests don't work correctly with fastpathlinkrev
        # either, because we don't discover which directory nodes to
        # send along with files. This could probably be fixed.
        fastpathlinkrev = fastpathlinkrev and (
            'treemanifest' not in repo.requirements)

        for chunk in self.generatemanifests(commonrevs, clrevorder,
                                            fastpathlinkrev, mfs, fnodes,
                                            source):
            yield chunk

        if ellipsesmode:
            mfdicts = None
            if self._isshallow:
                mfdicts = [(self._repo.manifestlog[n].read(), lr)
                           for (n, lr) in mfs.iteritems()]

        mfs.clear()
        clrevs = set(cl.rev(x) for x in clnodes)

        if not fastpathlinkrev:
            def linknodes(unused, fname):
                return fnodes.get(fname, {})
        else:
            cln = cl.node
            def linknodes(filerevlog, fname):
                llr = filerevlog.linkrev
                fln = filerevlog.node
                revs = ((r, llr(r)) for r in filerevlog)
                return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)

        if ellipsesmode:
            # We need to pass the mfdicts variable down into
            # generatefiles(), but more than one command might have
            # wrapped generatefiles so we can't modify the function
            # signature. Instead, we pass the data to ourselves using an
            # instance attribute. I'm sorry.
            self._mfdicts = mfdicts

        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self._close()

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

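The slowpath linknodes callback defined above is just a lookup into the fnodes mapping populated during the manifest walk; restated with invented data:

fnodes = {b"a.txt": {b"fnode1": b"clnode1"}}  # filename -> {filenode: linknode}
def _linknodes(unused, fname):
    return fnodes.get(fname, {})
assert _linknodes(None, b"a.txt") == {b"fnode1": b"clnode1"}
assert _linknodes(None, b"missing") == {}
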
832 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
841 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
833 fnodes, source):
842 fnodes, source):
834 """Returns an iterator of changegroup chunks containing manifests.
843 """Returns an iterator of changegroup chunks containing manifests.
835
844
836 `source` is unused here, but is used by extensions like remotefilelog to
845 `source` is unused here, but is used by extensions like remotefilelog to
837 change what is sent based in pulls vs pushes, etc.
846 change what is sent based in pulls vs pushes, etc.
838 """
847 """
        repo = self._repo
        mfl = repo.manifestlog
        dirlog = mfl._revlog.dirlog
        tmfnodes = {'': mfs}

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def makelookupmflinknode(dir, nodes):
            if fastpathlinkrev:
                assert not dir
                return mfs.__getitem__

            def lookupmflinknode(x):
                """Callback for looking up the linknode for manifests.

                Returns the linkrev node for the specified manifest.

                SIDE EFFECT:

                1) fclnodes gets populated with the list of relevant
                   file nodes if we're not using fastpathlinkrev
                2) When treemanifests are in use, collects treemanifest nodes
                   to send

                Note that this means manifests must be completely sent to
                the client before you can trust the list of files and
                treemanifests to send.
                """
                clnode = nodes[x]
                mdata = mfl.get(dir, x).readfast(shallow=True)
                for p, n, fl in mdata.iterentries():
                    if fl == 't': # subdirectory manifest
                        subdir = dir + p + '/'
                        tmfclnodes = tmfnodes.setdefault(subdir, {})
                        tmfclnode = tmfclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[tmfclnode]:
                            tmfclnodes[n] = clnode
                    else:
                        f = dir + p
                        fclnodes = fnodes.setdefault(f, {})
                        fclnode = fclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            fclnodes[n] = clnode
                return clnode
            return lookupmflinknode

        fn = (self._packtreemanifests if self._sendtreemanifests
              else self._packmanifests)
        size = 0
        while tmfnodes:
            dir, nodes = tmfnodes.popitem()
            prunednodes = self._prune(dirlog(dir), nodes, commonrevs)
            if not dir or prunednodes:
                for x in fn(dir, prunednodes, makelookupmflinknode(dir, nodes)):
                    size += len(x)
                    yield x
        self._verbosenote(_('%8.i (manifests)\n') % size)
        yield self._manifestsend
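        # To illustrate the worklist above, a sketch with hypothetical
        # nodes: packing the root manifest discovers a 't' entry for
        # 'lib/', which enqueues that dirlog, and so on recursively:
        #
        #   tmfnodes == {'': {rootmfnode: clnode}}
        #   # after packing '':     {'lib/': {libmfnode: clnode}}
        #   # after packing 'lib/': {'lib/util/': {utilmfnode: clnode}}
        #
        # popitem() drains the dict until every reachable directory
        # manifest has been pruned against commonrevs and emitted.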

    # The 'source' parameter is useful for extensions
    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        changedfiles = list(filter(self._filematcher, changedfiles))

        if self._isshallow:
            # See comment in generate() for why this sadness is a thing.
            mfdicts = self._mfdicts
            del self._mfdicts
            # In a shallow clone, the linknodes callback needs to also include
            # those file nodes that are in the manifests we sent but weren't
            # introduced by those manifests.
            commonctxs = [self._repo[c] for c in commonrevs]
            oldlinknodes = linknodes
            clrev = self._repo.changelog.rev

            # Defining this function has a side-effect of overriding the
            # function of the same name that was passed in as an argument.
            # TODO have caller pass in appropriate function.
            def linknodes(flog, fname):
                for c in commonctxs:
                    try:
                        fnode = c.filenode(fname)
-                        self._clrev_to_localrev[c.rev()] = flog.rev(fnode)
+                        self._clrevtolocalrev[c.rev()] = flog.rev(fnode)
                    except error.ManifestLookupError:
                        pass
                links = oldlinknodes(flog, fname)
                if len(links) != len(mfdicts):
                    for mf, lr in mfdicts:
                        fnode = mf.get(fname, None)
                        if fnode in links:
                            links[fnode] = min(links[fnode], lr, key=clrev)
                        elif fnode:
                            links[fnode] = lr
                return links

        return self._generatefiles(changedfiles, linknodes, commonrevs, source)

    def _generatefiles(self, changedfiles, linknodes, commonrevs, source):
        repo = self._repo
        progress = repo.ui.makeprogress(_('bundling'), unit=_('files'),
                                        total=len(changedfiles))
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise error.Abort(_("empty or missing file data for %s") %
                                  fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup table for filenodes; we collected the linkrev nodes above
            # in the fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self._prune(filerevlog, linkrevnodes, commonrevs)
            if filenodes:
                progress.update(i + 1, item=fname)
                h = self._fileheader(fname)
                size = len(h)
                yield h
                for chunk in self.group(filenodes, filerevlog, lookupfilelog):
                    size += len(chunk)
                    yield chunk
                self._verbosenote(_('%8.i %s\n') % (size, fname))
        progress.complete()
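        # Schematically, the files section of the stream produced above
        # looks like this (a sketch; each <...> is a length-prefixed chunk):
        #
        #   <fileheader 'bar.txt'> <delta> <delta> <empty chunk>
        #   <fileheader 'foo.txt'> <delta> <empty chunk>
        #   <empty chunk>    # terminates the files section
        #
        # The receiving side (_addchangegroupfiles() below) keeps calling
        # filelogheader() until that final empty chunk is seen.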

    def _deltaparent(self, store, rev, p1, p2, prev):
        if self._useprevdelta:
            if not store.candelta(prev, rev):
                raise error.ProgrammingError(
                    'cg1 should not be used in this case')
            return prev

        # Narrow ellipses mode.
        if util.safehasattr(self, '_full_nodes'):
            # TODO: send better deltas when in narrow mode.
            #
            # changegroup.group() loops over revisions to send,
            # including revisions we'll skip. What this means is that
            # `prev` will be a potentially useless delta base for all
            # ellipsis nodes, as the client likely won't have it. In
            # the future we should do bookkeeping about which nodes
            # have been sent to the client, and try to be
            # significantly smarter about delta bases. This is
            # slightly tricky because this same code has to work for
            # all revlogs, and we don't have the linkrev/linknode here.
            return p1

        dp = store.deltaparent(rev)
        if dp == nullrev and store.storedeltachains:
            # Avoid sending full revisions when the delta parent is null.
            # Pick prev in that case. It's tempting to pick p1 instead, as p1
            # will be smaller in the common case. However, computing a delta
            # against p1 may require resolving the raw text of p1, which
            # could be expensive. The revlog caches should have prev cached,
            # meaning less CPU for changegroup generation. There is likely
            # room to add a flag and/or config option to control this
            # behavior.
            base = prev
        elif dp == nullrev:
            # The revlog is configured to use full snapshots for a reason;
            # stick to a full snapshot.
            base = nullrev
        elif dp not in (p1, p2, prev):
            # Pick prev when we can't be sure remote has the base revision.
            return prev
        else:
            base = dp

        if base != nullrev and not store.candelta(base, rev):
            base = nullrev

        return base
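    # A compact summary of the base selection above (assuming candelta()
    # accepts the chosen pair):
    #
    #   dp in (p1, p2, prev)           -> base = dp
    #   dp == nullrev, chains stored   -> base = prev (prev is likely cached)
    #   dp == nullrev, no delta chains -> base = nullrev (keep full snapshot)
    #   dp is anything else            -> base = prev (remote may lack dp)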

    def _revchunk(self, store, rev, prev, linknode):
        if util.safehasattr(self, '_full_nodes'):
            fn = self._revisiondeltanarrow
        else:
            fn = self._revisiondeltanormal

        delta = fn(store, rev, prev, linknode)
        if not delta:
            return

        meta = self._builddeltaheader(delta)
        l = len(meta) + sum(len(x) for x in delta.deltachunks)

        yield chunkheader(l)
        yield meta
        for x in delta.deltachunks:
            yield x
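    # Framing sketch: for a cg3 delta with a 102-byte header and 898 bytes
    # of delta data, l == 1000 and the emitted prefix is
    # struct.pack(">l", 1000 + 4) -- the length field counts its own four
    # bytes, which the chunk reader subtracts when decoding.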

    def _revisiondeltanormal(self, store, rev, prev, linknode):
        node = store.node(rev)
        p1, p2 = store.parentrevs(rev)
        base = self._deltaparent(store, rev, p1, p2, prev)

        prefix = ''
        if store.iscensored(base) or store.iscensored(rev):
            try:
                delta = store.revision(node, raw=True)
            except error.CensoredNodeError as e:
                delta = e.tombstone
            if base == nullrev:
                prefix = mdiff.trivialdiffheader(len(delta))
            else:
                baselen = store.rawsize(base)
                prefix = mdiff.replacediffheader(baselen, len(delta))
        elif base == nullrev:
            delta = store.revision(node, raw=True)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = store.revdiff(base, rev)
        p1n, p2n = store.parents(node)

        return revisiondelta(
            node=node,
            p1node=p1n,
            p2node=p2n,
            basenode=store.node(base),
            linknode=linknode,
            flags=store.flags(rev),
            deltachunks=(prefix, delta),
        )
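    # Note on 'prefix': the headers from mdiff encode a bindiff hunk as a
    # (start, end, length) triple, roughly (a sketch of the format):
    #
    #   trivialdiffheader(n)          ~ struct.pack(">lll", 0, 0, n)
    #   replacediffheader(baselen, n) ~ struct.pack(">lll", 0, baselen, n)
    #
    # so a full snapshot is just a delta that replaces the empty text.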

    def _revisiondeltanarrow(self, store, rev, prev, linknode):
        # build up some mapping information that's useful later. See
        # the local() nested function below.
        if not self._changelogdone:
-            self._clnode_to_rev[linknode] = rev
+            self._clnodetorev[linknode] = rev
            linkrev = rev
-            self._clrev_to_localrev[linkrev] = rev
+            self._clrevtolocalrev[linkrev] = rev
        else:
-            linkrev = self._clnode_to_rev[linknode]
-            self._clrev_to_localrev[linkrev] = rev
+            linkrev = self._clnodetorev[linknode]
+            self._clrevtolocalrev[linkrev] = rev

        # This is a node to send in full, because the changeset it
        # corresponds to was a full changeset.
        if linknode in self._full_nodes:
            return self._revisiondeltanormal(store, rev, prev, linknode)

        # At this point, a node can either be one we should skip or an
        # ellipsis. If it's not an ellipsis, bail immediately.
        if linkrev not in self._precomputed_ellipsis:
            return

        linkparents = self._precomputed_ellipsis[linkrev]
        def local(clrev):
            """Turn a changelog revnum into a local revnum.

            The ellipsis dag is stored as revnums on the changelog,
            but when we're producing ellipsis entries for
            non-changelog revlogs, we need to turn those numbers into
            something local. This does that for us, and during the
            changelog sending phase will also expand the stored
            mappings as needed.
            """
            if clrev == nullrev:
                return nullrev

            if not self._changelogdone:
                # If we're doing the changelog, it's possible that we
                # have a parent that is already on the client, and we
                # need to store some extra mapping information so that
                # our contained ellipsis nodes will be able to resolve
                # their parents.
-                if clrev not in self._clrev_to_localrev:
+                if clrev not in self._clrevtolocalrev:
                    clnode = store.node(clrev)
-                    self._clnode_to_rev[clnode] = clrev
+                    self._clnodetorev[clnode] = clrev
                return clrev

            # Walk the ellipsis-ized changelog breadth-first looking for a
            # change that has been linked from the current revlog.
            #
            # For a flat manifest revlog only a single step should be necessary
            # as all relevant changelog entries are relevant to the flat
            # manifest.
            #
            # For a filelog or tree manifest dirlog however not every changelog
            # entry will have been relevant, so we need to skip some changelog
            # nodes even after ellipsis-izing.
            walk = [clrev]
            while walk:
                p = walk[0]
                walk = walk[1:]
-                if p in self._clrev_to_localrev:
-                    return self._clrev_to_localrev[p]
+                if p in self._clrevtolocalrev:
+                    return self._clrevtolocalrev[p]
                elif p in self._full_nodes:
                    walk.extend([pp for pp in self._repo.changelog.parentrevs(p)
                                 if pp != nullrev])
                elif p in self._precomputed_ellipsis:
                    walk.extend([pp for pp in self._precomputed_ellipsis[p]
                                 if pp != nullrev])
                else:
                    # In this case, we've got an ellipsis with parents
                    # outside the current bundle (likely an
                    # incremental pull). We "know" that we can use the
                    # value of this same revlog at whatever revision
                    # is pointed to by linknode. "Know" is in scare
                    # quotes because I haven't done enough examination
                    # of edge cases to convince myself this is really
                    # a fact - it works for all the (admittedly
                    # thorough) cases in our testsuite, but I would be
                    # somewhat unsurprised to find a case in the wild
                    # where this breaks down a bit. That said, I don't
                    # know if it would hurt anything.
                    for i in pycompat.xrange(rev, 0, -1):
                        if store.linkrev(i) == clrev:
                            return i
                    # We failed to resolve a parent for this node, so
                    # we crash the changegroup construction.
                    raise error.Abort(
                        'unable to resolve parent while packing %r %r'
                        ' for changeset %r' % (store.indexfile, rev, clrev))

            return nullrev

        if not linkparents or (
                store.parentrevs(rev) == (nullrev, nullrev)):
            p1, p2 = nullrev, nullrev
        elif len(linkparents) == 1:
            p1, = sorted(local(p) for p in linkparents)
            p2 = nullrev
        else:
            p1, p2 = sorted(local(p) for p in linkparents)

        n = store.node(rev)
        p1n, p2n = store.node(p1), store.node(p2)
        flags = store.flags(rev)
        flags |= revlog.REVIDX_ELLIPSIS

        # TODO: try and actually send deltas for ellipsis data blocks
        data = store.revision(n)
        diffheader = mdiff.trivialdiffheader(len(data))

        return revisiondelta(
            node=n,
            p1node=p1n,
            p2node=p2n,
            basenode=nullid,
            linknode=linknode,
            flags=flags,
            deltachunks=(diffheader, data),
        )
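    # A worked example of local() above, with hypothetical revision
    # numbers: suppose the filelog only has revisions linked from
    # changelog revs 5 and 12, and the ellipsis parent to resolve is
    # clrev 8. The walk starts at 8, finds no entry in _clrevtolocalrev,
    # expands 8 through _precomputed_ellipsis to its roots (say {5}),
    # and 5 does map to a local filelog rev, which becomes the parent.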

def _makecg1packer(repo, filematcher, bundlecaps, shallow=False):
    builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack(
        d.node, d.p1node, d.p2node, d.linknode)

    return cgpacker(repo, filematcher, b'01',
                    useprevdelta=True,
                    allowreorder=None,
                    builddeltaheader=builddeltaheader,
                    manifestsend=b'',
                    sendtreemanifests=False,
                    bundlecaps=bundlecaps,
                    shallow=shallow)

def _makecg2packer(repo, filematcher, bundlecaps, shallow=False):
    builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack(
        d.node, d.p1node, d.p2node, d.basenode, d.linknode)

    # Since generaldelta is directly supported by cg2, reordering
    # generally doesn't help, so we disable it by default (treating
    # bundle.reorder=auto just like bundle.reorder=False).
    return cgpacker(repo, filematcher, b'02',
                    useprevdelta=False,
                    allowreorder=False,
                    builddeltaheader=builddeltaheader,
                    manifestsend=b'',
                    sendtreemanifests=False,
                    bundlecaps=bundlecaps,
                    shallow=shallow)

def _makecg3packer(repo, filematcher, bundlecaps, shallow=False):
    builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
        d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags)

    return cgpacker(repo, filematcher, b'03',
                    useprevdelta=False,
                    allowreorder=False,
                    builddeltaheader=builddeltaheader,
                    manifestsend=closechunk(),
                    sendtreemanifests=True,
                    bundlecaps=bundlecaps,
                    shallow=shallow)
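# For reference, the fixed-size delta headers declared at the top of this
# module grow with the format: cg1 packs four 20-byte nodes (80 bytes),
# cg2 adds basenode (100 bytes), and cg3 appends a 16-bit flags field:
#
#   assert _CHANGEGROUPV3_DELTA_HEADER.size == 102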

_packermap = {'01': (_makecg1packer, cg1unpacker),
              # cg2 adds support for exchanging generaldelta
              '02': (_makecg2packer, cg2unpacker),
              # cg3 adds support for exchanging revlog flags and treemanifests
              '03': (_makecg3packer, cg3unpacker),
}

def allsupportedversions(repo):
    versions = set(_packermap.keys())
    if not (repo.ui.configbool('experimental', 'changegroup3') or
            repo.ui.configbool('experimental', 'treemanifest') or
            'treemanifest' in repo.requirements):
        versions.discard('03')
    return versions

# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    return allsupportedversions(repo)

# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    versions = allsupportedversions(repo)
    if 'treemanifest' in repo.requirements:
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        versions.discard('01')
        versions.discard('02')
    if repository.NARROW_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # support that for stripping and unbundling to work.
        versions.discard('01')
        versions.discard('02')
    if LFS_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # mark LFS entries with REVIDX_EXTSTORED.
        versions.discard('01')
        versions.discard('02')

    return versions

def localversion(repo):
    # Finds the best version to use for bundles that are meant to be used
    # locally, such as those from strip and shelve, and temporary bundles.
    return max(supportedoutgoingversions(repo))

def safeversion(repo):
    # Finds the smallest version that it's safe to assume clients of the repo
    # will support. For example, all hg versions that support generaldelta also
    # support changegroup 02.
    versions = supportedoutgoingversions(repo)
    if 'generaldelta' in repo.requirements:
        versions.discard('01')
    assert versions
    return min(versions)
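# A minimal sketch of how these helpers combine during negotiation (the
# 'remoteversions' value is hypothetical):
#
#   remoteversions = {'01', '02'}  # advertised by the peer
#   usable = supportedoutgoingversions(repo) & remoteversions
#   version = max(usable) if usable else None  # prefer the richest format
#
# safeversion() is the conservative counterpart for when the eventual
# consumer of a bundle is unknown.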

def getbundler(version, repo, bundlecaps=None, filematcher=None,
               shallow=False):
    assert version in supportedoutgoingversions(repo)

    if filematcher is None:
        filematcher = matchmod.alwaysmatcher(repo.root, '')

    if version == '01' and not filematcher.always():
        raise error.ProgrammingError('version 01 changegroups do not support '
                                     'sparse file matchers')

    # Requested files could include files not in the local store. So
    # filter those out.
    filematcher = matchmod.intersectmatchers(repo.narrowmatch(),
                                             filematcher)

    fn = _packermap[version][0]
    return fn(repo, filematcher, bundlecaps, shallow=shallow)

def getunbundler(version, fh, alg, extras=None):
    return _packermap[version][1](fh, alg, extras=extras)

def _changegroupinfo(repo, nodes, source):
    if repo.ui.verbose or source == 'bundle':
        repo.ui.status(_("%d changesets found\n") % len(nodes))
    if repo.ui.debugflag:
        repo.ui.debug("list of changesets:\n")
        for node in nodes:
            repo.ui.debug("%s\n" % hex(node))

def makechangegroup(repo, outgoing, version, source, fastpath=False,
                    bundlecaps=None):
    cgstream = makestream(repo, outgoing, version, source,
                          fastpath=fastpath, bundlecaps=bundlecaps)
    return getunbundler(version, util.chunkbuffer(cgstream), None,
                        {'clcount': len(outgoing.missing)})

def makestream(repo, outgoing, version, source, fastpath=False,
               bundlecaps=None, filematcher=None):
    bundler = getbundler(version, repo, bundlecaps=bundlecaps,
                         filematcher=filematcher)

    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know all linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
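# A minimal usage sketch (the 'outgoing' object would come from discovery;
# the file name is hypothetical):
#
#   cgstream = makestream(repo, outgoing, '02', 'push')
#   with open('out.cg', 'wb') as fh:
#       for chunk in cgstream:
#           fh.write(chunk)
#
# makechangegroup() wraps the same stream in an unpacker instead, so the
# result can be applied locally (as strip/shelve-style bundles are).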

def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    revisions = 0
    files = 0
    progress = repo.ui.makeprogress(_('files'), unit=_('files'),
                                    total=expectedfiles)
    for chunkdata in iter(source.filelogheader, {}):
        files += 1
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        progress.increment()
        fl = repo.file(f)
        o = len(fl)
        try:
            deltas = source.deltaiter()
            if not fl.addgroup(deltas, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        if f in needfiles:
            needs = needfiles[f]
            for new in pycompat.xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    progress.complete()

    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files

def _packellipsischangegroup(repo, common, match, relevant_nodes,
                             ellipsisroots, visitnodes, depth, source, version):
    if version in ('01', '02'):
        raise error.Abort(
            'ellipsis nodes require at least cg3 on client and server, '
            'but negotiated version %s' % version)
    # We wrap cgpacker._revchunk, using a side channel to pass
    # relevant_nodes into that area. Then if linknode isn't in the
    # set, we know we have an ellipsis node and we should defer
    # sending that node's data. We override close() to detect
    # pending ellipsis nodes and flush them.
    packer = getbundler(version, repo, filematcher=match,
                        shallow=depth is not None)
    # Give the packer the list of nodes which should not be
    # ellipsis nodes. We store this rather than the set of nodes
    # that should be an ellipsis because for very large histories
    # we expect this to be significantly smaller.
    packer._full_nodes = relevant_nodes
    # Maps ellipsis revs to their roots at the changelog level.
    packer._precomputed_ellipsis = ellipsisroots
-    # Maps CL revs to per-revlog revisions. Cleared in close() at
-    # the end of each group.
-    packer._clrev_to_localrev = {}
-    packer._next_clrev_to_localrev = {}
-    # Maps changelog nodes to changelog revs. Filled in once
-    # during changelog stage and then left unmodified.
-    packer._clnode_to_rev = {}

    return packer.generate(common, visitnodes, False, source)
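# For orientation, the side-channel attributes grafted onto the packer
# above have these shapes (illustrative):
#
#   packer._full_nodes            # set of changelog nodes sent in full
#   packer._precomputed_ellipsis  # {ellipsis clrev: [root clrevs]}
#
# The revision maps the narrow path also consults (_clrevtolocalrev and
# _clnodetorev) are now initialized by cgpacker itself, which is why they
# are no longer assigned here.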