changegroup: control delta parent behavior via constructor...

Gregory Szorc
r38937:23ae0c07 default
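
The new `useprevdelta` constructor argument makes the delta-parent policy an
explicit property of the packer rather than something implied by the
changegroup version. A rough sketch of what a caller might now pass (values
are illustrative; only the signature is taken from the change below):

    # cg1 can only express a delta against the previous revision in the
    # stream, so a version-01 packer would plausibly pass True; cg2/cg3
    # headers carry an explicit delta base, so False becomes possible.
    packer = cg1packer(repo, filematcher, b'01', allowreorder=None,
                       useprevdelta=True,
                       builddeltaheader=makedeltaheader,  # hypothetical helper
                       manifestsend=b'',
                       sendtreemanifests=False)
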
@@ -1,1387 +1,1399 @@
# changegroup.py - Mercurial changegroup manipulation functions
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import os
import struct
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    nullrev,
    short,
)

from .thirdparty import (
    attr,
)

from . import (
    dagutil,
    error,
    manifest,
    match as matchmod,
    mdiff,
    phases,
    pycompat,
    repository,
    revlog,
    util,
)

from .utils import (
    stringutil,
)

_CHANGEGROUPV1_DELTA_HEADER = struct.Struct("20s20s20s20s")
_CHANGEGROUPV2_DELTA_HEADER = struct.Struct("20s20s20s20s20s")
_CHANGEGROUPV3_DELTA_HEADER = struct.Struct(">20s20s20s20s20sH")

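# Field layout of the delta headers unpacked by the classes below (see the
# _deltaheader() implementations):
#
#   v1: node, p1, p2, linknode                    (4 x 20 bytes)
#   v2: node, p1, p2, deltabase, linknode         (5 x 20 bytes)
#   v3: node, p1, p2, deltabase, linknode, flags  (5 x 20 bytes + 16-bit flags)
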
LFS_REQUIREMENT = 'lfs'

readexactly = util.readexactly

def getchunk(stream):
    """return the next chunk from stream as a string"""
    d = readexactly(stream, 4)
    l = struct.unpack(">l", d)[0]
    if l <= 4:
        if l:
            raise error.Abort(_("invalid chunk length %d") % l)
        return ""
    return readexactly(stream, l - 4)

def chunkheader(length):
    """return a changegroup chunk header (string)"""
    return struct.pack(">l", length + 4)

def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    return struct.pack(">l", 0)

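# A quick illustration of the framing implemented above: every chunk is
# prefixed with a big-endian int32 that counts the four length bytes
# themselves, and a bare zero length terminates a group. For example:
#
#   import io
#   framed = chunkheader(4) + b'data' + closechunk()
#   getchunk(io.BytesIO(framed))      # -> b'data'
#   getchunk(io.BytesIO(framed[8:]))  # -> b'' (end-of-group marker)
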
def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                # Increase default buffer size because default is usually
                # small (4k is common on Linux).
                fh = open(filename, "wb", 131072)
        else:
            fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, r"wb")
        cleanup = filename
        for c in chunks:
            fh.write(c)
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)

class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        if alg is None:
            alg = 'UN'
        if alg not in util.compengines.supportedbundletypes:
            raise error.Abort(_('unknown stream compression type: %s')
                              % alg)
        if alg == 'BZ':
            alg = '_truncatedBZ'

        compengine = util.compengines.forbundletype(alg)
        self._stream = compengine.decompressorreader(fh)
        self._type = alg
        self.extras = extras or {}
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None and self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

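    # Note on the v1 header handled above: the wire format carries no
    # explicit delta base. The first delta in a group applies against its
    # p1; each later delta applies against the node that preceded it in the
    # stream (prevnode), which deltaiter() threads through every
    # deltachunk() call below.
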
    def deltachunk(self, prevnode):
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = self.deltaheader.unpack(headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return (node, p1, p2, cs, deltabase, delta, flags)

    def getchunks(self):
        """returns all the chunks contained in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parses the changegroup data; otherwise it
        would block for sshrepo because it doesn't know the end of the stream.
        """
        # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
        # and a list of filelogs. For changegroup 3, we expect 4 parts:
        # changelog, manifestlog, a list of tree manifestlogs, and a list of
        # filelogs.
        #
        # Changelog and manifestlog parts are terminated with empty chunks. The
        # tree and file parts are a list of entry sections. Each entry section
        # is a series of chunks terminating in an empty chunk. The list of these
        # entry sections is terminated in yet another empty chunk, so we know
        # we've reached the end of the tree/file list when we reach an empty
        # chunk that was preceded by no non-empty chunks.

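        # Schematically, a version-03 stream (_grouplistcount = 2) is:
        #
        #   changelog chunks..., empty
        #   manifest chunks..., empty
        #   (tree entry chunks..., empty)*, empty
        #   (file entry chunks..., empty)*, empty
        #
        # which is the structure the loops below walk.
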
        parts = 0
        while parts < 2 + self._grouplistcount:
            noentries = True
            while True:
                chunk = getchunk(self)
                if not chunk:
                    # The first two empty chunks represent the end of the
                    # changelog and the manifestlog portions. The remaining
                    # empty chunks represent either A) the end of individual
                    # tree or file entries in the file list, or B) the end of
                    # the entire list. It's the end of the entire list if there
                    # were no entries (i.e. noentries is True).
                    if parts < 2:
                        parts += 1
                    elif noentries:
                        parts += 1
                    break
                noentries = False
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

    def _unpackmanifests(self, repo, revmap, trp, prog):
        self.callback = prog.increment
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        deltas = self.deltaiter()
        repo.manifestlog.addgroup(deltas, revmap, trp)
        prog.complete()
        self.callback = None

    def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
              expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        try:
            # The transaction may already carry source information. In this
            # case we use the top level data. We overwrite the argument
            # because we need to use the top level value (if they exist)
            # in this function.
            srctype = tr.hookargs.setdefault('source', srctype)
            url = tr.hookargs.setdefault('url', url)
            repo.hook('prechangegroup',
                      throw=True, **pycompat.strkwargs(tr.hookargs))

            # write changelog data to temp files so concurrent readers
            # will not see an inconsistent view
            cl = repo.changelog
            cl.delayupdate(tr)
            oldheads = set(cl.heads())

            trp = weakref.proxy(tr)
            # pull off the changeset group
            repo.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            progress = repo.ui.makeprogress(_('changesets'), unit=_('chunks'),
                                            total=expectedtotal)
            self.callback = progress.increment

            efiles = set()
            def onchangelog(cl, node):
                efiles.update(cl.readfiles(node))

            self.changelogheader()
            deltas = self.deltaiter()
            cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog)
            efiles = len(efiles)

            if not cgnodes:
                repo.ui.develwarn('applied empty changegroup',
                                  config='warn-empty-changegroup')
            clend = len(cl)
            changesets = clend - clstart
            progress.complete()
            self.callback = None

            # pull off the manifest group
            repo.ui.status(_("adding manifests\n"))
            # We know that we'll never have more manifests than we had
            # changesets.
            progress = repo.ui.makeprogress(_('manifests'), unit=_('chunks'),
                                            total=changesets)
            self._unpackmanifests(repo, revmap, trp, progress)

            needfiles = {}
            if repo.ui.configbool('server', 'validate'):
                cl = repo.changelog
                ml = repo.manifestlog
                # validate incoming csets have their manifests
                for cset in pycompat.xrange(clstart, clend):
                    mfnode = cl.changelogrevision(cset).manifest
                    mfest = ml[mfnode].readdelta()
                    # store file cgnodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            repo.ui.status(_("adding file changes\n"))
            newrevs, newfiles = _addchangegroupfiles(
                repo, self, revmap, trp, efiles, needfiles)
            revisions += newrevs
            files += newfiles

            deltaheads = 0
            if oldheads:
                heads = cl.heads()
                deltaheads = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and repo[h].closesbranch():
                        deltaheads -= 1
            htext = ""
            if deltaheads:
                htext = _(" (%+d heads)") % deltaheads

            repo.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            repo.invalidatevolatilesets()

            if changesets > 0:
                if 'node' not in tr.hookargs:
                    tr.hookargs['node'] = hex(cl.node(clstart))
                    tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                    hookargs = dict(tr.hookargs)
                else:
                    hookargs = dict(tr.hookargs)
                    hookargs['node'] = hex(cl.node(clstart))
                    hookargs['node_last'] = hex(cl.node(clend - 1))
                repo.hook('pretxnchangegroup',
                          throw=True, **pycompat.strkwargs(hookargs))

            added = [cl.node(r) for r in pycompat.xrange(clstart, clend)]
            phaseall = None
            if srctype in ('push', 'serve'):
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if changeset already
                # exists locally as secret
                #
                # We should not use added here but the list of all changes in
                # the bundle
                if repo.publishing():
                    targetphase = phaseall = phases.public
                else:
                    # closer target phase computation

                    # Those changesets have been pushed from the
                    # outside, their phases are going to be pushed
                    # alongside. Therefore `targetphase` is
                    # ignored.
                    targetphase = phaseall = phases.draft
                if added:
                    phases.registernew(repo, tr, targetphase, added)
                if phaseall is not None:
                    phases.advanceboundary(repo, tr, phaseall, cgnodes)

            if changesets > 0:

                def runhooks():
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(repo):
                        return

                    repo.hook("changegroup", **pycompat.strkwargs(hookargs))

                    for n in added:
                        args = hookargs.copy()
                        args['node'] = hex(n)
                        del args['node_last']
                        repo.hook("incoming", **pycompat.strkwargs(args))

                    newheads = [h for h in repo.heads()
                                if h not in oldheads]
                    repo.ui.log("incoming",
                                "%d incoming changes - new heads: %s\n",
                                len(added),
                                ', '.join([hex(c[:6]) for c in newheads]))

                tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        if deltaheads < 0:
            ret = deltaheads - 1
        else:
            ret = deltaheads + 1
        return ret

    def deltaiter(self):
        """
        returns an iterator of the deltas in this changegroup

        Useful for passing to the underlying storage system to be stored.
        """
        chain = None
        for chunkdata in iter(lambda: self.deltachunk(chain), {}):
            # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
            yield chunkdata
            chain = chunkdata[0]
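
        # Typical consumption, as in _unpackmanifests() above: hand the
        # iterator straight to a storage object's addgroup(), e.g.
        #
        #   deltas = self.deltaiter()
        #   repo.manifestlog.addgroup(deltas, revmap, trp)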

class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs = headertuple
        flags = 0
        return node, p1, p2, deltabase, cs, flags

class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog):
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % d)
            dirlog = repo.manifestlog._revlog.dirlog(d)
            deltas = self.deltaiter()
            if not dirlog.addgroup(deltas, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))

class headerlessfixup(object):
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        if self._h:
            d, self._h = self._h[:n], self._h[n:]
            if len(d) < n:
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)

@attr.s(slots=True, frozen=True)
class revisiondelta(object):
    """Describes a delta entry in a changegroup.

    Captured data is sufficient to serialize the delta into multiple
    formats.
    """
    # 20 byte node of this revision.
    node = attr.ib()
    # 20 byte nodes of parent revisions.
    p1node = attr.ib()
    p2node = attr.ib()
    # 20 byte node of the revision this delta is against.
    basenode = attr.ib()
    # 20 byte node of changeset revision this delta is associated with.
    linknode = attr.ib()
    # 2 bytes of flags to apply to revision data.
    flags = attr.ib()
    # Iterable of chunks holding raw delta data.
    deltachunks = attr.ib()

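# For illustration, a packer emits one such entry per revision, along the
# lines of (values hypothetical):
#
#   revisiondelta(node=node, p1node=p1, p2node=p2, basenode=prevnode,
#                 linknode=clnode, flags=0, deltachunks=[delta])
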
class cg1packer(object):
    def __init__(self, repo, filematcher, version, allowreorder,
-                 builddeltaheader, manifestsend, sendtreemanifests,
-                 bundlecaps=None):
+                 useprevdelta, builddeltaheader, manifestsend,
+                 sendtreemanifests, bundlecaps=None):
        """Given a source repo, construct a bundler.

        filematcher is a matcher that matches on files to include in the
        changegroup. Used to facilitate sparse changegroups.

        allowreorder controls whether reordering of revisions is allowed.
        This value is used when ``bundle.reorder`` is ``auto`` or isn't
        set.

+        useprevdelta controls whether revisions should always delta against
+        the previous revision in the changegroup.
+
        builddeltaheader is a callable that constructs the header for a group
        delta.

        manifestsend is a chunk to send after manifests have been fully emitted.

        sendtreemanifests indicates whether tree manifests should be emitted.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle. While bundlecaps is
        unused in core Mercurial, extensions rely on this feature to communicate
        capabilities to customize the changegroup packer.
        """
        assert filematcher
        self._filematcher = filematcher

        self.version = version
+        self._useprevdelta = useprevdelta
        self._builddeltaheader = builddeltaheader
        self._manifestsend = manifestsend
        self._sendtreemanifests = sendtreemanifests

        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps

        # experimental config: bundle.reorder
        reorder = repo.ui.config('bundle', 'reorder')
        if reorder == 'auto':
            self._reorder = allowreorder
        else:
            self._reorder = stringutil.parsebool(reorder)

        self._repo = repo

        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None

    def close(self):
        # Ellipses serving mode.
        getattr(self, 'clrev_to_localrev', {}).clear()
        if getattr(self, 'next_clrev_to_localrev', {}):
            self.clrev_to_localrev = self.next_clrev_to_localrev
            del self.next_clrev_to_localrev
        self.changelog_done = True

        return closechunk()

    def fileheader(self, fname):
        return chunkheader(len(fname)) + fname

    # Extracted both for clarity and for overriding in extensions.
    def _sortgroup(self, store, nodelist, lookup):
        """Sort nodes for change group and turn them into revnums."""
        # Ellipses serving mode.
        #
        # In a perfect world, we'd generate better ellipsis-ified graphs
        # for non-changelog revlogs. In practice, we haven't started doing
        # that yet, so the resulting DAGs for the manifestlog and filelogs
        # are actually full of bogus parentage on all the ellipsis
        # nodes. This has the side effect that, while the contents are
        # correct, the individual DAGs might be completely out of whack in
        # a case like 882681bc3166 and its ancestors (back about 10
        # revisions or so) in the main hg repo.
        #
        # The one invariant we *know* holds is that the new (potentially
        # bogus) DAG shape will be valid if we order the nodes in the
        # order that they're introduced in dramatis personae by the
        # changelog, so what we do is we sort the non-changelog histories
        # by the order in which they are used by the changelog.
        if util.safehasattr(self, 'full_nodes') and self.clnode_to_rev:
            key = lambda n: self.clnode_to_rev[lookup(n)]
            return [store.rev(n) for n in sorted(nodelist, key=key)]

        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (store._generaldelta and self._reorder is None) or self._reorder:
            dag = dagutil.revlogdag(store)
            return dag.linearize(set(store.rev(n) for n in nodelist))
        else:
            return sorted([store.rev(n) for n in nodelist])

    def group(self, nodelist, store, lookup, units=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated; units
        specifies the type of revlog that is touched (changelog, manifest,
        etc.).
        """
632 # if we don't have any revisions touched by these changesets, bail
636 # if we don't have any revisions touched by these changesets, bail
633 if len(nodelist) == 0:
637 if len(nodelist) == 0:
634 yield self.close()
638 yield self.close()
635 return
639 return
636
640
637 revs = self._sortgroup(store, nodelist, lookup)
641 revs = self._sortgroup(store, nodelist, lookup)
638
642
639 # add the parent of the first rev
643 # add the parent of the first rev
640 p = store.parentrevs(revs[0])[0]
644 p = store.parentrevs(revs[0])[0]
641 revs.insert(0, p)
645 revs.insert(0, p)
642
646
643 # build deltas
647 # build deltas
644 progress = None
648 progress = None
645 if units is not None:
649 if units is not None:
646 progress = self._repo.ui.makeprogress(_('bundling'), unit=units,
650 progress = self._repo.ui.makeprogress(_('bundling'), unit=units,
647 total=(len(revs) - 1))
651 total=(len(revs) - 1))
648 for r in pycompat.xrange(len(revs) - 1):
652 for r in pycompat.xrange(len(revs) - 1):
649 if progress:
653 if progress:
650 progress.update(r + 1)
654 progress.update(r + 1)
651 prev, curr = revs[r], revs[r + 1]
655 prev, curr = revs[r], revs[r + 1]
652 linknode = lookup(store.node(curr))
656 linknode = lookup(store.node(curr))
653 for c in self.revchunk(store, curr, prev, linknode):
657 for c in self.revchunk(store, curr, prev, linknode):
654 yield c
658 yield c
655
659
656 if progress:
660 if progress:
657 progress.complete()
661 progress.complete()
658 yield self.close()
662 yield self.close()
659
663
660 # filter any nodes that claim to be part of the known set
664 # filter any nodes that claim to be part of the known set
661 def prune(self, store, missing, commonrevs):
665 def prune(self, store, missing, commonrevs):
662 # TODO this violates storage abstraction for manifests.
666 # TODO this violates storage abstraction for manifests.
663 if isinstance(store, manifest.manifestrevlog):
667 if isinstance(store, manifest.manifestrevlog):
664 if not self._filematcher.visitdir(store._dir[:-1] or '.'):
668 if not self._filematcher.visitdir(store._dir[:-1] or '.'):
665 return []
669 return []
666
670
667 rr, rl = store.rev, store.linkrev
671 rr, rl = store.rev, store.linkrev
668 return [n for n in missing if rl(rr(n)) not in commonrevs]
672 return [n for n in missing if rl(rr(n)) not in commonrevs]
669
673
670 def _packmanifests(self, dir, mfnodes, lookuplinknode):
674 def _packmanifests(self, dir, mfnodes, lookuplinknode):
671 """Pack flat manifests into a changegroup stream."""
675 """Pack flat manifests into a changegroup stream."""
672 assert not dir
676 assert not dir
673 for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
677 for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
674 lookuplinknode, units=_('manifests')):
678 lookuplinknode, units=_('manifests')):
675 yield chunk
679 yield chunk
676
680
677 def _packtreemanifests(self, dir, mfnodes, lookuplinknode):
681 def _packtreemanifests(self, dir, mfnodes, lookuplinknode):
678 """Version of _packmanifests that operates on directory manifests.
682 """Version of _packmanifests that operates on directory manifests.
679
683
680 Encodes the directory name in the output so multiple manifests
684 Encodes the directory name in the output so multiple manifests
681 can be sent.
685 can be sent.
682 """
686 """
683 assert self.version == b'03'
687 assert self.version == b'03'
684
688
685 if dir:
689 if dir:
686 yield self.fileheader(dir)
690 yield self.fileheader(dir)
687
691
688 # TODO violates storage abstractions by assuming revlogs.
692 # TODO violates storage abstractions by assuming revlogs.
689 dirlog = self._repo.manifestlog._revlog.dirlog(dir)
693 dirlog = self._repo.manifestlog._revlog.dirlog(dir)
690 for chunk in self.group(mfnodes, dirlog, lookuplinknode,
694 for chunk in self.group(mfnodes, dirlog, lookuplinknode,
691 units=_('manifests')):
695 units=_('manifests')):
692 yield chunk
696 yield chunk
693
697
694 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
698 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
695 '''yield a sequence of changegroup chunks (strings)'''
699 '''yield a sequence of changegroup chunks (strings)'''
696 repo = self._repo
700 repo = self._repo
697 cl = repo.changelog
701 cl = repo.changelog
698
702
699 clrevorder = {}
703 clrevorder = {}
700 mfs = {} # needed manifests
704 mfs = {} # needed manifests
701 fnodes = {} # needed file nodes
705 fnodes = {} # needed file nodes
702 mfl = repo.manifestlog
706 mfl = repo.manifestlog
703 # TODO violates storage abstraction.
707 # TODO violates storage abstraction.
704 mfrevlog = mfl._revlog
708 mfrevlog = mfl._revlog
705 changedfiles = set()
709 changedfiles = set()
706
710
707 ellipsesmode = util.safehasattr(self, 'full_nodes')
711 ellipsesmode = util.safehasattr(self, 'full_nodes')
708
712
709 # Callback for the changelog, used to collect changed files and
713 # Callback for the changelog, used to collect changed files and
710 # manifest nodes.
714 # manifest nodes.
711 # Returns the linkrev node (identity in the changelog case).
715 # Returns the linkrev node (identity in the changelog case).
712 def lookupcl(x):
716 def lookupcl(x):
713 c = cl.read(x)
717 c = cl.read(x)
714 clrevorder[x] = len(clrevorder)
718 clrevorder[x] = len(clrevorder)
715
719
716 if ellipsesmode:
720 if ellipsesmode:
717 # Only update mfs if x is going to be sent. Otherwise we
721 # Only update mfs if x is going to be sent. Otherwise we
718 # end up with bogus linkrevs specified for manifests and
722 # end up with bogus linkrevs specified for manifests and
719 # we skip some manifest nodes that we should otherwise
723 # we skip some manifest nodes that we should otherwise
720 # have sent.
724 # have sent.
721 if (x in self.full_nodes
725 if (x in self.full_nodes
722 or cl.rev(x) in self.precomputed_ellipsis):
726 or cl.rev(x) in self.precomputed_ellipsis):
723 n = c[0]
727 n = c[0]
724 # Record the first changeset introducing this manifest
728 # Record the first changeset introducing this manifest
725 # version.
729 # version.
726 mfs.setdefault(n, x)
730 mfs.setdefault(n, x)
727 # Set this narrow-specific dict so we have the lowest
731 # Set this narrow-specific dict so we have the lowest
728 # manifest revnum to look up for this cl revnum. (Part of
732 # manifest revnum to look up for this cl revnum. (Part of
729 # mapping changelog ellipsis parents to manifest ellipsis
733 # mapping changelog ellipsis parents to manifest ellipsis
730 # parents)
734 # parents)
731 self.next_clrev_to_localrev.setdefault(cl.rev(x),
735 self.next_clrev_to_localrev.setdefault(cl.rev(x),
732 mfrevlog.rev(n))
736 mfrevlog.rev(n))
733 # We can't trust the changed files list in the changeset if the
737 # We can't trust the changed files list in the changeset if the
734 # client requested a shallow clone.
738 # client requested a shallow clone.
735 if self.is_shallow:
739 if self.is_shallow:
736 changedfiles.update(mfl[c[0]].read().keys())
740 changedfiles.update(mfl[c[0]].read().keys())
737 else:
741 else:
738 changedfiles.update(c[3])
742 changedfiles.update(c[3])
739 else:
743 else:
740
744
741 n = c[0]
745 n = c[0]
742 # record the first changeset introducing this manifest version
746 # record the first changeset introducing this manifest version
743 mfs.setdefault(n, x)
747 mfs.setdefault(n, x)
744 # Record a complete list of potentially-changed files in
748 # Record a complete list of potentially-changed files in
745 # this manifest.
749 # this manifest.
746 changedfiles.update(c[3])
750 changedfiles.update(c[3])
747
751
748 return x
752 return x
749
753
750 self._verbosenote(_('uncompressed size of bundle content:\n'))
754 self._verbosenote(_('uncompressed size of bundle content:\n'))
751 size = 0
755 size = 0
752 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
756 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
753 size += len(chunk)
757 size += len(chunk)
754 yield chunk
758 yield chunk
755 self._verbosenote(_('%8.i (changelog)\n') % size)
759 self._verbosenote(_('%8.i (changelog)\n') % size)
756
760
757 # We need to make sure that the linkrev in the changegroup refers to
761 # We need to make sure that the linkrev in the changegroup refers to
758 # the first changeset that introduced the manifest or file revision.
762 # the first changeset that introduced the manifest or file revision.
759 # The fastpath is usually safer than the slowpath, because the filelogs
763 # The fastpath is usually safer than the slowpath, because the filelogs
760 # are walked in revlog order.
764 # are walked in revlog order.
761 #
765 #
762 # When taking the slowpath with reorder=None and the manifest revlog
766 # When taking the slowpath with reorder=None and the manifest revlog
763 # uses generaldelta, the manifest may be walked in the "wrong" order.
767 # uses generaldelta, the manifest may be walked in the "wrong" order.
764 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
768 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
765 # cc0ff93d0c0c).
769 # cc0ff93d0c0c).
766 #
770 #
767 # When taking the fastpath, we are only vulnerable to reordering
771 # When taking the fastpath, we are only vulnerable to reordering
768 # of the changelog itself. The changelog never uses generaldelta, so
772 # of the changelog itself. The changelog never uses generaldelta, so
769 # it is only reordered when reorder=True. To handle this case, we
773 # it is only reordered when reorder=True. To handle this case, we
770 # simply take the slowpath, which already has the 'clrevorder' logic.
774 # simply take the slowpath, which already has the 'clrevorder' logic.
771 # This was also fixed in cc0ff93d0c0c.
775 # This was also fixed in cc0ff93d0c0c.
772 fastpathlinkrev = fastpathlinkrev and not self._reorder
776 fastpathlinkrev = fastpathlinkrev and not self._reorder
773 # Treemanifests don't work correctly with fastpathlinkrev
777 # Treemanifests don't work correctly with fastpathlinkrev
774 # either, because we don't discover which directory nodes to
778 # either, because we don't discover which directory nodes to
775 # send along with files. This could probably be fixed.
779 # send along with files. This could probably be fixed.
776 fastpathlinkrev = fastpathlinkrev and (
780 fastpathlinkrev = fastpathlinkrev and (
777 'treemanifest' not in repo.requirements)
781 'treemanifest' not in repo.requirements)
778
782
779 for chunk in self.generatemanifests(commonrevs, clrevorder,
783 for chunk in self.generatemanifests(commonrevs, clrevorder,
780 fastpathlinkrev, mfs, fnodes, source):
784 fastpathlinkrev, mfs, fnodes, source):
781 yield chunk
785 yield chunk
782
786
783 if ellipsesmode:
787 if ellipsesmode:
784 mfdicts = None
788 mfdicts = None
785 if self.is_shallow:
789 if self.is_shallow:
786 mfdicts = [(self._repo.manifestlog[n].read(), lr)
790 mfdicts = [(self._repo.manifestlog[n].read(), lr)
787 for (n, lr) in mfs.iteritems()]
791 for (n, lr) in mfs.iteritems()]
788
792
789 mfs.clear()
793 mfs.clear()
790 clrevs = set(cl.rev(x) for x in clnodes)
794 clrevs = set(cl.rev(x) for x in clnodes)
791
795
792 if not fastpathlinkrev:
796 if not fastpathlinkrev:
793 def linknodes(unused, fname):
797 def linknodes(unused, fname):
794 return fnodes.get(fname, {})
798 return fnodes.get(fname, {})
795 else:
799 else:
796 cln = cl.node
800 cln = cl.node
797 def linknodes(filerevlog, fname):
801 def linknodes(filerevlog, fname):
798 llr = filerevlog.linkrev
802 llr = filerevlog.linkrev
799 fln = filerevlog.node
803 fln = filerevlog.node
800 revs = ((r, llr(r)) for r in filerevlog)
804 revs = ((r, llr(r)) for r in filerevlog)
801 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
805 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
802
806
803 if ellipsesmode:
807 if ellipsesmode:
804 # We need to pass the mfdicts variable down into
808 # We need to pass the mfdicts variable down into
805 # generatefiles(), but more than one command might have
809 # generatefiles(), but more than one command might have
806 # wrapped generatefiles so we can't modify the function
810 # wrapped generatefiles so we can't modify the function
807 # signature. Instead, we pass the data to ourselves using an
811 # signature. Instead, we pass the data to ourselves using an
808 # instance attribute. I'm sorry.
812 # instance attribute. I'm sorry.
809 self._mfdicts = mfdicts
813 self._mfdicts = mfdicts
810
814
811 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
815 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
812 source):
816 source):
813 yield chunk
817 yield chunk
814
818
815 yield self.close()
819 yield self.close()
816
820
817 if clnodes:
821 if clnodes:
818 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
822 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
819
823
820 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
824 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
821 fnodes, source):
825 fnodes, source):
822 """Returns an iterator of changegroup chunks containing manifests.
826 """Returns an iterator of changegroup chunks containing manifests.
823
827
824 `source` is unused here, but is used by extensions like remotefilelog to
828 `source` is unused here, but is used by extensions like remotefilelog to
825 change what is sent based in pulls vs pushes, etc.
829 change what is sent based in pulls vs pushes, etc.
826 """
830 """
        repo = self._repo
        mfl = repo.manifestlog
        dirlog = mfl._revlog.dirlog
        tmfnodes = {'': mfs}

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def makelookupmflinknode(dir, nodes):
            if fastpathlinkrev:
                assert not dir
                return mfs.__getitem__

            def lookupmflinknode(x):
                """Callback for looking up the linknode for manifests.

                Returns the linkrev node for the specified manifest.

                SIDE EFFECT:

                1) fclnodes gets populated with the list of relevant
                   file nodes if we're not using fastpathlinkrev
                2) When treemanifests are in use, collects treemanifest nodes
                   to send

                Note that this means manifests must be completely sent to
                the client before you can trust the list of files and
                treemanifests to send.
                """
                clnode = nodes[x]
                mdata = mfl.get(dir, x).readfast(shallow=True)
                for p, n, fl in mdata.iterentries():
                    if fl == 't': # subdirectory manifest
                        subdir = dir + p + '/'
                        tmfclnodes = tmfnodes.setdefault(subdir, {})
                        tmfclnode = tmfclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[tmfclnode]:
                            tmfclnodes[n] = clnode
                    else:
                        f = dir + p
                        fclnodes = fnodes.setdefault(f, {})
                        fclnode = fclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            fclnodes[n] = clnode
                return clnode
            return lookupmflinknode

        fn = (self._packtreemanifests if self._sendtreemanifests
              else self._packmanifests)
        size = 0
        while tmfnodes:
            dir, nodes = tmfnodes.popitem()
            prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
            if not dir or prunednodes:
                for x in fn(dir, prunednodes, makelookupmflinknode(dir, nodes)):
                    size += len(x)
                    yield x
        self._verbosenote(_('%8.i (manifests)\n') % size)
        yield self._manifestsend

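    # A sketch (editor's illustration) of the worklist pattern used by
    # generatemanifests() above: the while loop drains tmfnodes with
    # popitem() while the lookup callback keeps feeding newly discovered
    # subdirectory manifests back in, so nested tree manifests are emitted
    # without recursion. Directory names and payloads here are made up.
    def _sketch_drain_worklist():
        worklist = {'': ['root-node']}
        # Children discovered per directory; stands in for the 't' entries
        # that lookupmflinknode() sees while reading each manifest.
        children = {'': ['a/', 'b/'], 'a/': ['a/c/'], 'b/': [], 'a/c/': []}
        emitted = []
        while worklist:
            dirname, nodes = worklist.popitem()
            emitted.append(dirname)
            for subdir in children[dirname]:
                # mirrors tmfnodes.setdefault(subdir, {}) above
                worklist.setdefault(subdir, ['node-of-%s' % subdir])
        return emitted

    # Every directory is emitted exactly once; order beyond the root is
    # unspecified because popitem() is arbitrary.
    assert sorted(_sketch_drain_worklist()) == ['', 'a/', 'a/c/', 'b/']
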
    # The 'source' parameter is useful for extensions
    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        changedfiles = list(filter(self._filematcher, changedfiles))

        if getattr(self, 'is_shallow', False):
            # See comment in generate() for why this sadness is a thing.
            mfdicts = self._mfdicts
            del self._mfdicts
            # In a shallow clone, the linknodes callback needs to also include
            # those file nodes that are in the manifests we sent but weren't
            # introduced by those manifests.
            commonctxs = [self._repo[c] for c in commonrevs]
            oldlinknodes = linknodes
            clrev = self._repo.changelog.rev

            # Defining this function has a side-effect of overriding the
            # function of the same name that was passed in as an argument.
            # TODO have caller pass in appropriate function.
            def linknodes(flog, fname):
                for c in commonctxs:
                    try:
                        fnode = c.filenode(fname)
                        self.clrev_to_localrev[c.rev()] = flog.rev(fnode)
                    except error.ManifestLookupError:
                        pass
                links = oldlinknodes(flog, fname)
                if len(links) != len(mfdicts):
                    for mf, lr in mfdicts:
                        fnode = mf.get(fname, None)
                        if fnode in links:
                            links[fnode] = min(links[fnode], lr, key=clrev)
                        elif fnode:
                            links[fnode] = lr
                return links

        return self._generatefiles(changedfiles, linknodes, commonrevs, source)

    def _generatefiles(self, changedfiles, linknodes, commonrevs, source):
        repo = self._repo
        progress = repo.ui.makeprogress(_('bundling'), unit=_('files'),
                                        total=len(changedfiles))
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise error.Abort(_("empty or missing file data for %s") %
                                  fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup callback for filenodes; the linkrev nodes were collected
            # above in the fastpath case and by lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
            if filenodes:
                progress.update(i + 1, item=fname)
                h = self.fileheader(fname)
                size = len(h)
                yield h
                for chunk in self.group(filenodes, filerevlog, lookupfilelog):
                    size += len(chunk)
                    yield chunk
                self._verbosenote(_('%8.i %s\n') % (size, fname))
        progress.complete()

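    # A sketch (editor's illustration) of the per-file emission above: each
    # file contributes a header chunk followed by its revision chunks, with a
    # running byte tally for the verbose note. The chunks are fabricated.
    def _sketch_emit_file(fname, chunks):
        header = b'header:' + fname
        size = len(header)
        out = [header]
        for chunk in chunks:
            size += len(chunk)
            out.append(chunk)
        return size, out

    assert _sketch_emit_file(b'a.txt', [b'c1', b'c2']) == (
        len(b'header:a.txt') + 4, [b'header:a.txt', b'c1', b'c2'])
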
    def deltaparent(self, store, rev, p1, p2, prev):
        if self._useprevdelta:
            if not store.candelta(prev, rev):
                raise error.ProgrammingError(
                    'cg1 should not be used in this case')
            return prev

        # Narrow ellipses mode.
        if util.safehasattr(self, 'full_nodes'):
            # TODO: send better deltas when in narrow mode.
            #
            # changegroup.group() loops over revisions to send,
            # including revisions we'll skip. What this means is that
            # `prev` will be a potentially useless delta base for all
            # ellipsis nodes, as the client likely won't have it. In
            # the future we should do bookkeeping about which nodes
            # have been sent to the client, and try to be
            # significantly smarter about delta bases. This is
            # slightly tricky because this same code has to work for
            # all revlogs, and we don't have the linkrev/linknode here.
            return p1

        dp = store.deltaparent(rev)
        if dp == nullrev and store.storedeltachains:
            # Avoid sending full revisions when delta parent is null. Pick prev
            # in that case. It's tempting to pick p1 in this case, as p1 will
            # be smaller in the common case. However, computing a delta against
            # p1 may require resolving the raw text of p1, which could be
            # expensive. The revlog caches should have prev cached, meaning
            # less CPU for changegroup generation. There is likely room to add
            # a flag and/or config option to control this behavior.
            base = prev
        elif dp == nullrev:
            # revlog is configured to use full snapshot for a reason,
            # stick to full snapshot.
            base = nullrev
        elif dp not in (p1, p2, prev):
            # Pick prev when we can't be sure remote has the base revision.
            return prev
        else:
            base = dp

        if base != nullrev and not store.candelta(base, rev):
            base = nullrev

        return base

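    # A sketch (editor's illustration) of the delta-base policy implemented
    # above for the useprevdelta=False case, with the store reduced to plain
    # values so each branch is easy to trace. nullrev is -1 as in Mercurial;
    # everything else is hypothetical test data.
    def _sketch_choose_base(dp, p1, p2, prev, storedeltachains, candelta):
        # dp plays the role of store.deltaparent(rev): the base this revlog
        # itself stored a delta against.
        if dp == -1 and storedeltachains:
            return prev       # cheap: prev is usually in the revlog cache
        elif dp == -1:
            return -1         # the store wants a full snapshot; keep it
        elif dp not in (p1, p2, prev):
            return prev       # remote may not have dp; prev is always safe
        base = dp
        if base != -1 and not candelta(base):
            base = -1
        return base

    # A revlog that stored rev 7 as a delta against its p1 (rev 5) reuses
    # that base, since the receiver is guaranteed to have p1:
    assert _sketch_choose_base(dp=5, p1=5, p2=-1, prev=6,
                               storedeltachains=True,
                               candelta=lambda b: True) == 5
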
    def revchunk(self, store, rev, prev, linknode):
        if util.safehasattr(self, 'full_nodes'):
            fn = self._revisiondeltanarrow
        else:
            fn = self._revisiondeltanormal

        delta = fn(store, rev, prev, linknode)
        if not delta:
            return

        meta = self._builddeltaheader(delta)
        l = len(meta) + sum(len(x) for x in delta.deltachunks)

        yield chunkheader(l)
        yield meta
        for x in delta.deltachunks:
            yield x

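    # A sketch (editor's illustration) of the wire framing revchunk() emits:
    # a 4-byte big-endian length that counts itself (hence the +4 in
    # chunkheader()), then the delta header, then the delta payload. The
    # payloads below are made up.
    def _sketch_frame(meta, deltachunks):
        body = meta + b''.join(deltachunks)
        return struct.pack(">l", len(body) + 4) + body

    def _sketch_unframe(data):
        length = struct.unpack(">l", data[:4])[0]
        return data[4:length]

    assert _sketch_unframe(_sketch_frame(b'HDR', [b'd1', b'd2'])) == b'HDRd1d2'
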
    def _revisiondeltanormal(self, store, rev, prev, linknode):
        node = store.node(rev)
        p1, p2 = store.parentrevs(rev)
        base = self.deltaparent(store, rev, p1, p2, prev)

        prefix = ''
        if store.iscensored(base) or store.iscensored(rev):
            try:
                delta = store.revision(node, raw=True)
            except error.CensoredNodeError as e:
                delta = e.tombstone
            if base == nullrev:
                prefix = mdiff.trivialdiffheader(len(delta))
            else:
                baselen = store.rawsize(base)
                prefix = mdiff.replacediffheader(baselen, len(delta))
        elif base == nullrev:
            delta = store.revision(node, raw=True)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = store.revdiff(base, rev)
        p1n, p2n = store.parents(node)

        return revisiondelta(
            node=node,
            p1node=p1n,
            p2node=p2n,
            basenode=store.node(base),
            linknode=linknode,
            flags=store.flags(rev),
            deltachunks=(prefix, delta),
        )

    def _revisiondeltanarrow(self, store, rev, prev, linknode):
        # build up some mapping information that's useful later. See
        # the local() nested function below.
        if not self.changelog_done:
            self.clnode_to_rev[linknode] = rev
            linkrev = rev
            self.clrev_to_localrev[linkrev] = rev
        else:
            linkrev = self.clnode_to_rev[linknode]
            self.clrev_to_localrev[linkrev] = rev

        # This is a node to send in full, because the changeset it
        # corresponds to was a full changeset.
        if linknode in self.full_nodes:
            return self._revisiondeltanormal(store, rev, prev, linknode)

        # At this point, a node can either be one we should skip or an
        # ellipsis. If it's not an ellipsis, bail immediately.
        if linkrev not in self.precomputed_ellipsis:
            return

        linkparents = self.precomputed_ellipsis[linkrev]
        def local(clrev):
            """Turn a changelog revnum into a local revnum.

            The ellipsis dag is stored as revnums on the changelog,
            but when we're producing ellipsis entries for
            non-changelog revlogs, we need to turn those numbers into
            something local. This does that for us, and during the
            changelog sending phase will also expand the stored
            mappings as needed.
            """
            if clrev == nullrev:
                return nullrev

            if not self.changelog_done:
                # If we're doing the changelog, it's possible that we
                # have a parent that is already on the client, and we
                # need to store some extra mapping information so that
                # our contained ellipsis nodes will be able to resolve
                # their parents.
                if clrev not in self.clrev_to_localrev:
                    clnode = store.node(clrev)
                    self.clnode_to_rev[clnode] = clrev
                return clrev

            # Walk the ellipsis-ized changelog breadth-first looking for a
            # change that has been linked from the current revlog.
            #
            # For a flat manifest revlog only a single step should be necessary
            # as all relevant changelog entries are relevant to the flat
            # manifest.
            #
            # For a filelog or tree manifest dirlog however not every changelog
            # entry will have been relevant, so we need to skip some changelog
            # nodes even after ellipsis-izing.
            walk = [clrev]
            while walk:
                p = walk[0]
                walk = walk[1:]
                if p in self.clrev_to_localrev:
                    return self.clrev_to_localrev[p]
                elif p in self.full_nodes:
                    walk.extend([pp for pp in self._repo.changelog.parentrevs(p)
                                 if pp != nullrev])
                elif p in self.precomputed_ellipsis:
                    walk.extend([pp for pp in self.precomputed_ellipsis[p]
                                 if pp != nullrev])
                else:
                    # In this case, we've got an ellipsis with parents
                    # outside the current bundle (likely an
                    # incremental pull). We "know" that we can use the
                    # value of this same revlog at whatever revision
                    # is pointed to by linknode. "Know" is in scare
                    # quotes because I haven't done enough examination
                    # of edge cases to convince myself this is really
                    # a fact - it works for all the (admittedly
                    # thorough) cases in our testsuite, but I would be
                    # somewhat unsurprised to find a case in the wild
                    # where this breaks down a bit. That said, I don't
                    # know if it would hurt anything.
                    for i in pycompat.xrange(rev, 0, -1):
                        if store.linkrev(i) == clrev:
                            return i
                    # We failed to resolve a parent for this node, so
                    # we crash the changegroup construction.
                    raise error.Abort(
                        'unable to resolve parent while packing %r %r'
                        ' for changeset %r' % (store.indexfile, rev, clrev))

            return nullrev

        if not linkparents or (
                store.parentrevs(rev) == (nullrev, nullrev)):
            p1, p2 = nullrev, nullrev
        elif len(linkparents) == 1:
            p1, = sorted(local(p) for p in linkparents)
            p2 = nullrev
        else:
            p1, p2 = sorted(local(p) for p in linkparents)

        n = store.node(rev)
        p1n, p2n = store.node(p1), store.node(p2)
        flags = store.flags(rev)
        flags |= revlog.REVIDX_ELLIPSIS

        # TODO: try and actually send deltas for ellipsis data blocks
        data = store.revision(n)
        diffheader = mdiff.trivialdiffheader(len(data))

        return revisiondelta(
            node=n,
            p1node=p1n,
            p2node=p2n,
            basenode=nullid,
            linknode=linknode,
            flags=flags,
            deltachunks=(diffheader, data),
        )

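    # A sketch (editor's illustration) of the breadth-first walk inside
    # local() above: starting from a changelog rev, step through full or
    # ellipsis parents until reaching a rev that this revlog actually
    # linked. The toy graphs below are invented.
    def _sketch_resolve_local(clrev, clrev_to_localrev, ellipsis_parents):
        walk = [clrev]
        while walk:
            p = walk.pop(0)
            if p in clrev_to_localrev:
                return p, clrev_to_localrev[p]
            # Not linked from this revlog: widen the walk to p's parents.
            walk.extend(ellipsis_parents.get(p, ()))
        return None, -1  # nullrev: no ancestor touches this revlog

    # Changelog rev 9 is an ellipsis whose root, rev 4, touched this revlog
    # as local rev 2; the walk hops from 9 to 4 and resolves there:
    assert _sketch_resolve_local(9, {4: 2}, {9: [4]}) == (4, 2)
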
def _makecg1packer(repo, filematcher, bundlecaps):
    builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack(
        d.node, d.p1node, d.p2node, d.linknode)

    return cg1packer(repo, filematcher, b'01',
                     useprevdelta=True,
                     allowreorder=None,
                     builddeltaheader=builddeltaheader,
                     manifestsend=b'', sendtreemanifests=False,
                     bundlecaps=bundlecaps)

def _makecg2packer(repo, filematcher, bundlecaps):
    builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack(
        d.node, d.p1node, d.p2node, d.basenode, d.linknode)

    # Since generaldelta is directly supported by cg2, reordering
    # generally doesn't help, so we disable it by default (treating
    # bundle.reorder=auto just like bundle.reorder=False).
    return cg1packer(repo, filematcher, b'02',
                     useprevdelta=False,
                     allowreorder=False,
                     builddeltaheader=builddeltaheader,
                     manifestsend=b'', sendtreemanifests=False,
                     bundlecaps=bundlecaps)

def _makecg3packer(repo, filematcher, bundlecaps):
    builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
        d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags)

    return cg1packer(repo, filematcher, b'03',
                     useprevdelta=False,
                     allowreorder=False,
                     builddeltaheader=builddeltaheader,
                     manifestsend=closechunk(), sendtreemanifests=True,
                     bundlecaps=bundlecaps)

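# A sketch (editor's illustration) of how the delta headers packed by the
# builddeltaheader lambdas above grow across versions: cg1 carries no
# explicit delta base (useprevdelta=True makes prev the implicit base), cg2
# adds basenode, and cg3 appends a flags field. The layouts follow the
# _CHANGEGROUPV*_DELTA_HEADER structs at the top of this module; the 20-byte
# nodes are fabricated.
def _sketch_header_sizes():
    node, p1, p2, base, link = (b'N' * 20, b'P' * 20, b'Q' * 20,
                                b'B' * 20, b'L' * 20)
    cg1 = _CHANGEGROUPV1_DELTA_HEADER.pack(node, p1, p2, link)
    cg2 = _CHANGEGROUPV2_DELTA_HEADER.pack(node, p1, p2, base, link)
    cg3 = _CHANGEGROUPV3_DELTA_HEADER.pack(node, p1, p2, base, link, 0)
    return len(cg1), len(cg2), len(cg3)

assert _sketch_header_sizes() == (80, 100, 102)
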
_packermap = {'01': (_makecg1packer, cg1unpacker),
              # cg2 adds support for exchanging generaldelta
              '02': (_makecg2packer, cg2unpacker),
              # cg3 adds support for exchanging revlog flags and treemanifests
              '03': (_makecg3packer, cg3unpacker),
}

def allsupportedversions(repo):
    versions = set(_packermap.keys())
    if not (repo.ui.configbool('experimental', 'changegroup3') or
            repo.ui.configbool('experimental', 'treemanifest') or
            'treemanifest' in repo.requirements):
        versions.discard('03')
    return versions

# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    return allsupportedversions(repo)

# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    versions = allsupportedversions(repo)
    if 'treemanifest' in repo.requirements:
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        versions.discard('01')
        versions.discard('02')
    if repository.NARROW_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # support that for stripping and unbundling to work.
        versions.discard('01')
        versions.discard('02')
    if LFS_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # mark LFS entries with REVIDX_EXTSTORED.
        versions.discard('01')
        versions.discard('02')

    return versions

def localversion(repo):
    # Finds the best version to use for bundles that are meant to be used
    # locally, such as those from strip and shelve, and temporary bundles.
    return max(supportedoutgoingversions(repo))

def safeversion(repo):
    # Finds the smallest version that it's safe to assume clients of the repo
    # will support. For example, all hg versions that support generaldelta also
    # support changegroup 02.
    versions = supportedoutgoingversions(repo)
    if 'generaldelta' in repo.requirements:
        versions.discard('01')
    assert versions
    return min(versions)

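# A sketch (editor's illustration) contrasting the two selectors above:
# localversion() takes the newest supported version, since local bundles
# from strip/shelve never leave this host, while safeversion() takes the
# oldest one left after discarding what capable clients no longer need.
# Plain string comparison works because versions are zero-padded.
_sketch_versions = {'01', '02', '03'}
assert max(_sketch_versions) == '03'               # localversion()-style pick
assert min(_sketch_versions - {'01'}) == '02'      # safeversion() on a
                                                   # generaldelta repo
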
def getbundler(version, repo, bundlecaps=None, filematcher=None):
    assert version in supportedoutgoingversions(repo)

    if filematcher is None:
        filematcher = matchmod.alwaysmatcher(repo.root, '')

    if version == '01' and not filematcher.always():
        raise error.ProgrammingError('version 01 changegroups do not support '
                                     'sparse file matchers')

    # Requested files could include files not in the local store. So
    # filter those out.
    filematcher = matchmod.intersectmatchers(repo.narrowmatch(),
                                             filematcher)

    fn = _packermap[version][0]
    return fn(repo, filematcher, bundlecaps)

def getunbundler(version, fh, alg, extras=None):
    return _packermap[version][1](fh, alg, extras=extras)

def _changegroupinfo(repo, nodes, source):
    if repo.ui.verbose or source == 'bundle':
        repo.ui.status(_("%d changesets found\n") % len(nodes))
    if repo.ui.debugflag:
        repo.ui.debug("list of changesets:\n")
        for node in nodes:
            repo.ui.debug("%s\n" % hex(node))

def makechangegroup(repo, outgoing, version, source, fastpath=False,
                    bundlecaps=None):
    cgstream = makestream(repo, outgoing, version, source,
                          fastpath=fastpath, bundlecaps=bundlecaps)
    return getunbundler(version, util.chunkbuffer(cgstream), None,
                        {'clcount': len(outgoing.missing) })

def makestream(repo, outgoing, version, source, fastpath=False,
               bundlecaps=None, filematcher=None):
    bundler = getbundler(version, repo, bundlecaps=bundlecaps,
                         filematcher=filematcher)

    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know that all linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)

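# A sketch (editor's illustration) of the fast-path test in makestream():
# fastpathlinkrev is safe exactly when every unfiltered head is being sent,
# because then every linkrev the client computes will point at a changeset
# it actually received. The heads below are hypothetical.
def _sketch_use_fastpath(fastpath, filtername, missingheads, repoheads):
    return fastpath or (filtername is None and
                        sorted(missingheads) == sorted(repoheads))

assert _sketch_use_fastpath(False, None, [b'h2', b'h1'], [b'h1', b'h2'])
assert not _sketch_use_fastpath(False, 'visible', [b'h1'], [b'h1'])
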
def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    revisions = 0
    files = 0
    progress = repo.ui.makeprogress(_('files'), unit=_('files'),
                                    total=expectedfiles)
    for chunkdata in iter(source.filelogheader, {}):
        files += 1
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        progress.increment()
        fl = repo.file(f)
        o = len(fl)
        try:
            deltas = source.deltaiter()
            if not fl.addgroup(deltas, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        if f in needfiles:
            needs = needfiles[f]
            for new in pycompat.xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    progress.complete()

    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files

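# A sketch (editor's illustration) of the two-argument iter() idiom used in
# _addchangegroupfiles() above: iter(source.filelogheader, {}) calls the
# function repeatedly until it returns the sentinel, an empty dict marking
# the end of the file parts. The parts below are fabricated.
def _sketch_make_fetch(parts):
    state = {'i': 0}
    def fetch():
        part = parts[state['i']]
        state['i'] += 1
        return part
    return fetch

_sketch_parts = [{'filename': b'a.txt'}, {'filename': b'b.txt'}, {}]
assert [c['filename']
        for c in iter(_sketch_make_fetch(_sketch_parts), {})] == \
    [b'a.txt', b'b.txt']
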
def _packellipsischangegroup(repo, common, match, relevant_nodes,
                             ellipsisroots, visitnodes, depth, source, version):
    if version in ('01', '02'):
        raise error.Abort(
            'ellipsis nodes require at least cg3 on client and server, '
            'but negotiated version %s' % version)
    # We wrap cg1packer.revchunk, using a side channel to pass
    # relevant_nodes into that area. Then if linknode isn't in the
    # set, we know we have an ellipsis node and we should defer
    # sending that node's data. We override close() to detect
    # pending ellipsis nodes and flush them.
    packer = getbundler(version, repo, filematcher=match)
    # Give the packer the list of nodes which should not be
    # ellipsis nodes. We store this rather than the set of nodes
    # that should be an ellipsis because for very large histories
    # we expect this to be significantly smaller.
    packer.full_nodes = relevant_nodes
    # Maps ellipsis revs to their roots at the changelog level.
    packer.precomputed_ellipsis = ellipsisroots
    # Maps CL revs to per-revlog revisions. Cleared in close() at
    # the end of each group.
    packer.clrev_to_localrev = {}
    packer.next_clrev_to_localrev = {}
    # Maps changelog nodes to changelog revs. Filled in once
    # during changelog stage and then left unmodified.
    packer.clnode_to_rev = {}
    packer.changelog_done = False
    # If true, informs the packer that it is serving shallow content and might
    # need to pack file contents not introduced by the changes being packed.
    packer.is_shallow = depth is not None

    return packer.generate(common, visitnodes, False, source)