changegroup: rename cg1packer to cgpacker...
Gregory Szorc
r38938:4c99c6d1 default
@@ -1,1399 +1,1402 @@
# changegroup.py - Mercurial changegroup manipulation functions
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import os
import struct
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    nullrev,
    short,
)

from .thirdparty import (
    attr,
)

from . import (
    dagutil,
    error,
    manifest,
    match as matchmod,
    mdiff,
    phases,
    pycompat,
    repository,
    revlog,
    util,
)

from .utils import (
    stringutil,
)

_CHANGEGROUPV1_DELTA_HEADER = struct.Struct("20s20s20s20s")
_CHANGEGROUPV2_DELTA_HEADER = struct.Struct("20s20s20s20s20s")
_CHANGEGROUPV3_DELTA_HEADER = struct.Struct(">20s20s20s20s20sH")

LFS_REQUIREMENT = 'lfs'

readexactly = util.readexactly

def getchunk(stream):
    """return the next chunk from stream as a string"""
    d = readexactly(stream, 4)
    l = struct.unpack(">l", d)[0]
    if l <= 4:
        if l:
            raise error.Abort(_("invalid chunk length %d") % l)
        return ""
    return readexactly(stream, l - 4)

def chunkheader(length):
    """return a changegroup chunk header (string)"""
    return struct.pack(">l", length + 4)

def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    return struct.pack(">l", 0)

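# Illustrative sketch (not part of the original module): round-tripping the
# chunk framing defined above. A chunk is a big-endian 4-byte length that
# counts itself, followed by the payload; a zero-length header closes the
# stream. The helper name and sample payloads here are hypothetical.
def _examplechunkroundtrip():
    import io
    payloads = [b'first chunk', b'second chunk']
    # Frame each payload, then terminate with a close chunk.
    framed = b''.join(chunkheader(len(p)) + p
                      for p in payloads) + closechunk()
    stream = io.BytesIO(framed)
    # getchunk() returns an empty string on a close chunk, ending the loop.
    decoded = []
    while True:
        c = getchunk(stream)
        if not c:
            break
        decoded.append(c)
    assert decoded == payloads
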
def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                # Increase default buffer size because default is usually
                # small (4k is common on Linux).
                fh = open(filename, "wb", 131072)
        else:
            fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, r"wb")
            cleanup = filename
        for c in chunks:
            fh.write(c)
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)

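# Illustrative sketch (not part of the original module): persisting a framed
# chunk sequence with writechunks(). Passing filename=None requests a
# temporary hg-bundle-*.hg file; `ui` is assumed to be supplied by the caller.
def _examplewritechunks(ui):
    chunks = [chunkheader(len(b'hello')), b'hello', closechunk()]
    # Returns the path actually written; without a vfs, plain open() is used.
    return writechunks(ui, chunks, None)
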
class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        if alg is None:
            alg = 'UN'
        if alg not in util.compengines.supportedbundletypes:
            raise error.Abort(_('unknown stream compression type: %s')
                             % alg)
        if alg == 'BZ':
            alg = '_truncatedBZ'

        compengine = util.compengines.forbundletype(alg)
        self._stream = compengine.decompressorreader(fh)
        self._type = alg
        self.extras = extras or {}
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None and self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = self.deltaheader.unpack(headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return (node, p1, p2, cs, deltabase, delta, flags)

    def getchunks(self):
        """returns all the chunks contained in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parses the changegroup data; otherwise it
        would block in the sshrepo case because it doesn't know the end of
        the stream.
        """
        # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
        # and a list of filelogs. For changegroup 3, we expect 4 parts:
        # changelog, manifestlog, a list of tree manifestlogs, and a list of
        # filelogs.
        #
        # Changelog and manifestlog parts are terminated with empty chunks. The
        # tree and file parts are a list of entry sections. Each entry section
        # is a series of chunks terminating in an empty chunk. The list of these
        # entry sections is terminated in yet another empty chunk, so we know
        # we've reached the end of the tree/file list when we reach an empty
        # chunk that was preceded by no non-empty chunks.

        parts = 0
        while parts < 2 + self._grouplistcount:
            noentries = True
            while True:
                chunk = getchunk(self)
                if not chunk:
                    # The first two empty chunks represent the end of the
                    # changelog and the manifestlog portions. The remaining
                    # empty chunks represent either A) the end of individual
                    # tree or file entries in the file list, or B) the end of
                    # the entire list. It's the end of the entire list if there
                    # were no entries (i.e. noentries is True).
                    if parts < 2:
                        parts += 1
                    elif noentries:
                        parts += 1
                    break
                noentries = False
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

    def _unpackmanifests(self, repo, revmap, trp, prog):
        self.callback = prog.increment
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        deltas = self.deltaiter()
        repo.manifestlog.addgroup(deltas, revmap, trp)
        prog.complete()
        self.callback = None

    def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
              expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        try:
            # The transaction may already carry source information. In this
            # case we use the top level data. We overwrite the argument
            # because we need to use the top level value (if they exist)
            # in this function.
            srctype = tr.hookargs.setdefault('source', srctype)
            url = tr.hookargs.setdefault('url', url)
            repo.hook('prechangegroup',
                      throw=True, **pycompat.strkwargs(tr.hookargs))

            # write changelog data to temp files so concurrent readers
            # will not see an inconsistent view
            cl = repo.changelog
            cl.delayupdate(tr)
            oldheads = set(cl.heads())

            trp = weakref.proxy(tr)
            # pull off the changeset group
            repo.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            progress = repo.ui.makeprogress(_('changesets'), unit=_('chunks'),
                                            total=expectedtotal)
            self.callback = progress.increment

            efiles = set()
            def onchangelog(cl, node):
                efiles.update(cl.readfiles(node))

            self.changelogheader()
            deltas = self.deltaiter()
            cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog)
            efiles = len(efiles)

            if not cgnodes:
                repo.ui.develwarn('applied empty changegroup',
                                  config='warn-empty-changegroup')
            clend = len(cl)
            changesets = clend - clstart
            progress.complete()
            self.callback = None

            # pull off the manifest group
            repo.ui.status(_("adding manifests\n"))
            # We know that we'll never have more manifests than we had
            # changesets.
            progress = repo.ui.makeprogress(_('manifests'), unit=_('chunks'),
                                            total=changesets)
            self._unpackmanifests(repo, revmap, trp, progress)

            needfiles = {}
            if repo.ui.configbool('server', 'validate'):
                cl = repo.changelog
                ml = repo.manifestlog
                # validate incoming csets have their manifests
                for cset in pycompat.xrange(clstart, clend):
                    mfnode = cl.changelogrevision(cset).manifest
                    mfest = ml[mfnode].readdelta()
                    # store file cgnodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            repo.ui.status(_("adding file changes\n"))
            newrevs, newfiles = _addchangegroupfiles(
                repo, self, revmap, trp, efiles, needfiles)
            revisions += newrevs
            files += newfiles

            deltaheads = 0
            if oldheads:
                heads = cl.heads()
                deltaheads = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and repo[h].closesbranch():
                        deltaheads -= 1
            htext = ""
            if deltaheads:
                htext = _(" (%+d heads)") % deltaheads

            repo.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            repo.invalidatevolatilesets()

            if changesets > 0:
                if 'node' not in tr.hookargs:
                    tr.hookargs['node'] = hex(cl.node(clstart))
                    tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                    hookargs = dict(tr.hookargs)
                else:
                    hookargs = dict(tr.hookargs)
                    hookargs['node'] = hex(cl.node(clstart))
                    hookargs['node_last'] = hex(cl.node(clend - 1))
                repo.hook('pretxnchangegroup',
                          throw=True, **pycompat.strkwargs(hookargs))

            added = [cl.node(r) for r in pycompat.xrange(clstart, clend)]
            phaseall = None
            if srctype in ('push', 'serve'):
                # Old servers cannot push the boundary themselves.
                # New servers won't push the boundary if changeset already
                # exists locally as secret
                #
                # We should not use added here but the list of all changes
                # in the bundle
                if repo.publishing():
                    targetphase = phaseall = phases.public
                else:
                    # closer target phase computation

                    # Those changesets have been pushed from the
                    # outside, their phases are going to be pushed
                    # alongside. Therefore `targetphase` is
                    # ignored.
                    targetphase = phaseall = phases.draft
            if added:
                phases.registernew(repo, tr, targetphase, added)
            if phaseall is not None:
                phases.advanceboundary(repo, tr, phaseall, cgnodes)

            if changesets > 0:

                def runhooks():
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(repo):
                        return

                    repo.hook("changegroup", **pycompat.strkwargs(hookargs))

                    for n in added:
                        args = hookargs.copy()
                        args['node'] = hex(n)
                        del args['node_last']
                        repo.hook("incoming", **pycompat.strkwargs(args))

                    newheads = [h for h in repo.heads()
                                if h not in oldheads]
                    repo.ui.log("incoming",
                                "%d incoming changes - new heads: %s\n",
                                len(added),
                                ', '.join([hex(c[:6]) for c in newheads]))

                tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        if deltaheads < 0:
            ret = deltaheads - 1
        else:
            ret = deltaheads + 1
        return ret

    def deltaiter(self):
        """
        returns an iterator of the deltas in this changegroup

        Useful for passing to the underlying storage system to be stored.
        """
        chain = None
        for chunkdata in iter(lambda: self.deltachunk(chain), {}):
            # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
            yield chunkdata
            chain = chunkdata[0]

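# Illustrative sketch (not part of the original module): the typical consumer
# flow for an unpacker. Assumes `repo` is a local repository object and `fh`
# is a stream positioned at the start of uncompressed cg1 data; real callers
# normally arrive here via the exchange/bundle2 machinery instead.
def _exampleapply(repo, fh):
    unpacker = cg1unpacker(fh, 'UN')
    with repo.transaction('unbundle') as tr:
        # A positive return means heads grew or stayed constant; a negative
        # one means heads were removed (see the apply() docstring above).
        return unpacker.apply(repo, tr, 'unbundle', 'file:example.hg')
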
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs = headertuple
        flags = 0
        return node, p1, p2, deltabase, cs, flags

class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog):
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % d)
            dirlog = repo.manifestlog._revlog.dirlog(d)
            deltas = self.deltaiter()
            if not dirlog.addgroup(deltas, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))

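# Illustrative sketch (not part of the original module): dispatching on the
# changegroup version string. This mapping is a stand-in for the module's
# own packer/unpacker registry, which is not shown in this excerpt.
_exampleunpackers = {
    '01': cg1unpacker,
    '02': cg2unpacker,
    '03': cg3unpacker,
}

def _examplegetunpacker(version, fh, alg):
    if version not in _exampleunpackers:
        raise error.Abort(_('unsupported changegroup version: %s') % version)
    return _exampleunpackers[version](fh, alg)
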
class headerlessfixup(object):
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        if self._h:
            d, self._h = self._h[:n], self._h[n:]
            if len(d) < n:
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)

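# Illustrative sketch (not part of the original module): headerlessfixup
# replays header bytes a caller already consumed, e.g. after sniffing the
# stream magic to pick a handler, so the next reader sees an intact stream.
def _examplereplayheader(fh):
    magic = readexactly(fh, 6)   # consumed while probing the stream type
    # Wrap the stream: reads yield the magic again, then the remainder.
    return headerlessfixup(fh, magic)
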
@attr.s(slots=True, frozen=True)
class revisiondelta(object):
    """Describes a delta entry in a changegroup.

    Captured data is sufficient to serialize the delta into multiple
    formats.
    """
    # 20 byte node of this revision.
    node = attr.ib()
    # 20 byte nodes of parent revisions.
    p1node = attr.ib()
    p2node = attr.ib()
    # 20 byte node of the node this delta is against.
    basenode = attr.ib()
    # 20 byte node of changeset revision this delta is associated with.
    linknode = attr.ib()
    # 2 bytes of flags to apply to revision data.
    flags = attr.ib()
    # Iterable of chunks holding raw delta data.
    deltachunks = attr.ib()

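# Illustrative sketch (not part of the original module): a full-text
# revisiondelta against the null revision. mdiff.trivialdiffheader() frames
# `text` as a delta that inserts the whole payload; the node values are
# assumed to be supplied by the caller.
def _examplefulltextdelta(text, node, linknode):
    return revisiondelta(
        node=node,
        p1node=nullid,
        p2node=nullid,
        basenode=nullid,
        linknode=linknode,
        flags=0,
        deltachunks=(mdiff.trivialdiffheader(len(text)), text),
    )
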
522 class cg1packer(object):
522 class cgpacker(object):
523 def __init__(self, repo, filematcher, version, allowreorder,
523 def __init__(self, repo, filematcher, version, allowreorder,
524 useprevdelta, builddeltaheader, manifestsend,
524 useprevdelta, builddeltaheader, manifestsend,
525 sendtreemanifests, bundlecaps=None):
525 sendtreemanifests, bundlecaps=None):
526 """Given a source repo, construct a bundler.
526 """Given a source repo, construct a bundler.
527
527
528 filematcher is a matcher that matches on files to include in the
528 filematcher is a matcher that matches on files to include in the
529 changegroup. Used to facilitate sparse changegroups.
529 changegroup. Used to facilitate sparse changegroups.
530
530
531 allowreorder controls whether reordering of revisions is allowed.
531 allowreorder controls whether reordering of revisions is allowed.
532 This value is used when ``bundle.reorder`` is ``auto`` or isn't
532 This value is used when ``bundle.reorder`` is ``auto`` or isn't
533 set.
533 set.
534
534
535 useprevdelta controls whether revisions should always delta against
535 useprevdelta controls whether revisions should always delta against
536 the previous revision in the changegroup.
536 the previous revision in the changegroup.
537
537
538 builddeltaheader is a callable that constructs the header for a group
538 builddeltaheader is a callable that constructs the header for a group
539 delta.
539 delta.
540
540
541 manifestsend is a chunk to send after manifests have been fully emitted.
541 manifestsend is a chunk to send after manifests have been fully emitted.
542
542
543 sendtreemanifests indicates whether tree manifests should be emitted.
543 sendtreemanifests indicates whether tree manifests should be emitted.
544
544
545 bundlecaps is optional and can be used to specify the set of
545 bundlecaps is optional and can be used to specify the set of
546 capabilities which can be used to build the bundle. While bundlecaps is
546 capabilities which can be used to build the bundle. While bundlecaps is
547 unused in core Mercurial, extensions rely on this feature to communicate
547 unused in core Mercurial, extensions rely on this feature to communicate
548 capabilities to customize the changegroup packer.
548 capabilities to customize the changegroup packer.
549 """
549 """
550 assert filematcher
550 assert filematcher
551 self._filematcher = filematcher
551 self._filematcher = filematcher
552
552
553 self.version = version
553 self.version = version
554 self._useprevdelta = useprevdelta
554 self._useprevdelta = useprevdelta
555 self._builddeltaheader = builddeltaheader
555 self._builddeltaheader = builddeltaheader
556 self._manifestsend = manifestsend
556 self._manifestsend = manifestsend
557 self._sendtreemanifests = sendtreemanifests
557 self._sendtreemanifests = sendtreemanifests
558
558
559 # Set of capabilities we can use to build the bundle.
559 # Set of capabilities we can use to build the bundle.
560 if bundlecaps is None:
560 if bundlecaps is None:
561 bundlecaps = set()
561 bundlecaps = set()
562 self._bundlecaps = bundlecaps
562 self._bundlecaps = bundlecaps
563
563
564 # experimental config: bundle.reorder
564 # experimental config: bundle.reorder
565 reorder = repo.ui.config('bundle', 'reorder')
565 reorder = repo.ui.config('bundle', 'reorder')
566 if reorder == 'auto':
566 if reorder == 'auto':
567 self._reorder = allowreorder
567 self._reorder = allowreorder
568 else:
568 else:
569 self._reorder = stringutil.parsebool(reorder)
569 self._reorder = stringutil.parsebool(reorder)
570
570
571 self._repo = repo
571 self._repo = repo
572
572
573 if self._repo.ui.verbose and not self._repo.ui.debugflag:
573 if self._repo.ui.verbose and not self._repo.ui.debugflag:
574 self._verbosenote = self._repo.ui.note
574 self._verbosenote = self._repo.ui.note
575 else:
575 else:
576 self._verbosenote = lambda s: None
576 self._verbosenote = lambda s: None
577
577
578 def close(self):
578 def close(self):
579 # Ellipses serving mode.
579 # Ellipses serving mode.
580 getattr(self, 'clrev_to_localrev', {}).clear()
580 getattr(self, 'clrev_to_localrev', {}).clear()
581 if getattr(self, 'next_clrev_to_localrev', {}):
581 if getattr(self, 'next_clrev_to_localrev', {}):
582 self.clrev_to_localrev = self.next_clrev_to_localrev
582 self.clrev_to_localrev = self.next_clrev_to_localrev
583 del self.next_clrev_to_localrev
583 del self.next_clrev_to_localrev
584 self.changelog_done = True
584 self.changelog_done = True
585
585
586 return closechunk()
586 return closechunk()
587
587
588 def fileheader(self, fname):
588 def fileheader(self, fname):
589 return chunkheader(len(fname)) + fname
589 return chunkheader(len(fname)) + fname
590
590
591 # Extracted both for clarity and for overriding in extensions.
591 # Extracted both for clarity and for overriding in extensions.
592 def _sortgroup(self, store, nodelist, lookup):
592 def _sortgroup(self, store, nodelist, lookup):
593 """Sort nodes for change group and turn them into revnums."""
593 """Sort nodes for change group and turn them into revnums."""
594 # Ellipses serving mode.
594 # Ellipses serving mode.
595 #
595 #
596 # In a perfect world, we'd generate better ellipsis-ified graphs
596 # In a perfect world, we'd generate better ellipsis-ified graphs
597 # for non-changelog revlogs. In practice, we haven't started doing
597 # for non-changelog revlogs. In practice, we haven't started doing
598 # that yet, so the resulting DAGs for the manifestlog and filelogs
598 # that yet, so the resulting DAGs for the manifestlog and filelogs
599 # are actually full of bogus parentage on all the ellipsis
599 # are actually full of bogus parentage on all the ellipsis
600 # nodes. This has the side effect that, while the contents are
600 # nodes. This has the side effect that, while the contents are
601 # correct, the individual DAGs might be completely out of whack in
601 # correct, the individual DAGs might be completely out of whack in
602 # a case like 882681bc3166 and its ancestors (back about 10
602 # a case like 882681bc3166 and its ancestors (back about 10
603 # revisions or so) in the main hg repo.
603 # revisions or so) in the main hg repo.
604 #
604 #
605 # The one invariant we *know* holds is that the new (potentially
605 # The one invariant we *know* holds is that the new (potentially
606 # bogus) DAG shape will be valid if we order the nodes in the
606 # bogus) DAG shape will be valid if we order the nodes in the
607 # order that they're introduced in dramatis personae by the
607 # order that they're introduced in dramatis personae by the
608 # changelog, so what we do is we sort the non-changelog histories
608 # changelog, so what we do is we sort the non-changelog histories
609 # by the order in which they are used by the changelog.
609 # by the order in which they are used by the changelog.
610 if util.safehasattr(self, 'full_nodes') and self.clnode_to_rev:
610 if util.safehasattr(self, 'full_nodes') and self.clnode_to_rev:
611 key = lambda n: self.clnode_to_rev[lookup(n)]
611 key = lambda n: self.clnode_to_rev[lookup(n)]
612 return [store.rev(n) for n in sorted(nodelist, key=key)]
612 return [store.rev(n) for n in sorted(nodelist, key=key)]
613
613
614 # for generaldelta revlogs, we linearize the revs; this will both be
614 # for generaldelta revlogs, we linearize the revs; this will both be
615 # much quicker and generate a much smaller bundle
615 # much quicker and generate a much smaller bundle
616 if (store._generaldelta and self._reorder is None) or self._reorder:
616 if (store._generaldelta and self._reorder is None) or self._reorder:
617 dag = dagutil.revlogdag(store)
617 dag = dagutil.revlogdag(store)
618 return dag.linearize(set(store.rev(n) for n in nodelist))
618 return dag.linearize(set(store.rev(n) for n in nodelist))
619 else:
619 else:
620 return sorted([store.rev(n) for n in nodelist])
620 return sorted([store.rev(n) for n in nodelist])
621
621
622 def group(self, nodelist, store, lookup, units=None):
622 def group(self, nodelist, store, lookup, units=None):
623 """Calculate a delta group, yielding a sequence of changegroup chunks
623 """Calculate a delta group, yielding a sequence of changegroup chunks
624 (strings).
624 (strings).
625
625
626 Given a list of changeset revs, return a set of deltas and
626 Given a list of changeset revs, return a set of deltas and
627 metadata corresponding to nodes. The first delta is
627 metadata corresponding to nodes. The first delta is
628 first parent(nodelist[0]) -> nodelist[0], the receiver is
628 first parent(nodelist[0]) -> nodelist[0], the receiver is
629 guaranteed to have this parent as it has all history before
629 guaranteed to have this parent as it has all history before
630 these changesets. In the case firstparent is nullrev the
630 these changesets. In the case firstparent is nullrev the
631 changegroup starts with a full revision.
631 changegroup starts with a full revision.
632
632
633 If units is not None, progress detail will be generated, units specifies
633 If units is not None, progress detail will be generated, units specifies
634 the type of revlog that is touched (changelog, manifest, etc.).
634 the type of revlog that is touched (changelog, manifest, etc.).
635 """
635 """
636 # if we don't have any revisions touched by these changesets, bail
636 # if we don't have any revisions touched by these changesets, bail
637 if len(nodelist) == 0:
637 if len(nodelist) == 0:
638 yield self.close()
638 yield self.close()
639 return
639 return
640
640
641 revs = self._sortgroup(store, nodelist, lookup)
641 revs = self._sortgroup(store, nodelist, lookup)
642
642
643 # add the parent of the first rev
643 # add the parent of the first rev
644 p = store.parentrevs(revs[0])[0]
644 p = store.parentrevs(revs[0])[0]
645 revs.insert(0, p)
645 revs.insert(0, p)
646
646
647 # build deltas
647 # build deltas
648 progress = None
648 progress = None
649 if units is not None:
649 if units is not None:
650 progress = self._repo.ui.makeprogress(_('bundling'), unit=units,
650 progress = self._repo.ui.makeprogress(_('bundling'), unit=units,
651 total=(len(revs) - 1))
651 total=(len(revs) - 1))
652 for r in pycompat.xrange(len(revs) - 1):
652 for r in pycompat.xrange(len(revs) - 1):
653 if progress:
653 if progress:
654 progress.update(r + 1)
654 progress.update(r + 1)
655 prev, curr = revs[r], revs[r + 1]
655 prev, curr = revs[r], revs[r + 1]
656 linknode = lookup(store.node(curr))
656 linknode = lookup(store.node(curr))
657 for c in self.revchunk(store, curr, prev, linknode):
657 for c in self.revchunk(store, curr, prev, linknode):
658 yield c
658 yield c
659
659
660 if progress:
660 if progress:
661 progress.complete()
661 progress.complete()
662 yield self.close()
662 yield self.close()
663
663
664 # filter any nodes that claim to be part of the known set
664 # filter any nodes that claim to be part of the known set
665 def prune(self, store, missing, commonrevs):
665 def prune(self, store, missing, commonrevs):
666 # TODO this violates storage abstraction for manifests.
666 # TODO this violates storage abstraction for manifests.
667 if isinstance(store, manifest.manifestrevlog):
667 if isinstance(store, manifest.manifestrevlog):
668 if not self._filematcher.visitdir(store._dir[:-1] or '.'):
668 if not self._filematcher.visitdir(store._dir[:-1] or '.'):
669 return []
669 return []
670
670
671 rr, rl = store.rev, store.linkrev
671 rr, rl = store.rev, store.linkrev
672 return [n for n in missing if rl(rr(n)) not in commonrevs]
672 return [n for n in missing if rl(rr(n)) not in commonrevs]
673
673
674 def _packmanifests(self, dir, mfnodes, lookuplinknode):
674 def _packmanifests(self, dir, mfnodes, lookuplinknode):
675 """Pack flat manifests into a changegroup stream."""
675 """Pack flat manifests into a changegroup stream."""
676 assert not dir
676 assert not dir
677 for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
677 for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
678 lookuplinknode, units=_('manifests')):
678 lookuplinknode, units=_('manifests')):
679 yield chunk
679 yield chunk
680
680
681 def _packtreemanifests(self, dir, mfnodes, lookuplinknode):
681 def _packtreemanifests(self, dir, mfnodes, lookuplinknode):
682 """Version of _packmanifests that operates on directory manifests.
682 """Version of _packmanifests that operates on directory manifests.
683
683
684 Encodes the directory name in the output so multiple manifests
684 Encodes the directory name in the output so multiple manifests
685 can be sent.
685 can be sent.
686 """
686 """
687 assert self.version == b'03'
687 assert self.version == b'03'
688
688
689 if dir:
689 if dir:
690 yield self.fileheader(dir)
690 yield self.fileheader(dir)
691
691
692 # TODO violates storage abstractions by assuming revlogs.
692 # TODO violates storage abstractions by assuming revlogs.
693 dirlog = self._repo.manifestlog._revlog.dirlog(dir)
693 dirlog = self._repo.manifestlog._revlog.dirlog(dir)
694 for chunk in self.group(mfnodes, dirlog, lookuplinknode,
694 for chunk in self.group(mfnodes, dirlog, lookuplinknode,
695 units=_('manifests')):
695 units=_('manifests')):
696 yield chunk
696 yield chunk
697
697
698 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
698 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
699 '''yield a sequence of changegroup chunks (strings)'''
699 '''yield a sequence of changegroup chunks (strings)'''
700 repo = self._repo
700 repo = self._repo
701 cl = repo.changelog
701 cl = repo.changelog
702
702
703 clrevorder = {}
703 clrevorder = {}
704 mfs = {} # needed manifests
704 mfs = {} # needed manifests
705 fnodes = {} # needed file nodes
705 fnodes = {} # needed file nodes
706 mfl = repo.manifestlog
706 mfl = repo.manifestlog
707 # TODO violates storage abstraction.
707 # TODO violates storage abstraction.
708 mfrevlog = mfl._revlog
708 mfrevlog = mfl._revlog
709 changedfiles = set()
709 changedfiles = set()
710
710
711 ellipsesmode = util.safehasattr(self, 'full_nodes')
711 ellipsesmode = util.safehasattr(self, 'full_nodes')
712
712
713 # Callback for the changelog, used to collect changed files and
713 # Callback for the changelog, used to collect changed files and
714 # manifest nodes.
714 # manifest nodes.
715 # Returns the linkrev node (identity in the changelog case).
715 # Returns the linkrev node (identity in the changelog case).
716 def lookupcl(x):
716 def lookupcl(x):
717 c = cl.read(x)
717 c = cl.read(x)
718 clrevorder[x] = len(clrevorder)
718 clrevorder[x] = len(clrevorder)
719
719
720 if ellipsesmode:
720 if ellipsesmode:
721 # Only update mfs if x is going to be sent. Otherwise we
721 # Only update mfs if x is going to be sent. Otherwise we
722 # end up with bogus linkrevs specified for manifests and
722 # end up with bogus linkrevs specified for manifests and
723 # we skip some manifest nodes that we should otherwise
723 # we skip some manifest nodes that we should otherwise
724 # have sent.
724 # have sent.
725 if (x in self.full_nodes
725 if (x in self.full_nodes
726 or cl.rev(x) in self.precomputed_ellipsis):
726 or cl.rev(x) in self.precomputed_ellipsis):
727 n = c[0]
727 n = c[0]
728 # Record the first changeset introducing this manifest
728 # Record the first changeset introducing this manifest
729 # version.
729 # version.
730 mfs.setdefault(n, x)
730 mfs.setdefault(n, x)
731 # Set this narrow-specific dict so we have the lowest
731 # Set this narrow-specific dict so we have the lowest
732 # manifest revnum to look up for this cl revnum. (Part of
732 # manifest revnum to look up for this cl revnum. (Part of
733 # mapping changelog ellipsis parents to manifest ellipsis
733 # mapping changelog ellipsis parents to manifest ellipsis
734 # parents)
734 # parents)
735 self.next_clrev_to_localrev.setdefault(cl.rev(x),
735 self.next_clrev_to_localrev.setdefault(cl.rev(x),
736 mfrevlog.rev(n))
736 mfrevlog.rev(n))
737 # We can't trust the changed files list in the changeset if the
737 # We can't trust the changed files list in the changeset if the
738 # client requested a shallow clone.
738 # client requested a shallow clone.
739 if self.is_shallow:
739 if self.is_shallow:
740 changedfiles.update(mfl[c[0]].read().keys())
740 changedfiles.update(mfl[c[0]].read().keys())
741 else:
741 else:
742 changedfiles.update(c[3])
742 changedfiles.update(c[3])
743 else:
743 else:
744
744
745 n = c[0]
745 n = c[0]
746 # record the first changeset introducing this manifest version
746 # record the first changeset introducing this manifest version
747 mfs.setdefault(n, x)
747 mfs.setdefault(n, x)
748 # Record a complete list of potentially-changed files in
748 # Record a complete list of potentially-changed files in
749 # this manifest.
749 # this manifest.
750 changedfiles.update(c[3])
750 changedfiles.update(c[3])
751
751
752 return x
752 return x
753
753
754 self._verbosenote(_('uncompressed size of bundle content:\n'))
754 self._verbosenote(_('uncompressed size of bundle content:\n'))
755 size = 0
755 size = 0
756 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
756 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
757 size += len(chunk)
757 size += len(chunk)
758 yield chunk
758 yield chunk
759 self._verbosenote(_('%8.i (changelog)\n') % size)
759 self._verbosenote(_('%8.i (changelog)\n') % size)
760
760
761 # We need to make sure that the linkrev in the changegroup refers to
761 # We need to make sure that the linkrev in the changegroup refers to
762 # the first changeset that introduced the manifest or file revision.
762 # the first changeset that introduced the manifest or file revision.
763 # The fastpath is usually safer than the slowpath, because the filelogs
763 # The fastpath is usually safer than the slowpath, because the filelogs
764 # are walked in revlog order.
764 # are walked in revlog order.
765 #
765 #
766 # When taking the slowpath with reorder=None and the manifest revlog
766 # When taking the slowpath with reorder=None and the manifest revlog
767 # uses generaldelta, the manifest may be walked in the "wrong" order.
767 # uses generaldelta, the manifest may be walked in the "wrong" order.
768 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
768 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
769 # cc0ff93d0c0c).
769 # cc0ff93d0c0c).
770 #
770 #
771 # When taking the fastpath, we are only vulnerable to reordering
771 # When taking the fastpath, we are only vulnerable to reordering
772 # of the changelog itself. The changelog never uses generaldelta, so
772 # of the changelog itself. The changelog never uses generaldelta, so
773 # it is only reordered when reorder=True. To handle this case, we
773 # it is only reordered when reorder=True. To handle this case, we
774 # simply take the slowpath, which already has the 'clrevorder' logic.
774 # simply take the slowpath, which already has the 'clrevorder' logic.
775 # This was also fixed in cc0ff93d0c0c.
775 # This was also fixed in cc0ff93d0c0c.
776 fastpathlinkrev = fastpathlinkrev and not self._reorder
776 fastpathlinkrev = fastpathlinkrev and not self._reorder
777 # Treemanifests don't work correctly with fastpathlinkrev
777 # Treemanifests don't work correctly with fastpathlinkrev
778 # either, because we don't discover which directory nodes to
778 # either, because we don't discover which directory nodes to
779 # send along with files. This could probably be fixed.
779 # send along with files. This could probably be fixed.
780 fastpathlinkrev = fastpathlinkrev and (
780 fastpathlinkrev = fastpathlinkrev and (
781 'treemanifest' not in repo.requirements)
781 'treemanifest' not in repo.requirements)
782
782
783 for chunk in self.generatemanifests(commonrevs, clrevorder,
783 for chunk in self.generatemanifests(commonrevs, clrevorder,
784 fastpathlinkrev, mfs, fnodes, source):
784 fastpathlinkrev, mfs, fnodes, source):
785 yield chunk
785 yield chunk
786
786
787 if ellipsesmode:
787 if ellipsesmode:
788 mfdicts = None
788 mfdicts = None
789 if self.is_shallow:
789 if self.is_shallow:
790 mfdicts = [(self._repo.manifestlog[n].read(), lr)
790 mfdicts = [(self._repo.manifestlog[n].read(), lr)
791 for (n, lr) in mfs.iteritems()]
791 for (n, lr) in mfs.iteritems()]
792
792
793 mfs.clear()
793 mfs.clear()
794 clrevs = set(cl.rev(x) for x in clnodes)
794 clrevs = set(cl.rev(x) for x in clnodes)
795
795
796 if not fastpathlinkrev:
796 if not fastpathlinkrev:
797 def linknodes(unused, fname):
797 def linknodes(unused, fname):
798 return fnodes.get(fname, {})
798 return fnodes.get(fname, {})
799 else:
799 else:
800 cln = cl.node
800 cln = cl.node
801 def linknodes(filerevlog, fname):
801 def linknodes(filerevlog, fname):
802 llr = filerevlog.linkrev
802 llr = filerevlog.linkrev
803 fln = filerevlog.node
803 fln = filerevlog.node
804 revs = ((r, llr(r)) for r in filerevlog)
804 revs = ((r, llr(r)) for r in filerevlog)
805 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
805 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
806
806
807 if ellipsesmode:
807 if ellipsesmode:
808 # We need to pass the mfdicts variable down into
808 # We need to pass the mfdicts variable down into
809 # generatefiles(), but more than one command might have
809 # generatefiles(), but more than one command might have
810 # wrapped generatefiles so we can't modify the function
810 # wrapped generatefiles so we can't modify the function
811 # signature. Instead, we pass the data to ourselves using an
811 # signature. Instead, we pass the data to ourselves using an
812 # instance attribute. I'm sorry.
812 # instance attribute. I'm sorry.
813 self._mfdicts = mfdicts
813 self._mfdicts = mfdicts
814
814
815 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
815 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
816 source):
816 source):
817 yield chunk
817 yield chunk
818
818
819 yield self.close()
819 yield self.close()
820
820
821 if clnodes:
821 if clnodes:
822 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
822 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
823
823
    def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
                          fnodes, source):
        """Returns an iterator of changegroup chunks containing manifests.

        `source` is unused here, but is used by extensions like remotefilelog to
        change what is sent based on pulls vs pushes, etc.
        """
        repo = self._repo
        mfl = repo.manifestlog
        dirlog = mfl._revlog.dirlog
        tmfnodes = {'': mfs}

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def makelookupmflinknode(dir, nodes):
            if fastpathlinkrev:
                assert not dir
                return mfs.__getitem__

            def lookupmflinknode(x):
                """Callback for looking up the linknode for manifests.

                Returns the linkrev node for the specified manifest.

                SIDE EFFECT:

                1) fclnodes gets populated with the list of relevant
                   file nodes if we're not using fastpathlinkrev
                2) When treemanifests are in use, collects treemanifest nodes
                   to send

                Note that this means manifests must be completely sent to
                the client before you can trust the list of files and
                treemanifests to send.
                """
                clnode = nodes[x]
                mdata = mfl.get(dir, x).readfast(shallow=True)
                for p, n, fl in mdata.iterentries():
                    if fl == 't': # subdirectory manifest
                        subdir = dir + p + '/'
                        tmfclnodes = tmfnodes.setdefault(subdir, {})
                        tmfclnode = tmfclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[tmfclnode]:
                            tmfclnodes[n] = clnode
                    else:
                        f = dir + p
                        fclnodes = fnodes.setdefault(f, {})
                        fclnode = fclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            fclnodes[n] = clnode
                return clnode
            return lookupmflinknode
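        # Added note (not part of the original source): clrevorder maps
        # changelog node -> position in the outgoing order, so the
        # setdefault()/compare pairs above keep the *earliest* changeset
        # that introduced a given manifest or file node. E.g. if node n is
        # first recorded via clnode A at order 5 and later seen via clnode
        # B at order 2, the comparison rewrites the linknode to B.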

        fn = (self._packtreemanifests if self._sendtreemanifests
              else self._packmanifests)
        size = 0
        while tmfnodes:
            dir, nodes = tmfnodes.popitem()
            prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
            if not dir or prunednodes:
                for x in fn(dir, prunednodes, makelookupmflinknode(dir, nodes)):
                    size += len(x)
                    yield x
        self._verbosenote(_('%8.i (manifests)\n') % size)
        yield self._manifestsend
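        # Added sketch of the loop above (comment only, not original code):
        # tmfnodes is a worklist keyed by directory. Packing the root
        # manifest ('') discovers subdirectory manifests (flag 't') and
        # pushes them back into the dict, so the loop drains entries like
        # {'': ..., 'foo/': ..., 'foo/bar/': ...} until none remain.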

    # The 'source' parameter is useful for extensions
    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        changedfiles = list(filter(self._filematcher, changedfiles))

        if getattr(self, 'is_shallow', False):
            # See comment in generate() for why this sadness is a thing.
            mfdicts = self._mfdicts
            del self._mfdicts
            # In a shallow clone, the linknodes callback needs to also include
            # those file nodes that are in the manifests we sent but weren't
            # introduced by those manifests.
            commonctxs = [self._repo[c] for c in commonrevs]
            oldlinknodes = linknodes
            clrev = self._repo.changelog.rev

            # Defining this function has a side-effect of overriding the
            # function of the same name that was passed in as an argument.
            # TODO have caller pass in appropriate function.
            def linknodes(flog, fname):
                for c in commonctxs:
                    try:
                        fnode = c.filenode(fname)
                        self.clrev_to_localrev[c.rev()] = flog.rev(fnode)
                    except error.ManifestLookupError:
                        pass
                links = oldlinknodes(flog, fname)
                if len(links) != len(mfdicts):
                    for mf, lr in mfdicts:
                        fnode = mf.get(fname, None)
                        if fnode in links:
                            links[fnode] = min(links[fnode], lr, key=clrev)
                        elif fnode:
                            links[fnode] = lr
                return links
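            # Added note (not in the original): min(..., key=clrev) keeps
            # whichever candidate linknode has the smaller changelog
            # revision, i.e. the earlier changeset, mirroring the
            # clrevorder tie-breaking used for manifests above.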

        return self._generatefiles(changedfiles, linknodes, commonrevs, source)

    def _generatefiles(self, changedfiles, linknodes, commonrevs, source):
        repo = self._repo
        progress = repo.ui.makeprogress(_('bundling'), unit=_('files'),
                                        total=len(changedfiles))
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise error.Abort(_("empty or missing file data for %s") %
                                  fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup table for filenodes; we collected the linkrev nodes
            # above in the fastpath case and with lookupmf in the slowpath
            # case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
            if filenodes:
                progress.update(i + 1, item=fname)
                h = self.fileheader(fname)
                size = len(h)
                yield h
                for chunk in self.group(filenodes, filerevlog, lookupfilelog):
                    size += len(chunk)
                    yield chunk
                self._verbosenote(_('%8.i %s\n') % (size, fname))
        progress.complete()
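    # Added sketch of the per-file byte stream produced above (illustrative
    # comment, not original code):
    #
    #   fileheader(fname)    one chunk naming the file
    #   group(...) chunks    one delta chunk per missing revision, ended
    #                        by a zero-length close chunk
    #
    # Files with no missing revisions after prune() are skipped entirely.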

    def deltaparent(self, store, rev, p1, p2, prev):
        if self._useprevdelta:
            if not store.candelta(prev, rev):
                raise error.ProgrammingError(
                    'cg1 should not be used in this case')
            return prev

        # Narrow ellipses mode.
        if util.safehasattr(self, 'full_nodes'):
            # TODO: send better deltas when in narrow mode.
            #
            # changegroup.group() loops over revisions to send,
            # including revisions we'll skip. What this means is that
            # `prev` will be a potentially useless delta base for all
            # ellipsis nodes, as the client likely won't have it. In
            # the future we should do bookkeeping about which nodes
            # have been sent to the client, and try to be
            # significantly smarter about delta bases. This is
            # slightly tricky because this same code has to work for
            # all revlogs, and we don't have the linkrev/linknode here.
            return p1

        dp = store.deltaparent(rev)
        if dp == nullrev and store.storedeltachains:
            # Avoid sending full revisions when delta parent is null. Pick prev
            # in that case. It's tempting to pick p1 in this case, as p1 will
            # be smaller in the common case. However, computing a delta against
            # p1 may require resolving the raw text of p1, which could be
            # expensive. The revlog caches should have prev cached, meaning
            # less CPU for changegroup generation. There is likely room to add
            # a flag and/or config option to control this behavior.
            base = prev
        elif dp == nullrev:
            # revlog is configured to use full snapshot for a reason,
            # stick to full snapshot.
            base = nullrev
        elif dp not in (p1, p2, prev):
            # Pick prev when we can't be sure remote has the base revision.
            return prev
        else:
            base = dp

        if base != nullrev and not store.candelta(base, rev):
            base = nullrev

        return base
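    # Added summary (comment only, not original code) of the non-narrow,
    # non-cg1 logic above:
    #
    #   stored delta parent (dp)    chosen base
    #   ------------------------    -----------
    #   null, chains allowed        prev
    #   null, chains disabled       null (full snapshot)
    #   not in (p1, p2, prev)       prev (returned immediately)
    #   in (p1, p2, prev)           dp
    #
    # and in the non-returning rows a final candelta() check can still
    # downgrade the base to null, forcing a full snapshot.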

    def revchunk(self, store, rev, prev, linknode):
        if util.safehasattr(self, 'full_nodes'):
            fn = self._revisiondeltanarrow
        else:
            fn = self._revisiondeltanormal

        delta = fn(store, rev, prev, linknode)
        if not delta:
            return

        meta = self._builddeltaheader(delta)
        l = len(meta) + sum(len(x) for x in delta.deltachunks)

        yield chunkheader(l)
        yield meta
        for x in delta.deltachunks:
            yield x
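    # Added framing note (not original code): each revision goes on the
    # wire as a length-prefixed chunk,
    #
    #   chunkheader(l)    4-byte big-endian length that counts itself
    #   meta              fixed-size header from _builddeltaheader()
    #   deltachunks       the (prefix, delta) byte strings
    #
    # so l above is len(meta) plus the delta payload size.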

    def _revisiondeltanormal(self, store, rev, prev, linknode):
        node = store.node(rev)
        p1, p2 = store.parentrevs(rev)
        base = self.deltaparent(store, rev, p1, p2, prev)

        prefix = ''
        if store.iscensored(base) or store.iscensored(rev):
            try:
                delta = store.revision(node, raw=True)
            except error.CensoredNodeError as e:
                delta = e.tombstone
            if base == nullrev:
                prefix = mdiff.trivialdiffheader(len(delta))
            else:
                baselen = store.rawsize(base)
                prefix = mdiff.replacediffheader(baselen, len(delta))
        elif base == nullrev:
            delta = store.revision(node, raw=True)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = store.revdiff(base, rev)
        p1n, p2n = store.parents(node)

        return revisiondelta(
            node=node,
            p1node=p1n,
            p2node=p2n,
            basenode=store.node(base),
            linknode=linknode,
            flags=store.flags(rev),
            deltachunks=(prefix, delta),
        )
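    # Added note (not in the original): when the full text is shipped
    # instead of a real diff, it is framed as a degenerate delta.
    # mdiff.trivialdiffheader() encodes "insert N bytes at offset 0",
    # while mdiff.replacediffheader() encodes "replace the whole base
    # text", so receivers can apply both through the normal patch path.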

    def _revisiondeltanarrow(self, store, rev, prev, linknode):
        # build up some mapping information that's useful later. See
        # the local() nested function below.
        if not self.changelog_done:
            self.clnode_to_rev[linknode] = rev
            linkrev = rev
            self.clrev_to_localrev[linkrev] = rev
        else:
            linkrev = self.clnode_to_rev[linknode]
            self.clrev_to_localrev[linkrev] = rev

        # This is a node to send in full, because the changeset it
        # corresponds to was a full changeset.
        if linknode in self.full_nodes:
            return self._revisiondeltanormal(store, rev, prev, linknode)

        # At this point, a node can either be one we should skip or an
        # ellipsis. If it's not an ellipsis, bail immediately.
        if linkrev not in self.precomputed_ellipsis:
            return

        linkparents = self.precomputed_ellipsis[linkrev]
        def local(clrev):
            """Turn a changelog revnum into a local revnum.

            The ellipsis dag is stored as revnums on the changelog,
            but when we're producing ellipsis entries for
            non-changelog revlogs, we need to turn those numbers into
            something local. This does that for us, and during the
            changelog sending phase will also expand the stored
            mappings as needed.
            """
            if clrev == nullrev:
                return nullrev

            if not self.changelog_done:
                # If we're doing the changelog, it's possible that we
                # have a parent that is already on the client, and we
                # need to store some extra mapping information so that
                # our contained ellipsis nodes will be able to resolve
                # their parents.
                if clrev not in self.clrev_to_localrev:
                    clnode = store.node(clrev)
                    self.clnode_to_rev[clnode] = clrev
                return clrev

            # Walk the ellipsis-ized changelog breadth-first looking for a
            # change that has been linked from the current revlog.
            #
            # For a flat manifest revlog only a single step should be necessary
            # as all relevant changelog entries are relevant to the flat
            # manifest.
            #
            # For a filelog or tree manifest dirlog however not every changelog
            # entry will have been relevant, so we need to skip some changelog
            # nodes even after ellipsis-izing.
            walk = [clrev]
            while walk:
                p = walk[0]
                walk = walk[1:]
                if p in self.clrev_to_localrev:
                    return self.clrev_to_localrev[p]
                elif p in self.full_nodes:
                    walk.extend([pp for pp in self._repo.changelog.parentrevs(p)
                                 if pp != nullrev])
                elif p in self.precomputed_ellipsis:
                    walk.extend([pp for pp in self.precomputed_ellipsis[p]
                                 if pp != nullrev])
                else:
                    # In this case, we've got an ellipsis with parents
                    # outside the current bundle (likely an
                    # incremental pull). We "know" that we can use the
                    # value of this same revlog at whatever revision
                    # is pointed to by linknode. "Know" is in scare
                    # quotes because I haven't done enough examination
                    # of edge cases to convince myself this is really
                    # a fact - it works for all the (admittedly
                    # thorough) cases in our testsuite, but I would be
                    # somewhat unsurprised to find a case in the wild
                    # where this breaks down a bit. That said, I don't
                    # know if it would hurt anything.
                    for i in pycompat.xrange(rev, 0, -1):
                        if store.linkrev(i) == clrev:
                            return i
                    # We failed to resolve a parent for this node, so
                    # we crash the changegroup construction.
                    raise error.Abort(
                        'unable to resolve parent while packing %r %r'
                        ' for changeset %r' % (store.indexfile, rev, clrev))

            return nullrev

        if not linkparents or (
            store.parentrevs(rev) == (nullrev, nullrev)):
            p1, p2 = nullrev, nullrev
        elif len(linkparents) == 1:
            p1, = sorted(local(p) for p in linkparents)
            p2 = nullrev
        else:
            p1, p2 = sorted(local(p) for p in linkparents)

        n = store.node(rev)
        p1n, p2n = store.node(p1), store.node(p2)
        flags = store.flags(rev)
        flags |= revlog.REVIDX_ELLIPSIS

        # TODO: try and actually send deltas for ellipsis data blocks
        data = store.revision(n)
        diffheader = mdiff.trivialdiffheader(len(data))

        return revisiondelta(
            node=n,
            p1node=p1n,
            p2node=p2n,
            basenode=nullid,
            linknode=linknode,
            flags=flags,
            deltachunks=(diffheader, data),
        )

def _makecg1packer(repo, filematcher, bundlecaps):
    builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack(
        d.node, d.p1node, d.p2node, d.linknode)

    return cgpacker(repo, filematcher, b'01',
                    useprevdelta=True,
                    allowreorder=None,
                    builddeltaheader=builddeltaheader,
                    manifestsend=b'',
                    sendtreemanifests=False,
                    bundlecaps=bundlecaps)

def _makecg2packer(repo, filematcher, bundlecaps):
    builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack(
        d.node, d.p1node, d.p2node, d.basenode, d.linknode)

    # Since generaldelta is directly supported by cg2, reordering
    # generally doesn't help, so we disable it by default (treating
    # bundle.reorder=auto just like bundle.reorder=False).
    return cgpacker(repo, filematcher, b'02',
                    useprevdelta=False,
                    allowreorder=False,
                    builddeltaheader=builddeltaheader,
                    manifestsend=b'',
                    sendtreemanifests=False,
                    bundlecaps=bundlecaps)

def _makecg3packer(repo, filematcher, bundlecaps):
    builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
        d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags)

    return cgpacker(repo, filematcher, b'03',
                    useprevdelta=False,
                    allowreorder=False,
                    builddeltaheader=builddeltaheader,
                    manifestsend=closechunk(),
                    sendtreemanifests=True,
                    bundlecaps=bundlecaps)

_packermap = {'01': (_makecg1packer, cg1unpacker),
              # cg2 adds support for exchanging generaldelta
              '02': (_makecg2packer, cg2unpacker),
              # cg3 adds support for exchanging revlog flags and treemanifests
              '03': (_makecg3packer, cg3unpacker),
             }
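# Illustrative lookup sketch (added; 'pickpacker' is a hypothetical helper,
# not part of this module):
#
#   def pickpacker(repo, version, filematcher=None, bundlecaps=None):
#       makepacker, unpacker = _packermap[version]
#       return makepacker(repo, filematcher, bundlecaps)
#
# getbundler() and getunbundler() below are the real entry points; they add
# validation and matcher handling on top of this table lookup.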

def allsupportedversions(repo):
    versions = set(_packermap.keys())
    if not (repo.ui.configbool('experimental', 'changegroup3') or
            repo.ui.configbool('experimental', 'treemanifest') or
            'treemanifest' in repo.requirements):
        versions.discard('03')
    return versions

# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    return allsupportedversions(repo)

# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    versions = allsupportedversions(repo)
    if 'treemanifest' in repo.requirements:
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        versions.discard('01')
        versions.discard('02')
    if repository.NARROW_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # support that for stripping and unbundling to work.
        versions.discard('01')
        versions.discard('02')
    if LFS_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # mark LFS entries with REVIDX_EXTSTORED.
        versions.discard('01')
        versions.discard('02')

    return versions

def localversion(repo):
    # Finds the best version to use for bundles that are meant to be used
    # locally, such as those from strip and shelve, and temporary bundles.
    return max(supportedoutgoingversions(repo))

def safeversion(repo):
    # Finds the smallest version that it's safe to assume clients of the repo
    # will support. For example, all hg versions that support generaldelta also
    # support changegroup 02.
    versions = supportedoutgoingversions(repo)
    if 'generaldelta' in repo.requirements:
        versions.discard('01')
    assert versions
    return min(versions)
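# Worked example (added; assumes a hypothetical repository whose
# requirements include 'generaldelta' but not 'treemanifest', with
# experimental.changegroup3 unset): allsupportedversions() returns
# {'01', '02'}, so localversion() picks '02' (the newest producible) and
# safeversion() also picks '02' (all generaldelta clients support cg2).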

def getbundler(version, repo, bundlecaps=None, filematcher=None):
    assert version in supportedoutgoingversions(repo)

    if filematcher is None:
        filematcher = matchmod.alwaysmatcher(repo.root, '')

    if version == '01' and not filematcher.always():
        raise error.ProgrammingError('version 01 changegroups do not support '
                                     'sparse file matchers')

    # Requested files could include files not in the local store. So
    # filter those out.
    filematcher = matchmod.intersectmatchers(repo.narrowmatch(),
                                             filematcher)

    fn = _packermap[version][0]
    return fn(repo, filematcher, bundlecaps)

def getunbundler(version, fh, alg, extras=None):
    return _packermap[version][1](fh, alg, extras=extras)

def _changegroupinfo(repo, nodes, source):
    if repo.ui.verbose or source == 'bundle':
        repo.ui.status(_("%d changesets found\n") % len(nodes))
    if repo.ui.debugflag:
        repo.ui.debug("list of changesets:\n")
        for node in nodes:
            repo.ui.debug("%s\n" % hex(node))

def makechangegroup(repo, outgoing, version, source, fastpath=False,
                    bundlecaps=None):
    cgstream = makestream(repo, outgoing, version, source,
                          fastpath=fastpath, bundlecaps=bundlecaps)
    return getunbundler(version, util.chunkbuffer(cgstream), None,
                        {'clcount': len(outgoing.missing) })

def makestream(repo, outgoing, version, source, fastpath=False,
               bundlecaps=None, filematcher=None):
    bundler = getbundler(version, repo, bundlecaps=bundlecaps,
                         filematcher=filematcher)

    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know that all linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
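# Hedged usage sketch (added; the peer object and discovery call reflect
# typical callers elsewhere in Mercurial, not code in this module):
#
#   from mercurial import discovery
#   outgoing = discovery.findcommonoutgoing(repo, peer)
#   for chunk in makestream(repo, outgoing, '02', 'push'):
#       ...  # feed chunks into a bundle2 part or straight to the wire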

def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    revisions = 0
    files = 0
    progress = repo.ui.makeprogress(_('files'), unit=_('files'),
                                    total=expectedfiles)
    for chunkdata in iter(source.filelogheader, {}):
        files += 1
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        progress.increment()
        fl = repo.file(f)
        o = len(fl)
        try:
            deltas = source.deltaiter()
            if not fl.addgroup(deltas, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        if f in needfiles:
            needs = needfiles[f]
            for new in pycompat.xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    progress.complete()

    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files

def _packellipsischangegroup(repo, common, match, relevant_nodes,
                             ellipsisroots, visitnodes, depth, source, version):
    if version in ('01', '02'):
        raise error.Abort(
            'ellipsis nodes require at least cg3 on client and server, '
            'but negotiated version %s' % version)
    # We wrap cgpacker.revchunk, using a side channel to pass
    # relevant_nodes into that area. Then if linknode isn't in the
    # set, we know we have an ellipsis node and we should defer
    # sending that node's data. We override close() to detect
    # pending ellipsis nodes and flush them.
    packer = getbundler(version, repo, filematcher=match)
    # Give the packer the list of nodes which should not be
    # ellipsis nodes. We store this rather than the set of nodes
    # that should be an ellipsis because for very large histories
    # we expect this to be significantly smaller.
    packer.full_nodes = relevant_nodes
    # Maps ellipsis revs to their roots at the changelog level.
    packer.precomputed_ellipsis = ellipsisroots
    # Maps CL revs to per-revlog revisions. Cleared in close() at
    # the end of each group.
    packer.clrev_to_localrev = {}
    packer.next_clrev_to_localrev = {}
    # Maps changelog nodes to changelog revs. Filled in once
    # during changelog stage and then left unmodified.
    packer.clnode_to_rev = {}
    packer.changelog_done = False
    # If true, informs the packer that it is serving shallow content and might
    # need to pack file contents not introduced by the changes being packed.
    packer.is_shallow = depth is not None

    return packer.generate(common, visitnodes, False, source)