changegroup: define functions for creating changegroup packers...
Gregory Szorc
r38930:19344024 default
@@ -1,1368 +1,1377 @@
# changegroup.py - Mercurial changegroup manipulation functions
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import os
import struct
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    nullrev,
    short,
)

from .thirdparty import (
    attr,
)

from . import (
    dagutil,
    error,
    manifest,
    match as matchmod,
    mdiff,
    phases,
    pycompat,
    repository,
    revlog,
    util,
)

from .utils import (
    stringutil,
)

_CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
_CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
_CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
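
# Illustrative sketch (not part of the module): each delta header is a
# fixed-width struct. For cg2, an assumed 100-byte `headerbytes` string
# would unpack as five 20-byte nodes:
#
#   node, p1, p2, deltabase, cs = struct.unpack(
#       _CHANGEGROUPV2_DELTA_HEADER, headerbytes)
#
# cg3 appends a big-endian uint16 of revlog flags, for 102 bytes total.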

LFS_REQUIREMENT = 'lfs'

readexactly = util.readexactly

def getchunk(stream):
    """return the next chunk from stream as a string"""
    d = readexactly(stream, 4)
    l = struct.unpack(">l", d)[0]
    if l <= 4:
        if l:
            raise error.Abort(_("invalid chunk length %d") % l)
        return ""
    return readexactly(stream, l - 4)

def chunkheader(length):
    """return a changegroup chunk header (string)"""
    return struct.pack(">l", length + 4)

def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    return struct.pack(">l", 0)

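# Illustrative sketch (not part of the module): the 4-byte length prefix
# counts itself, so a framed chunk round-trips like this:
#
#   import io
#   payload = b'hello'
#   stream = io.BytesIO(chunkheader(len(payload)) + payload + closechunk())
#   assert getchunk(stream) == b'hello'  # header says 9, so 5 payload bytes
#   assert not getchunk(stream)          # zero-length chunk ends the group
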
def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                # Increase default buffer size because default is usually
                # small (4k is common on Linux).
                fh = open(filename, "wb", 131072)
        else:
            fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, r"wb")
        cleanup = filename
        for c in chunks:
            fh.write(c)
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)

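# Illustrative sketch (hypothetical `ui` object): with filename=None the
# chunks are spilled to a temporary bundle file and its path is returned:
#
#   path = writechunks(ui, [chunkheader(5) + b'hello', closechunk()], None)
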
class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        if alg is None:
            alg = 'UN'
        if alg not in util.compengines.supportedbundletypes:
            raise error.Abort(_('unknown stream compression type: %s')
                              % alg)
        if alg == 'BZ':
            alg = '_truncatedBZ'

        compengine = util.compengines.forbundletype(alg)
        self._stream = compengine.decompressorreader(fh)
        self._type = alg
        self.extras = extras or {}
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None and self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return (node, p1, p2, cs, deltabase, delta, flags)

    def getchunks(self):
        """returns all the chunks contained in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parses the changegroup data; otherwise it
        would block on an sshrepo, which does not know where the stream ends.
        """
        # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
        # and a list of filelogs. For changegroup 3, we expect 4 parts:
        # changelog, manifestlog, a list of tree manifestlogs, and a list of
        # filelogs.
        #
        # Changelog and manifestlog parts are terminated with empty chunks. The
        # tree and file parts are a list of entry sections. Each entry section
        # is a series of chunks terminating in an empty chunk. The list of these
        # entry sections is terminated in yet another empty chunk, so we know
        # we've reached the end of the tree/file list when we reach an empty
        # chunk that was preceded by no non-empty chunks.

        parts = 0
        while parts < 2 + self._grouplistcount:
            noentries = True
            while True:
                chunk = getchunk(self)
                if not chunk:
                    # The first two empty chunks represent the end of the
                    # changelog and the manifestlog portions. The remaining
                    # empty chunks represent either A) the end of individual
                    # tree or file entries in the file list, or B) the end of
                    # the entire list. It's the end of the entire list if there
                    # were no entries (i.e. noentries is True).
                    if parts < 2:
                        parts += 1
                    elif noentries:
                        parts += 1
                    break
                noentries = False
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

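    # Illustrative layout of a cg1 stream as consumed by getchunks() (a
    # sketch; <empty> is the zero-length chunk produced by closechunk()):
    #
    #   <cl delta>... <empty>
    #   <mf delta>... <empty>
    #   [<filename> <file delta>... <empty>]... <empty>
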
    def _unpackmanifests(self, repo, revmap, trp, prog):
        self.callback = prog.increment
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        deltas = self.deltaiter()
        repo.manifestlog.addgroup(deltas, revmap, trp)
        prog.complete()
        self.callback = None

    def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
              expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        try:
            # The transaction may already carry source information. In this
            # case we use the top level data. We overwrite the argument
            # because we need to use the top level value (if they exist)
            # in this function.
            srctype = tr.hookargs.setdefault('source', srctype)
            url = tr.hookargs.setdefault('url', url)
            repo.hook('prechangegroup',
                      throw=True, **pycompat.strkwargs(tr.hookargs))

            # write changelog data to temp files so concurrent readers
            # will not see an inconsistent view
            cl = repo.changelog
            cl.delayupdate(tr)
            oldheads = set(cl.heads())

            trp = weakref.proxy(tr)
            # pull off the changeset group
            repo.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            progress = repo.ui.makeprogress(_('changesets'), unit=_('chunks'),
                                            total=expectedtotal)
            self.callback = progress.increment

            efiles = set()
            def onchangelog(cl, node):
                efiles.update(cl.readfiles(node))

            self.changelogheader()
            deltas = self.deltaiter()
            cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog)
            efiles = len(efiles)

            if not cgnodes:
                repo.ui.develwarn('applied empty changegroup',
                                  config='warn-empty-changegroup')
            clend = len(cl)
            changesets = clend - clstart
            progress.complete()
            self.callback = None

            # pull off the manifest group
            repo.ui.status(_("adding manifests\n"))
            # We know that we'll never have more manifests than we had
            # changesets.
            progress = repo.ui.makeprogress(_('manifests'), unit=_('chunks'),
                                            total=changesets)
            self._unpackmanifests(repo, revmap, trp, progress)

            needfiles = {}
            if repo.ui.configbool('server', 'validate'):
                cl = repo.changelog
                ml = repo.manifestlog
                # validate incoming csets have their manifests
                for cset in pycompat.xrange(clstart, clend):
                    mfnode = cl.changelogrevision(cset).manifest
                    mfest = ml[mfnode].readdelta()
                    # store file cgnodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            repo.ui.status(_("adding file changes\n"))
            newrevs, newfiles = _addchangegroupfiles(
                repo, self, revmap, trp, efiles, needfiles)
            revisions += newrevs
            files += newfiles

            deltaheads = 0
            if oldheads:
                heads = cl.heads()
                deltaheads = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and repo[h].closesbranch():
                        deltaheads -= 1
            htext = ""
            if deltaheads:
                htext = _(" (%+d heads)") % deltaheads

            repo.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            repo.invalidatevolatilesets()

            if changesets > 0:
                if 'node' not in tr.hookargs:
                    tr.hookargs['node'] = hex(cl.node(clstart))
                    tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                    hookargs = dict(tr.hookargs)
                else:
                    hookargs = dict(tr.hookargs)
                    hookargs['node'] = hex(cl.node(clstart))
                    hookargs['node_last'] = hex(cl.node(clend - 1))
                repo.hook('pretxnchangegroup',
                          throw=True, **pycompat.strkwargs(hookargs))

            added = [cl.node(r) for r in pycompat.xrange(clstart, clend)]
            phaseall = None
            if srctype in ('push', 'serve'):
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if changeset already
                # exists locally as secret
                #
                # We should not use added here but the list of all changes in
                # the bundle
                if repo.publishing():
                    targetphase = phaseall = phases.public
                else:
                    # closer target phase computation

                    # Those changesets have been pushed from the
                    # outside, their phases are going to be pushed
                    # alongside. Therefore `targetphase` is
                    # ignored.
                    targetphase = phaseall = phases.draft
                if added:
                    phases.registernew(repo, tr, targetphase, added)
                if phaseall is not None:
                    phases.advanceboundary(repo, tr, phaseall, cgnodes)

            if changesets > 0:

                def runhooks():
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(repo):
                        return

                    repo.hook("changegroup", **pycompat.strkwargs(hookargs))

                    for n in added:
                        args = hookargs.copy()
                        args['node'] = hex(n)
                        del args['node_last']
                        repo.hook("incoming", **pycompat.strkwargs(args))

                    newheads = [h for h in repo.heads()
                                if h not in oldheads]
                    repo.ui.log("incoming",
                                "%d incoming changes - new heads: %s\n",
                                len(added),
                                ', '.join([hex(c[:6]) for c in newheads]))

                tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        if deltaheads < 0:
            ret = deltaheads - 1
        else:
            ret = deltaheads + 1
        return ret

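    # Illustrative sketch (hypothetical `repo` and stream `fh`): callers
    # usually hand the whole stream to apply() and interpret its return
    # value relative to the head count:
    #
    #   unpacker = cg1unpacker(fh, 'UN')
    #   with repo.transaction('unbundle') as tr:
    #       ret = unpacker.apply(repo, tr, 'unbundle', 'bundle:example')
    #   # ret > 1: heads added; ret < 0: heads removed; ret == 1: unchanged
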
    def deltaiter(self):
        """
        returns an iterator of the deltas in this changegroup

        Useful for passing to the underlying storage system to be stored.
        """
        chain = None
        for chunkdata in iter(lambda: self.deltachunk(chain), {}):
            # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
            yield chunkdata
            chain = chunkdata[0]

class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs = headertuple
        flags = 0
        return node, p1, p2, deltabase, cs, flags

class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog):
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % d)
            dirlog = repo.manifestlog._revlog.dirlog(d)
            deltas = self.deltaiter()
            if not dirlog.addgroup(deltas, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))

class headerlessfixup(object):
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        if self._h:
            d, self._h = self._h[:n], self._h[n:]
            if len(d) < n:
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)

@attr.s(slots=True, frozen=True)
class revisiondelta(object):
    """Describes a delta entry in a changegroup.

    Captured data is sufficient to serialize the delta into multiple
    formats.
    """
    # 20 byte node of this revision.
    node = attr.ib()
    # 20 byte nodes of parent revisions.
    p1node = attr.ib()
    p2node = attr.ib()
    # 20 byte node of the revision this delta is against.
    basenode = attr.ib()
    # 20 byte node of changeset revision this delta is associated with.
    linknode = attr.ib()
    # 2 bytes of flags to apply to revision data.
    flags = attr.ib()
    # Iterable of chunks holding raw delta data.
    deltachunks = attr.ib()

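# Illustrative sketch (hypothetical node values): revisiondelta is a plain
# frozen attrs container, so building one is just keyword construction:
#
#   delta = revisiondelta(node=node, p1node=p1, p2node=p2, basenode=p1,
#                         linknode=clnode, flags=0,
#                         deltachunks=(deltabytes,))
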
class cg1packer(object):
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    version = '01'
    def __init__(self, repo, filematcher, bundlecaps=None):
        """Given a source repo, construct a bundler.

        filematcher is a matcher that matches on files to include in the
        changegroup. Used to facilitate sparse changegroups.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle. While bundlecaps is
        unused in core Mercurial, extensions rely on this feature to communicate
        capabilities to customize the changegroup packer.
        """
        assert filematcher
        self._filematcher = filematcher

        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        # experimental config: bundle.reorder
        reorder = repo.ui.config('bundle', 'reorder')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = stringutil.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None

    def close(self):
        # Ellipses serving mode.
        getattr(self, 'clrev_to_localrev', {}).clear()
        if getattr(self, 'next_clrev_to_localrev', {}):
            self.clrev_to_localrev = self.next_clrev_to_localrev
            del self.next_clrev_to_localrev
        self.changelog_done = True

        return closechunk()

    def fileheader(self, fname):
        return chunkheader(len(fname)) + fname

    # Extracted both for clarity and for overriding in extensions.
    def _sortgroup(self, store, nodelist, lookup):
        """Sort nodes for change group and turn them into revnums."""
        # Ellipses serving mode.
        #
        # In a perfect world, we'd generate better ellipsis-ified graphs
        # for non-changelog revlogs. In practice, we haven't started doing
        # that yet, so the resulting DAGs for the manifestlog and filelogs
        # are actually full of bogus parentage on all the ellipsis
        # nodes. This has the side effect that, while the contents are
        # correct, the individual DAGs might be completely out of whack in
        # a case like 882681bc3166 and its ancestors (back about 10
        # revisions or so) in the main hg repo.
        #
        # The one invariant we *know* holds is that the new (potentially
        # bogus) DAG shape will be valid if we order the nodes in the
        # order that they're introduced in dramatis personae by the
        # changelog, so what we do is we sort the non-changelog histories
        # by the order in which they are used by the changelog.
        if util.safehasattr(self, 'full_nodes') and self.clnode_to_rev:
            key = lambda n: self.clnode_to_rev[lookup(n)]
            return [store.rev(n) for n in sorted(nodelist, key=key)]

        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (store._generaldelta and self._reorder is None) or self._reorder:
            dag = dagutil.revlogdag(store)
            return dag.linearize(set(store.rev(n) for n in nodelist))
        else:
            return sorted([store.rev(n) for n in nodelist])

    def group(self, nodelist, store, lookup, units=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated; units
        specifies the type of revlog that is touched (changelog, manifest,
        etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self.close()
            return

        revs = self._sortgroup(store, nodelist, lookup)

        # add the parent of the first rev
        p = store.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        progress = None
        if units is not None:
            progress = self._repo.ui.makeprogress(_('bundling'), unit=units,
                                                  total=(len(revs) - 1))
        for r in pycompat.xrange(len(revs) - 1):
            if progress:
                progress.update(r + 1)
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(store.node(curr))
            for c in self.revchunk(store, curr, prev, linknode):
                yield c

        if progress:
            progress.complete()
        yield self.close()

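    # Illustrative sketch (hypothetical packer and lookup callback): group()
    # is a generator of wire-format chunks, so callers simply stream them:
    #
    #   for chunk in packer.group(nodes, repo.changelog, lookupcl,
    #                             units=_('changesets')):
    #       fh.write(chunk)
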
642 # filter any nodes that claim to be part of the known set
642 # filter any nodes that claim to be part of the known set
643 def prune(self, store, missing, commonrevs):
643 def prune(self, store, missing, commonrevs):
644 # TODO this violates storage abstraction for manifests.
644 # TODO this violates storage abstraction for manifests.
645 if isinstance(store, manifest.manifestrevlog):
645 if isinstance(store, manifest.manifestrevlog):
646 if not self._filematcher.visitdir(store._dir[:-1] or '.'):
646 if not self._filematcher.visitdir(store._dir[:-1] or '.'):
647 return []
647 return []
648
648
649 rr, rl = store.rev, store.linkrev
649 rr, rl = store.rev, store.linkrev
650 return [n for n in missing if rl(rr(n)) not in commonrevs]
650 return [n for n in missing if rl(rr(n)) not in commonrevs]
651
651
652 def _packmanifests(self, dir, mfnodes, lookuplinknode):
652 def _packmanifests(self, dir, mfnodes, lookuplinknode):
653 """Pack flat manifests into a changegroup stream."""
653 """Pack flat manifests into a changegroup stream."""
654 assert not dir
654 assert not dir
655 for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
655 for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
656 lookuplinknode, units=_('manifests')):
656 lookuplinknode, units=_('manifests')):
657 yield chunk
657 yield chunk
658
658
659 def _manifestsdone(self):
659 def _manifestsdone(self):
660 return ''
660 return ''
661
661
662 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
662 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
663 '''yield a sequence of changegroup chunks (strings)'''
663 '''yield a sequence of changegroup chunks (strings)'''
664 repo = self._repo
664 repo = self._repo
665 cl = repo.changelog
665 cl = repo.changelog
666
666
667 clrevorder = {}
667 clrevorder = {}
668 mfs = {} # needed manifests
668 mfs = {} # needed manifests
669 fnodes = {} # needed file nodes
669 fnodes = {} # needed file nodes
670 mfl = repo.manifestlog
670 mfl = repo.manifestlog
671 # TODO violates storage abstraction.
671 # TODO violates storage abstraction.
672 mfrevlog = mfl._revlog
672 mfrevlog = mfl._revlog
673 changedfiles = set()
673 changedfiles = set()
674
674
675 ellipsesmode = util.safehasattr(self, 'full_nodes')
675 ellipsesmode = util.safehasattr(self, 'full_nodes')
676
676
677 # Callback for the changelog, used to collect changed files and
677 # Callback for the changelog, used to collect changed files and
678 # manifest nodes.
678 # manifest nodes.
679 # Returns the linkrev node (identity in the changelog case).
679 # Returns the linkrev node (identity in the changelog case).
680 def lookupcl(x):
680 def lookupcl(x):
681 c = cl.read(x)
681 c = cl.read(x)
682 clrevorder[x] = len(clrevorder)
682 clrevorder[x] = len(clrevorder)
683
683
684 if ellipsesmode:
684 if ellipsesmode:
685 # Only update mfs if x is going to be sent. Otherwise we
685 # Only update mfs if x is going to be sent. Otherwise we
686 # end up with bogus linkrevs specified for manifests and
686 # end up with bogus linkrevs specified for manifests and
687 # we skip some manifest nodes that we should otherwise
687 # we skip some manifest nodes that we should otherwise
688 # have sent.
688 # have sent.
689 if (x in self.full_nodes
689 if (x in self.full_nodes
690 or cl.rev(x) in self.precomputed_ellipsis):
690 or cl.rev(x) in self.precomputed_ellipsis):
691 n = c[0]
691 n = c[0]
692 # Record the first changeset introducing this manifest
692 # Record the first changeset introducing this manifest
693 # version.
693 # version.
694 mfs.setdefault(n, x)
694 mfs.setdefault(n, x)
695 # Set this narrow-specific dict so we have the lowest
695 # Set this narrow-specific dict so we have the lowest
696 # manifest revnum to look up for this cl revnum. (Part of
696 # manifest revnum to look up for this cl revnum. (Part of
697 # mapping changelog ellipsis parents to manifest ellipsis
697 # mapping changelog ellipsis parents to manifest ellipsis
698 # parents)
698 # parents)
699 self.next_clrev_to_localrev.setdefault(cl.rev(x),
699 self.next_clrev_to_localrev.setdefault(cl.rev(x),
700 mfrevlog.rev(n))
700 mfrevlog.rev(n))
701 # We can't trust the changed files list in the changeset if the
701 # We can't trust the changed files list in the changeset if the
702 # client requested a shallow clone.
702 # client requested a shallow clone.
703 if self.is_shallow:
703 if self.is_shallow:
704 changedfiles.update(mfl[c[0]].read().keys())
704 changedfiles.update(mfl[c[0]].read().keys())
705 else:
705 else:
706 changedfiles.update(c[3])
706 changedfiles.update(c[3])
707 else:
707 else:
708
708
709 n = c[0]
709 n = c[0]
710 # record the first changeset introducing this manifest version
710 # record the first changeset introducing this manifest version
711 mfs.setdefault(n, x)
711 mfs.setdefault(n, x)
712 # Record a complete list of potentially-changed files in
712 # Record a complete list of potentially-changed files in
713 # this manifest.
713 # this manifest.
714 changedfiles.update(c[3])
714 changedfiles.update(c[3])
715
715
716 return x
716 return x
717
717
718 self._verbosenote(_('uncompressed size of bundle content:\n'))
718 self._verbosenote(_('uncompressed size of bundle content:\n'))
719 size = 0
719 size = 0
720 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
720 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
721 size += len(chunk)
721 size += len(chunk)
722 yield chunk
722 yield chunk
723 self._verbosenote(_('%8.i (changelog)\n') % size)
723 self._verbosenote(_('%8.i (changelog)\n') % size)
724
724
725 # We need to make sure that the linkrev in the changegroup refers to
725 # We need to make sure that the linkrev in the changegroup refers to
726 # the first changeset that introduced the manifest or file revision.
726 # the first changeset that introduced the manifest or file revision.
727 # The fastpath is usually safer than the slowpath, because the filelogs
727 # The fastpath is usually safer than the slowpath, because the filelogs
728 # are walked in revlog order.
728 # are walked in revlog order.
729 #
729 #
730 # When taking the slowpath with reorder=None and the manifest revlog
730 # When taking the slowpath with reorder=None and the manifest revlog
731 # uses generaldelta, the manifest may be walked in the "wrong" order.
731 # uses generaldelta, the manifest may be walked in the "wrong" order.
732 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
732 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
733 # cc0ff93d0c0c).
733 # cc0ff93d0c0c).
734 #
734 #
735 # When taking the fastpath, we are only vulnerable to reordering
735 # When taking the fastpath, we are only vulnerable to reordering
736 # of the changelog itself. The changelog never uses generaldelta, so
736 # of the changelog itself. The changelog never uses generaldelta, so
737 # it is only reordered when reorder=True. To handle this case, we
737 # it is only reordered when reorder=True. To handle this case, we
738 # simply take the slowpath, which already has the 'clrevorder' logic.
738 # simply take the slowpath, which already has the 'clrevorder' logic.
739 # This was also fixed in cc0ff93d0c0c.
739 # This was also fixed in cc0ff93d0c0c.
740 fastpathlinkrev = fastpathlinkrev and not self._reorder
740 fastpathlinkrev = fastpathlinkrev and not self._reorder
741 # Treemanifests don't work correctly with fastpathlinkrev
741 # Treemanifests don't work correctly with fastpathlinkrev
742 # either, because we don't discover which directory nodes to
742 # either, because we don't discover which directory nodes to
743 # send along with files. This could probably be fixed.
743 # send along with files. This could probably be fixed.
744 fastpathlinkrev = fastpathlinkrev and (
744 fastpathlinkrev = fastpathlinkrev and (
745 'treemanifest' not in repo.requirements)
745 'treemanifest' not in repo.requirements)
746
746
747 for chunk in self.generatemanifests(commonrevs, clrevorder,
747 for chunk in self.generatemanifests(commonrevs, clrevorder,
748 fastpathlinkrev, mfs, fnodes, source):
748 fastpathlinkrev, mfs, fnodes, source):
749 yield chunk
749 yield chunk
750
750
751 if ellipsesmode:
751 if ellipsesmode:
752 mfdicts = None
752 mfdicts = None
753 if self.is_shallow:
753 if self.is_shallow:
754 mfdicts = [(self._repo.manifestlog[n].read(), lr)
754 mfdicts = [(self._repo.manifestlog[n].read(), lr)
755 for (n, lr) in mfs.iteritems()]
755 for (n, lr) in mfs.iteritems()]
756
756
757 mfs.clear()
757 mfs.clear()
758 clrevs = set(cl.rev(x) for x in clnodes)
758 clrevs = set(cl.rev(x) for x in clnodes)
759
759
760 if not fastpathlinkrev:
760 if not fastpathlinkrev:
761 def linknodes(unused, fname):
761 def linknodes(unused, fname):
762 return fnodes.get(fname, {})
762 return fnodes.get(fname, {})
763 else:
763 else:
764 cln = cl.node
764 cln = cl.node
765 def linknodes(filerevlog, fname):
765 def linknodes(filerevlog, fname):
766 llr = filerevlog.linkrev
766 llr = filerevlog.linkrev
767 fln = filerevlog.node
767 fln = filerevlog.node
768 revs = ((r, llr(r)) for r in filerevlog)
768 revs = ((r, llr(r)) for r in filerevlog)
769 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
769 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
770
770
771 if ellipsesmode:
771 if ellipsesmode:
772 # We need to pass the mfdicts variable down into
772 # We need to pass the mfdicts variable down into
773 # generatefiles(), but more than one command might have
773 # generatefiles(), but more than one command might have
774 # wrapped generatefiles so we can't modify the function
774 # wrapped generatefiles so we can't modify the function
775 # signature. Instead, we pass the data to ourselves using an
775 # signature. Instead, we pass the data to ourselves using an
776 # instance attribute. I'm sorry.
776 # instance attribute. I'm sorry.
777 self._mfdicts = mfdicts
777 self._mfdicts = mfdicts
778
778
779 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
779 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
780 source):
780 source):
781 yield chunk
781 yield chunk
782
782
783 yield self.close()
783 yield self.close()
784
784
785 if clnodes:
785 if clnodes:
786 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
786 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
787
787
788 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
788 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
789 fnodes, source):
789 fnodes, source):
790 """Returns an iterator of changegroup chunks containing manifests.
790 """Returns an iterator of changegroup chunks containing manifests.
791
791
792 `source` is unused here, but is used by extensions like remotefilelog to
792 `source` is unused here, but is used by extensions like remotefilelog to
        change what is sent based on pulls vs pushes, etc.
        """
        repo = self._repo
        mfl = repo.manifestlog
        dirlog = mfl._revlog.dirlog
        tmfnodes = {'': mfs}

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def makelookupmflinknode(dir, nodes):
            if fastpathlinkrev:
                assert not dir
                return mfs.__getitem__

            def lookupmflinknode(x):
                """Callback for looking up the linknode for manifests.

                Returns the linkrev node for the specified manifest.

                SIDE EFFECT:

                1) fclnodes gets populated with the list of relevant
                   file nodes if we're not using fastpathlinkrev
                2) When treemanifests are in use, collects treemanifest nodes
                   to send

                Note that this means manifests must be completely sent to
                the client before you can trust the list of files and
                treemanifests to send.
                """
                clnode = nodes[x]
                mdata = mfl.get(dir, x).readfast(shallow=True)
                for p, n, fl in mdata.iterentries():
                    if fl == 't': # subdirectory manifest
                        subdir = dir + p + '/'
                        tmfclnodes = tmfnodes.setdefault(subdir, {})
                        tmfclnode = tmfclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[tmfclnode]:
                            tmfclnodes[n] = clnode
                    else:
                        f = dir + p
                        fclnodes = fnodes.setdefault(f, {})
                        fclnode = fclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            fclnodes[n] = clnode
                return clnode
            return lookupmflinknode

        size = 0
        while tmfnodes:
            dir, nodes = tmfnodes.popitem()
            prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
            if not dir or prunednodes:
                for x in self._packmanifests(dir, prunednodes,
                                             makelookupmflinknode(dir, nodes)):
                    size += len(x)
                    yield x
        self._verbosenote(_('%8.i (manifests)\n') % size)
        yield self._manifestsdone()

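The while loop above treats tmfnodes as a work queue keyed by directory: packing the root manifest can enqueue subdirectory manifests through the 't' branch of the lookup callback, and those get packed in turn. A minimal, self-contained sketch of that queue discipline (the names drainqueue and discover are illustrative, not Mercurial APIs):

def drainqueue(queue, discover):
    # queue: {dirname: nodes}; discover() stands in for the side effect
    # of lookupmflinknode(), which may enqueue nested tree manifests
    while queue:
        dirname, nodes = queue.popitem()
        for subdir, subnodes in discover(dirname, nodes).items():
            queue.setdefault(subdir, set()).update(subnodes)

drainqueue({'': {b'n1'}}, lambda d, ns: {} if d else {'foo/': {b'n2'}})
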
    # The 'source' parameter is useful for extensions
    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        changedfiles = list(filter(self._filematcher, changedfiles))

        if getattr(self, 'is_shallow', False):
            # See comment in generate() for why this sadness is a thing.
            mfdicts = self._mfdicts
            del self._mfdicts
            # In a shallow clone, the linknodes callback needs to also include
            # those file nodes that are in the manifests we sent but weren't
            # introduced by those manifests.
            commonctxs = [self._repo[c] for c in commonrevs]
            oldlinknodes = linknodes
            clrev = self._repo.changelog.rev

            # Defining this function has the side effect of shadowing the
            # 'linknodes' function that was passed in as an argument.
            # TODO have caller pass in appropriate function.
            def linknodes(flog, fname):
                for c in commonctxs:
                    try:
                        fnode = c.filenode(fname)
                        self.clrev_to_localrev[c.rev()] = flog.rev(fnode)
                    except error.ManifestLookupError:
                        pass
                links = oldlinknodes(flog, fname)
                if len(links) != len(mfdicts):
                    for mf, lr in mfdicts:
                        fnode = mf.get(fname, None)
                        if fnode in links:
                            links[fnode] = min(links[fnode], lr, key=clrev)
                        elif fnode:
                            links[fnode] = lr
                return links

        return self._generatefiles(changedfiles, linknodes, commonrevs, source)

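For orientation, a linknodes callback maps every file node that should travel in the bundle to the changelog node that introduced it; the shallow-clone override above only widens that mapping with nodes reused from common manifests. A hedged sketch of the expected shape (the function name and node values are invented):

def examplelinknodes(flog, fname):
    # returns {filenode: changelog node}; prune() and lookupfilelog()
    # below both consume a mapping of exactly this shape
    return {b'\x11' * 20: b'\x22' * 20}
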
    def _generatefiles(self, changedfiles, linknodes, commonrevs, source):
        repo = self._repo
        progress = repo.ui.makeprogress(_('bundling'), unit=_('files'),
                                        total=len(changedfiles))
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise error.Abort(_("empty or missing file data for %s") %
                                  fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup table for filenodes; we collected the linkrev nodes
            # above in the fastpath case and with lookupmf in the slowpath
            # case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
            if filenodes:
                progress.update(i + 1, item=fname)
                h = self.fileheader(fname)
                size = len(h)
                yield h
                for chunk in self.group(filenodes, filerevlog, lookupfilelog):
                    size += len(chunk)
                    yield chunk
                self._verbosenote(_('%8.i %s\n') % (size, fname))
        progress.complete()

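Each file thus contributes one self-delimiting section to the stream: a chunk carrying the file name, then the framed revision deltas, then the zero-length chunk that ends the group. A rough sketch of the name chunk, built with the module-level helpers defined at the top of this file (this assumes fileheader() frames the name this way, which is not shown in this excerpt):

fname = b'foo.txt'
section = chunkheader(len(fname)) + fname   # presumably what fileheader() yields
section += closechunk()                     # an empty group, for the sketch
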
    def deltaparent(self, store, rev, p1, p2, prev):
        if not store.candelta(prev, rev):
            raise error.ProgrammingError('cg1 should not be used in this case')
        return prev

    def revchunk(self, store, rev, prev, linknode):
        if util.safehasattr(self, 'full_nodes'):
            fn = self._revisiondeltanarrow
        else:
            fn = self._revisiondeltanormal

        delta = fn(store, rev, prev, linknode)
        if not delta:
            return

        meta = self.builddeltaheader(delta.node, delta.p1node, delta.p2node,
                                     delta.basenode, delta.linknode,
                                     delta.flags)
        l = len(meta) + sum(len(x) for x in delta.deltachunks)

        yield chunkheader(l)
        yield meta
        for x in delta.deltachunks:
            yield x

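revchunk() frames every revision identically: a 4-byte length (counting the length field itself, per chunkheader()), the packed delta header, then the delta body. A compact sketch mirroring those yields (framerevision is an invented name):

def framerevision(meta, deltachunks):
    # same arithmetic as revchunk(): the length prefix covers the
    # header plus all delta chunks
    l = len(meta) + sum(len(x) for x in deltachunks)
    return chunkheader(l) + meta + b''.join(deltachunks)
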
    def _revisiondeltanormal(self, store, rev, prev, linknode):
        node = store.node(rev)
        p1, p2 = store.parentrevs(rev)
        base = self.deltaparent(store, rev, p1, p2, prev)

        prefix = ''
        if store.iscensored(base) or store.iscensored(rev):
            try:
                delta = store.revision(node, raw=True)
            except error.CensoredNodeError as e:
                delta = e.tombstone
            if base == nullrev:
                prefix = mdiff.trivialdiffheader(len(delta))
            else:
                baselen = store.rawsize(base)
                prefix = mdiff.replacediffheader(baselen, len(delta))
        elif base == nullrev:
            delta = store.revision(node, raw=True)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = store.revdiff(base, rev)
        p1n, p2n = store.parents(node)

        return revisiondelta(
            node=node,
            p1node=p1n,
            p2node=p2n,
            basenode=store.node(base),
            linknode=linknode,
            flags=store.flags(rev),
            deltachunks=(prefix, delta),
        )

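When the base is the null revision, the 'delta' is really the full text behind a trivial diff header, i.e. a patch replacing zero bytes at offset zero. A quick illustration using mdiff, which this module already imports:

text = b'hello\n'
chunk = mdiff.trivialdiffheader(len(text)) + text
# decodes as: replace bytes [0, 0) of the (empty) base with len(text) bytes
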
    def _revisiondeltanarrow(self, store, rev, prev, linknode):
        # build up some mapping information that's useful later. See
        # the local() nested function below.
        if not self.changelog_done:
            self.clnode_to_rev[linknode] = rev
            linkrev = rev
            self.clrev_to_localrev[linkrev] = rev
        else:
            linkrev = self.clnode_to_rev[linknode]
            self.clrev_to_localrev[linkrev] = rev

        # This is a node to send in full, because the changeset it
        # corresponds to was a full changeset.
        if linknode in self.full_nodes:
            return self._revisiondeltanormal(store, rev, prev, linknode)

        # At this point, a node can either be one we should skip or an
        # ellipsis. If it's not an ellipsis, bail immediately.
        if linkrev not in self.precomputed_ellipsis:
            return

        linkparents = self.precomputed_ellipsis[linkrev]
        def local(clrev):
            """Turn a changelog revnum into a local revnum.

            The ellipsis dag is stored as revnums on the changelog,
            but when we're producing ellipsis entries for
            non-changelog revlogs, we need to turn those numbers into
            something local. This does that for us, and during the
            changelog sending phase will also expand the stored
            mappings as needed.
            """
            if clrev == nullrev:
                return nullrev

            if not self.changelog_done:
                # If we're doing the changelog, it's possible that we
                # have a parent that is already on the client, and we
                # need to store some extra mapping information so that
                # our contained ellipsis nodes will be able to resolve
                # their parents.
                if clrev not in self.clrev_to_localrev:
                    clnode = store.node(clrev)
                    self.clnode_to_rev[clnode] = clrev
                return clrev

            # Walk the ellipsis-ized changelog breadth-first looking for a
            # change that has been linked from the current revlog.
            #
            # For a flat manifest revlog only a single step should be necessary
            # as all relevant changelog entries are relevant to the flat
            # manifest.
            #
            # For a filelog or tree manifest dirlog however not every changelog
            # entry will have been relevant, so we need to skip some changelog
            # nodes even after ellipsis-izing.
            walk = [clrev]
            while walk:
                p = walk[0]
                walk = walk[1:]
                if p in self.clrev_to_localrev:
                    return self.clrev_to_localrev[p]
                elif p in self.full_nodes:
                    walk.extend([pp for pp in self._repo.changelog.parentrevs(p)
                                 if pp != nullrev])
                elif p in self.precomputed_ellipsis:
                    walk.extend([pp for pp in self.precomputed_ellipsis[p]
                                 if pp != nullrev])
                else:
                    # In this case, we've got an ellipsis with parents
                    # outside the current bundle (likely an
                    # incremental pull). We "know" that we can use the
                    # value of this same revlog at whatever revision
                    # is pointed to by linknode. "Know" is in scare
                    # quotes because I haven't done enough examination
                    # of edge cases to convince myself this is really
                    # a fact - it works for all the (admittedly
                    # thorough) cases in our testsuite, but I would be
                    # somewhat unsurprised to find a case in the wild
                    # where this breaks down a bit. That said, I don't
                    # know if it would hurt anything.
                    for i in pycompat.xrange(rev, 0, -1):
                        if store.linkrev(i) == clrev:
                            return i
                    # We failed to resolve a parent for this node, so
                    # we crash the changegroup construction.
                    raise error.Abort(
                        'unable to resolve parent while packing %r %r'
                        ' for changeset %r' % (store.indexfile, rev, clrev))

            return nullrev

        if not linkparents or (
            store.parentrevs(rev) == (nullrev, nullrev)):
            p1, p2 = nullrev, nullrev
        elif len(linkparents) == 1:
            p1, = sorted(local(p) for p in linkparents)
            p2 = nullrev
        else:
            p1, p2 = sorted(local(p) for p in linkparents)

        n = store.node(rev)
        p1n, p2n = store.node(p1), store.node(p2)
        flags = store.flags(rev)
        flags |= revlog.REVIDX_ELLIPSIS

        # TODO: try and actually send deltas for ellipsis data blocks
        data = store.revision(n)
        diffheader = mdiff.trivialdiffheader(len(data))

        return revisiondelta(
            node=n,
            p1node=p1n,
            p2node=p2n,
            basenode=nullid,
            linknode=linknode,
            flags=flags,
            deltachunks=(diffheader, data),
        )

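After local() translation the parents are normalized exactly as revlogs expect: sorted so the smaller local revision becomes p1, and a lone surviving parent paired with nullrev. For instance (hypothetical revision numbers):

parents = [7, 3]              # two surviving ellipsis parents, already local
p1, p2 = sorted(parents)      # p1=3, p2=7, as in the final branch above
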
    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # do nothing with basenode, it is implicitly the previous one in HG10
        # do nothing with flags, it is implicitly 0 for cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)

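The three wire headers differ only in which fields they carry; their fixed sizes follow directly from the struct formats defined at the top of this module:

import struct
assert struct.calcsize(_CHANGEGROUPV1_DELTA_HEADER) == 80    # node, p1, p2, linknode
assert struct.calcsize(_CHANGEGROUPV2_DELTA_HEADER) == 100   # ... plus basenode
assert struct.calcsize(_CHANGEGROUPV3_DELTA_HEADER) == 102   # ... plus 16-bit flags
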
class cg2packer(cg1packer):
    version = '02'
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER

    def __init__(self, repo, filematcher, bundlecaps=None):
        super(cg2packer, self).__init__(repo, filematcher,
                                        bundlecaps=bundlecaps)

        if self._reorder is None:
            # Since generaldelta is directly supported by cg2, reordering
            # generally doesn't help, so we disable it by default (treating
            # bundle.reorder=auto just like bundle.reorder=False).
            self._reorder = False

    def deltaparent(self, store, rev, p1, p2, prev):
        # Narrow ellipses mode.
        if util.safehasattr(self, 'full_nodes'):
            # TODO: send better deltas when in narrow mode.
            #
            # changegroup.group() loops over revisions to send,
            # including revisions we'll skip. What this means is that
            # `prev` will be a potentially useless delta base for all
            # ellipsis nodes, as the client likely won't have it. In
            # the future we should do bookkeeping about which nodes
            # have been sent to the client, and try to be
            # significantly smarter about delta bases. This is
            # slightly tricky because this same code has to work for
            # all revlogs, and we don't have the linkrev/linknode here.
            return p1

        dp = store.deltaparent(rev)
        if dp == nullrev and store.storedeltachains:
            # Avoid sending full revisions when delta parent is null. Pick prev
            # in that case. It's tempting to pick p1 in this case, as p1 will
            # be smaller in the common case. However, computing a delta against
            # p1 may require resolving the raw text of p1, which could be
            # expensive. The revlog caches should have prev cached, meaning
            # less CPU for changegroup generation. There is likely room to add
            # a flag and/or config option to control this behavior.
            base = prev
        elif dp == nullrev:
            # revlog is configured to use full snapshot for a reason,
            # stick to full snapshot.
            base = nullrev
        elif dp not in (p1, p2, prev):
            # Pick prev when we can't be sure remote has the base revision.
            return prev
        else:
            base = dp
        if base != nullrev and not store.candelta(base, rev):
            base = nullrev
        return base

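In summary, with dp = store.deltaparent(rev), the base chosen above reduces to the following table; note the dp-not-in-(p1, p2, prev) branch returns early and so skips the final candelta() demotion:

# dp == nullrev, delta chains enabled   -> prev     (prev is likely cached)
# dp == nullrev, delta chains disabled  -> nullrev  (full snapshot)
# dp not in (p1, p2, prev)              -> prev     (remote may lack dp)
# otherwise                             -> dp
# any fall-through base then drops to nullrev if store.candelta(base, rev)
# rejects the pair
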
    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # Do nothing with flags, it is implicitly 0 in cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)

class cg3packer(cg2packer):
    version = '03'
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        if dir:
            yield self.fileheader(dir)

        dirlog = self._repo.manifestlog._revlog.dirlog(dir)
        for chunk in self.group(mfnodes, dirlog, lookuplinknode,
                                units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        return self.close()

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        return struct.pack(
            self.deltaheader, node, p1n, p2n, basenode, linknode, flags)

-_packermap = {'01': (cg1packer, cg1unpacker),
+def _makecg1packer(repo, filematcher, bundlecaps):
+    return cg1packer(repo, filematcher, bundlecaps=bundlecaps)
+
+def _makecg2packer(repo, filematcher, bundlecaps):
+    return cg2packer(repo, filematcher, bundlecaps=bundlecaps)
+
+def _makecg3packer(repo, filematcher, bundlecaps):
+    return cg3packer(repo, filematcher, bundlecaps=bundlecaps)
+
+_packermap = {'01': (_makecg1packer, cg1unpacker),
               # cg2 adds support for exchanging generaldelta
-              '02': (cg2packer, cg2unpacker),
+              '02': (_makecg2packer, cg2unpacker),
               # cg3 adds support for exchanging revlog flags and treemanifests
-              '03': (cg3packer, cg3unpacker),
+              '03': (_makecg3packer, cg3unpacker),
 }

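These one-line factories make packer construction replaceable: an extension can substitute its own construction logic without touching the call sites that consult _packermap. A hypothetical sketch (the wrapper name and debug message are invented):

from mercurial import changegroup

def _makeloggingcg2packer(repo, filematcher, bundlecaps):
    repo.ui.debug('building cg2 packer\n')
    return changegroup.cg2packer(repo, filematcher, bundlecaps=bundlecaps)

changegroup._packermap['02'] = (_makeloggingcg2packer,
                                changegroup._packermap['02'][1])
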
def allsupportedversions(repo):
    versions = set(_packermap.keys())
    if not (repo.ui.configbool('experimental', 'changegroup3') or
            repo.ui.configbool('experimental', 'treemanifest') or
            'treemanifest' in repo.requirements):
        versions.discard('03')
    return versions

# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    return allsupportedversions(repo)

# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    versions = allsupportedversions(repo)
    if 'treemanifest' in repo.requirements:
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        versions.discard('01')
        versions.discard('02')
    if repository.NARROW_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # support that for stripping and unbundling to work.
        versions.discard('01')
        versions.discard('02')
    if LFS_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # mark LFS entries with REVIDX_EXTSTORED.
        versions.discard('01')
        versions.discard('02')

    return versions

def localversion(repo):
    # Finds the best version to use for bundles that are meant to be used
    # locally, such as those from strip and shelve, and temporary bundles.
    return max(supportedoutgoingversions(repo))

def safeversion(repo):
    # Finds the smallest version that it's safe to assume clients of the repo
    # will support. For example, all hg versions that support generaldelta also
    # support changegroup 02.
    versions = supportedoutgoingversions(repo)
    if 'generaldelta' in repo.requirements:
        versions.discard('01')
    assert versions
    return min(versions)

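Taken together: localversion() picks the richest format the repo can produce, while safeversion() picks the most conservative one its clients can be assumed to read. Because the two-character version strings sort lexicographically, both always land inside the supported sets (sketch, assuming a loaded repo object):

version = localversion(repo)                       # e.g. '02', or '03' with treemanifests
assert version in supportedincomingversions(repo)
assert safeversion(repo) <= version
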
def getbundler(version, repo, bundlecaps=None, filematcher=None):
    assert version in supportedoutgoingversions(repo)

    if filematcher is None:
        filematcher = matchmod.alwaysmatcher(repo.root, '')

    if version == '01' and not filematcher.always():
        raise error.ProgrammingError('version 01 changegroups do not support '
                                     'sparse file matchers')

    # Requested files could include files not in the local store. So
    # filter those out.
    filematcher = matchmod.intersectmatchers(repo.narrowmatch(),
                                             filematcher)

-    return _packermap[version][0](repo, filematcher=filematcher,
-                                  bundlecaps=bundlecaps)
+    fn = _packermap[version][0]
+    return fn(repo, filematcher, bundlecaps)

def getunbundler(version, fh, alg, extras=None):
    return _packermap[version][1](fh, alg, extras=extras)

def _changegroupinfo(repo, nodes, source):
    if repo.ui.verbose or source == 'bundle':
        repo.ui.status(_("%d changesets found\n") % len(nodes))
        if repo.ui.debugflag:
            repo.ui.debug("list of changesets:\n")
            for node in nodes:
                repo.ui.debug("%s\n" % hex(node))

def makechangegroup(repo, outgoing, version, source, fastpath=False,
                    bundlecaps=None):
    cgstream = makestream(repo, outgoing, version, source,
                          fastpath=fastpath, bundlecaps=bundlecaps)
    return getunbundler(version, util.chunkbuffer(cgstream), None,
                        {'clcount': len(outgoing.missing) })

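makechangegroup() hands back an unpacker positioned over the freshly generated stream, so a caller can apply it in-process. A hedged sketch (repo and outgoing are assumed to exist, and the unpacker's apply() signature is as used elsewhere in Mercurial at this time):

cg = makechangegroup(repo, outgoing, '02', 'strip')
with repo.lock(), repo.transaction('example') as tr:
    cg.apply(repo, tr, 'strip', 'bundle:example')
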
def makestream(repo, outgoing, version, source, fastpath=False,
               bundlecaps=None, filematcher=None):
    bundler = getbundler(version, repo, bundlecaps=bundlecaps,
                         filematcher=filematcher)

    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know that all linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)

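makestream() is likewise the building block for writing bundles to disk, pairing naturally with writechunks() from the top of this module (sketch, again assuming repo and outgoing):

cgstream = makestream(repo, outgoing, '02', 'bundle')
writechunks(repo.ui, cgstream, 'example.cg')
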
def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    revisions = 0
    files = 0
    progress = repo.ui.makeprogress(_('files'), unit=_('files'),
                                    total=expectedfiles)
    for chunkdata in iter(source.filelogheader, {}):
        files += 1
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        progress.increment()
        fl = repo.file(f)
        o = len(fl)
        try:
            deltas = source.deltaiter()
            if not fl.addgroup(deltas, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        if f in needfiles:
            needs = needfiles[f]
            for new in pycompat.xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    progress.complete()

    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files

def _packellipsischangegroup(repo, common, match, relevant_nodes,
                             ellipsisroots, visitnodes, depth, source, version):
    if version in ('01', '02'):
        raise error.Abort(
            'ellipsis nodes require at least cg3 on client and server, '
            'but negotiated version %s' % version)
    # We wrap cg1packer.revchunk, using a side channel to pass
    # relevant_nodes into that area. Then if linknode isn't in the
    # set, we know we have an ellipsis node and we should defer
    # sending that node's data. We override close() to detect
    # pending ellipsis nodes and flush them.
    packer = getbundler(version, repo, filematcher=match)
    # Give the packer the list of nodes which should not be
    # ellipsis nodes. We store this rather than the set of nodes
    # that should be an ellipsis because for very large histories
    # we expect this to be significantly smaller.
    packer.full_nodes = relevant_nodes
    # Maps ellipsis revs to their roots at the changelog level.
    packer.precomputed_ellipsis = ellipsisroots
    # Maps CL revs to per-revlog revisions. Cleared in close() at
    # the end of each group.
    packer.clrev_to_localrev = {}
    packer.next_clrev_to_localrev = {}
    # Maps changelog nodes to changelog revs. Filled in once
    # during changelog stage and then left unmodified.
    packer.clnode_to_rev = {}
    packer.changelog_done = False
    # If true, informs the packer that it is serving shallow content and might
    # need to pack file contents not introduced by the changes being packed.
    packer.is_shallow = depth is not None

    return packer.generate(common, visitnodes, False, source)