changegroup: pass function to build delta header into constructor...
Gregory Szorc
r38933:bd64b8b8 default
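
This commit drops the `deltaheader` struct class attribute from the packer and instead has the caller pass a `builddeltaheader` callable into the constructor. As a rough sketch of the new calling convention (the diff below is truncated before any calling code, so the factory name `_makecg1packer` and its exact shape are assumptions, not part of the commit as shown; `d` is a revisiondelta-style object as defined in this file):

    import struct

    # cg1 delta header layout, as defined in this module: node, p1, p2 and
    # the linknode; the delta base is implicit in cg1 (the previous node).
    _CHANGEGROUPV1_DELTA_HEADER = struct.Struct("20s20s20s20s")

    def _makecg1packer(repo, filematcher, bundlecaps=None):
        # Hypothetical factory: pack the header from a revisiondelta-like
        # object 'd'; attribute names match the revisiondelta attrs class.
        builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack(
            d.node, d.p1node, d.p2node, d.linknode)
        return cg1packer(repo, filematcher, '01', builddeltaheader,
                         bundlecaps=bundlecaps)
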
@@ -1,1376 +1,1375 b''
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import os
10 import os
11 import struct
11 import struct
12 import weakref
12 import weakref
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import (
15 from .node import (
16 hex,
16 hex,
17 nullid,
17 nullid,
18 nullrev,
18 nullrev,
19 short,
19 short,
20 )
20 )
21
21
22 from .thirdparty import (
22 from .thirdparty import (
23 attr,
23 attr,
24 )
24 )
25
25
26 from . import (
26 from . import (
27 dagutil,
27 dagutil,
28 error,
28 error,
29 manifest,
29 manifest,
30 match as matchmod,
30 match as matchmod,
31 mdiff,
31 mdiff,
32 phases,
32 phases,
33 pycompat,
33 pycompat,
34 repository,
34 repository,
35 revlog,
35 revlog,
36 util,
36 util,
37 )
37 )
38
38
39 from .utils import (
39 from .utils import (
40 stringutil,
40 stringutil,
41 )
41 )
42
42
43 _CHANGEGROUPV1_DELTA_HEADER = struct.Struct("20s20s20s20s")
43 _CHANGEGROUPV1_DELTA_HEADER = struct.Struct("20s20s20s20s")
44 _CHANGEGROUPV2_DELTA_HEADER = struct.Struct("20s20s20s20s20s")
44 _CHANGEGROUPV2_DELTA_HEADER = struct.Struct("20s20s20s20s20s")
45 _CHANGEGROUPV3_DELTA_HEADER = struct.Struct(">20s20s20s20s20sH")
45 _CHANGEGROUPV3_DELTA_HEADER = struct.Struct(">20s20s20s20s20sH")
46
46
47 LFS_REQUIREMENT = 'lfs'
47 LFS_REQUIREMENT = 'lfs'
48
48
49 readexactly = util.readexactly
49 readexactly = util.readexactly
50
50
51 def getchunk(stream):
51 def getchunk(stream):
52 """return the next chunk from stream as a string"""
52 """return the next chunk from stream as a string"""
53 d = readexactly(stream, 4)
53 d = readexactly(stream, 4)
54 l = struct.unpack(">l", d)[0]
54 l = struct.unpack(">l", d)[0]
55 if l <= 4:
55 if l <= 4:
56 if l:
56 if l:
57 raise error.Abort(_("invalid chunk length %d") % l)
57 raise error.Abort(_("invalid chunk length %d") % l)
58 return ""
58 return ""
59 return readexactly(stream, l - 4)
59 return readexactly(stream, l - 4)
60
60
61 def chunkheader(length):
61 def chunkheader(length):
62 """return a changegroup chunk header (string)"""
62 """return a changegroup chunk header (string)"""
63 return struct.pack(">l", length + 4)
63 return struct.pack(">l", length + 4)
64
64
65 def closechunk():
65 def closechunk():
66 """return a changegroup chunk header (string) for a zero-length chunk"""
66 """return a changegroup chunk header (string) for a zero-length chunk"""
67 return struct.pack(">l", 0)
67 return struct.pack(">l", 0)
68
68
69 def writechunks(ui, chunks, filename, vfs=None):
69 def writechunks(ui, chunks, filename, vfs=None):
70 """Write chunks to a file and return its filename.
70 """Write chunks to a file and return its filename.
71
71
72 The stream is assumed to be a bundle file.
72 The stream is assumed to be a bundle file.
73 Existing files will not be overwritten.
73 Existing files will not be overwritten.
74 If no filename is specified, a temporary file is created.
74 If no filename is specified, a temporary file is created.
75 """
75 """
76 fh = None
76 fh = None
77 cleanup = None
77 cleanup = None
78 try:
78 try:
79 if filename:
79 if filename:
80 if vfs:
80 if vfs:
81 fh = vfs.open(filename, "wb")
81 fh = vfs.open(filename, "wb")
82 else:
82 else:
83 # Increase default buffer size because default is usually
83 # Increase default buffer size because default is usually
84 # small (4k is common on Linux).
84 # small (4k is common on Linux).
85 fh = open(filename, "wb", 131072)
85 fh = open(filename, "wb", 131072)
86 else:
86 else:
87 fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg")
87 fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg")
88 fh = os.fdopen(fd, r"wb")
88 fh = os.fdopen(fd, r"wb")
89 cleanup = filename
89 cleanup = filename
90 for c in chunks:
90 for c in chunks:
91 fh.write(c)
91 fh.write(c)
92 cleanup = None
92 cleanup = None
93 return filename
93 return filename
94 finally:
94 finally:
95 if fh is not None:
95 if fh is not None:
96 fh.close()
96 fh.close()
97 if cleanup is not None:
97 if cleanup is not None:
98 if filename and vfs:
98 if filename and vfs:
99 vfs.unlink(cleanup)
99 vfs.unlink(cleanup)
100 else:
100 else:
101 os.unlink(cleanup)
101 os.unlink(cleanup)
102
102
103 class cg1unpacker(object):
103 class cg1unpacker(object):
104 """Unpacker for cg1 changegroup streams.
104 """Unpacker for cg1 changegroup streams.
105
105
106 A changegroup unpacker handles the framing of the revision data in
106 A changegroup unpacker handles the framing of the revision data in
107 the wire format. Most consumers will want to use the apply()
107 the wire format. Most consumers will want to use the apply()
108 method to add the changes from the changegroup to a repository.
108 method to add the changes from the changegroup to a repository.
109
109
110 If you're forwarding a changegroup unmodified to another consumer,
110 If you're forwarding a changegroup unmodified to another consumer,
111 use getchunks(), which returns an iterator of changegroup
111 use getchunks(), which returns an iterator of changegroup
112 chunks. This is mostly useful for cases where you need to know the
112 chunks. This is mostly useful for cases where you need to know the
113 data stream has ended by observing the end of the changegroup.
113 data stream has ended by observing the end of the changegroup.
114
114
115 deltachunk() is useful only if you're applying delta data. Most
115 deltachunk() is useful only if you're applying delta data. Most
116 consumers should prefer apply() instead.
116 consumers should prefer apply() instead.
117
117
118 A few other public methods exist. Those are used only for
118 A few other public methods exist. Those are used only for
119 bundlerepo and some debug commands - their use is discouraged.
119 bundlerepo and some debug commands - their use is discouraged.
120 """
120 """
121 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
121 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
122 deltaheadersize = deltaheader.size
122 deltaheadersize = deltaheader.size
123 version = '01'
123 version = '01'
124 _grouplistcount = 1 # One list of files after the manifests
124 _grouplistcount = 1 # One list of files after the manifests
125
125
126 def __init__(self, fh, alg, extras=None):
126 def __init__(self, fh, alg, extras=None):
127 if alg is None:
127 if alg is None:
128 alg = 'UN'
128 alg = 'UN'
129 if alg not in util.compengines.supportedbundletypes:
129 if alg not in util.compengines.supportedbundletypes:
130 raise error.Abort(_('unknown stream compression type: %s')
130 raise error.Abort(_('unknown stream compression type: %s')
131 % alg)
131 % alg)
132 if alg == 'BZ':
132 if alg == 'BZ':
133 alg = '_truncatedBZ'
133 alg = '_truncatedBZ'
134
134
135 compengine = util.compengines.forbundletype(alg)
135 compengine = util.compengines.forbundletype(alg)
136 self._stream = compengine.decompressorreader(fh)
136 self._stream = compengine.decompressorreader(fh)
137 self._type = alg
137 self._type = alg
138 self.extras = extras or {}
138 self.extras = extras or {}
139 self.callback = None
139 self.callback = None
140
140
141 # These methods (compressed, read, seek, tell) all appear to only
141 # These methods (compressed, read, seek, tell) all appear to only
142 # be used by bundlerepo, but it's a little hard to tell.
142 # be used by bundlerepo, but it's a little hard to tell.
143 def compressed(self):
143 def compressed(self):
144 return self._type is not None and self._type != 'UN'
144 return self._type is not None and self._type != 'UN'
145 def read(self, l):
145 def read(self, l):
146 return self._stream.read(l)
146 return self._stream.read(l)
147 def seek(self, pos):
147 def seek(self, pos):
148 return self._stream.seek(pos)
148 return self._stream.seek(pos)
149 def tell(self):
149 def tell(self):
150 return self._stream.tell()
150 return self._stream.tell()
151 def close(self):
151 def close(self):
152 return self._stream.close()
152 return self._stream.close()
153
153
154 def _chunklength(self):
154 def _chunklength(self):
155 d = readexactly(self._stream, 4)
155 d = readexactly(self._stream, 4)
156 l = struct.unpack(">l", d)[0]
156 l = struct.unpack(">l", d)[0]
157 if l <= 4:
157 if l <= 4:
158 if l:
158 if l:
159 raise error.Abort(_("invalid chunk length %d") % l)
159 raise error.Abort(_("invalid chunk length %d") % l)
160 return 0
160 return 0
161 if self.callback:
161 if self.callback:
162 self.callback()
162 self.callback()
163 return l - 4
163 return l - 4
164
164
165 def changelogheader(self):
165 def changelogheader(self):
166 """v10 does not have a changelog header chunk"""
166 """v10 does not have a changelog header chunk"""
167 return {}
167 return {}
168
168
169 def manifestheader(self):
169 def manifestheader(self):
170 """v10 does not have a manifest header chunk"""
170 """v10 does not have a manifest header chunk"""
171 return {}
171 return {}
172
172
173 def filelogheader(self):
173 def filelogheader(self):
174 """return the header of the filelogs chunk, v10 only has the filename"""
174 """return the header of the filelogs chunk, v10 only has the filename"""
175 l = self._chunklength()
175 l = self._chunklength()
176 if not l:
176 if not l:
177 return {}
177 return {}
178 fname = readexactly(self._stream, l)
178 fname = readexactly(self._stream, l)
179 return {'filename': fname}
179 return {'filename': fname}
180
180
181 def _deltaheader(self, headertuple, prevnode):
181 def _deltaheader(self, headertuple, prevnode):
182 node, p1, p2, cs = headertuple
182 node, p1, p2, cs = headertuple
183 if prevnode is None:
183 if prevnode is None:
184 deltabase = p1
184 deltabase = p1
185 else:
185 else:
186 deltabase = prevnode
186 deltabase = prevnode
187 flags = 0
187 flags = 0
188 return node, p1, p2, deltabase, cs, flags
188 return node, p1, p2, deltabase, cs, flags
189
189
190 def deltachunk(self, prevnode):
190 def deltachunk(self, prevnode):
191 l = self._chunklength()
191 l = self._chunklength()
192 if not l:
192 if not l:
193 return {}
193 return {}
194 headerdata = readexactly(self._stream, self.deltaheadersize)
194 headerdata = readexactly(self._stream, self.deltaheadersize)
195 header = self.deltaheader.unpack(headerdata)
195 header = self.deltaheader.unpack(headerdata)
196 delta = readexactly(self._stream, l - self.deltaheadersize)
196 delta = readexactly(self._stream, l - self.deltaheadersize)
197 node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
197 node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
198 return (node, p1, p2, cs, deltabase, delta, flags)
198 return (node, p1, p2, cs, deltabase, delta, flags)
199
199
200 def getchunks(self):
200 def getchunks(self):
201 """returns all the chunks contains in the bundle
201 """returns all the chunks contains in the bundle
202
202
203 Used when you need to forward the binary stream to a file or another
203 Used when you need to forward the binary stream to a file or another
204 network API. To do so, it parse the changegroup data, otherwise it will
204 network API. To do so, it parse the changegroup data, otherwise it will
205 block in case of sshrepo because it don't know the end of the stream.
205 block in case of sshrepo because it don't know the end of the stream.
206 """
206 """
207 # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
207 # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
208 # and a list of filelogs. For changegroup 3, we expect 4 parts:
208 # and a list of filelogs. For changegroup 3, we expect 4 parts:
209 # changelog, manifestlog, a list of tree manifestlogs, and a list of
209 # changelog, manifestlog, a list of tree manifestlogs, and a list of
210 # filelogs.
210 # filelogs.
211 #
211 #
212 # Changelog and manifestlog parts are terminated with empty chunks. The
212 # Changelog and manifestlog parts are terminated with empty chunks. The
213 # tree and file parts are a list of entry sections. Each entry section
213 # tree and file parts are a list of entry sections. Each entry section
214 # is a series of chunks terminating in an empty chunk. The list of these
214 # is a series of chunks terminating in an empty chunk. The list of these
215 # entry sections is terminated in yet another empty chunk, so we know
215 # entry sections is terminated in yet another empty chunk, so we know
216 # we've reached the end of the tree/file list when we reach an empty
216 # we've reached the end of the tree/file list when we reach an empty
217 # chunk that was proceeded by no non-empty chunks.
217 # chunk that was proceeded by no non-empty chunks.
218
218
219 parts = 0
219 parts = 0
220 while parts < 2 + self._grouplistcount:
220 while parts < 2 + self._grouplistcount:
221 noentries = True
221 noentries = True
222 while True:
222 while True:
223 chunk = getchunk(self)
223 chunk = getchunk(self)
224 if not chunk:
224 if not chunk:
225 # The first two empty chunks represent the end of the
225 # The first two empty chunks represent the end of the
226 # changelog and the manifestlog portions. The remaining
226 # changelog and the manifestlog portions. The remaining
227 # empty chunks represent either A) the end of individual
227 # empty chunks represent either A) the end of individual
228 # tree or file entries in the file list, or B) the end of
228 # tree or file entries in the file list, or B) the end of
229 # the entire list. It's the end of the entire list if there
229 # the entire list. It's the end of the entire list if there
230 # were no entries (i.e. noentries is True).
230 # were no entries (i.e. noentries is True).
231 if parts < 2:
231 if parts < 2:
232 parts += 1
232 parts += 1
233 elif noentries:
233 elif noentries:
234 parts += 1
234 parts += 1
235 break
235 break
236 noentries = False
236 noentries = False
237 yield chunkheader(len(chunk))
237 yield chunkheader(len(chunk))
238 pos = 0
238 pos = 0
239 while pos < len(chunk):
239 while pos < len(chunk):
240 next = pos + 2**20
240 next = pos + 2**20
241 yield chunk[pos:next]
241 yield chunk[pos:next]
242 pos = next
242 pos = next
243 yield closechunk()
243 yield closechunk()
244
244
245 def _unpackmanifests(self, repo, revmap, trp, prog):
245 def _unpackmanifests(self, repo, revmap, trp, prog):
246 self.callback = prog.increment
246 self.callback = prog.increment
247 # no need to check for empty manifest group here:
247 # no need to check for empty manifest group here:
248 # if the result of the merge of 1 and 2 is the same in 3 and 4,
248 # if the result of the merge of 1 and 2 is the same in 3 and 4,
249 # no new manifest will be created and the manifest group will
249 # no new manifest will be created and the manifest group will
250 # be empty during the pull
250 # be empty during the pull
251 self.manifestheader()
251 self.manifestheader()
252 deltas = self.deltaiter()
252 deltas = self.deltaiter()
253 repo.manifestlog.addgroup(deltas, revmap, trp)
253 repo.manifestlog.addgroup(deltas, revmap, trp)
254 prog.complete()
254 prog.complete()
255 self.callback = None
255 self.callback = None
256
256
257 def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
257 def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
258 expectedtotal=None):
258 expectedtotal=None):
259 """Add the changegroup returned by source.read() to this repo.
259 """Add the changegroup returned by source.read() to this repo.
260 srctype is a string like 'push', 'pull', or 'unbundle'. url is
260 srctype is a string like 'push', 'pull', or 'unbundle'. url is
261 the URL of the repo where this changegroup is coming from.
261 the URL of the repo where this changegroup is coming from.
262
262
263 Return an integer summarizing the change to this repo:
263 Return an integer summarizing the change to this repo:
264 - nothing changed or no source: 0
264 - nothing changed or no source: 0
265 - more heads than before: 1+added heads (2..n)
265 - more heads than before: 1+added heads (2..n)
266 - fewer heads than before: -1-removed heads (-2..-n)
266 - fewer heads than before: -1-removed heads (-2..-n)
267 - number of heads stays the same: 1
267 - number of heads stays the same: 1
268 """
268 """
269 repo = repo.unfiltered()
269 repo = repo.unfiltered()
270 def csmap(x):
270 def csmap(x):
271 repo.ui.debug("add changeset %s\n" % short(x))
271 repo.ui.debug("add changeset %s\n" % short(x))
272 return len(cl)
272 return len(cl)
273
273
274 def revmap(x):
274 def revmap(x):
275 return cl.rev(x)
275 return cl.rev(x)
276
276
277 changesets = files = revisions = 0
277 changesets = files = revisions = 0
278
278
279 try:
279 try:
280 # The transaction may already carry source information. In this
280 # The transaction may already carry source information. In this
281 # case we use the top level data. We overwrite the argument
281 # case we use the top level data. We overwrite the argument
282 # because we need to use the top level value (if they exist)
282 # because we need to use the top level value (if they exist)
283 # in this function.
283 # in this function.
284 srctype = tr.hookargs.setdefault('source', srctype)
284 srctype = tr.hookargs.setdefault('source', srctype)
285 url = tr.hookargs.setdefault('url', url)
285 url = tr.hookargs.setdefault('url', url)
286 repo.hook('prechangegroup',
286 repo.hook('prechangegroup',
287 throw=True, **pycompat.strkwargs(tr.hookargs))
287 throw=True, **pycompat.strkwargs(tr.hookargs))
288
288
289 # write changelog data to temp files so concurrent readers
289 # write changelog data to temp files so concurrent readers
290 # will not see an inconsistent view
290 # will not see an inconsistent view
291 cl = repo.changelog
291 cl = repo.changelog
292 cl.delayupdate(tr)
292 cl.delayupdate(tr)
293 oldheads = set(cl.heads())
293 oldheads = set(cl.heads())
294
294
295 trp = weakref.proxy(tr)
295 trp = weakref.proxy(tr)
296 # pull off the changeset group
296 # pull off the changeset group
297 repo.ui.status(_("adding changesets\n"))
297 repo.ui.status(_("adding changesets\n"))
298 clstart = len(cl)
298 clstart = len(cl)
299 progress = repo.ui.makeprogress(_('changesets'), unit=_('chunks'),
299 progress = repo.ui.makeprogress(_('changesets'), unit=_('chunks'),
300 total=expectedtotal)
300 total=expectedtotal)
301 self.callback = progress.increment
301 self.callback = progress.increment
302
302
303 efiles = set()
303 efiles = set()
304 def onchangelog(cl, node):
304 def onchangelog(cl, node):
305 efiles.update(cl.readfiles(node))
305 efiles.update(cl.readfiles(node))
306
306
307 self.changelogheader()
307 self.changelogheader()
308 deltas = self.deltaiter()
308 deltas = self.deltaiter()
309 cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog)
309 cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog)
310 efiles = len(efiles)
310 efiles = len(efiles)
311
311
312 if not cgnodes:
312 if not cgnodes:
313 repo.ui.develwarn('applied empty changegroup',
313 repo.ui.develwarn('applied empty changegroup',
314 config='warn-empty-changegroup')
314 config='warn-empty-changegroup')
315 clend = len(cl)
315 clend = len(cl)
316 changesets = clend - clstart
316 changesets = clend - clstart
317 progress.complete()
317 progress.complete()
318 self.callback = None
318 self.callback = None
319
319
320 # pull off the manifest group
320 # pull off the manifest group
321 repo.ui.status(_("adding manifests\n"))
321 repo.ui.status(_("adding manifests\n"))
322 # We know that we'll never have more manifests than we had
322 # We know that we'll never have more manifests than we had
323 # changesets.
323 # changesets.
324 progress = repo.ui.makeprogress(_('manifests'), unit=_('chunks'),
324 progress = repo.ui.makeprogress(_('manifests'), unit=_('chunks'),
325 total=changesets)
325 total=changesets)
326 self._unpackmanifests(repo, revmap, trp, progress)
326 self._unpackmanifests(repo, revmap, trp, progress)
327
327
328 needfiles = {}
328 needfiles = {}
329 if repo.ui.configbool('server', 'validate'):
329 if repo.ui.configbool('server', 'validate'):
330 cl = repo.changelog
330 cl = repo.changelog
331 ml = repo.manifestlog
331 ml = repo.manifestlog
332 # validate incoming csets have their manifests
332 # validate incoming csets have their manifests
333 for cset in pycompat.xrange(clstart, clend):
333 for cset in pycompat.xrange(clstart, clend):
334 mfnode = cl.changelogrevision(cset).manifest
334 mfnode = cl.changelogrevision(cset).manifest
335 mfest = ml[mfnode].readdelta()
335 mfest = ml[mfnode].readdelta()
336 # store file cgnodes we must see
336 # store file cgnodes we must see
337 for f, n in mfest.iteritems():
337 for f, n in mfest.iteritems():
338 needfiles.setdefault(f, set()).add(n)
338 needfiles.setdefault(f, set()).add(n)
339
339
340 # process the files
340 # process the files
341 repo.ui.status(_("adding file changes\n"))
341 repo.ui.status(_("adding file changes\n"))
342 newrevs, newfiles = _addchangegroupfiles(
342 newrevs, newfiles = _addchangegroupfiles(
343 repo, self, revmap, trp, efiles, needfiles)
343 repo, self, revmap, trp, efiles, needfiles)
344 revisions += newrevs
344 revisions += newrevs
345 files += newfiles
345 files += newfiles
346
346
347 deltaheads = 0
347 deltaheads = 0
348 if oldheads:
348 if oldheads:
349 heads = cl.heads()
349 heads = cl.heads()
350 deltaheads = len(heads) - len(oldheads)
350 deltaheads = len(heads) - len(oldheads)
351 for h in heads:
351 for h in heads:
352 if h not in oldheads and repo[h].closesbranch():
352 if h not in oldheads and repo[h].closesbranch():
353 deltaheads -= 1
353 deltaheads -= 1
354 htext = ""
354 htext = ""
355 if deltaheads:
355 if deltaheads:
356 htext = _(" (%+d heads)") % deltaheads
356 htext = _(" (%+d heads)") % deltaheads
357
357
358 repo.ui.status(_("added %d changesets"
358 repo.ui.status(_("added %d changesets"
359 " with %d changes to %d files%s\n")
359 " with %d changes to %d files%s\n")
360 % (changesets, revisions, files, htext))
360 % (changesets, revisions, files, htext))
361 repo.invalidatevolatilesets()
361 repo.invalidatevolatilesets()
362
362
363 if changesets > 0:
363 if changesets > 0:
364 if 'node' not in tr.hookargs:
364 if 'node' not in tr.hookargs:
365 tr.hookargs['node'] = hex(cl.node(clstart))
365 tr.hookargs['node'] = hex(cl.node(clstart))
366 tr.hookargs['node_last'] = hex(cl.node(clend - 1))
366 tr.hookargs['node_last'] = hex(cl.node(clend - 1))
367 hookargs = dict(tr.hookargs)
367 hookargs = dict(tr.hookargs)
368 else:
368 else:
369 hookargs = dict(tr.hookargs)
369 hookargs = dict(tr.hookargs)
370 hookargs['node'] = hex(cl.node(clstart))
370 hookargs['node'] = hex(cl.node(clstart))
371 hookargs['node_last'] = hex(cl.node(clend - 1))
371 hookargs['node_last'] = hex(cl.node(clend - 1))
372 repo.hook('pretxnchangegroup',
372 repo.hook('pretxnchangegroup',
373 throw=True, **pycompat.strkwargs(hookargs))
373 throw=True, **pycompat.strkwargs(hookargs))
374
374
375 added = [cl.node(r) for r in pycompat.xrange(clstart, clend)]
375 added = [cl.node(r) for r in pycompat.xrange(clstart, clend)]
376 phaseall = None
376 phaseall = None
377 if srctype in ('push', 'serve'):
377 if srctype in ('push', 'serve'):
378 # Old servers can not push the boundary themselves.
378 # Old servers can not push the boundary themselves.
379 # New servers won't push the boundary if changeset already
379 # New servers won't push the boundary if changeset already
380 # exists locally as secret
380 # exists locally as secret
381 #
381 #
382 # We should not use added here but the list of all change in
382 # We should not use added here but the list of all change in
383 # the bundle
383 # the bundle
384 if repo.publishing():
384 if repo.publishing():
385 targetphase = phaseall = phases.public
385 targetphase = phaseall = phases.public
386 else:
386 else:
387 # closer target phase computation
387 # closer target phase computation
388
388
389 # Those changesets have been pushed from the
389 # Those changesets have been pushed from the
390 # outside, their phases are going to be pushed
390 # outside, their phases are going to be pushed
391 # alongside. Therefor `targetphase` is
391 # alongside. Therefor `targetphase` is
392 # ignored.
392 # ignored.
393 targetphase = phaseall = phases.draft
393 targetphase = phaseall = phases.draft
394 if added:
394 if added:
395 phases.registernew(repo, tr, targetphase, added)
395 phases.registernew(repo, tr, targetphase, added)
396 if phaseall is not None:
396 if phaseall is not None:
397 phases.advanceboundary(repo, tr, phaseall, cgnodes)
397 phases.advanceboundary(repo, tr, phaseall, cgnodes)
398
398
399 if changesets > 0:
399 if changesets > 0:
400
400
401 def runhooks():
401 def runhooks():
402 # These hooks run when the lock releases, not when the
402 # These hooks run when the lock releases, not when the
403 # transaction closes. So it's possible for the changelog
403 # transaction closes. So it's possible for the changelog
404 # to have changed since we last saw it.
404 # to have changed since we last saw it.
405 if clstart >= len(repo):
405 if clstart >= len(repo):
406 return
406 return
407
407
408 repo.hook("changegroup", **pycompat.strkwargs(hookargs))
408 repo.hook("changegroup", **pycompat.strkwargs(hookargs))
409
409
410 for n in added:
410 for n in added:
411 args = hookargs.copy()
411 args = hookargs.copy()
412 args['node'] = hex(n)
412 args['node'] = hex(n)
413 del args['node_last']
413 del args['node_last']
414 repo.hook("incoming", **pycompat.strkwargs(args))
414 repo.hook("incoming", **pycompat.strkwargs(args))
415
415
416 newheads = [h for h in repo.heads()
416 newheads = [h for h in repo.heads()
417 if h not in oldheads]
417 if h not in oldheads]
418 repo.ui.log("incoming",
418 repo.ui.log("incoming",
419 "%d incoming changes - new heads: %s\n",
419 "%d incoming changes - new heads: %s\n",
420 len(added),
420 len(added),
421 ', '.join([hex(c[:6]) for c in newheads]))
421 ', '.join([hex(c[:6]) for c in newheads]))
422
422
423 tr.addpostclose('changegroup-runhooks-%020i' % clstart,
423 tr.addpostclose('changegroup-runhooks-%020i' % clstart,
424 lambda tr: repo._afterlock(runhooks))
424 lambda tr: repo._afterlock(runhooks))
425 finally:
425 finally:
426 repo.ui.flush()
426 repo.ui.flush()
427 # never return 0 here:
427 # never return 0 here:
428 if deltaheads < 0:
428 if deltaheads < 0:
429 ret = deltaheads - 1
429 ret = deltaheads - 1
430 else:
430 else:
431 ret = deltaheads + 1
431 ret = deltaheads + 1
432 return ret
432 return ret
433
433
434 def deltaiter(self):
434 def deltaiter(self):
435 """
435 """
436 returns an iterator of the deltas in this changegroup
436 returns an iterator of the deltas in this changegroup
437
437
438 Useful for passing to the underlying storage system to be stored.
438 Useful for passing to the underlying storage system to be stored.
439 """
439 """
440 chain = None
440 chain = None
441 for chunkdata in iter(lambda: self.deltachunk(chain), {}):
441 for chunkdata in iter(lambda: self.deltachunk(chain), {}):
442 # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
442 # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
443 yield chunkdata
443 yield chunkdata
444 chain = chunkdata[0]
444 chain = chunkdata[0]
445
445
446 class cg2unpacker(cg1unpacker):
446 class cg2unpacker(cg1unpacker):
447 """Unpacker for cg2 streams.
447 """Unpacker for cg2 streams.
448
448
449 cg2 streams add support for generaldelta, so the delta header
449 cg2 streams add support for generaldelta, so the delta header
450 format is slightly different. All other features about the data
450 format is slightly different. All other features about the data
451 remain the same.
451 remain the same.
452 """
452 """
453 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
453 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
454 deltaheadersize = deltaheader.size
454 deltaheadersize = deltaheader.size
455 version = '02'
455 version = '02'
456
456
457 def _deltaheader(self, headertuple, prevnode):
457 def _deltaheader(self, headertuple, prevnode):
458 node, p1, p2, deltabase, cs = headertuple
458 node, p1, p2, deltabase, cs = headertuple
459 flags = 0
459 flags = 0
460 return node, p1, p2, deltabase, cs, flags
460 return node, p1, p2, deltabase, cs, flags
461
461
462 class cg3unpacker(cg2unpacker):
462 class cg3unpacker(cg2unpacker):
463 """Unpacker for cg3 streams.
463 """Unpacker for cg3 streams.
464
464
465 cg3 streams add support for exchanging treemanifests and revlog
465 cg3 streams add support for exchanging treemanifests and revlog
466 flags. It adds the revlog flags to the delta header and an empty chunk
466 flags. It adds the revlog flags to the delta header and an empty chunk
467 separating manifests and files.
467 separating manifests and files.
468 """
468 """
469 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
469 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
470 deltaheadersize = deltaheader.size
470 deltaheadersize = deltaheader.size
471 version = '03'
471 version = '03'
472 _grouplistcount = 2 # One list of manifests and one list of files
472 _grouplistcount = 2 # One list of manifests and one list of files
473
473
474 def _deltaheader(self, headertuple, prevnode):
474 def _deltaheader(self, headertuple, prevnode):
475 node, p1, p2, deltabase, cs, flags = headertuple
475 node, p1, p2, deltabase, cs, flags = headertuple
476 return node, p1, p2, deltabase, cs, flags
476 return node, p1, p2, deltabase, cs, flags
477
477
478 def _unpackmanifests(self, repo, revmap, trp, prog):
478 def _unpackmanifests(self, repo, revmap, trp, prog):
479 super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
479 super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
480 for chunkdata in iter(self.filelogheader, {}):
480 for chunkdata in iter(self.filelogheader, {}):
481 # If we get here, there are directory manifests in the changegroup
481 # If we get here, there are directory manifests in the changegroup
482 d = chunkdata["filename"]
482 d = chunkdata["filename"]
483 repo.ui.debug("adding %s revisions\n" % d)
483 repo.ui.debug("adding %s revisions\n" % d)
484 dirlog = repo.manifestlog._revlog.dirlog(d)
484 dirlog = repo.manifestlog._revlog.dirlog(d)
485 deltas = self.deltaiter()
485 deltas = self.deltaiter()
486 if not dirlog.addgroup(deltas, revmap, trp):
486 if not dirlog.addgroup(deltas, revmap, trp):
487 raise error.Abort(_("received dir revlog group is empty"))
487 raise error.Abort(_("received dir revlog group is empty"))
488
488
489 class headerlessfixup(object):
489 class headerlessfixup(object):
490 def __init__(self, fh, h):
490 def __init__(self, fh, h):
491 self._h = h
491 self._h = h
492 self._fh = fh
492 self._fh = fh
493 def read(self, n):
493 def read(self, n):
494 if self._h:
494 if self._h:
495 d, self._h = self._h[:n], self._h[n:]
495 d, self._h = self._h[:n], self._h[n:]
496 if len(d) < n:
496 if len(d) < n:
497 d += readexactly(self._fh, n - len(d))
497 d += readexactly(self._fh, n - len(d))
498 return d
498 return d
499 return readexactly(self._fh, n)
499 return readexactly(self._fh, n)
500
500
501 @attr.s(slots=True, frozen=True)
501 @attr.s(slots=True, frozen=True)
502 class revisiondelta(object):
502 class revisiondelta(object):
503 """Describes a delta entry in a changegroup.
503 """Describes a delta entry in a changegroup.
504
504
505 Captured data is sufficient to serialize the delta into multiple
505 Captured data is sufficient to serialize the delta into multiple
506 formats.
506 formats.
507 """
507 """
508 # 20 byte node of this revision.
508 # 20 byte node of this revision.
509 node = attr.ib()
509 node = attr.ib()
510 # 20 byte nodes of parent revisions.
510 # 20 byte nodes of parent revisions.
511 p1node = attr.ib()
511 p1node = attr.ib()
512 p2node = attr.ib()
512 p2node = attr.ib()
513 # 20 byte node of node this delta is against.
513 # 20 byte node of node this delta is against.
514 basenode = attr.ib()
514 basenode = attr.ib()
515 # 20 byte node of changeset revision this delta is associated with.
515 # 20 byte node of changeset revision this delta is associated with.
516 linknode = attr.ib()
516 linknode = attr.ib()
517 # 2 bytes of flags to apply to revision data.
517 # 2 bytes of flags to apply to revision data.
518 flags = attr.ib()
518 flags = attr.ib()
519 # Iterable of chunks holding raw delta data.
519 # Iterable of chunks holding raw delta data.
520 deltachunks = attr.ib()
520 deltachunks = attr.ib()
521
521
522 class cg1packer(object):
522 class cg1packer(object):
523 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
523 def __init__(self, repo, filematcher, version, builddeltaheader,
524
524 bundlecaps=None):
525 def __init__(self, repo, filematcher, version, bundlecaps=None):
526 """Given a source repo, construct a bundler.
525 """Given a source repo, construct a bundler.
527
526
528 filematcher is a matcher that matches on files to include in the
527 filematcher is a matcher that matches on files to include in the
529 changegroup. Used to facilitate sparse changegroups.
528 changegroup. Used to facilitate sparse changegroups.
530
529
530 builddeltaheader is a callable that constructs the header for a group
531 delta.
532
531 bundlecaps is optional and can be used to specify the set of
533 bundlecaps is optional and can be used to specify the set of
532 capabilities which can be used to build the bundle. While bundlecaps is
534 capabilities which can be used to build the bundle. While bundlecaps is
533 unused in core Mercurial, extensions rely on this feature to communicate
535 unused in core Mercurial, extensions rely on this feature to communicate
534 capabilities to customize the changegroup packer.
536 capabilities to customize the changegroup packer.
535 """
537 """
536 assert filematcher
538 assert filematcher
537 self._filematcher = filematcher
539 self._filematcher = filematcher
538
540
539 self.version = version
541 self.version = version
542 self._builddeltaheader = builddeltaheader
540
543
541 # Set of capabilities we can use to build the bundle.
544 # Set of capabilities we can use to build the bundle.
542 if bundlecaps is None:
545 if bundlecaps is None:
543 bundlecaps = set()
546 bundlecaps = set()
544 self._bundlecaps = bundlecaps
547 self._bundlecaps = bundlecaps
545 # experimental config: bundle.reorder
548 # experimental config: bundle.reorder
546 reorder = repo.ui.config('bundle', 'reorder')
549 reorder = repo.ui.config('bundle', 'reorder')
547 if reorder == 'auto':
550 if reorder == 'auto':
548 reorder = None
551 reorder = None
549 else:
552 else:
550 reorder = stringutil.parsebool(reorder)
553 reorder = stringutil.parsebool(reorder)
551 self._repo = repo
554 self._repo = repo
552 self._reorder = reorder
555 self._reorder = reorder
553 if self._repo.ui.verbose and not self._repo.ui.debugflag:
556 if self._repo.ui.verbose and not self._repo.ui.debugflag:
554 self._verbosenote = self._repo.ui.note
557 self._verbosenote = self._repo.ui.note
555 else:
558 else:
556 self._verbosenote = lambda s: None
559 self._verbosenote = lambda s: None
557
560
558 def close(self):
561 def close(self):
559 # Ellipses serving mode.
562 # Ellipses serving mode.
560 getattr(self, 'clrev_to_localrev', {}).clear()
563 getattr(self, 'clrev_to_localrev', {}).clear()
561 if getattr(self, 'next_clrev_to_localrev', {}):
564 if getattr(self, 'next_clrev_to_localrev', {}):
562 self.clrev_to_localrev = self.next_clrev_to_localrev
565 self.clrev_to_localrev = self.next_clrev_to_localrev
563 del self.next_clrev_to_localrev
566 del self.next_clrev_to_localrev
564 self.changelog_done = True
567 self.changelog_done = True
565
568
566 return closechunk()
569 return closechunk()
567
570
568 def fileheader(self, fname):
571 def fileheader(self, fname):
569 return chunkheader(len(fname)) + fname
572 return chunkheader(len(fname)) + fname
570
573
571 # Extracted both for clarity and for overriding in extensions.
574 # Extracted both for clarity and for overriding in extensions.
572 def _sortgroup(self, store, nodelist, lookup):
575 def _sortgroup(self, store, nodelist, lookup):
573 """Sort nodes for change group and turn them into revnums."""
576 """Sort nodes for change group and turn them into revnums."""
574 # Ellipses serving mode.
577 # Ellipses serving mode.
575 #
578 #
576 # In a perfect world, we'd generate better ellipsis-ified graphs
579 # In a perfect world, we'd generate better ellipsis-ified graphs
577 # for non-changelog revlogs. In practice, we haven't started doing
580 # for non-changelog revlogs. In practice, we haven't started doing
578 # that yet, so the resulting DAGs for the manifestlog and filelogs
581 # that yet, so the resulting DAGs for the manifestlog and filelogs
579 # are actually full of bogus parentage on all the ellipsis
582 # are actually full of bogus parentage on all the ellipsis
580 # nodes. This has the side effect that, while the contents are
583 # nodes. This has the side effect that, while the contents are
581 # correct, the individual DAGs might be completely out of whack in
584 # correct, the individual DAGs might be completely out of whack in
582 # a case like 882681bc3166 and its ancestors (back about 10
585 # a case like 882681bc3166 and its ancestors (back about 10
583 # revisions or so) in the main hg repo.
586 # revisions or so) in the main hg repo.
584 #
587 #
585 # The one invariant we *know* holds is that the new (potentially
588 # The one invariant we *know* holds is that the new (potentially
586 # bogus) DAG shape will be valid if we order the nodes in the
589 # bogus) DAG shape will be valid if we order the nodes in the
587 # order that they're introduced in dramatis personae by the
590 # order that they're introduced in dramatis personae by the
588 # changelog, so what we do is we sort the non-changelog histories
591 # changelog, so what we do is we sort the non-changelog histories
589 # by the order in which they are used by the changelog.
592 # by the order in which they are used by the changelog.
590 if util.safehasattr(self, 'full_nodes') and self.clnode_to_rev:
593 if util.safehasattr(self, 'full_nodes') and self.clnode_to_rev:
591 key = lambda n: self.clnode_to_rev[lookup(n)]
594 key = lambda n: self.clnode_to_rev[lookup(n)]
592 return [store.rev(n) for n in sorted(nodelist, key=key)]
595 return [store.rev(n) for n in sorted(nodelist, key=key)]
593
596
594 # for generaldelta revlogs, we linearize the revs; this will both be
597 # for generaldelta revlogs, we linearize the revs; this will both be
595 # much quicker and generate a much smaller bundle
598 # much quicker and generate a much smaller bundle
596 if (store._generaldelta and self._reorder is None) or self._reorder:
599 if (store._generaldelta and self._reorder is None) or self._reorder:
597 dag = dagutil.revlogdag(store)
600 dag = dagutil.revlogdag(store)
598 return dag.linearize(set(store.rev(n) for n in nodelist))
601 return dag.linearize(set(store.rev(n) for n in nodelist))
599 else:
602 else:
600 return sorted([store.rev(n) for n in nodelist])
603 return sorted([store.rev(n) for n in nodelist])
601
604
602 def group(self, nodelist, store, lookup, units=None):
605 def group(self, nodelist, store, lookup, units=None):
603 """Calculate a delta group, yielding a sequence of changegroup chunks
606 """Calculate a delta group, yielding a sequence of changegroup chunks
604 (strings).
607 (strings).
605
608
606 Given a list of changeset revs, return a set of deltas and
609 Given a list of changeset revs, return a set of deltas and
607 metadata corresponding to nodes. The first delta is
610 metadata corresponding to nodes. The first delta is
608 first parent(nodelist[0]) -> nodelist[0], the receiver is
611 first parent(nodelist[0]) -> nodelist[0], the receiver is
609 guaranteed to have this parent as it has all history before
612 guaranteed to have this parent as it has all history before
610 these changesets. In the case firstparent is nullrev the
613 these changesets. In the case firstparent is nullrev the
611 changegroup starts with a full revision.
614 changegroup starts with a full revision.
612
615
613 If units is not None, progress detail will be generated, units specifies
616 If units is not None, progress detail will be generated, units specifies
614 the type of revlog that is touched (changelog, manifest, etc.).
617 the type of revlog that is touched (changelog, manifest, etc.).
615 """
618 """
616 # if we don't have any revisions touched by these changesets, bail
619 # if we don't have any revisions touched by these changesets, bail
617 if len(nodelist) == 0:
620 if len(nodelist) == 0:
618 yield self.close()
621 yield self.close()
619 return
622 return
620
623
621 revs = self._sortgroup(store, nodelist, lookup)
624 revs = self._sortgroup(store, nodelist, lookup)
622
625
623 # add the parent of the first rev
626 # add the parent of the first rev
624 p = store.parentrevs(revs[0])[0]
627 p = store.parentrevs(revs[0])[0]
625 revs.insert(0, p)
628 revs.insert(0, p)
626
629
627 # build deltas
630 # build deltas
628 progress = None
631 progress = None
629 if units is not None:
632 if units is not None:
630 progress = self._repo.ui.makeprogress(_('bundling'), unit=units,
633 progress = self._repo.ui.makeprogress(_('bundling'), unit=units,
631 total=(len(revs) - 1))
634 total=(len(revs) - 1))
632 for r in pycompat.xrange(len(revs) - 1):
635 for r in pycompat.xrange(len(revs) - 1):
633 if progress:
636 if progress:
634 progress.update(r + 1)
637 progress.update(r + 1)
635 prev, curr = revs[r], revs[r + 1]
638 prev, curr = revs[r], revs[r + 1]
636 linknode = lookup(store.node(curr))
639 linknode = lookup(store.node(curr))
637 for c in self.revchunk(store, curr, prev, linknode):
640 for c in self.revchunk(store, curr, prev, linknode):
638 yield c
641 yield c
639
642
640 if progress:
643 if progress:
641 progress.complete()
644 progress.complete()
642 yield self.close()
645 yield self.close()
643
646
644 # filter any nodes that claim to be part of the known set
647 # filter any nodes that claim to be part of the known set
645 def prune(self, store, missing, commonrevs):
648 def prune(self, store, missing, commonrevs):
646 # TODO this violates storage abstraction for manifests.
649 # TODO this violates storage abstraction for manifests.
647 if isinstance(store, manifest.manifestrevlog):
650 if isinstance(store, manifest.manifestrevlog):
648 if not self._filematcher.visitdir(store._dir[:-1] or '.'):
651 if not self._filematcher.visitdir(store._dir[:-1] or '.'):
649 return []
652 return []
650
653
651 rr, rl = store.rev, store.linkrev
654 rr, rl = store.rev, store.linkrev
652 return [n for n in missing if rl(rr(n)) not in commonrevs]
655 return [n for n in missing if rl(rr(n)) not in commonrevs]
653
656
654 def _packmanifests(self, dir, mfnodes, lookuplinknode):
657 def _packmanifests(self, dir, mfnodes, lookuplinknode):
655 """Pack flat manifests into a changegroup stream."""
658 """Pack flat manifests into a changegroup stream."""
656 assert not dir
659 assert not dir
657 for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
660 for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
658 lookuplinknode, units=_('manifests')):
661 lookuplinknode, units=_('manifests')):
659 yield chunk
662 yield chunk
660
663
661 def _manifestsdone(self):
664 def _manifestsdone(self):
662 return ''
665 return ''
663
666
664 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
667 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
665 '''yield a sequence of changegroup chunks (strings)'''
668 '''yield a sequence of changegroup chunks (strings)'''
666 repo = self._repo
669 repo = self._repo
667 cl = repo.changelog
670 cl = repo.changelog
668
671
669 clrevorder = {}
672 clrevorder = {}
670 mfs = {} # needed manifests
673 mfs = {} # needed manifests
671 fnodes = {} # needed file nodes
674 fnodes = {} # needed file nodes
672 mfl = repo.manifestlog
675 mfl = repo.manifestlog
673 # TODO violates storage abstraction.
676 # TODO violates storage abstraction.
674 mfrevlog = mfl._revlog
677 mfrevlog = mfl._revlog
675 changedfiles = set()
678 changedfiles = set()
676
679
677 ellipsesmode = util.safehasattr(self, 'full_nodes')
680 ellipsesmode = util.safehasattr(self, 'full_nodes')
678
681
679 # Callback for the changelog, used to collect changed files and
682 # Callback for the changelog, used to collect changed files and
680 # manifest nodes.
683 # manifest nodes.
681 # Returns the linkrev node (identity in the changelog case).
684 # Returns the linkrev node (identity in the changelog case).
682 def lookupcl(x):
685 def lookupcl(x):
683 c = cl.read(x)
686 c = cl.read(x)
684 clrevorder[x] = len(clrevorder)
687 clrevorder[x] = len(clrevorder)
685
688
686 if ellipsesmode:
689 if ellipsesmode:
687 # Only update mfs if x is going to be sent. Otherwise we
690 # Only update mfs if x is going to be sent. Otherwise we
688 # end up with bogus linkrevs specified for manifests and
691 # end up with bogus linkrevs specified for manifests and
689 # we skip some manifest nodes that we should otherwise
692 # we skip some manifest nodes that we should otherwise
690 # have sent.
693 # have sent.
691 if (x in self.full_nodes
694 if (x in self.full_nodes
692 or cl.rev(x) in self.precomputed_ellipsis):
695 or cl.rev(x) in self.precomputed_ellipsis):
693 n = c[0]
696 n = c[0]
694 # Record the first changeset introducing this manifest
697 # Record the first changeset introducing this manifest
695 # version.
698 # version.
696 mfs.setdefault(n, x)
699 mfs.setdefault(n, x)
697 # Set this narrow-specific dict so we have the lowest
700 # Set this narrow-specific dict so we have the lowest
698 # manifest revnum to look up for this cl revnum. (Part of
701 # manifest revnum to look up for this cl revnum. (Part of
699 # mapping changelog ellipsis parents to manifest ellipsis
702 # mapping changelog ellipsis parents to manifest ellipsis
700 # parents)
703 # parents)
701 self.next_clrev_to_localrev.setdefault(cl.rev(x),
704 self.next_clrev_to_localrev.setdefault(cl.rev(x),
702 mfrevlog.rev(n))
705 mfrevlog.rev(n))
703 # We can't trust the changed files list in the changeset if the
706 # We can't trust the changed files list in the changeset if the
704 # client requested a shallow clone.
707 # client requested a shallow clone.
705 if self.is_shallow:
708 if self.is_shallow:
706 changedfiles.update(mfl[c[0]].read().keys())
709 changedfiles.update(mfl[c[0]].read().keys())
707 else:
710 else:
708 changedfiles.update(c[3])
711 changedfiles.update(c[3])
709 else:
712 else:
710
713
711 n = c[0]
714 n = c[0]
712 # record the first changeset introducing this manifest version
715 # record the first changeset introducing this manifest version
713 mfs.setdefault(n, x)
716 mfs.setdefault(n, x)
714 # Record a complete list of potentially-changed files in
717 # Record a complete list of potentially-changed files in
715 # this manifest.
718 # this manifest.
716 changedfiles.update(c[3])
719 changedfiles.update(c[3])
717
720
718 return x
721 return x
719
722
720 self._verbosenote(_('uncompressed size of bundle content:\n'))
723 self._verbosenote(_('uncompressed size of bundle content:\n'))
721 size = 0
724 size = 0
722 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
725 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
723 size += len(chunk)
726 size += len(chunk)
724 yield chunk
727 yield chunk
725 self._verbosenote(_('%8.i (changelog)\n') % size)
728 self._verbosenote(_('%8.i (changelog)\n') % size)
726
729
727 # We need to make sure that the linkrev in the changegroup refers to
730 # We need to make sure that the linkrev in the changegroup refers to
728 # the first changeset that introduced the manifest or file revision.
731 # the first changeset that introduced the manifest or file revision.
729 # The fastpath is usually safer than the slowpath, because the filelogs
732 # The fastpath is usually safer than the slowpath, because the filelogs
730 # are walked in revlog order.
733 # are walked in revlog order.
731 #
734 #
732 # When taking the slowpath with reorder=None and the manifest revlog
735 # When taking the slowpath with reorder=None and the manifest revlog
733 # uses generaldelta, the manifest may be walked in the "wrong" order.
736 # uses generaldelta, the manifest may be walked in the "wrong" order.
734 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
737 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
735 # cc0ff93d0c0c).
738 # cc0ff93d0c0c).
736 #
739 #
737 # When taking the fastpath, we are only vulnerable to reordering
740 # When taking the fastpath, we are only vulnerable to reordering
738 # of the changelog itself. The changelog never uses generaldelta, so
741 # of the changelog itself. The changelog never uses generaldelta, so
739 # it is only reordered when reorder=True. To handle this case, we
742 # it is only reordered when reorder=True. To handle this case, we
740 # simply take the slowpath, which already has the 'clrevorder' logic.
743 # simply take the slowpath, which already has the 'clrevorder' logic.
741 # This was also fixed in cc0ff93d0c0c.
744 # This was also fixed in cc0ff93d0c0c.
742 fastpathlinkrev = fastpathlinkrev and not self._reorder
745 fastpathlinkrev = fastpathlinkrev and not self._reorder
743 # Treemanifests don't work correctly with fastpathlinkrev
746 # Treemanifests don't work correctly with fastpathlinkrev
744 # either, because we don't discover which directory nodes to
747 # either, because we don't discover which directory nodes to
745 # send along with files. This could probably be fixed.
748 # send along with files. This could probably be fixed.
746 fastpathlinkrev = fastpathlinkrev and (
749 fastpathlinkrev = fastpathlinkrev and (
747 'treemanifest' not in repo.requirements)
750 'treemanifest' not in repo.requirements)
748
751
749 for chunk in self.generatemanifests(commonrevs, clrevorder,
752 for chunk in self.generatemanifests(commonrevs, clrevorder,
750 fastpathlinkrev, mfs, fnodes, source):
753 fastpathlinkrev, mfs, fnodes, source):
751 yield chunk
754 yield chunk
752
755
753 if ellipsesmode:
756 if ellipsesmode:
754 mfdicts = None
757 mfdicts = None
755 if self.is_shallow:
758 if self.is_shallow:
756 mfdicts = [(self._repo.manifestlog[n].read(), lr)
759 mfdicts = [(self._repo.manifestlog[n].read(), lr)
757 for (n, lr) in mfs.iteritems()]
760 for (n, lr) in mfs.iteritems()]
758
761
759 mfs.clear()
762 mfs.clear()
760 clrevs = set(cl.rev(x) for x in clnodes)
763 clrevs = set(cl.rev(x) for x in clnodes)
761
764
762 if not fastpathlinkrev:
765 if not fastpathlinkrev:
763 def linknodes(unused, fname):
766 def linknodes(unused, fname):
764 return fnodes.get(fname, {})
767 return fnodes.get(fname, {})
765 else:
768 else:
766 cln = cl.node
769 cln = cl.node
767 def linknodes(filerevlog, fname):
770 def linknodes(filerevlog, fname):
768 llr = filerevlog.linkrev
771 llr = filerevlog.linkrev
769 fln = filerevlog.node
772 fln = filerevlog.node
770 revs = ((r, llr(r)) for r in filerevlog)
773 revs = ((r, llr(r)) for r in filerevlog)
771 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
774 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
772
775
773 if ellipsesmode:
776 if ellipsesmode:
774 # We need to pass the mfdicts variable down into
777 # We need to pass the mfdicts variable down into
775 # generatefiles(), but more than one command might have
778 # generatefiles(), but more than one command might have
776 # wrapped generatefiles so we can't modify the function
779 # wrapped generatefiles so we can't modify the function
777 # signature. Instead, we pass the data to ourselves using an
780 # signature. Instead, we pass the data to ourselves using an
778 # instance attribute. I'm sorry.
781 # instance attribute. I'm sorry.
779 self._mfdicts = mfdicts
782 self._mfdicts = mfdicts
780
783
781 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
784 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
782 source):
785 source):
783 yield chunk
786 yield chunk
784
787
785 yield self.close()
788 yield self.close()
786
789
787 if clnodes:
790 if clnodes:
788 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
791 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
789
792
790 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
793 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
791 fnodes, source):
794 fnodes, source):
792 """Returns an iterator of changegroup chunks containing manifests.
795 """Returns an iterator of changegroup chunks containing manifests.
793
796
794 `source` is unused here, but is used by extensions like remotefilelog to
797 `source` is unused here, but is used by extensions like remotefilelog to
795 change what is sent based in pulls vs pushes, etc.
798 change what is sent based in pulls vs pushes, etc.
796 """
799 """
797 repo = self._repo
800 repo = self._repo
798 mfl = repo.manifestlog
801 mfl = repo.manifestlog
799 dirlog = mfl._revlog.dirlog
802 dirlog = mfl._revlog.dirlog
800 tmfnodes = {'': mfs}
803 tmfnodes = {'': mfs}
801
804
802 # Callback for the manifest, used to collect linkrevs for filelog
805 # Callback for the manifest, used to collect linkrevs for filelog
803 # revisions.
806 # revisions.
804 # Returns the linkrev node (collected in lookupcl).
807 # Returns the linkrev node (collected in lookupcl).
805 def makelookupmflinknode(dir, nodes):
808 def makelookupmflinknode(dir, nodes):
806 if fastpathlinkrev:
809 if fastpathlinkrev:
807 assert not dir
810 assert not dir
808 return mfs.__getitem__
811 return mfs.__getitem__
809
812
810 def lookupmflinknode(x):
813 def lookupmflinknode(x):
811 """Callback for looking up the linknode for manifests.
814 """Callback for looking up the linknode for manifests.
812
815
813 Returns the linkrev node for the specified manifest.
816 Returns the linkrev node for the specified manifest.
814
817
815 SIDE EFFECT:
818 SIDE EFFECT:
816
819
817 1) fclnodes gets populated with the list of relevant
820 1) fclnodes gets populated with the list of relevant
818 file nodes if we're not using fastpathlinkrev
821 file nodes if we're not using fastpathlinkrev
819 2) When treemanifests are in use, collects treemanifest nodes
822 2) When treemanifests are in use, collects treemanifest nodes
820 to send
823 to send
821
824
822 Note that this means manifests must be completely sent to
825 Note that this means manifests must be completely sent to
823 the client before you can trust the list of files and
826 the client before you can trust the list of files and
824 treemanifests to send.
827 treemanifests to send.
825 """
828 """
826 clnode = nodes[x]
829 clnode = nodes[x]
827 mdata = mfl.get(dir, x).readfast(shallow=True)
830 mdata = mfl.get(dir, x).readfast(shallow=True)
828 for p, n, fl in mdata.iterentries():
831 for p, n, fl in mdata.iterentries():
829 if fl == 't': # subdirectory manifest
832 if fl == 't': # subdirectory manifest
830 subdir = dir + p + '/'
833 subdir = dir + p + '/'
831 tmfclnodes = tmfnodes.setdefault(subdir, {})
834 tmfclnodes = tmfnodes.setdefault(subdir, {})
832 tmfclnode = tmfclnodes.setdefault(n, clnode)
835 tmfclnode = tmfclnodes.setdefault(n, clnode)
833 if clrevorder[clnode] < clrevorder[tmfclnode]:
836 if clrevorder[clnode] < clrevorder[tmfclnode]:
834 tmfclnodes[n] = clnode
837 tmfclnodes[n] = clnode
835 else:
838 else:
836 f = dir + p
839 f = dir + p
837 fclnodes = fnodes.setdefault(f, {})
840 fclnodes = fnodes.setdefault(f, {})
838 fclnode = fclnodes.setdefault(n, clnode)
841 fclnode = fclnodes.setdefault(n, clnode)
839 if clrevorder[clnode] < clrevorder[fclnode]:
842 if clrevorder[clnode] < clrevorder[fclnode]:
840 fclnodes[n] = clnode
843 fclnodes[n] = clnode
841 return clnode
844 return clnode
842 return lookupmflinknode
845 return lookupmflinknode
843
846
        size = 0
        while tmfnodes:
            dir, nodes = tmfnodes.popitem()
            prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
            if not dir or prunednodes:
                for x in self._packmanifests(dir, prunednodes,
                                             makelookupmflinknode(dir, nodes)):
                    size += len(x)
                    yield x
        self._verbosenote(_('%8.i (manifests)\n') % size)
        yield self._manifestsdone()

    # The 'source' parameter is useful for extensions
    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        changedfiles = list(filter(self._filematcher, changedfiles))
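        # Only files matched by self._filematcher are sent; getbundler()
        # has already intersected the caller's matcher with the repo's
        # narrowmatch, so files outside the local store are excluded.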

        if getattr(self, 'is_shallow', False):
            # See comment in generate() for why this sadness is a thing.
            mfdicts = self._mfdicts
            del self._mfdicts
            # In a shallow clone, the linknodes callback needs to also include
            # those file nodes that are in the manifests we sent but weren't
            # introduced by those manifests.
            commonctxs = [self._repo[c] for c in commonrevs]
            oldlinknodes = linknodes
            clrev = self._repo.changelog.rev

            # Defining this function has a side-effect of overriding the
            # function of the same name that was passed in as an argument.
            # TODO have caller pass in appropriate function.
            def linknodes(flog, fname):
                for c in commonctxs:
                    try:
                        fnode = c.filenode(fname)
                        self.clrev_to_localrev[c.rev()] = flog.rev(fnode)
                    except error.ManifestLookupError:
                        pass
                links = oldlinknodes(flog, fname)
                if len(links) != len(mfdicts):
                    for mf, lr in mfdicts:
                        fnode = mf.get(fname, None)
                        if fnode in links:
                            links[fnode] = min(links[fnode], lr, key=clrev)
                        elif fnode:
                            links[fnode] = lr
                return links

        return self._generatefiles(changedfiles, linknodes, commonrevs, source)

    def _generatefiles(self, changedfiles, linknodes, commonrevs, source):
        repo = self._repo
        progress = repo.ui.makeprogress(_('bundling'), unit=_('files'),
                                        total=len(changedfiles))
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise error.Abort(_("empty or missing file data for %s") %
                                  fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Look up filenodes; we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
            if filenodes:
                progress.update(i + 1, item=fname)
                h = self.fileheader(fname)
                size = len(h)
                yield h
                for chunk in self.group(filenodes, filerevlog, lookupfilelog):
                    size += len(chunk)
                    yield chunk
                self._verbosenote(_('%8.i %s\n') % (size, fname))
        progress.complete()

    def deltaparent(self, store, rev, p1, p2, prev):
        if not store.candelta(prev, rev):
            raise error.ProgrammingError('cg1 should not be used in this case')
        return prev

    def revchunk(self, store, rev, prev, linknode):
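        # The presence of full_nodes signals narrow ellipsis mode; the
        # attribute is grafted onto the packer by _packellipsischangegroup()
        # near the end of this module.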
        if util.safehasattr(self, 'full_nodes'):
            fn = self._revisiondeltanarrow
        else:
            fn = self._revisiondeltanormal

        delta = fn(store, rev, prev, linknode)
        if not delta:
            return

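        # Each revision is framed on the wire as a length-prefixed chunk:
        # a 4-byte big-endian length (see chunkheader()), then the
        # version-specific delta header built by the callable passed to the
        # constructor, then the delta data itself.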
-        meta = self.builddeltaheader(delta.node, delta.p1node, delta.p2node,
-                                     delta.basenode, delta.linknode,
-                                     delta.flags)
+        meta = self._builddeltaheader(delta)
        l = len(meta) + sum(len(x) for x in delta.deltachunks)

        yield chunkheader(l)
        yield meta
        for x in delta.deltachunks:
            yield x

    def _revisiondeltanormal(self, store, rev, prev, linknode):
        node = store.node(rev)
        p1, p2 = store.parentrevs(rev)
        base = self.deltaparent(store, rev, p1, p2, prev)

        prefix = ''
        if store.iscensored(base) or store.iscensored(rev):
            try:
                delta = store.revision(node, raw=True)
            except error.CensoredNodeError as e:
                delta = e.tombstone
            if base == nullrev:
                prefix = mdiff.trivialdiffheader(len(delta))
            else:
                baselen = store.rawsize(base)
                prefix = mdiff.replacediffheader(baselen, len(delta))
        elif base == nullrev:
            delta = store.revision(node, raw=True)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = store.revdiff(base, rev)
        p1n, p2n = store.parents(node)

        return revisiondelta(
            node=node,
            p1node=p1n,
            p2node=p2n,
            basenode=store.node(base),
            linknode=linknode,
            flags=store.flags(rev),
            deltachunks=(prefix, delta),
        )

    def _revisiondeltanarrow(self, store, rev, prev, linknode):
        # build up some mapping information that's useful later. See
        # the local() nested function below.
        if not self.changelog_done:
            self.clnode_to_rev[linknode] = rev
            linkrev = rev
            self.clrev_to_localrev[linkrev] = rev
        else:
            linkrev = self.clnode_to_rev[linknode]
            self.clrev_to_localrev[linkrev] = rev

        # This is a node to send in full, because the changeset it
        # corresponds to was a full changeset.
        if linknode in self.full_nodes:
            return self._revisiondeltanormal(store, rev, prev, linknode)

        # At this point, a node can either be one we should skip or an
        # ellipsis. If it's not an ellipsis, bail immediately.
        if linkrev not in self.precomputed_ellipsis:
            return

        linkparents = self.precomputed_ellipsis[linkrev]
        def local(clrev):
            """Turn a changelog revnum into a local revnum.

            The ellipsis dag is stored as revnums on the changelog,
            but when we're producing ellipsis entries for
            non-changelog revlogs, we need to turn those numbers into
            something local. This does that for us, and during the
            changelog sending phase will also expand the stored
            mappings as needed.
            """
            if clrev == nullrev:
                return nullrev

            if not self.changelog_done:
                # If we're doing the changelog, it's possible that we
                # have a parent that is already on the client, and we
                # need to store some extra mapping information so that
                # our contained ellipsis nodes will be able to resolve
                # their parents.
                if clrev not in self.clrev_to_localrev:
                    clnode = store.node(clrev)
                    self.clnode_to_rev[clnode] = clrev
                return clrev

            # Walk the ellipsis-ized changelog breadth-first looking for a
            # change that has been linked from the current revlog.
            #
            # For a flat manifest revlog only a single step should be necessary
            # as all relevant changelog entries are relevant to the flat
            # manifest.
            #
            # For a filelog or tree manifest dirlog however not every changelog
            # entry will have been relevant, so we need to skip some changelog
            # nodes even after ellipsis-izing.
            walk = [clrev]
            while walk:
                p = walk[0]
                walk = walk[1:]
                if p in self.clrev_to_localrev:
                    return self.clrev_to_localrev[p]
                elif p in self.full_nodes:
                    walk.extend([pp for pp in self._repo.changelog.parentrevs(p)
                                 if pp != nullrev])
                elif p in self.precomputed_ellipsis:
                    walk.extend([pp for pp in self.precomputed_ellipsis[p]
                                 if pp != nullrev])
                else:
                    # In this case, we've got an ellipsis with parents
                    # outside the current bundle (likely an
                    # incremental pull). We "know" that we can use the
                    # value of this same revlog at whatever revision
                    # is pointed to by linknode. "Know" is in scare
                    # quotes because I haven't done enough examination
                    # of edge cases to convince myself this is really
                    # a fact - it works for all the (admittedly
                    # thorough) cases in our testsuite, but I would be
                    # somewhat unsurprised to find a case in the wild
                    # where this breaks down a bit. That said, I don't
                    # know if it would hurt anything.
                    for i in pycompat.xrange(rev, 0, -1):
                        if store.linkrev(i) == clrev:
                            return i
                    # We failed to resolve a parent for this node, so
                    # we crash the changegroup construction.
                    raise error.Abort(
                        'unable to resolve parent while packing %r %r'
                        ' for changeset %r' % (store.indexfile, rev, clrev))

            return nullrev

        if not linkparents or (
                store.parentrevs(rev) == (nullrev, nullrev)):
            p1, p2 = nullrev, nullrev
        elif len(linkparents) == 1:
            p1, = sorted(local(p) for p in linkparents)
            p2 = nullrev
        else:
            p1, p2 = sorted(local(p) for p in linkparents)

        n = store.node(rev)
        p1n, p2n = store.node(p1), store.node(p2)
        flags = store.flags(rev)
        flags |= revlog.REVIDX_ELLIPSIS

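        # Ellipsis revisions are emitted as full snapshots against the null
        # base (note basenode=nullid below), with REVIDX_ELLIPSIS set to mark
        # the revision as one whose parents have been rewritten.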
        # TODO: try and actually send deltas for ellipsis data blocks
        data = store.revision(n)
        diffheader = mdiff.trivialdiffheader(len(data))

        return revisiondelta(
            node=n,
            p1node=p1n,
            p2node=p2n,
            basenode=nullid,
            linknode=linknode,
            flags=flags,
            deltachunks=(diffheader, data),
        )

-    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
-        # do nothing with basenode, it is implicitly the previous one in HG10
-        # do nothing with flags, it is implicitly 0 for cg1 and cg2
-        return self.deltaheader.pack(node, p1n, p2n, linknode)
-
class cg2packer(cg1packer):
-    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
-
-    def __init__(self, repo, filematcher, version, bundlecaps=None):
+    def __init__(self, repo, filematcher, version, builddeltaheader,
+                 bundlecaps=None):
        super(cg2packer, self).__init__(repo, filematcher, version,
+                                        builddeltaheader,
                                        bundlecaps=bundlecaps)

        if self._reorder is None:
            # Since generaldelta is directly supported by cg2, reordering
            # generally doesn't help, so we disable it by default (treating
            # bundle.reorder=auto just like bundle.reorder=False).
            self._reorder = False

    def deltaparent(self, store, rev, p1, p2, prev):
        # Narrow ellipses mode.
        if util.safehasattr(self, 'full_nodes'):
            # TODO: send better deltas when in narrow mode.
            #
            # changegroup.group() loops over revisions to send,
            # including revisions we'll skip. What this means is that
            # `prev` will be a potentially useless delta base for all
            # ellipsis nodes, as the client likely won't have it. In
            # the future we should do bookkeeping about which nodes
            # have been sent to the client, and try to be
            # significantly smarter about delta bases. This is
            # slightly tricky because this same code has to work for
            # all revlogs, and we don't have the linkrev/linknode here.
            return p1

        dp = store.deltaparent(rev)
        if dp == nullrev and store.storedeltachains:
            # Avoid sending full revisions when delta parent is null. Pick prev
            # in that case. It's tempting to pick p1 in this case, as p1 will
            # be smaller in the common case. However, computing a delta against
            # p1 may require resolving the raw text of p1, which could be
            # expensive. The revlog caches should have prev cached, meaning
            # less CPU for changegroup generation. There is likely room to add
            # a flag and/or config option to control this behavior.
            base = prev
        elif dp == nullrev:
            # revlog is configured to use full snapshot for a reason,
            # stick to full snapshot.
            base = nullrev
        elif dp not in (p1, p2, prev):
            # Pick prev when we can't be sure remote has the base revision.
            return prev
        else:
            base = dp
        if base != nullrev and not store.candelta(base, rev):
            base = nullrev
        return base

-    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
-        # Do nothing with flags, it is implicitly 0 in cg1 and cg2
-        return self.deltaheader.pack(node, p1n, p2n, basenode, linknode)
-
class cg3packer(cg2packer):
-    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
-
    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        if dir:
            yield self.fileheader(dir)

        dirlog = self._repo.manifestlog._revlog.dirlog(dir)
        for chunk in self.group(mfnodes, dirlog, lookuplinknode,
                                units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        return self.close()

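# Each _makecgNpacker factory below binds a version-specific callable that
# packs the delta header for a revisiondelta. cg1 omits the base node (the
# base is implicitly the previous revision) and the flags; cg2 adds an
# explicit 20-byte base node; cg3 also appends a 16-bit big-endian flags
# field, making its header 102 bytes (five 20-byte nodes plus 2 bytes of
# flags).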
-    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
-        return self.deltaheader.pack(node, p1n, p2n, basenode, linknode, flags)
-
def _makecg1packer(repo, filematcher, bundlecaps):
-    return cg1packer(repo, filematcher, b'01', bundlecaps=bundlecaps)
+    builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack(
+        d.node, d.p1node, d.p2node, d.linknode)
+
+    return cg1packer(repo, filematcher, b'01', builddeltaheader,
+                     bundlecaps=bundlecaps)

def _makecg2packer(repo, filematcher, bundlecaps):
-    return cg2packer(repo, filematcher, b'02', bundlecaps=bundlecaps)
+    builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack(
+        d.node, d.p1node, d.p2node, d.basenode, d.linknode)
+
+    return cg2packer(repo, filematcher, b'02', builddeltaheader,
+                     bundlecaps=bundlecaps)

def _makecg3packer(repo, filematcher, bundlecaps):
-    return cg3packer(repo, filematcher, b'03', bundlecaps=bundlecaps)
+    builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
+        d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags)
+
+    return cg3packer(repo, filematcher, b'03', builddeltaheader,
+                     bundlecaps=bundlecaps)

_packermap = {'01': (_makecg1packer, cg1unpacker),
              # cg2 adds support for exchanging generaldelta
              '02': (_makecg2packer, cg2unpacker),
              # cg3 adds support for exchanging revlog flags and treemanifests
              '03': (_makecg3packer, cg3unpacker),
}

def allsupportedversions(repo):
    versions = set(_packermap.keys())
    if not (repo.ui.configbool('experimental', 'changegroup3') or
            repo.ui.configbool('experimental', 'treemanifest') or
            'treemanifest' in repo.requirements):
        versions.discard('03')
    return versions

# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    return allsupportedversions(repo)

# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    versions = allsupportedversions(repo)
    if 'treemanifest' in repo.requirements:
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        versions.discard('01')
        versions.discard('02')
    if repository.NARROW_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # support that for stripping and unbundling to work.
        versions.discard('01')
        versions.discard('02')
    if LFS_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # mark LFS entries with REVIDX_EXTSTORED.
        versions.discard('01')
        versions.discard('02')

    return versions

def localversion(repo):
    # Finds the best version to use for bundles that are meant to be used
    # locally, such as those from strip and shelve, and temporary bundles.
    return max(supportedoutgoingversions(repo))

def safeversion(repo):
    # Finds the smallest version that it's safe to assume clients of the repo
    # will support. For example, all hg versions that support generaldelta also
    # support changegroup 02.
    versions = supportedoutgoingversions(repo)
    if 'generaldelta' in repo.requirements:
        versions.discard('01')
    assert versions
    return min(versions)

def getbundler(version, repo, bundlecaps=None, filematcher=None):
    assert version in supportedoutgoingversions(repo)

    if filematcher is None:
        filematcher = matchmod.alwaysmatcher(repo.root, '')

    if version == '01' and not filematcher.always():
        raise error.ProgrammingError('version 01 changegroups do not support '
                                     'sparse file matchers')

    # Requested files could include files not in the local store. So
    # filter those out.
    filematcher = matchmod.intersectmatchers(repo.narrowmatch(),
                                             filematcher)

    fn = _packermap[version][0]
    return fn(repo, filematcher, bundlecaps)

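# Example (sketch): most callers go through makechangegroup() or makestream()
# below rather than calling getbundler() directly, roughly:
#
#   bundler = getbundler('02', repo)
#   stream = bundler.generate(commonrevs, csets, fastpathlinkrev, source)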
def getunbundler(version, fh, alg, extras=None):
    return _packermap[version][1](fh, alg, extras=extras)

def _changegroupinfo(repo, nodes, source):
    if repo.ui.verbose or source == 'bundle':
        repo.ui.status(_("%d changesets found\n") % len(nodes))
    if repo.ui.debugflag:
        repo.ui.debug("list of changesets:\n")
        for node in nodes:
            repo.ui.debug("%s\n" % hex(node))

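# makechangegroup() wraps the raw chunk stream produced by makestream() in an
# unbundler (via util.chunkbuffer), so callers get an object they can apply
# to a repository rather than a bare byte stream.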
def makechangegroup(repo, outgoing, version, source, fastpath=False,
                    bundlecaps=None):
    cgstream = makestream(repo, outgoing, version, source,
                          fastpath=fastpath, bundlecaps=bundlecaps)
    return getunbundler(version, util.chunkbuffer(cgstream), None,
                        {'clcount': len(outgoing.missing) })

def makestream(repo, outgoing, version, source, fastpath=False,
               bundlecaps=None, filematcher=None):
    bundler = getbundler(version, repo, bundlecaps=bundlecaps,
                         filematcher=filematcher)

    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know that all linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)

def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    revisions = 0
    files = 0
    progress = repo.ui.makeprogress(_('files'), unit=_('files'),
                                    total=expectedfiles)
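    # Two-argument iter(): source.filelogheader() returns one header dict per
    # filelog, and an empty dict (the sentinel) once the file portion of the
    # stream is exhausted.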
    for chunkdata in iter(source.filelogheader, {}):
        files += 1
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        progress.increment()
        fl = repo.file(f)
        o = len(fl)
        try:
            deltas = source.deltaiter()
            if not fl.addgroup(deltas, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        if f in needfiles:
            needs = needfiles[f]
            for new in pycompat.xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    progress.complete()

    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files

def _packellipsischangegroup(repo, common, match, relevant_nodes,
                             ellipsisroots, visitnodes, depth, source, version):
    if version in ('01', '02'):
        raise error.Abort(
            'ellipsis nodes require at least cg3 on client and server, '
            'but negotiated version %s' % version)
    # We wrap cg1packer.revchunk, using a side channel to pass
    # relevant_nodes into that area. Then if linknode isn't in the
    # set, we know we have an ellipsis node and we should defer
    # sending that node's data. We override close() to detect
    # pending ellipsis nodes and flush them.
    packer = getbundler(version, repo, filematcher=match)
    # Give the packer the list of nodes which should not be
    # ellipsis nodes. We store this rather than the set of nodes
    # that should be an ellipsis because for very large histories
    # we expect this to be significantly smaller.
    packer.full_nodes = relevant_nodes
    # Maps ellipsis revs to their roots at the changelog level.
    packer.precomputed_ellipsis = ellipsisroots
    # Maps CL revs to per-revlog revisions. Cleared in close() at
    # the end of each group.
    packer.clrev_to_localrev = {}
    packer.next_clrev_to_localrev = {}
    # Maps changelog nodes to changelog revs. Filled in once
    # during changelog stage and then left unmodified.
    packer.clnode_to_rev = {}
    packer.changelog_done = False
    # If true, informs the packer that it is serving shallow content and might
    # need to pack file contents not introduced by the changes being packed.
    packer.is_shallow = depth is not None

    return packer.generate(common, visitnodes, False, source)