changegroup: pass version into constructor...
Gregory Szorc, r38931:d7ac49c2 (default)
@@ -1,1377 +1,1377 @@
# changegroup.py - Mercurial changegroup manipulation functions
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import os
import struct
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    nullrev,
    short,
)

from .thirdparty import (
    attr,
)

from . import (
    dagutil,
    error,
    manifest,
    match as matchmod,
    mdiff,
    phases,
    pycompat,
    repository,
    revlog,
    util,
)

from .utils import (
    stringutil,
)

_CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
_CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
_CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"

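# The corresponding header sizes (via struct.calcsize) are 80 bytes for v1
# (four 20-byte nodes), 100 for v2 (an explicit delta base node is added),
# and 102 for v3 (a big-endian 16-bit flags field is added).
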
LFS_REQUIREMENT = 'lfs'

readexactly = util.readexactly

def getchunk(stream):
    """return the next chunk from stream as a string"""
    d = readexactly(stream, 4)
    l = struct.unpack(">l", d)[0]
    if l <= 4:
        if l:
            raise error.Abort(_("invalid chunk length %d") % l)
        return ""
    return readexactly(stream, l - 4)

def chunkheader(length):
    """return a changegroup chunk header (string)"""
    return struct.pack(">l", length + 4)

def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    return struct.pack(">l", 0)

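# Illustrative sketch (not part of the original module): the framing that
# getchunk()/chunkheader() implement is a 4-byte big-endian length that
# counts itself, followed by the payload; a zero length (closechunk())
# terminates a group.
def _demochunkframing():
    import io                          # local import; demo only
    framed = chunkheader(3) + b'abc' + closechunk()
    stream = io.BytesIO(framed)
    assert getchunk(stream) == b'abc'  # payload round-trips
    assert not getchunk(stream)        # empty result marks the terminator
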
def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                # Increase default buffer size because default is usually
                # small (4k is common on Linux).
                fh = open(filename, "wb", 131072)
        else:
            fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, r"wb")
        cleanup = filename
        for c in chunks:
            fh.write(c)
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)

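# Illustrative note (not part of the original module): callers typically
# feed writechunks() the chunk iterator from a packer or unpacker. Passing
# filename=None writes to a mkstemp()-backed temporary file whose path is
# returned, and the partially-written file is unlinked if any chunk write
# fails (the cleanup variable above is only cleared after the last write).
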
class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        if alg is None:
            alg = 'UN'
        if alg not in util.compengines.supportedbundletypes:
            raise error.Abort(_('unknown stream compression type: %s')
                              % alg)
        if alg == 'BZ':
            alg = '_truncatedBZ'

        compengine = util.compengines.forbundletype(alg)
        self._stream = compengine.decompressorreader(fh)
        self._type = alg
        self.extras = extras or {}
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None and self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

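    # Note the implicit delta base above: the cg1 header carries no base
    # node, so the first chunk of a group deltas against its first parent
    # and each later chunk deltas against the node that preceded it in the
    # stream (the prevnode threaded through deltaiter() below).
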
    def deltachunk(self, prevnode):
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return (node, p1, p2, cs, deltabase, delta, flags)

    def getchunks(self):
        """returns all the chunks contained in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parses the changegroup data; otherwise it
        would block in the sshrepo case because it wouldn't know where the
        stream ends.
        """
        # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
        # and a list of filelogs. For changegroup 3, we expect 4 parts:
        # changelog, manifestlog, a list of tree manifestlogs, and a list of
        # filelogs.
        #
        # Changelog and manifestlog parts are terminated with empty chunks. The
        # tree and file parts are a list of entry sections. Each entry section
        # is a series of chunks terminating in an empty chunk. The list of these
        # entry sections is terminated in yet another empty chunk, so we know
        # we've reached the end of the tree/file list when we reach an empty
        # chunk that was preceded by no non-empty chunks.

        parts = 0
        while parts < 2 + self._grouplistcount:
            noentries = True
            while True:
                chunk = getchunk(self)
                if not chunk:
                    # The first two empty chunks represent the end of the
                    # changelog and the manifestlog portions. The remaining
                    # empty chunks represent either A) the end of individual
                    # tree or file entries in the file list, or B) the end of
                    # the entire list. It's the end of the entire list if there
                    # were no entries (i.e. noentries is True).
                    if parts < 2:
                        parts += 1
                    elif noentries:
                        parts += 1
                    break
                noentries = False
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

    def _unpackmanifests(self, repo, revmap, trp, prog):
        self.callback = prog.increment
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        deltas = self.deltaiter()
        repo.manifestlog.addgroup(deltas, revmap, trp)
        prog.complete()
        self.callback = None

    def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
              expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
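        # For example: 3 heads added returns 4; 2 heads removed returns -3;
        # a changegroup that adds changesets without changing the head
        # count returns 1.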
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        try:
            # The transaction may already carry source information. In this
            # case we use the top level data. We overwrite the arguments
            # because we need to use the top level values (if they exist)
            # in this function.
            srctype = tr.hookargs.setdefault('source', srctype)
            url = tr.hookargs.setdefault('url', url)
            repo.hook('prechangegroup',
                      throw=True, **pycompat.strkwargs(tr.hookargs))

            # write changelog data to temp files so concurrent readers
            # will not see an inconsistent view
            cl = repo.changelog
            cl.delayupdate(tr)
            oldheads = set(cl.heads())

            trp = weakref.proxy(tr)
            # pull off the changeset group
            repo.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            progress = repo.ui.makeprogress(_('changesets'), unit=_('chunks'),
                                            total=expectedtotal)
            self.callback = progress.increment

            efiles = set()
            def onchangelog(cl, node):
                efiles.update(cl.readfiles(node))

            self.changelogheader()
            deltas = self.deltaiter()
            cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog)
            efiles = len(efiles)

            if not cgnodes:
                repo.ui.develwarn('applied empty changegroup',
                                  config='warn-empty-changegroup')
            clend = len(cl)
            changesets = clend - clstart
            progress.complete()
            self.callback = None

            # pull off the manifest group
            repo.ui.status(_("adding manifests\n"))
            # We know that we'll never have more manifests than we had
            # changesets.
            progress = repo.ui.makeprogress(_('manifests'), unit=_('chunks'),
                                            total=changesets)
            self._unpackmanifests(repo, revmap, trp, progress)

            needfiles = {}
            if repo.ui.configbool('server', 'validate'):
                cl = repo.changelog
                ml = repo.manifestlog
                # validate incoming csets have their manifests
                for cset in pycompat.xrange(clstart, clend):
                    mfnode = cl.changelogrevision(cset).manifest
                    mfest = ml[mfnode].readdelta()
                    # store file cgnodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            repo.ui.status(_("adding file changes\n"))
            newrevs, newfiles = _addchangegroupfiles(
                repo, self, revmap, trp, efiles, needfiles)
            revisions += newrevs
            files += newfiles

            deltaheads = 0
            if oldheads:
                heads = cl.heads()
                deltaheads = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and repo[h].closesbranch():
                        deltaheads -= 1
            htext = ""
            if deltaheads:
                htext = _(" (%+d heads)") % deltaheads

            repo.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            repo.invalidatevolatilesets()

            if changesets > 0:
                if 'node' not in tr.hookargs:
                    tr.hookargs['node'] = hex(cl.node(clstart))
                    tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                    hookargs = dict(tr.hookargs)
                else:
                    hookargs = dict(tr.hookargs)
                    hookargs['node'] = hex(cl.node(clstart))
                    hookargs['node_last'] = hex(cl.node(clend - 1))
                repo.hook('pretxnchangegroup',
                          throw=True, **pycompat.strkwargs(hookargs))

            added = [cl.node(r) for r in pycompat.xrange(clstart, clend)]
            phaseall = None
            if srctype in ('push', 'serve'):
                # Old servers cannot push the boundary themselves.
                # New servers won't push the boundary if changeset already
                # exists locally as secret
                #
                # We should not use added here but the list of all changes in
                # the bundle
                if repo.publishing():
                    targetphase = phaseall = phases.public
                else:
                    # closer target phase computation

                    # Those changesets have been pushed from the
                    # outside, their phases are going to be pushed
                    # alongside. Therefore `targetphase` is
                    # ignored.
                    targetphase = phaseall = phases.draft
                if added:
                    phases.registernew(repo, tr, targetphase, added)
                if phaseall is not None:
                    phases.advanceboundary(repo, tr, phaseall, cgnodes)

            if changesets > 0:

                def runhooks():
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(repo):
                        return

                    repo.hook("changegroup", **pycompat.strkwargs(hookargs))

                    for n in added:
                        args = hookargs.copy()
                        args['node'] = hex(n)
                        del args['node_last']
                        repo.hook("incoming", **pycompat.strkwargs(args))

                    newheads = [h for h in repo.heads()
                                if h not in oldheads]
                    repo.ui.log("incoming",
                                "%d incoming changes - new heads: %s\n",
                                len(added),
                                ', '.join([hex(c[:6]) for c in newheads]))

                tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        if deltaheads < 0:
            ret = deltaheads - 1
        else:
            ret = deltaheads + 1
        return ret

    def deltaiter(self):
        """
        returns an iterator of the deltas in this changegroup

        Useful for passing to the underlying storage system to be stored.
        """
        chain = None
        for chunkdata in iter(lambda: self.deltachunk(chain), {}):
            # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
            yield chunkdata
            chain = chunkdata[0]

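# Illustrative sketch (not part of the original module): what a cg2 delta
# header looks like on the wire. Each delta chunk begins with five 20-byte
# nodes packed per _CHANGEGROUPV2_DELTA_HEADER; cg3 appends a big-endian
# 16-bit flags field. The node values below are placeholders.
def _demodeltaheader():
    node = b'\x11' * 20       # revision node (placeholder)
    p1 = b'\x22' * 20         # first parent (placeholder)
    p2 = nullid               # no second parent
    deltabase = p1            # cg2 carries an explicit delta base
    cs = b'\x33' * 20         # linked changeset node (placeholder)
    header = struct.pack(_CHANGEGROUPV2_DELTA_HEADER,
                         node, p1, p2, deltabase, cs)
    assert len(header) == struct.calcsize(_CHANGEGROUPV2_DELTA_HEADER)
    return struct.unpack(_CHANGEGROUPV2_DELTA_HEADER, header)
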
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs = headertuple
        flags = 0
        return node, p1, p2, deltabase, cs, flags

class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog):
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % d)
            dirlog = repo.manifestlog._revlog.dirlog(d)
            deltas = self.deltaiter()
            if not dirlog.addgroup(deltas, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))

class headerlessfixup(object):
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        if self._h:
            d, self._h = self._h[:n], self._h[n:]
            if len(d) < n:
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)

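# Illustrative sketch (not part of the original module): headerlessfixup
# splices already-consumed header bytes back in front of a stream, so code
# that peeked at a bundle header can hand the "rewound" stream to an
# unpacker.
def _demoheaderlessfixup():
    import io                               # local import; demo only
    fh = io.BytesIO(b'body')
    fixed = headerlessfixup(fh, b'HG10')    # pretend b'HG10' was read earlier
    assert fixed.read(4) == b'HG10'         # replayed header bytes come first
    assert fixed.read(4) == b'body'         # then the underlying stream
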
@attr.s(slots=True, frozen=True)
class revisiondelta(object):
    """Describes a delta entry in a changegroup.

    Captured data is sufficient to serialize the delta into multiple
    formats.
    """
    # 20 byte node of this revision.
    node = attr.ib()
    # 20 byte nodes of parent revisions.
    p1node = attr.ib()
    p2node = attr.ib()
    # 20 byte node that this delta is against.
    basenode = attr.ib()
    # 20 byte node of changeset revision this delta is associated with.
    linknode = attr.ib()
    # 2 bytes of flags to apply to revision data.
    flags = attr.ib()
    # Iterable of chunks holding raw delta data.
    deltachunks = attr.ib()

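# Illustrative sketch (not part of the original module): attrs generates an
# __init__ taking the fields above in order, so a packer could materialize
# an entry roughly like this (placeholder values):
def _demorevisiondelta():
    return revisiondelta(
        node=b'\x11' * 20,
        p1node=b'\x22' * 20,
        p2node=nullid,
        basenode=b'\x22' * 20,
        linknode=b'\x33' * 20,
        flags=0,
        deltachunks=[b''],  # iterable of raw delta chunks
    )
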
class cg1packer(object):
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
-    version = '01'
-    def __init__(self, repo, filematcher, bundlecaps=None):
+
+    def __init__(self, repo, filematcher, version, bundlecaps=None):
        """Given a source repo, construct a bundler.

        filematcher is a matcher that matches on files to include in the
        changegroup. Used to facilitate sparse changegroups.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle. While bundlecaps is
        unused in core Mercurial, extensions rely on this feature to communicate
        capabilities to customize the changegroup packer.
        """
        assert filematcher
        self._filematcher = filematcher

+        self.version = version
+
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        # experimental config: bundle.reorder
        reorder = repo.ui.config('bundle', 'reorder')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = stringutil.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None

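    # Note on the change in this revision: the changegroup version ('01'
    # for this class) used to be a class attribute; it is now passed to
    # __init__ by the caller and stored on the instance, so the wire format
    # version becomes a per-instance property of the packer.
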
556 def close(self):
558 def close(self):
557 # Ellipses serving mode.
559 # Ellipses serving mode.
558 getattr(self, 'clrev_to_localrev', {}).clear()
560 getattr(self, 'clrev_to_localrev', {}).clear()
559 if getattr(self, 'next_clrev_to_localrev', {}):
561 if getattr(self, 'next_clrev_to_localrev', {}):
560 self.clrev_to_localrev = self.next_clrev_to_localrev
562 self.clrev_to_localrev = self.next_clrev_to_localrev
561 del self.next_clrev_to_localrev
563 del self.next_clrev_to_localrev
562 self.changelog_done = True
564 self.changelog_done = True
563
565
564 return closechunk()
566 return closechunk()
565
567
566 def fileheader(self, fname):
568 def fileheader(self, fname):
567 return chunkheader(len(fname)) + fname
569 return chunkheader(len(fname)) + fname
568
570
569 # Extracted both for clarity and for overriding in extensions.
571 # Extracted both for clarity and for overriding in extensions.
570 def _sortgroup(self, store, nodelist, lookup):
572 def _sortgroup(self, store, nodelist, lookup):
571 """Sort nodes for change group and turn them into revnums."""
573 """Sort nodes for change group and turn them into revnums."""
572 # Ellipses serving mode.
574 # Ellipses serving mode.
573 #
575 #
574 # In a perfect world, we'd generate better ellipsis-ified graphs
576 # In a perfect world, we'd generate better ellipsis-ified graphs
575 # for non-changelog revlogs. In practice, we haven't started doing
577 # for non-changelog revlogs. In practice, we haven't started doing
576 # that yet, so the resulting DAGs for the manifestlog and filelogs
578 # that yet, so the resulting DAGs for the manifestlog and filelogs
577 # are actually full of bogus parentage on all the ellipsis
579 # are actually full of bogus parentage on all the ellipsis
578 # nodes. This has the side effect that, while the contents are
580 # nodes. This has the side effect that, while the contents are
579 # correct, the individual DAGs might be completely out of whack in
581 # correct, the individual DAGs might be completely out of whack in
580 # a case like 882681bc3166 and its ancestors (back about 10
582 # a case like 882681bc3166 and its ancestors (back about 10
581 # revisions or so) in the main hg repo.
583 # revisions or so) in the main hg repo.
582 #
584 #
583 # The one invariant we *know* holds is that the new (potentially
585 # The one invariant we *know* holds is that the new (potentially
584 # bogus) DAG shape will be valid if we order the nodes in the
586 # bogus) DAG shape will be valid if we order the nodes in the
585 # order that they're introduced in dramatis personae by the
587 # order that they're introduced in dramatis personae by the
586 # changelog, so what we do is we sort the non-changelog histories
588 # changelog, so what we do is we sort the non-changelog histories
587 # by the order in which they are used by the changelog.
589 # by the order in which they are used by the changelog.
588 if util.safehasattr(self, 'full_nodes') and self.clnode_to_rev:
590 if util.safehasattr(self, 'full_nodes') and self.clnode_to_rev:
589 key = lambda n: self.clnode_to_rev[lookup(n)]
591 key = lambda n: self.clnode_to_rev[lookup(n)]
590 return [store.rev(n) for n in sorted(nodelist, key=key)]
592 return [store.rev(n) for n in sorted(nodelist, key=key)]
591
593
592 # for generaldelta revlogs, we linearize the revs; this will both be
594 # for generaldelta revlogs, we linearize the revs; this will both be
593 # much quicker and generate a much smaller bundle
595 # much quicker and generate a much smaller bundle
594 if (store._generaldelta and self._reorder is None) or self._reorder:
596 if (store._generaldelta and self._reorder is None) or self._reorder:
595 dag = dagutil.revlogdag(store)
597 dag = dagutil.revlogdag(store)
596 return dag.linearize(set(store.rev(n) for n in nodelist))
598 return dag.linearize(set(store.rev(n) for n in nodelist))
597 else:
599 else:
598 return sorted([store.rev(n) for n in nodelist])
600 return sorted([store.rev(n) for n in nodelist])
599
601
600 def group(self, nodelist, store, lookup, units=None):
602 def group(self, nodelist, store, lookup, units=None):
601 """Calculate a delta group, yielding a sequence of changegroup chunks
603 """Calculate a delta group, yielding a sequence of changegroup chunks
602 (strings).
604 (strings).
603
605
604 Given a list of changeset revs, return a set of deltas and
606 Given a list of changeset revs, return a set of deltas and
605 metadata corresponding to nodes. The first delta is
607 metadata corresponding to nodes. The first delta is
606 first parent(nodelist[0]) -> nodelist[0], the receiver is
608 first parent(nodelist[0]) -> nodelist[0], the receiver is
607 guaranteed to have this parent as it has all history before
609 guaranteed to have this parent as it has all history before
608 these changesets. In the case firstparent is nullrev the
610 these changesets. In the case firstparent is nullrev the
609 changegroup starts with a full revision.
611 changegroup starts with a full revision.
610
612
611 If units is not None, progress detail will be generated, units specifies
613 If units is not None, progress detail will be generated, units specifies
612 the type of revlog that is touched (changelog, manifest, etc.).
614 the type of revlog that is touched (changelog, manifest, etc.).
613 """
615 """
614 # if we don't have any revisions touched by these changesets, bail
616 # if we don't have any revisions touched by these changesets, bail
615 if len(nodelist) == 0:
617 if len(nodelist) == 0:
616 yield self.close()
618 yield self.close()
617 return
619 return
618
620
619 revs = self._sortgroup(store, nodelist, lookup)
621 revs = self._sortgroup(store, nodelist, lookup)
620
622
621 # add the parent of the first rev
623 # add the parent of the first rev
622 p = store.parentrevs(revs[0])[0]
624 p = store.parentrevs(revs[0])[0]
623 revs.insert(0, p)
625 revs.insert(0, p)
624
626
625 # build deltas
627 # build deltas
626 progress = None
628 progress = None
627 if units is not None:
629 if units is not None:
628 progress = self._repo.ui.makeprogress(_('bundling'), unit=units,
630 progress = self._repo.ui.makeprogress(_('bundling'), unit=units,
629 total=(len(revs) - 1))
631 total=(len(revs) - 1))
630 for r in pycompat.xrange(len(revs) - 1):
632 for r in pycompat.xrange(len(revs) - 1):
631 if progress:
633 if progress:
632 progress.update(r + 1)
634 progress.update(r + 1)
633 prev, curr = revs[r], revs[r + 1]
635 prev, curr = revs[r], revs[r + 1]
634 linknode = lookup(store.node(curr))
636 linknode = lookup(store.node(curr))
635 for c in self.revchunk(store, curr, prev, linknode):
637 for c in self.revchunk(store, curr, prev, linknode):
636 yield c
638 yield c
637
639
638 if progress:
640 if progress:
639 progress.complete()
641 progress.complete()
640 yield self.close()
642 yield self.close()
641
643
642 # filter any nodes that claim to be part of the known set
644 # filter any nodes that claim to be part of the known set
643 def prune(self, store, missing, commonrevs):
645 def prune(self, store, missing, commonrevs):
644 # TODO this violates storage abstraction for manifests.
646 # TODO this violates storage abstraction for manifests.
645 if isinstance(store, manifest.manifestrevlog):
647 if isinstance(store, manifest.manifestrevlog):
646 if not self._filematcher.visitdir(store._dir[:-1] or '.'):
648 if not self._filematcher.visitdir(store._dir[:-1] or '.'):
647 return []
649 return []
648
650
649 rr, rl = store.rev, store.linkrev
651 rr, rl = store.rev, store.linkrev
650 return [n for n in missing if rl(rr(n)) not in commonrevs]
652 return [n for n in missing if rl(rr(n)) not in commonrevs]
651
653
652 def _packmanifests(self, dir, mfnodes, lookuplinknode):
654 def _packmanifests(self, dir, mfnodes, lookuplinknode):
653 """Pack flat manifests into a changegroup stream."""
655 """Pack flat manifests into a changegroup stream."""
654 assert not dir
656 assert not dir
655 for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
657 for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
656 lookuplinknode, units=_('manifests')):
658 lookuplinknode, units=_('manifests')):
657 yield chunk
659 yield chunk
658
660
659 def _manifestsdone(self):
661 def _manifestsdone(self):
660 return ''
662 return ''
661
663
662 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
664 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
663 '''yield a sequence of changegroup chunks (strings)'''
665 '''yield a sequence of changegroup chunks (strings)'''
664 repo = self._repo
666 repo = self._repo
665 cl = repo.changelog
667 cl = repo.changelog
666
668
667 clrevorder = {}
669 clrevorder = {}
668 mfs = {} # needed manifests
670 mfs = {} # needed manifests
669 fnodes = {} # needed file nodes
671 fnodes = {} # needed file nodes
670 mfl = repo.manifestlog
672 mfl = repo.manifestlog
671 # TODO violates storage abstraction.
673 # TODO violates storage abstraction.
672 mfrevlog = mfl._revlog
674 mfrevlog = mfl._revlog
673 changedfiles = set()
675 changedfiles = set()
674
676
675 ellipsesmode = util.safehasattr(self, 'full_nodes')
677 ellipsesmode = util.safehasattr(self, 'full_nodes')
676
678
677 # Callback for the changelog, used to collect changed files and
679 # Callback for the changelog, used to collect changed files and
678 # manifest nodes.
680 # manifest nodes.
679 # Returns the linkrev node (identity in the changelog case).
681 # Returns the linkrev node (identity in the changelog case).
680 def lookupcl(x):
682 def lookupcl(x):
681 c = cl.read(x)
683 c = cl.read(x)
682 clrevorder[x] = len(clrevorder)
684 clrevorder[x] = len(clrevorder)
683
685
684 if ellipsesmode:
686 if ellipsesmode:
685 # Only update mfs if x is going to be sent. Otherwise we
687 # Only update mfs if x is going to be sent. Otherwise we
686 # end up with bogus linkrevs specified for manifests and
688 # end up with bogus linkrevs specified for manifests and
687 # we skip some manifest nodes that we should otherwise
689 # we skip some manifest nodes that we should otherwise
688 # have sent.
690 # have sent.
689 if (x in self.full_nodes
691 if (x in self.full_nodes
690 or cl.rev(x) in self.precomputed_ellipsis):
692 or cl.rev(x) in self.precomputed_ellipsis):
691 n = c[0]
693 n = c[0]
692 # Record the first changeset introducing this manifest
694 # Record the first changeset introducing this manifest
693 # version.
695 # version.
694 mfs.setdefault(n, x)
696 mfs.setdefault(n, x)
695 # Set this narrow-specific dict so we have the lowest
697 # Set this narrow-specific dict so we have the lowest
696 # manifest revnum to look up for this cl revnum. (Part of
698 # manifest revnum to look up for this cl revnum. (Part of
697 # mapping changelog ellipsis parents to manifest ellipsis
699 # mapping changelog ellipsis parents to manifest ellipsis
698 # parents)
700 # parents)
699 self.next_clrev_to_localrev.setdefault(cl.rev(x),
701 self.next_clrev_to_localrev.setdefault(cl.rev(x),
700 mfrevlog.rev(n))
702 mfrevlog.rev(n))
701 # We can't trust the changed files list in the changeset if the
703 # We can't trust the changed files list in the changeset if the
702 # client requested a shallow clone.
704 # client requested a shallow clone.
703 if self.is_shallow:
705 if self.is_shallow:
704 changedfiles.update(mfl[c[0]].read().keys())
706 changedfiles.update(mfl[c[0]].read().keys())
705 else:
707 else:
706 changedfiles.update(c[3])
708 changedfiles.update(c[3])
707 else:
709 else:
708
710
709 n = c[0]
711 n = c[0]
710 # record the first changeset introducing this manifest version
712 # record the first changeset introducing this manifest version
711 mfs.setdefault(n, x)
713 mfs.setdefault(n, x)
712 # Record a complete list of potentially-changed files in
714 # Record a complete list of potentially-changed files in
713 # this manifest.
715 # this manifest.
714 changedfiles.update(c[3])
716 changedfiles.update(c[3])
715
717
716 return x
718 return x
717
719
718 self._verbosenote(_('uncompressed size of bundle content:\n'))
720 self._verbosenote(_('uncompressed size of bundle content:\n'))
719 size = 0
721 size = 0
720 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
722 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
721 size += len(chunk)
723 size += len(chunk)
722 yield chunk
724 yield chunk
723 self._verbosenote(_('%8.i (changelog)\n') % size)
725 self._verbosenote(_('%8.i (changelog)\n') % size)
724
726
725 # We need to make sure that the linkrev in the changegroup refers to
727 # We need to make sure that the linkrev in the changegroup refers to
726 # the first changeset that introduced the manifest or file revision.
728 # the first changeset that introduced the manifest or file revision.
727 # The fastpath is usually safer than the slowpath, because the filelogs
729 # The fastpath is usually safer than the slowpath, because the filelogs
728 # are walked in revlog order.
730 # are walked in revlog order.
729 #
731 #
730 # When taking the slowpath with reorder=None and the manifest revlog
732 # When taking the slowpath with reorder=None and the manifest revlog
731 # uses generaldelta, the manifest may be walked in the "wrong" order.
733 # uses generaldelta, the manifest may be walked in the "wrong" order.
732 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
734 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
733 # cc0ff93d0c0c).
735 # cc0ff93d0c0c).
734 #
736 #
735 # When taking the fastpath, we are only vulnerable to reordering
737 # When taking the fastpath, we are only vulnerable to reordering
736 # of the changelog itself. The changelog never uses generaldelta, so
738 # of the changelog itself. The changelog never uses generaldelta, so
737 # it is only reordered when reorder=True. To handle this case, we
739 # it is only reordered when reorder=True. To handle this case, we
738 # simply take the slowpath, which already has the 'clrevorder' logic.
740 # simply take the slowpath, which already has the 'clrevorder' logic.
739 # This was also fixed in cc0ff93d0c0c.
741 # This was also fixed in cc0ff93d0c0c.
740 fastpathlinkrev = fastpathlinkrev and not self._reorder
742 fastpathlinkrev = fastpathlinkrev and not self._reorder
741 # Treemanifests don't work correctly with fastpathlinkrev
743 # Treemanifests don't work correctly with fastpathlinkrev
742 # either, because we don't discover which directory nodes to
744 # either, because we don't discover which directory nodes to
743 # send along with files. This could probably be fixed.
745 # send along with files. This could probably be fixed.
744 fastpathlinkrev = fastpathlinkrev and (
746 fastpathlinkrev = fastpathlinkrev and (
745 'treemanifest' not in repo.requirements)
747 'treemanifest' not in repo.requirements)
746
748
747 for chunk in self.generatemanifests(commonrevs, clrevorder,
749 for chunk in self.generatemanifests(commonrevs, clrevorder,
748 fastpathlinkrev, mfs, fnodes, source):
750 fastpathlinkrev, mfs, fnodes, source):
749 yield chunk
751 yield chunk
750
752
751 if ellipsesmode:
753 if ellipsesmode:
752 mfdicts = None
754 mfdicts = None
753 if self.is_shallow:
755 if self.is_shallow:
754 mfdicts = [(self._repo.manifestlog[n].read(), lr)
756 mfdicts = [(self._repo.manifestlog[n].read(), lr)
755 for (n, lr) in mfs.iteritems()]
757 for (n, lr) in mfs.iteritems()]
756
758
757 mfs.clear()
759 mfs.clear()
758 clrevs = set(cl.rev(x) for x in clnodes)
760 clrevs = set(cl.rev(x) for x in clnodes)
759
761
760 if not fastpathlinkrev:
762 if not fastpathlinkrev:
761 def linknodes(unused, fname):
763 def linknodes(unused, fname):
762 return fnodes.get(fname, {})
764 return fnodes.get(fname, {})
763 else:
765 else:
764 cln = cl.node
766 cln = cl.node
765 def linknodes(filerevlog, fname):
767 def linknodes(filerevlog, fname):
766 llr = filerevlog.linkrev
768 llr = filerevlog.linkrev
767 fln = filerevlog.node
769 fln = filerevlog.node
768 revs = ((r, llr(r)) for r in filerevlog)
770 revs = ((r, llr(r)) for r in filerevlog)
769 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
771 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
770
772
771 if ellipsesmode:
773 if ellipsesmode:
772 # We need to pass the mfdicts variable down into
774 # We need to pass the mfdicts variable down into
773 # generatefiles(), but more than one command might have
775 # generatefiles(), but more than one command might have
774 # wrapped generatefiles so we can't modify the function
776 # wrapped generatefiles so we can't modify the function
775 # signature. Instead, we pass the data to ourselves using an
777 # signature. Instead, we pass the data to ourselves using an
776 # instance attribute. I'm sorry.
778 # instance attribute. I'm sorry.
777 self._mfdicts = mfdicts
779 self._mfdicts = mfdicts
778
780
779 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
781 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
780 source):
782 source):
781 yield chunk
783 yield chunk
782
784
783 yield self.close()
785 yield self.close()
784
786
785 if clnodes:
787 if clnodes:
786 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
788 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
787
789
788 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
790 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
789 fnodes, source):
791 fnodes, source):
790 """Returns an iterator of changegroup chunks containing manifests.
792 """Returns an iterator of changegroup chunks containing manifests.
791
793
792 `source` is unused here, but is used by extensions like remotefilelog to
794 `source` is unused here, but is used by extensions like remotefilelog to
793 change what is sent based in pulls vs pushes, etc.
795 change what is sent based in pulls vs pushes, etc.
794 """
796 """
795 repo = self._repo
797 repo = self._repo
796 mfl = repo.manifestlog
798 mfl = repo.manifestlog
797 dirlog = mfl._revlog.dirlog
799 dirlog = mfl._revlog.dirlog
798 tmfnodes = {'': mfs}
800 tmfnodes = {'': mfs}

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def makelookupmflinknode(dir, nodes):
            if fastpathlinkrev:
                assert not dir
                return mfs.__getitem__

            def lookupmflinknode(x):
                """Callback for looking up the linknode for manifests.

                Returns the linkrev node for the specified manifest.

                SIDE EFFECT:

                1) fclnodes gets populated with the list of relevant
                   file nodes if we're not using fastpathlinkrev
                2) When treemanifests are in use, collects treemanifest nodes
                   to send

                Note that this means manifests must be completely sent to
                the client before you can trust the list of files and
                treemanifests to send.
                """
                clnode = nodes[x]
                mdata = mfl.get(dir, x).readfast(shallow=True)
                for p, n, fl in mdata.iterentries():
                    if fl == 't': # subdirectory manifest
                        subdir = dir + p + '/'
                        tmfclnodes = tmfnodes.setdefault(subdir, {})
                        tmfclnode = tmfclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[tmfclnode]:
                            tmfclnodes[n] = clnode
                    else:
                        f = dir + p
                        fclnodes = fnodes.setdefault(f, {})
                        fclnode = fclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            fclnodes[n] = clnode
                return clnode
            return lookupmflinknode

        size = 0
        while tmfnodes:
            dir, nodes = tmfnodes.popitem()
            prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
            if not dir or prunednodes:
                for x in self._packmanifests(dir, prunednodes,
                                             makelookupmflinknode(dir, nodes)):
                    size += len(x)
                    yield x
        self._verbosenote(_('%8.i (manifests)\n') % size)
        yield self._manifestsdone()

    # The 'source' parameter is useful for extensions
    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
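        # Restrict to files accepted by the configured matcher; for narrow
        # clones this is what keeps out-of-scope files from being sent.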
        changedfiles = list(filter(self._filematcher, changedfiles))

        if getattr(self, 'is_shallow', False):
            # See comment in generate() for why this sadness is a thing.
            mfdicts = self._mfdicts
            del self._mfdicts
            # In a shallow clone, the linknodes callback needs to also include
            # those file nodes that are in the manifests we sent but weren't
            # introduced by those manifests.
            commonctxs = [self._repo[c] for c in commonrevs]
            oldlinknodes = linknodes
            clrev = self._repo.changelog.rev

            # Defining this function has a side-effect of overriding the
            # function of the same name that was passed in as an argument.
            # TODO have caller pass in appropriate function.
            def linknodes(flog, fname):
                for c in commonctxs:
                    try:
                        fnode = c.filenode(fname)
                        self.clrev_to_localrev[c.rev()] = flog.rev(fnode)
                    except error.ManifestLookupError:
                        pass
                links = oldlinknodes(flog, fname)
                if len(links) != len(mfdicts):
                    for mf, lr in mfdicts:
                        fnode = mf.get(fname, None)
                        if fnode in links:
                            links[fnode] = min(links[fnode], lr, key=clrev)
                        elif fnode:
                            links[fnode] = lr
                return links

        return self._generatefiles(changedfiles, linknodes, commonrevs, source)

    def _generatefiles(self, changedfiles, linknodes, commonrevs, source):
        repo = self._repo
        progress = repo.ui.makeprogress(_('bundling'), unit=_('files'),
                                        total=len(changedfiles))
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise error.Abort(_("empty or missing file data for %s") %
                                  fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes; we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
            if filenodes:
                progress.update(i + 1, item=fname)
                h = self.fileheader(fname)
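                # h names the file; the revision delta chunks follow, and
                # group() terminates the sequence with an empty chunk.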
                size = len(h)
                yield h
                for chunk in self.group(filenodes, filerevlog, lookupfilelog):
                    size += len(chunk)
                    yield chunk
                self._verbosenote(_('%8.i %s\n') % (size, fname))
        progress.complete()

    def deltaparent(self, store, rev, p1, p2, prev):
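        # cg1 can only delta against the previous revision emitted in the
        # stream, so p1/p2 are ignored here.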
        if not store.candelta(prev, rev):
            raise error.ProgrammingError('cg1 should not be used in this case')
        return prev

    def revchunk(self, store, rev, prev, linknode):
        if util.safehasattr(self, 'full_nodes'):
            fn = self._revisiondeltanarrow
        else:
            fn = self._revisiondeltanormal

        delta = fn(store, rev, prev, linknode)
        if not delta:
            return

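        # Frame the revision as a chunk: a 4-byte big-endian length prefix,
        # then the delta header, then the delta data.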
        meta = self.builddeltaheader(delta.node, delta.p1node, delta.p2node,
                                     delta.basenode, delta.linknode,
                                     delta.flags)
        l = len(meta) + sum(len(x) for x in delta.deltachunks)

        yield chunkheader(l)
        yield meta
        for x in delta.deltachunks:
            yield x

    def _revisiondeltanormal(self, store, rev, prev, linknode):
        node = store.node(rev)
        p1, p2 = store.parentrevs(rev)
        base = self.deltaparent(store, rev, p1, p2, prev)

        prefix = ''
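        # Censored revisions can't be diffed against; ship the raw text (or
        # the censor tombstone) as a full replacement with a suitable header.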
        if store.iscensored(base) or store.iscensored(rev):
            try:
                delta = store.revision(node, raw=True)
            except error.CensoredNodeError as e:
                delta = e.tombstone
            if base == nullrev:
                prefix = mdiff.trivialdiffheader(len(delta))
            else:
                baselen = store.rawsize(base)
                prefix = mdiff.replacediffheader(baselen, len(delta))
        elif base == nullrev:
            delta = store.revision(node, raw=True)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = store.revdiff(base, rev)
        p1n, p2n = store.parents(node)

        return revisiondelta(
            node=node,
            p1node=p1n,
            p2node=p2n,
            basenode=store.node(base),
            linknode=linknode,
            flags=store.flags(rev),
            deltachunks=(prefix, delta),
        )

    def _revisiondeltanarrow(self, store, rev, prev, linknode):
        # build up some mapping information that's useful later. See
        # the local() nested function below.
        if not self.changelog_done:
            self.clnode_to_rev[linknode] = rev
            linkrev = rev
            self.clrev_to_localrev[linkrev] = rev
        else:
            linkrev = self.clnode_to_rev[linknode]
            self.clrev_to_localrev[linkrev] = rev

        # This is a node to send in full, because the changeset it
        # corresponds to was a full changeset.
        if linknode in self.full_nodes:
            return self._revisiondeltanormal(store, rev, prev, linknode)

        # At this point, a node can either be one we should skip or an
        # ellipsis. If it's not an ellipsis, bail immediately.
        if linkrev not in self.precomputed_ellipsis:
            return

        linkparents = self.precomputed_ellipsis[linkrev]
        def local(clrev):
            """Turn a changelog revnum into a local revnum.

            The ellipsis dag is stored as revnums on the changelog,
            but when we're producing ellipsis entries for
            non-changelog revlogs, we need to turn those numbers into
            something local. This does that for us, and during the
            changelog sending phase will also expand the stored
            mappings as needed.
            """
            if clrev == nullrev:
                return nullrev

            if not self.changelog_done:
                # If we're doing the changelog, it's possible that we
                # have a parent that is already on the client, and we
                # need to store some extra mapping information so that
                # our contained ellipsis nodes will be able to resolve
                # their parents.
                if clrev not in self.clrev_to_localrev:
                    clnode = store.node(clrev)
                    self.clnode_to_rev[clnode] = clrev
                return clrev

            # Walk the ellipsis-ized changelog breadth-first looking for a
            # change that has been linked from the current revlog.
            #
            # For a flat manifest revlog only a single step should be necessary
            # as all relevant changelog entries are relevant to the flat
            # manifest.
            #
            # For a filelog or tree manifest dirlog however not every changelog
            # entry will have been relevant, so we need to skip some changelog
            # nodes even after ellipsis-izing.
            walk = [clrev]
            while walk:
                p = walk[0]
                walk = walk[1:]
                if p in self.clrev_to_localrev:
                    return self.clrev_to_localrev[p]
                elif p in self.full_nodes:
                    walk.extend([pp for pp in self._repo.changelog.parentrevs(p)
                                 if pp != nullrev])
                elif p in self.precomputed_ellipsis:
                    walk.extend([pp for pp in self.precomputed_ellipsis[p]
                                 if pp != nullrev])
                else:
                    # In this case, we've got an ellipsis with parents
                    # outside the current bundle (likely an
                    # incremental pull). We "know" that we can use the
                    # value of this same revlog at whatever revision
                    # is pointed to by linknode. "Know" is in scare
                    # quotes because I haven't done enough examination
                    # of edge cases to convince myself this is really
                    # a fact - it works for all the (admittedly
                    # thorough) cases in our testsuite, but I would be
                    # somewhat unsurprised to find a case in the wild
                    # where this breaks down a bit. That said, I don't
                    # know if it would hurt anything.
                    for i in pycompat.xrange(rev, 0, -1):
                        if store.linkrev(i) == clrev:
                            return i
                    # We failed to resolve a parent for this node, so
                    # we crash the changegroup construction.
                    raise error.Abort(
                        'unable to resolve parent while packing %r %r'
                        ' for changeset %r' % (store.indexfile, rev, clrev))

            return nullrev
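        # Map the ellipsis node's changelog-level parents to revisions of
        # this revlog (or nullrev), using local() above.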
        if not linkparents or (
                store.parentrevs(rev) == (nullrev, nullrev)):
            p1, p2 = nullrev, nullrev
        elif len(linkparents) == 1:
            p1, = sorted(local(p) for p in linkparents)
            p2 = nullrev
        else:
            p1, p2 = sorted(local(p) for p in linkparents)

        n = store.node(rev)
        p1n, p2n = store.node(p1), store.node(p2)
        flags = store.flags(rev)
        flags |= revlog.REVIDX_ELLIPSIS

        # TODO: try and actually send deltas for ellipsis data blocks
        data = store.revision(n)
        diffheader = mdiff.trivialdiffheader(len(data))

        return revisiondelta(
            node=n,
            p1node=p1n,
            p2node=p2n,
            basenode=nullid,
            linknode=linknode,
            flags=flags,
            deltachunks=(diffheader, data),
        )

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # do nothing with basenode, it is implicitly the previous one in HG10
        # do nothing with flags, it is implicitly 0 for cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)

class cg2packer(cg1packer):
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER

    def __init__(self, repo, filematcher, version, bundlecaps=None):
        super(cg2packer, self).__init__(repo, filematcher, version,
                                        bundlecaps=bundlecaps)

        if self._reorder is None:
            # Since generaldelta is directly supported by cg2, reordering
            # generally doesn't help, so we disable it by default (treating
            # bundle.reorder=auto just like bundle.reorder=False).
            self._reorder = False

    def deltaparent(self, store, rev, p1, p2, prev):
        # Narrow ellipses mode.
        if util.safehasattr(self, 'full_nodes'):
            # TODO: send better deltas when in narrow mode.
            #
            # changegroup.group() loops over revisions to send,
            # including revisions we'll skip. What this means is that
            # `prev` will be a potentially useless delta base for all
            # ellipsis nodes, as the client likely won't have it. In
            # the future we should do bookkeeping about which nodes
            # have been sent to the client, and try to be
            # significantly smarter about delta bases. This is
            # slightly tricky because this same code has to work for
            # all revlogs, and we don't have the linkrev/linknode here.
            return p1

        dp = store.deltaparent(rev)
        if dp == nullrev and store.storedeltachains:
            # Avoid sending full revisions when delta parent is null. Pick prev
            # in that case. It's tempting to pick p1 in this case, as p1 will
            # be smaller in the common case. However, computing a delta against
            # p1 may require resolving the raw text of p1, which could be
            # expensive. The revlog caches should have prev cached, meaning
            # less CPU for changegroup generation. There is likely room to add
            # a flag and/or config option to control this behavior.
            base = prev
        elif dp == nullrev:
            # revlog is configured to use full snapshot for a reason,
            # stick to full snapshot.
            base = nullrev
        elif dp not in (p1, p2, prev):
            # Pick prev when we can't be sure remote has the base revision.
            return prev
        else:
            base = dp
        if base != nullrev and not store.candelta(base, rev):
            base = nullrev
        return base

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # Do nothing with flags, it is implicitly 0 in cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)

class cg3packer(cg2packer):
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        if dir:
            yield self.fileheader(dir)

        dirlog = self._repo.manifestlog._revlog.dirlog(dir)
        for chunk in self.group(mfnodes, dirlog, lookuplinknode,
                                units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        return self.close()

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        return struct.pack(
            self.deltaheader, node, p1n, p2n, basenode, linknode, flags)

def _makecg1packer(repo, filematcher, bundlecaps):
    return cg1packer(repo, filematcher, b'01', bundlecaps=bundlecaps)

def _makecg2packer(repo, filematcher, bundlecaps):
    return cg2packer(repo, filematcher, b'02', bundlecaps=bundlecaps)

def _makecg3packer(repo, filematcher, bundlecaps):
    return cg3packer(repo, filematcher, b'03', bundlecaps=bundlecaps)

_packermap = {'01': (_makecg1packer, cg1unpacker),
              # cg2 adds support for exchanging generaldelta
              '02': (_makecg2packer, cg2unpacker),
              # cg3 adds support for exchanging revlog flags and treemanifests
              '03': (_makecg3packer, cg3unpacker),
             }
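
# For example, _packermap['02'][0] is the factory producing a cg2 packer and
# _packermap['02'][1] the matching unpacker class; getbundler() and
# getunbundler() below do their lookups this way.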

def allsupportedversions(repo):
    versions = set(_packermap.keys())
    if not (repo.ui.configbool('experimental', 'changegroup3') or
            repo.ui.configbool('experimental', 'treemanifest') or
            'treemanifest' in repo.requirements):
        versions.discard('03')
    return versions

# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    return allsupportedversions(repo)

# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    versions = allsupportedversions(repo)
    if 'treemanifest' in repo.requirements:
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        versions.discard('01')
        versions.discard('02')
    if repository.NARROW_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # support that for stripping and unbundling to work.
        versions.discard('01')
        versions.discard('02')
    if LFS_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # mark LFS entries with REVIDX_EXTSTORED.
        versions.discard('01')
        versions.discard('02')

    return versions

def localversion(repo):
    # Finds the best version to use for bundles that are meant to be used
    # locally, such as those from strip and shelve, and temporary bundles.
    return max(supportedoutgoingversions(repo))

def safeversion(repo):
    # Finds the smallest version that it's safe to assume clients of the repo
    # will support. For example, all hg versions that support generaldelta also
    # support changegroup 02.
    versions = supportedoutgoingversions(repo)
    if 'generaldelta' in repo.requirements:
        versions.discard('01')
    assert versions
    return min(versions)
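
# Note that for a repo with the 'generaldelta' requirement, safeversion()
# can never return '01'; with tree manifests it effectively returns '03'.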

def getbundler(version, repo, bundlecaps=None, filematcher=None):
    assert version in supportedoutgoingversions(repo)

    if filematcher is None:
        filematcher = matchmod.alwaysmatcher(repo.root, '')

    if version == '01' and not filematcher.always():
        raise error.ProgrammingError('version 01 changegroups do not support '
                                     'sparse file matchers')

    # Requested files could include files not in the local store. So
    # filter those out.
    filematcher = matchmod.intersectmatchers(repo.narrowmatch(),
                                             filematcher)

    fn = _packermap[version][0]
    return fn(repo, filematcher, bundlecaps)

def getunbundler(version, fh, alg, extras=None):
    return _packermap[version][1](fh, alg, extras=extras)
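
# A rough usage sketch (hypothetical caller code, names invented for
# illustration only):
#
#   bundler = getbundler('02', repo)
#   stream = bundler.generate(commonrevs, missingnodes, False, 'pull')
#   cg = getunbundler('02', util.chunkbuffer(stream), None)
#
# makechangegroup() below wires the two halves together this way.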

def _changegroupinfo(repo, nodes, source):
    if repo.ui.verbose or source == 'bundle':
        repo.ui.status(_("%d changesets found\n") % len(nodes))
    if repo.ui.debugflag:
        repo.ui.debug("list of changesets:\n")
        for node in nodes:
            repo.ui.debug("%s\n" % hex(node))

def makechangegroup(repo, outgoing, version, source, fastpath=False,
                    bundlecaps=None):
    cgstream = makestream(repo, outgoing, version, source,
                          fastpath=fastpath, bundlecaps=bundlecaps)
    return getunbundler(version, util.chunkbuffer(cgstream), None,
                        {'clcount': len(outgoing.missing) })

def makestream(repo, outgoing, version, source, fastpath=False,
               bundlecaps=None, filematcher=None):
    bundler = getbundler(version, repo, bundlecaps=bundlecaps,
                         filematcher=filematcher)

    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know that all linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)

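# On the receiving side each file arrives as a filename chunk followed by its
# revision deltas and a terminating empty chunk; the loop below applies each
# such group to the corresponding filelog.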
def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    revisions = 0
    files = 0
    progress = repo.ui.makeprogress(_('files'), unit=_('files'),
                                    total=expectedfiles)
    for chunkdata in iter(source.filelogheader, {}):
        files += 1
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        progress.increment()
        fl = repo.file(f)
        o = len(fl)
        try:
            deltas = source.deltaiter()
            if not fl.addgroup(deltas, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        if f in needfiles:
            needs = needfiles[f]
            for new in pycompat.xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    progress.complete()

    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files

def _packellipsischangegroup(repo, common, match, relevant_nodes,
                             ellipsisroots, visitnodes, depth, source, version):
    if version in ('01', '02'):
        raise error.Abort(
            'ellipsis nodes require at least cg3 on client and server, '
            'but negotiated version %s' % version)
    # We wrap cg1packer.revchunk, using a side channel to pass
    # relevant_nodes into that area. Then if linknode isn't in the
    # set, we know we have an ellipsis node and we should defer
    # sending that node's data. We override close() to detect
    # pending ellipsis nodes and flush them.
    packer = getbundler(version, repo, filematcher=match)
    # Give the packer the list of nodes which should not be
    # ellipsis nodes. We store this rather than the set of nodes
    # that should be an ellipsis because for very large histories
    # we expect this to be significantly smaller.
    packer.full_nodes = relevant_nodes
    # Maps ellipsis revs to their roots at the changelog level.
    packer.precomputed_ellipsis = ellipsisroots
    # Maps CL revs to per-revlog revisions. Cleared in close() at
    # the end of each group.
    packer.clrev_to_localrev = {}
    packer.next_clrev_to_localrev = {}
    # Maps changelog nodes to changelog revs. Filled in once
    # during changelog stage and then left unmodified.
    packer.clnode_to_rev = {}
    packer.changelog_done = False
    # If true, informs the packer that it is serving shallow content and might
    # need to pack file contents not introduced by the changes being packed.
    packer.is_shallow = depth is not None

    return packer.generate(common, visitnodes, False, source)