##// END OF EJS Templates
changegroup: move non-pruning of non-ellipsis manifests to _prunemanifests()...
Martin von Zweigbergk -
r41933:1c1c4ef8 default
parent child Browse files
Show More
@@ -1,1423 +1,1423 b''
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import os
10 import os
11 import struct
11 import struct
12 import weakref
12 import weakref
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import (
15 from .node import (
16 hex,
16 hex,
17 nullid,
17 nullid,
18 nullrev,
18 nullrev,
19 short,
19 short,
20 )
20 )
21
21
22 from . import (
22 from . import (
23 error,
23 error,
24 match as matchmod,
24 match as matchmod,
25 mdiff,
25 mdiff,
26 phases,
26 phases,
27 pycompat,
27 pycompat,
28 repository,
28 repository,
29 util,
29 util,
30 )
30 )
31
31
# Per-revision delta header formats for each changegroup wire version:
#   cg1: node, p1, p2, linknode (4 x 20-byte nodes)
#   cg2: cg1 plus an explicit delta-base node
#   cg3: cg2 plus 16 bits of revlog flags (explicitly big-endian)
_CHANGEGROUPV1_DELTA_HEADER = struct.Struct("20s20s20s20s")
_CHANGEGROUPV2_DELTA_HEADER = struct.Struct("20s20s20s20s20s")
_CHANGEGROUPV3_DELTA_HEADER = struct.Struct(">20s20s20s20s20sH")

# Repository requirement string associated with LFS support.
LFS_REQUIREMENT = 'lfs'

# Local alias used throughout this module; reads N bytes from a stream
# (presumably raising on short reads — behavior lives in util.readexactly).
readexactly = util.readexactly
39
39
def getchunk(stream):
    """Read and return the next framed chunk from *stream* as a string.

    A chunk is a big-endian 32-bit length (which counts the 4 header
    bytes themselves) followed by the payload. A zero length marks the
    end of a chunk sequence and is returned as an empty string.
    """
    header = readexactly(stream, 4)
    (length,) = struct.unpack(">l", header)
    if length > 4:
        return readexactly(stream, length - 4)
    if length:
        # Lengths 1..4 (or negative) cannot frame any payload.
        raise error.Abort(_("invalid chunk length %d") % length)
    return ""
49
49
def chunkheader(length):
    """Return a changegroup chunk header (string) for *length* payload bytes."""
    # The on-wire length includes the 4 bytes of the header itself.
    return struct.pack(">l", 4 + length)
53
53
def closechunk():
    """Return the header of a zero-length chunk, which terminates a
    sequence of chunks."""
    return struct.pack(">l", 0)
57
57
def _fileheader(path):
    """Obtain a changegroup chunk header for a named path.

    The path itself is the chunk payload: frame it, then append it.
    """
    header = chunkheader(len(path))
    return header + path
61
61
def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        if not filename:
            # No name given: write to a fresh temp file, and remember to
            # remove it if anything below fails.
            fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, r"wb")
            cleanup = filename
        elif vfs:
            fh = vfs.open(filename, "wb")
        else:
            # Increase default buffer size because default is usually
            # small (4k is common on Linux).
            fh = open(filename, "wb", 131072)

        for chunk in chunks:
            fh.write(chunk)
        # Success: keep the file.
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
95
95
class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        """Wrap stream ``fh`` compressed with bundle type ``alg``.

        ``alg`` is a two-letter bundle compression identifier; None is
        treated as 'UN' (uncompressed). ``extras`` is an optional dict
        of additional metadata kept on the instance.
        """
        if alg is None:
            alg = 'UN'
        if alg not in util.compengines.supportedbundletypes:
            raise error.Abort(_('unknown stream compression type: %s')
                              % alg)
        if alg == 'BZ':
            # Legacy bzip2 streams arrive without the 'BZ' magic prefix.
            alg = '_truncatedBZ'

        compengine = util.compengines.forbundletype(alg)
        self._stream = compengine.decompressorreader(fh)
        self._type = alg
        self.extras = extras or {}
        # Progress callback invoked once per chunk (see _chunklength).
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        """Return True if the underlying stream was compressed."""
        return self._type is not None and self._type != 'UN'
    def read(self, l):
        """Read ``l`` bytes from the (decompressed) stream."""
        return self._stream.read(l)
    def seek(self, pos):
        """Seek the underlying stream to ``pos``."""
        return self._stream.seek(pos)
    def tell(self):
        """Return the current position in the underlying stream."""
        return self._stream.tell()
    def close(self):
        """Close the underlying stream."""
        return self._stream.close()

    def _chunklength(self):
        """Read a chunk's frame header and return its payload length.

        Returns 0 for the terminating empty chunk; aborts on lengths
        that cannot frame a payload. Fires the progress callback for
        each non-empty chunk.
        """
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        """Decode a cg1 delta header tuple.

        cg1 has no explicit delta base on the wire: the base is the
        previous node in the stream, or p1 for the first entry.
        """
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        """Read one delta chunk; return an empty dict at end of group.

        On success returns the tuple
        (node, p1, p2, cs, deltabase, delta, flags).
        """
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = self.deltaheader.unpack(headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return (node, p1, p2, cs, deltabase, delta, flags)

    def getchunks(self):
        """returns all the chunks contains in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parse the changegroup data, otherwise it will
        block in case of sshrepo because it don't know the end of the stream.
        """
        # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
        # and a list of filelogs. For changegroup 3, we expect 4 parts:
        # changelog, manifestlog, a list of tree manifestlogs, and a list of
        # filelogs.
        #
        # Changelog and manifestlog parts are terminated with empty chunks. The
        # tree and file parts are a list of entry sections. Each entry section
        # is a series of chunks terminating in an empty chunk. The list of these
        # entry sections is terminated in yet another empty chunk, so we know
        # we've reached the end of the tree/file list when we reach an empty
        # chunk that was proceeded by no non-empty chunks.

        parts = 0
        while parts < 2 + self._grouplistcount:
            noentries = True
            while True:
                chunk = getchunk(self)
                if not chunk:
                    # The first two empty chunks represent the end of the
                    # changelog and the manifestlog portions. The remaining
                    # empty chunks represent either A) the end of individual
                    # tree or file entries in the file list, or B) the end of
                    # the entire list. It's the end of the entire list if there
                    # were no entries (i.e. noentries is True).
                    if parts < 2:
                        parts += 1
                    elif noentries:
                        parts += 1
                    break
                noentries = False
                yield chunkheader(len(chunk))
                pos = 0
                # Re-emit the payload in 1MB slices to bound memory use.
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

    def _unpackmanifests(self, repo, revmap, trp, prog):
        """Consume the manifest group and add it to the repo's storage."""
        self.callback = prog.increment
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        deltas = self.deltaiter()
        repo.manifestlog.getstorage(b'').addgroup(deltas, revmap, trp)
        prog.complete()
        self.callback = None

    def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
              expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        try:
            # The transaction may already carry source information. In this
            # case we use the top level data. We overwrite the argument
            # because we need to use the top level value (if they exist)
            # in this function.
            srctype = tr.hookargs.setdefault('source', srctype)
            tr.hookargs.setdefault('url', url)
            repo.hook('prechangegroup',
                      throw=True, **pycompat.strkwargs(tr.hookargs))

            # write changelog data to temp files so concurrent readers
            # will not see an inconsistent view
            cl = repo.changelog
            cl.delayupdate(tr)
            oldheads = set(cl.heads())

            trp = weakref.proxy(tr)
            # pull off the changeset group
            repo.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            progress = repo.ui.makeprogress(_('changesets'), unit=_('chunks'),
                                            total=expectedtotal)
            self.callback = progress.increment

            # Collect the union of files touched by incoming changesets so
            # the filelog phase can report progress against a total.
            efiles = set()
            def onchangelog(cl, node):
                efiles.update(cl.readfiles(node))

            self.changelogheader()
            deltas = self.deltaiter()
            cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog)
            efiles = len(efiles)

            if not cgnodes:
                repo.ui.develwarn('applied empty changelog from changegroup',
                                  config='warn-empty-changegroup')
            clend = len(cl)
            changesets = clend - clstart
            progress.complete()
            self.callback = None

            # pull off the manifest group
            repo.ui.status(_("adding manifests\n"))
            # We know that we'll never have more manifests than we had
            # changesets.
            progress = repo.ui.makeprogress(_('manifests'), unit=_('chunks'),
                                            total=changesets)
            self._unpackmanifests(repo, revmap, trp, progress)

            needfiles = {}
            if repo.ui.configbool('server', 'validate'):
                cl = repo.changelog
                ml = repo.manifestlog
                # validate incoming csets have their manifests
                for cset in pycompat.xrange(clstart, clend):
                    mfnode = cl.changelogrevision(cset).manifest
                    mfest = ml[mfnode].readdelta()
                    # store file cgnodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            repo.ui.status(_("adding file changes\n"))
            newrevs, newfiles = _addchangegroupfiles(
                repo, self, revmap, trp, efiles, needfiles)
            revisions += newrevs
            files += newfiles

            deltaheads = 0
            if oldheads:
                heads = cl.heads()
                deltaheads = len(heads) - len(oldheads)
                for h in heads:
                    # New heads that close a branch don't count toward the
                    # "+N heads" message.
                    if h not in oldheads and repo[h].closesbranch():
                        deltaheads -= 1
            htext = ""
            if deltaheads:
                htext = _(" (%+d heads)") % deltaheads

            repo.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            repo.invalidatevolatilesets()

            if changesets > 0:
                if 'node' not in tr.hookargs:
                    tr.hookargs['node'] = hex(cl.node(clstart))
                    tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                    hookargs = dict(tr.hookargs)
                else:
                    # node/node_last already set by an earlier changegroup in
                    # this transaction: don't clobber the transaction-level
                    # values, only our local copy for this hook invocation.
                    hookargs = dict(tr.hookargs)
                    hookargs['node'] = hex(cl.node(clstart))
                    hookargs['node_last'] = hex(cl.node(clend - 1))
                repo.hook('pretxnchangegroup',
                          throw=True, **pycompat.strkwargs(hookargs))

            added = [cl.node(r) for r in pycompat.xrange(clstart, clend)]
            phaseall = None
            if srctype in ('push', 'serve'):
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if changeset already
                # exists locally as secret
                #
                # We should not use added here but the list of all change in
                # the bundle
                if repo.publishing():
                    targetphase = phaseall = phases.public
                else:
                    # closer target phase computation

                    # Those changesets have been pushed from the
                    # outside, their phases are going to be pushed
                    # alongside. Therefor `targetphase` is
                    # ignored.
                    targetphase = phaseall = phases.draft
            if added:
                phases.registernew(repo, tr, targetphase, added)
            if phaseall is not None:
                phases.advanceboundary(repo, tr, phaseall, cgnodes)

            if changesets > 0:

                def runhooks():
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(repo):
                        return

                    repo.hook("changegroup", **pycompat.strkwargs(hookargs))

                    for n in added:
                        args = hookargs.copy()
                        args['node'] = hex(n)
                        del args['node_last']
                        repo.hook("incoming", **pycompat.strkwargs(args))

                    newheads = [h for h in repo.heads()
                                if h not in oldheads]
                    repo.ui.log("incoming",
                                "%d incoming changes - new heads: %s\n",
                                len(added),
                                ', '.join([hex(c[:6]) for c in newheads]))

                tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        if deltaheads < 0:
            ret = deltaheads - 1
        else:
            ret = deltaheads + 1
        return ret

    def deltaiter(self):
        """
        returns an iterator of the deltas in this changegroup

        Useful for passing to the underlying storage system to be stored.
        """
        chain = None
        for chunkdata in iter(lambda: self.deltachunk(chain), {}):
            # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
            yield chunkdata
            chain = chunkdata[0]
438
438
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        # cg2 carries the delta base explicitly in the header, so
        # prevnode is unused; storage flags don't exist before cg3.
        node, p1, p2, deltabase, cs = headertuple
        flags = 0
        return node, p1, p2, deltabase, cs, flags
454
454
class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        # The full header, including storage flags, is on the wire.
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog):
        """Unpack the root manifest group, then any tree manifest groups."""
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % d)
            deltas = self.deltaiter()
            if not repo.manifestlog.getstorage(d).addgroup(deltas, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))
480
480
class headerlessfixup(object):
    """File-like wrapper that re-prepends already-consumed bytes.

    Used when leading bytes of a stream were read ahead (``h``) but must
    still be delivered to the consumer before the rest of ``fh``.
    """
    def __init__(self, fh, h):
        self._h = h    # buffered bytes not yet handed back out
        self._fh = fh  # the underlying stream

    def read(self, n):
        """Read ``n`` bytes, serving buffered bytes first."""
        if not self._h:
            return readexactly(self._fh, n)
        d, self._h = self._h[:n], self._h[n:]
        if len(d) < n:
            # Buffer exhausted mid-request; top up from the stream.
            d += readexactly(self._fh, n - len(d))
        return d
492
492
def _revisiondeltatochunks(delta, headerfn):
    """Serialize a revisiondelta to changegroup chunks.

    ``headerfn`` builds the version-specific delta header for ``delta``.
    Yields the framing header, the delta header, an optional diff
    prefix, and the payload.
    """

    # The captured revision delta may be encoded as a delta against
    # a base revision or as a full revision. The changegroup format
    # requires that everything on the wire be deltas. So for full
    # revisions, we need to invent a header that says to rewrite
    # data.

    if delta.delta is not None:
        # Already a delta: send as-is, no synthetic prefix needed.
        prefix, data = b'', delta.delta
    elif delta.basenode == nullid:
        # Full revision with no base: frame it as a diff inserting the
        # entire text (presumably; exact encoding lives in mdiff).
        data = delta.revision
        prefix = mdiff.trivialdiffheader(len(data))
    else:
        # Full revision against a real base: frame it as a diff that
        # replaces the base's whole content.
        data = delta.revision
        prefix = mdiff.replacediffheader(delta.baserevisionsize,
                                         len(data))

    meta = headerfn(delta)

    yield chunkheader(len(meta) + len(prefix) + len(data))
    yield meta
    if prefix:
        yield prefix
    yield data
519
519
520 def _sortnodesellipsis(store, nodes, cl, lookup):
520 def _sortnodesellipsis(store, nodes, cl, lookup):
521 """Sort nodes for changegroup generation."""
521 """Sort nodes for changegroup generation."""
522 # Ellipses serving mode.
522 # Ellipses serving mode.
523 #
523 #
524 # In a perfect world, we'd generate better ellipsis-ified graphs
524 # In a perfect world, we'd generate better ellipsis-ified graphs
525 # for non-changelog revlogs. In practice, we haven't started doing
525 # for non-changelog revlogs. In practice, we haven't started doing
526 # that yet, so the resulting DAGs for the manifestlog and filelogs
526 # that yet, so the resulting DAGs for the manifestlog and filelogs
527 # are actually full of bogus parentage on all the ellipsis
527 # are actually full of bogus parentage on all the ellipsis
528 # nodes. This has the side effect that, while the contents are
528 # nodes. This has the side effect that, while the contents are
529 # correct, the individual DAGs might be completely out of whack in
529 # correct, the individual DAGs might be completely out of whack in
530 # a case like 882681bc3166 and its ancestors (back about 10
530 # a case like 882681bc3166 and its ancestors (back about 10
531 # revisions or so) in the main hg repo.
531 # revisions or so) in the main hg repo.
532 #
532 #
533 # The one invariant we *know* holds is that the new (potentially
533 # The one invariant we *know* holds is that the new (potentially
534 # bogus) DAG shape will be valid if we order the nodes in the
534 # bogus) DAG shape will be valid if we order the nodes in the
535 # order that they're introduced in dramatis personae by the
535 # order that they're introduced in dramatis personae by the
536 # changelog, so what we do is we sort the non-changelog histories
536 # changelog, so what we do is we sort the non-changelog histories
537 # by the order in which they are used by the changelog.
537 # by the order in which they are used by the changelog.
538 key = lambda n: cl.rev(lookup(n))
538 key = lambda n: cl.rev(lookup(n))
539 return sorted(nodes, key=key)
539 return sorted(nodes, key=key)
540
540
def _resolvenarrowrevisioninfo(cl, store, ischangelog, rev, linkrev,
                               linknode, clrevtolocalrev, fullclnodes,
                               precomputedellipsis):
    """Resolve (p1node, p2node, linknode) for an ellipsis revision.

    ``rev`` is a revnum of ``store``; ``linkrev``/``linknode`` name the
    changeset linked to it. The ellipsis parents recorded for ``linkrev``
    in ``precomputedellipsis`` are changelog revnums and are mapped back
    to revnums of ``store`` via the nested ``local()`` helper.
    """
    linkparents = precomputedellipsis[linkrev]
    def local(clrev):
        """Turn a changelog revnum into a local revnum.

        The ellipsis dag is stored as revnums on the changelog,
        but when we're producing ellipsis entries for
        non-changelog revlogs, we need to turn those numbers into
        something local. This does that for us, and during the
        changelog sending phase will also expand the stored
        mappings as needed.
        """
        if clrev == nullrev:
            return nullrev

        if ischangelog:
            # Identity mapping: changelog revnums are already "local".
            return clrev

        # Walk the ellipsis-ized changelog breadth-first looking for a
        # change that has been linked from the current revlog.
        #
        # For a flat manifest revlog only a single step should be necessary
        # as all relevant changelog entries are relevant to the flat
        # manifest.
        #
        # For a filelog or tree manifest dirlog however not every changelog
        # entry will have been relevant, so we need to skip some changelog
        # nodes even after ellipsis-izing.
        walk = [clrev]
        while walk:
            # NOTE: pop-from-front via slicing keeps this a BFS queue.
            p = walk[0]
            walk = walk[1:]
            if p in clrevtolocalrev:
                return clrevtolocalrev[p]
            elif p in fullclnodes:
                walk.extend([pp for pp in cl.parentrevs(p)
                             if pp != nullrev])
            elif p in precomputedellipsis:
                walk.extend([pp for pp in precomputedellipsis[p]
                             if pp != nullrev])
            else:
                # In this case, we've got an ellipsis with parents
                # outside the current bundle (likely an
                # incremental pull). We "know" that we can use the
                # value of this same revlog at whatever revision
                # is pointed to by linknode. "Know" is in scare
                # quotes because I haven't done enough examination
                # of edge cases to convince myself this is really
                # a fact - it works for all the (admittedly
                # thorough) cases in our testsuite, but I would be
                # somewhat unsurprised to find a case in the wild
                # where this breaks down a bit. That said, I don't
                # know if it would hurt anything.
                for i in pycompat.xrange(rev, 0, -1):
                    if store.linkrev(i) == clrev:
                        return i
                # We failed to resolve a parent for this node, so
                # we crash the changegroup construction.
                raise error.Abort(
                    'unable to resolve parent while packing %r %r'
                    ' for changeset %r' % (store.indexfile, rev, clrev))

        return nullrev

    # No recorded link parents, or the revision is a local root: emit it
    # parentless.
    if not linkparents or (
        store.parentrevs(rev) == (nullrev, nullrev)):
        p1, p2 = nullrev, nullrev
    elif len(linkparents) == 1:
        p1, = sorted(local(p) for p in linkparents)
        p2 = nullrev
    else:
        p1, p2 = sorted(local(p) for p in linkparents)

    p1node, p2node = store.node(p1), store.node(p2)

    return p1node, p2node, linknode
619
619
def deltagroup(repo, store, nodes, ischangelog, lookup, forcedeltaparentprev,
               topic=None,
               ellipses=False, clrevtolocalrev=None, fullclnodes=None,
               precomputedellipsis=None):
    """Calculate deltas for a set of revisions.

    Is a generator of ``revisiondelta`` instances.

    If topic is not None, progress detail will be generated using this
    topic name (e.g. changesets, manifests, etc).
    """
    if not nodes:
        return

    cl = repo.changelog

    if ischangelog:
        # `hg log` shows changesets in storage order. To preserve order
        # across clones, send out changesets in storage order.
        nodesorder = 'storage'
    elif ellipses:
        nodes = _sortnodesellipsis(store, nodes, cl, lookup)
        nodesorder = 'nodes'
    else:
        nodesorder = None

    # Perform ellipses filtering and revision massaging. We do this before
    # emitrevisions() because a) filtering out revisions creates less work
    # for emitrevisions() b) dropping revisions would break emitrevisions()'s
    # assumptions about delta choices and we would possibly send a delta
    # referencing a missing base revision.
    #
    # Also, calling lookup() has side-effects with regards to populating
    # data structures. If we don't call lookup() for each node or if we call
    # lookup() after the first pass through each node, things can break -
    # possibly intermittently depending on the python hash seed! For that
    # reason, we store a mapping of all linknodes during the initial node
    # pass rather than use lookup() on the output side.
    if ellipses:
        filtered = []
        adjustedparents = {}
        linknodes = {}

        for node in nodes:
            rev = store.rev(node)
            linknode = lookup(node)
            linkrev = cl.rev(linknode)
            # NOTE: caller-supplied dict is mutated in place here.
            clrevtolocalrev[linkrev] = rev

            # If linknode is in fullclnodes, it means the corresponding
            # changeset was a full changeset and is being sent unaltered.
            if linknode in fullclnodes:
                linknodes[node] = linknode

            # If the corresponding changeset wasn't in the set computed
            # as relevant to us, it should be dropped outright.
            elif linkrev not in precomputedellipsis:
                continue

            else:
                # We could probably do this later and avoid the dict
                # holding state. But it likely doesn't matter.
                p1node, p2node, linknode = _resolvenarrowrevisioninfo(
                    cl, store, ischangelog, rev, linkrev, linknode,
                    clrevtolocalrev, fullclnodes, precomputedellipsis)

                adjustedparents[node] = (p1node, p2node)
                linknodes[node] = linknode

            filtered.append(node)

        nodes = filtered

    # We expect the first pass to be fast, so we only engage the progress
    # meter for constructing the revision deltas.
    progress = None
    if topic is not None:
        progress = repo.ui.makeprogress(topic, unit=_('chunks'),
                                        total=len(nodes))

    configtarget = repo.ui.config('devel', 'bundle.delta')
    if configtarget not in ('', 'p1', 'full'):
        msg = _("""config "devel.bundle.delta" as unknown value: %s""")
        repo.ui.warn(msg % configtarget)

    # Map the (devel-only) config override onto the storage delta mode.
    deltamode = repository.CG_DELTAMODE_STD
    if forcedeltaparentprev:
        deltamode = repository.CG_DELTAMODE_PREV
    elif configtarget == 'p1':
        deltamode = repository.CG_DELTAMODE_P1
    elif configtarget == 'full':
        deltamode = repository.CG_DELTAMODE_FULL

    revisions = store.emitrevisions(
        nodes,
        nodesorder=nodesorder,
        revisiondata=True,
        assumehaveparentrevisions=not ellipses,
        deltamode=deltamode)

    for i, revision in enumerate(revisions):
        if progress:
            progress.update(i + 1)

        if ellipses:
            # Linknodes were captured during the first pass (see above).
            linknode = linknodes[revision.node]

            if revision.node in adjustedparents:
                p1node, p2node = adjustedparents[revision.node]
                revision.p1node = p1node
                revision.p2node = p2node
                revision.flags |= repository.REVISION_FLAG_ELLIPSIS

        else:
            linknode = lookup(revision.node)

        revision.linknode = linknode
        yield revision

    if progress:
        progress.complete()
741
741
742 class cgpacker(object):
742 class cgpacker(object):
    def __init__(self, repo, oldmatcher, matcher, version,
                 builddeltaheader, manifestsend,
                 forcedeltaparentprev=False,
                 bundlecaps=None, ellipses=False,
                 shallow=False, ellipsisroots=None, fullnodes=None):
        """Given a source repo, construct a bundler.

        oldmatcher is a matcher that matches on files the client already has.
        These will not be included in the changegroup.

        matcher is a matcher that matches on files to include in the
        changegroup. Used to facilitate sparse changegroups.

        forcedeltaparentprev indicates whether delta parents must be against
        the previous revision in a delta group. This should only be used for
        compatibility with changegroup version 1.

        builddeltaheader is a callable that constructs the header for a group
        delta.

        manifestsend is a chunk to send after manifests have been fully emitted.

        ellipses indicates whether ellipsis serving mode is enabled.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle. While bundlecaps is
        unused in core Mercurial, extensions rely on this feature to communicate
        capabilities to customize the changegroup packer.

        shallow indicates whether shallow data might be sent. The packer may
        need to pack file contents not introduced by the changes being packed.

        fullnodes is the set of changelog nodes which should not be ellipsis
        nodes. We store this rather than the set of nodes that should be
        ellipsis because for very large histories we expect this to be
        significantly smaller.
        """
        # Both matchers are mandatory; a None matcher is a programming error.
        assert oldmatcher
        assert matcher
        self._oldmatcher = oldmatcher
        self._matcher = matcher

        self.version = version
        self._forcedeltaparentprev = forcedeltaparentprev
        self._builddeltaheader = builddeltaheader
        self._manifestsend = manifestsend
        self._ellipses = ellipses

        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        self._isshallow = shallow
        self._fullclnodes = fullnodes

        # Maps ellipsis revs to their roots at the changelog level.
        self._precomputedellipsis = ellipsisroots

        self._repo = repo

        # Size notes are only emitted in verbose (non-debug) mode; otherwise
        # _verbosenote is a no-op so callers can invoke it unconditionally.
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None
807
807
    def generate(self, commonrevs, clnodes, fastpathlinkrev, source,
                 changelog=True):
        """Yield a sequence of changegroup byte chunks.
        If changelog is False, changelog data won't be added to changegroup
        """

        repo = self._repo
        cl = repo.changelog

        self._verbosenote(_('uncompressed size of bundle content:\n'))
        size = 0

        # Changelog section. When changelog=False, _generatechangelog still
        # returns the populated state but an empty delta iterable.
        clstate, deltas = self._generatechangelog(cl, clnodes,
                                                  generate=changelog)
        for delta in deltas:
            for chunk in _revisiondeltatochunks(delta,
                                                self._builddeltaheader):
                size += len(chunk)
                yield chunk

        close = closechunk()
        size += len(close)
        yield closechunk()

        self._verbosenote(_('%8.i (changelog)\n') % size)

        clrevorder = clstate['clrevorder']
        manifests = clstate['manifests']
        changedfiles = clstate['changedfiles']

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the filelogs
        # are walked in revlog order.
        #
        # When taking the slowpath when the manifest revlog uses generaldelta,
        # the manifest may be walked in the "wrong" order. Without 'clrevorder',
        # we would get an incorrect linkrev (see fix in cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta and is
        # never reordered. To handle this case, we simply take the slowpath,
        # which already has the 'clrevorder' logic. This was also fixed in
        # cc0ff93d0c0c.

        # Treemanifests don't work correctly with fastpathlinkrev
        # either, because we don't discover which directory nodes to
        # send along with files. This could probably be fixed.
        fastpathlinkrev = fastpathlinkrev and (
            'treemanifest' not in repo.requirements)

        fnodes = {} # needed file nodes

        # Manifest section: root manifest first, then (for cg3) one group
        # per directory, each preceded by a file header naming the tree.
        size = 0
        it = self.generatemanifests(
            commonrevs, clrevorder, fastpathlinkrev, manifests, fnodes, source,
            clstate['clrevtomanifestrev'])

        for tree, deltas in it:
            if tree:
                # Directory manifests only exist in changegroup version 03.
                assert self.version == b'03'
                chunk = _fileheader(tree)
                size += len(chunk)
                yield chunk

            for delta in deltas:
                chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
                for chunk in chunks:
                    size += len(chunk)
                    yield chunk

            close = closechunk()
            size += len(close)
            yield close

        self._verbosenote(_('%8.i (manifests)\n') % size)
        yield self._manifestsend

        mfdicts = None
        if self._ellipses and self._isshallow:
            mfdicts = [(self._repo.manifestlog[n].read(), lr)
                       for (n, lr) in manifests.iteritems()]

        manifests.clear()
        clrevs = set(cl.rev(x) for x in clnodes)

        # File section: one header + delta group per changed file path.
        it = self.generatefiles(changedfiles, commonrevs,
                                source, mfdicts, fastpathlinkrev,
                                fnodes, clrevs)

        for path, deltas in it:
            h = _fileheader(path)
            size = len(h)
            yield h

            for delta in deltas:
                chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
                for chunk in chunks:
                    size += len(chunk)
                    yield chunk

            close = closechunk()
            size += len(close)
            yield close

            self._verbosenote(_('%8.i %s\n') % (size, path))

        yield closechunk()

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)
919
919
920 def _generatechangelog(self, cl, nodes, generate=True):
920 def _generatechangelog(self, cl, nodes, generate=True):
921 """Generate data for changelog chunks.
921 """Generate data for changelog chunks.
922
922
923 Returns a 2-tuple of a dict containing state and an iterable of
923 Returns a 2-tuple of a dict containing state and an iterable of
924 byte chunks. The state will not be fully populated until the
924 byte chunks. The state will not be fully populated until the
925 chunk stream has been fully consumed.
925 chunk stream has been fully consumed.
926
926
927 if generate is False, the state will be fully populated and no chunk
927 if generate is False, the state will be fully populated and no chunk
928 stream will be yielded
928 stream will be yielded
929 """
929 """
930 clrevorder = {}
930 clrevorder = {}
931 manifests = {}
931 manifests = {}
932 mfl = self._repo.manifestlog
932 mfl = self._repo.manifestlog
933 changedfiles = set()
933 changedfiles = set()
934 clrevtomanifestrev = {}
934 clrevtomanifestrev = {}
935
935
936 state = {
936 state = {
937 'clrevorder': clrevorder,
937 'clrevorder': clrevorder,
938 'manifests': manifests,
938 'manifests': manifests,
939 'changedfiles': changedfiles,
939 'changedfiles': changedfiles,
940 'clrevtomanifestrev': clrevtomanifestrev,
940 'clrevtomanifestrev': clrevtomanifestrev,
941 }
941 }
942
942
943 if not (generate or self._ellipses):
943 if not (generate or self._ellipses):
944 # sort the nodes in storage order
944 # sort the nodes in storage order
945 nodes = sorted(nodes, key=cl.rev)
945 nodes = sorted(nodes, key=cl.rev)
946 for node in nodes:
946 for node in nodes:
947 c = cl.changelogrevision(node)
947 c = cl.changelogrevision(node)
948 clrevorder[node] = len(clrevorder)
948 clrevorder[node] = len(clrevorder)
949 # record the first changeset introducing this manifest version
949 # record the first changeset introducing this manifest version
950 manifests.setdefault(c.manifest, node)
950 manifests.setdefault(c.manifest, node)
951 # Record a complete list of potentially-changed files in
951 # Record a complete list of potentially-changed files in
952 # this manifest.
952 # this manifest.
953 changedfiles.update(c.files)
953 changedfiles.update(c.files)
954
954
955 return state, ()
955 return state, ()
956
956
957 # Callback for the changelog, used to collect changed files and
957 # Callback for the changelog, used to collect changed files and
958 # manifest nodes.
958 # manifest nodes.
959 # Returns the linkrev node (identity in the changelog case).
959 # Returns the linkrev node (identity in the changelog case).
960 def lookupcl(x):
960 def lookupcl(x):
961 c = cl.changelogrevision(x)
961 c = cl.changelogrevision(x)
962 clrevorder[x] = len(clrevorder)
962 clrevorder[x] = len(clrevorder)
963
963
964 if self._ellipses:
964 if self._ellipses:
965 # Only update manifests if x is going to be sent. Otherwise we
965 # Only update manifests if x is going to be sent. Otherwise we
966 # end up with bogus linkrevs specified for manifests and
966 # end up with bogus linkrevs specified for manifests and
967 # we skip some manifest nodes that we should otherwise
967 # we skip some manifest nodes that we should otherwise
968 # have sent.
968 # have sent.
969 if (x in self._fullclnodes
969 if (x in self._fullclnodes
970 or cl.rev(x) in self._precomputedellipsis):
970 or cl.rev(x) in self._precomputedellipsis):
971
971
972 manifestnode = c.manifest
972 manifestnode = c.manifest
973 # Record the first changeset introducing this manifest
973 # Record the first changeset introducing this manifest
974 # version.
974 # version.
975 manifests.setdefault(manifestnode, x)
975 manifests.setdefault(manifestnode, x)
976 # Set this narrow-specific dict so we have the lowest
976 # Set this narrow-specific dict so we have the lowest
977 # manifest revnum to look up for this cl revnum. (Part of
977 # manifest revnum to look up for this cl revnum. (Part of
978 # mapping changelog ellipsis parents to manifest ellipsis
978 # mapping changelog ellipsis parents to manifest ellipsis
979 # parents)
979 # parents)
980 clrevtomanifestrev.setdefault(
980 clrevtomanifestrev.setdefault(
981 cl.rev(x), mfl.rev(manifestnode))
981 cl.rev(x), mfl.rev(manifestnode))
982 # We can't trust the changed files list in the changeset if the
982 # We can't trust the changed files list in the changeset if the
983 # client requested a shallow clone.
983 # client requested a shallow clone.
984 if self._isshallow:
984 if self._isshallow:
985 changedfiles.update(mfl[c.manifest].read().keys())
985 changedfiles.update(mfl[c.manifest].read().keys())
986 else:
986 else:
987 changedfiles.update(c.files)
987 changedfiles.update(c.files)
988 else:
988 else:
989 # record the first changeset introducing this manifest version
989 # record the first changeset introducing this manifest version
990 manifests.setdefault(c.manifest, x)
990 manifests.setdefault(c.manifest, x)
991 # Record a complete list of potentially-changed files in
991 # Record a complete list of potentially-changed files in
992 # this manifest.
992 # this manifest.
993 changedfiles.update(c.files)
993 changedfiles.update(c.files)
994
994
995 return x
995 return x
996
996
997 gen = deltagroup(
997 gen = deltagroup(
998 self._repo, cl, nodes, True, lookupcl,
998 self._repo, cl, nodes, True, lookupcl,
999 self._forcedeltaparentprev,
999 self._forcedeltaparentprev,
1000 ellipses=self._ellipses,
1000 ellipses=self._ellipses,
1001 topic=_('changesets'),
1001 topic=_('changesets'),
1002 clrevtolocalrev={},
1002 clrevtolocalrev={},
1003 fullclnodes=self._fullclnodes,
1003 fullclnodes=self._fullclnodes,
1004 precomputedellipsis=self._precomputedellipsis)
1004 precomputedellipsis=self._precomputedellipsis)
1005
1005
1006 return state, gen
1006 return state, gen
1007
1007
def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev,
                      manifests, fnodes, source, clrevtolocalrev):
    """Returns an iterator of changegroup chunks containing manifests.

    `source` is unused here, but is used by extensions like remotefilelog to
    change what is sent based in pulls vs pushes, etc.
    """
    repo = self._repo
    mfl = repo.manifestlog
    # Map of tree path -> {manifest node -> linkrev node}; seeded with the
    # root manifests and grown as subdirectory manifests are discovered.
    tmfnodes = {'': manifests}

    # Callback for the manifest, used to collect linkrevs for filelog
    # revisions.
    # Returns the linkrev node (collected in lookupcl).
    def makelookupmflinknode(tree, nodes):
        if fastpathlinkrev:
            # Fast path only applies to the root manifest; linkrev nodes
            # were already recorded in `manifests` by the changelog pass.
            assert not tree
            return manifests.__getitem__

        def lookupmflinknode(x):
            """Callback for looking up the linknode for manifests.

            Returns the linkrev node for the specified manifest.

            SIDE EFFECT:

            1) fclnodes gets populated with the list of relevant
               file nodes if we're not using fastpathlinkrev
            2) When treemanifests are in use, collects treemanifest nodes
               to send

            Note that this means manifests must be completely sent to
            the client before you can trust the list of files and
            treemanifests to send.
            """
            clnode = nodes[x]
            mdata = mfl.get(tree, x).readfast(shallow=True)
            for p, n, fl in mdata.iterentries():
                if fl == 't': # subdirectory manifest
                    subtree = tree + p + '/'
                    tmfclnodes = tmfnodes.setdefault(subtree, {})
                    tmfclnode = tmfclnodes.setdefault(n, clnode)
                    # Keep the earliest introducing changeset as linknode.
                    if clrevorder[clnode] < clrevorder[tmfclnode]:
                        tmfclnodes[n] = clnode
                else:
                    f = tree + p
                    fclnodes = fnodes.setdefault(f, {})
                    fclnode = fclnodes.setdefault(n, clnode)
                    if clrevorder[clnode] < clrevorder[fclnode]:
                        fclnodes[n] = clnode
            return clnode
        return lookupmflinknode

    while tmfnodes:
        tree, nodes = tmfnodes.popitem()

        should_visit = self._matcher.visitdir(tree[:-1] or '.')
        if tree and not should_visit:
            continue

        store = mfl.getstorage(tree)

        if not should_visit:
            # No nodes to send because this directory is out of
            # the client's view of the repository (probably
            # because of narrow clones). Do this even for the root
            # directory (tree=='')
            prunednodes = []
        else:
            # Avoid sending any manifest nodes we can prove the
            # client already has by checking linkrevs. See the
            # related comment in generatefiles().
            prunednodes = self._prunemanifests(store, nodes, commonrevs)

        if tree and not prunednodes:
            continue

        lookupfn = makelookupmflinknode(tree, nodes)

        deltas = deltagroup(
            self._repo, store, prunednodes, False, lookupfn,
            self._forcedeltaparentprev,
            ellipses=self._ellipses,
            topic=_('manifests'),
            clrevtolocalrev=clrevtolocalrev,
            fullclnodes=self._fullclnodes,
            precomputedellipsis=self._precomputedellipsis)

        if not self._oldmatcher.visitdir(store.tree[:-1] or '.'):
            yield tree, deltas
        else:
            # 'deltas' is a generator and we need to consume it even if
            # we are not going to send it because a side-effect is that
            # it updates tmdnodes (via lookupfn)
            for d in deltas:
                pass
        if not tree:
            yield tree, []
1111
1106
1112 def _prunemanifests(self, store, nodes, commonrevs):
1107 def _prunemanifests(self, store, nodes, commonrevs):
1108 if not self._ellipses:
1109 # In non-ellipses case and large repositories, it is better to
1110 # prevent calling of store.rev and store.linkrev on a lot of
1111 # nodes as compared to sending some extra data
1112 return nodes.copy()
1113 # This is split out as a separate method to allow filtering
1113 # This is split out as a separate method to allow filtering
1114 # commonrevs in extension code.
1114 # commonrevs in extension code.
1115 #
1115 #
1116 # TODO(augie): this shouldn't be required, instead we should
1116 # TODO(augie): this shouldn't be required, instead we should
1117 # make filtering of revisions to send delegated to the store
1117 # make filtering of revisions to send delegated to the store
1118 # layer.
1118 # layer.
1119 frev, flr = store.rev, store.linkrev
1119 frev, flr = store.rev, store.linkrev
1120 return [n for n in nodes if flr(frev(n)) not in commonrevs]
1120 return [n for n in nodes if flr(frev(n)) not in commonrevs]
1121
1121
# The 'source' parameter is useful for extensions
def generatefiles(self, changedfiles, commonrevs, source,
                  mfdicts, fastpathlinkrev, fnodes, clrevs):
    """Yield (filename, deltas) pairs for every changed file to send.

    Files already visible through ``self._oldmatcher`` or excluded by
    ``self._matcher`` are skipped. Filenodes whose linkrevs are in
    ``commonrevs`` are pruned since the client already has them.
    """
    changedfiles = [f for f in changedfiles
                    if self._matcher(f) and not self._oldmatcher(f)]

    if not fastpathlinkrev:
        def normallinknodes(unused, fname):
            # Linkrev nodes were collected while sending manifests.
            return fnodes.get(fname, {})
    else:
        cln = self._repo.changelog.node

        def normallinknodes(store, fname):
            flinkrev = store.linkrev
            fnode = store.node
            revs = ((r, flinkrev(r)) for r in store)
            return dict((fnode(r), cln(lr))
                        for r, lr in revs if lr in clrevs)

    clrevtolocalrev = {}

    if self._isshallow:
        # In a shallow clone, the linknodes callback needs to also include
        # those file nodes that are in the manifests we sent but weren't
        # introduced by those manifests.
        commonctxs = [self._repo[c] for c in commonrevs]
        clrev = self._repo.changelog.rev

        def linknodes(flog, fname):
            for c in commonctxs:
                try:
                    fnode = c.filenode(fname)
                    clrevtolocalrev[c.rev()] = flog.rev(fnode)
                except error.ManifestLookupError:
                    # File not present in this common context; skip it.
                    pass
            links = normallinknodes(flog, fname)
            if len(links) != len(mfdicts):
                for mf, lr in mfdicts:
                    fnode = mf.get(fname, None)
                    if fnode in links:
                        links[fnode] = min(links[fnode], lr, key=clrev)
                    elif fnode:
                        links[fnode] = lr
            return links
    else:
        linknodes = normallinknodes

    repo = self._repo
    progress = repo.ui.makeprogress(_('files'), unit=_('files'),
                                    total=len(changedfiles))
    for i, fname in enumerate(sorted(changedfiles)):
        filerevlog = repo.file(fname)
        if not filerevlog:
            raise error.Abort(_("empty or missing file data for %s") %
                              fname)

        clrevtolocalrev.clear()

        linkrevnodes = linknodes(filerevlog, fname)
        # Lookup for filenodes, we collected the linkrev nodes above in the
        # fastpath case and with lookupmf in the slowpath case.
        def lookupfilelog(x):
            return linkrevnodes[x]

        frev, flr = filerevlog.rev, filerevlog.linkrev
        # Skip sending any filenode we know the client already
        # has. This avoids over-sending files relatively
        # inexpensively, so it's not a problem if we under-filter
        # here.
        filenodes = [n for n in linkrevnodes
                     if flr(frev(n)) not in commonrevs]

        if not filenodes:
            continue

        progress.update(i + 1, item=fname)

        deltas = deltagroup(
            self._repo, filerevlog, filenodes, False, lookupfilelog,
            self._forcedeltaparentprev,
            ellipses=self._ellipses,
            clrevtolocalrev=clrevtolocalrev,
            fullclnodes=self._fullclnodes,
            precomputedellipsis=self._precomputedellipsis)

        yield fname, deltas

    progress.complete()
1210
1210
def _makecg1packer(repo, oldmatcher, matcher, bundlecaps,
                   ellipses=False, shallow=False, ellipsisroots=None,
                   fullnodes=None):
    """Construct a changegroup version 01 packer.

    cg1 delta headers carry node, both parents, and the linknode; the
    delta base is implicitly the previous revision (forcedeltaparentprev).
    """
    builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack(
        d.node, d.p1node, d.p2node, d.linknode)

    return cgpacker(repo, oldmatcher, matcher, b'01',
                    builddeltaheader=builddeltaheader,
                    manifestsend=b'',
                    forcedeltaparentprev=True,
                    bundlecaps=bundlecaps,
                    ellipses=ellipses,
                    shallow=shallow,
                    ellipsisroots=ellipsisroots,
                    fullnodes=fullnodes)
1226
1226
def _makecg2packer(repo, oldmatcher, matcher, bundlecaps,
                   ellipses=False, shallow=False, ellipsisroots=None,
                   fullnodes=None):
    """Construct a changegroup version 02 packer.

    cg2 adds an explicit delta base node to the delta header
    (generaldelta support).
    """
    builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack(
        d.node, d.p1node, d.p2node, d.basenode, d.linknode)

    return cgpacker(repo, oldmatcher, matcher, b'02',
                    builddeltaheader=builddeltaheader,
                    manifestsend=b'',
                    bundlecaps=bundlecaps,
                    ellipses=ellipses,
                    shallow=shallow,
                    ellipsisroots=ellipsisroots,
                    fullnodes=fullnodes)
1241
1241
def _makecg3packer(repo, oldmatcher, matcher, bundlecaps,
                   ellipses=False, shallow=False, ellipsisroots=None,
                   fullnodes=None):
    """Construct a changegroup version 03 packer.

    cg3 adds revlog flags to the delta header and terminates the
    manifest section with a close chunk (treemanifest support).
    """
    builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
        d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags)

    return cgpacker(repo, oldmatcher, matcher, b'03',
                    builddeltaheader=builddeltaheader,
                    manifestsend=closechunk(),
                    bundlecaps=bundlecaps,
                    ellipses=ellipses,
                    shallow=shallow,
                    ellipsisroots=ellipsisroots,
                    fullnodes=fullnodes)
1256
1256
# Maps changegroup version -> (packer factory, unpacker class).
_packermap = {'01': (_makecg1packer, cg1unpacker),
              # cg2 adds support for exchanging generaldelta
              '02': (_makecg2packer, cg2unpacker),
              # cg3 adds support for exchanging revlog flags and treemanifests
              '03': (_makecg3packer, cg3unpacker),
}
1263
1263
def allsupportedversions(repo):
    """Return the set of changegroup versions this repo can work with.

    '03' is only included when changegroup3/treemanifest is enabled in
    config or the repo already requires treemanifest.
    """
    versions = set(_packermap.keys())
    if not (repo.ui.configbool('experimental', 'changegroup3') or
            repo.ui.configbool('experimental', 'treemanifest') or
            'treemanifest' in repo.requirements):
        versions.discard('03')
    return versions
1271
1271
# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    """Return changegroup versions that may be applied to ``repo``."""
    return allsupportedversions(repo)
1275
1275
# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    """Return changegroup versions that may be generated from ``repo``.

    Versions 01/02 are dropped when the repo needs features only cg3
    provides: tree manifests, narrow-clone revlog flags, or LFS flags.
    """
    versions = allsupportedversions(repo)
    if 'treemanifest' in repo.requirements:
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        versions.discard('01')
        versions.discard('02')
    if repository.NARROW_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # support that for stripping and unbundling to work.
        versions.discard('01')
        versions.discard('02')
    if LFS_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # mark LFS entries with REVIDX_EXTSTORED.
        versions.discard('01')
        versions.discard('02')

    return versions
1299
1299
def localversion(repo):
    # Finds the best version to use for bundles that are meant to be used
    # locally, such as those from strip and shelve, and temporary bundles.
    return max(supportedoutgoingversions(repo))
1304
1304
def safeversion(repo):
    # Finds the smallest version that it's safe to assume clients of the repo
    # will support. For example, all hg versions that support generaldelta also
    # support changegroup 02.
    versions = supportedoutgoingversions(repo)
    if 'generaldelta' in repo.requirements:
        versions.discard('01')
    # supportedoutgoingversions() never discards everything, so at least
    # one version must remain.
    assert versions
    return min(versions)
1314
1314
def getbundler(version, repo, bundlecaps=None, oldmatcher=None,
               matcher=None, ellipses=False, shallow=False,
               ellipsisroots=None, fullnodes=None):
    """Return a changegroup packer for ``version``.

    Raises ProgrammingError for version 01 with a sparse matcher and
    Abort when ellipsis nodes are requested with a pre-cg3 version.
    """
    assert version in supportedoutgoingversions(repo)

    if matcher is None:
        matcher = matchmod.always()
    if oldmatcher is None:
        oldmatcher = matchmod.never()

    if version == '01' and not matcher.always():
        raise error.ProgrammingError('version 01 changegroups do not support '
                                     'sparse file matchers')

    if ellipses and version in (b'01', b'02'):
        raise error.Abort(
            _('ellipsis nodes require at least cg3 on client and server, '
              'but negotiated version %s') % version)

    # Requested files could include files not in the local store. So
    # filter those out.
    matcher = repo.narrowmatch(matcher)

    fn = _packermap[version][0]
    return fn(repo, oldmatcher, matcher, bundlecaps, ellipses=ellipses,
              shallow=shallow, ellipsisroots=ellipsisroots,
              fullnodes=fullnodes)
1342
1342
def getunbundler(version, fh, alg, extras=None):
    """Return an unbundler for changegroup ``version`` reading from ``fh``."""
    return _packermap[version][1](fh, alg, extras=extras)
1345
1345
def _changegroupinfo(repo, nodes, source):
    """Report the number (and, when debugging, the list) of changesets."""
    if repo.ui.verbose or source == 'bundle':
        repo.ui.status(_("%d changesets found\n") % len(nodes))
    if repo.ui.debugflag:
        repo.ui.debug("list of changesets:\n")
        for node in nodes:
            repo.ui.debug("%s\n" % hex(node))
1353
1353
def makechangegroup(repo, outgoing, version, source, fastpath=False,
                    bundlecaps=None):
    """Build a changegroup for ``outgoing`` and return it as an unbundler."""
    cgstream = makestream(repo, outgoing, version, source,
                          fastpath=fastpath, bundlecaps=bundlecaps)
    return getunbundler(version, util.chunkbuffer(cgstream), None,
                        {'clcount': len(outgoing.missing) })
1360
1360
def makestream(repo, outgoing, version, source, fastpath=False,
               bundlecaps=None, matcher=None):
    """Return a changegroup chunk stream for the ``outgoing`` changesets."""
    bundler = getbundler(version, repo, bundlecaps=bundlecaps,
                         matcher=matcher)

    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered
    # heads have been requested (since we then know there all linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
1380
1380
def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    """Apply incoming filelog groups from ``source`` to the repository.

    Returns a ``(revisions, files)`` tuple counting what was added.
    Aborts on empty groups, censored delta bases, spurious entries, or
    file revisions expected in ``needfiles`` that never arrived.
    """
    revisions = 0
    files = 0
    progress = repo.ui.makeprogress(_('files'), unit=_('files'),
                                    total=expectedfiles)
    # filelogheader() returns {} at the end of the stream.
    for chunkdata in iter(source.filelogheader, {}):
        files += 1
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        progress.increment()
        fl = repo.file(f)
        o = len(fl)
        try:
            deltas = source.deltaiter()
            if not fl.addgroup(deltas, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        if f in needfiles:
            needs = needfiles[f]
            for new in pycompat.xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    progress.complete()

    # Anything still in needfiles was promised by the changegroup but
    # never delivered; verify and abort with a helpful message.
    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files
General Comments 0
You need to be logged in to leave comments. Login now