##// END OF EJS Templates
changegroup: move node sorting into deltagroup()...
Gregory Szorc -
r39265:2646b8d6 default
parent child Browse files
Show More
@@ -1,1522 +1,1516 b''
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import os
10 import os
11 import struct
11 import struct
12 import weakref
12 import weakref
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import (
15 from .node import (
16 hex,
16 hex,
17 nullid,
17 nullid,
18 nullrev,
18 nullrev,
19 short,
19 short,
20 )
20 )
21
21
22 from .thirdparty import (
22 from .thirdparty import (
23 attr,
23 attr,
24 )
24 )
25
25
26 from . import (
26 from . import (
27 dagop,
27 dagop,
28 error,
28 error,
29 match as matchmod,
29 match as matchmod,
30 mdiff,
30 mdiff,
31 phases,
31 phases,
32 pycompat,
32 pycompat,
33 repository,
33 repository,
34 revlog,
34 revlog,
35 util,
35 util,
36 )
36 )
37
37
38 from .utils import (
38 from .utils import (
39 stringutil,
39 stringutil,
40 )
40 )
41
41
# Wire-format delta headers for the three changegroup versions. cg1 carries
# (node, p1, p2, linknode); cg2 adds an explicit deltabase; cg3 additionally
# appends a 16-bit flags field (big-endian).
_CHANGEGROUPV1_DELTA_HEADER = struct.Struct("20s20s20s20s")
_CHANGEGROUPV2_DELTA_HEADER = struct.Struct("20s20s20s20s20s")
_CHANGEGROUPV3_DELTA_HEADER = struct.Struct(">20s20s20s20s20sH")

# Repository requirement string advertising large-file storage.
LFS_REQUIREMENT = 'lfs'

# Local alias; reads exactly N bytes from a stream or raises.
readexactly = util.readexactly
49
49
def getchunk(stream):
    """return the next chunk from stream as a string"""
    # Each chunk is framed by a 4-byte big-endian length that includes the
    # frame itself; a length of 0 marks a terminating (empty) chunk.
    lengthdata = readexactly(stream, 4)
    length = struct.unpack(">l", lengthdata)[0]
    if length <= 4:
        if length:
            # Lengths 1-3 (or negative) cannot frame a valid chunk.
            raise error.Abort(_("invalid chunk length %d") % length)
        return ""
    return readexactly(stream, length - 4)
59
59
def chunkheader(length):
    """return a changegroup chunk header (string)"""
    # The on-wire length counts the 4-byte header itself.
    return struct.pack(">l", 4 + length)
63
63
def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    # A zero length terminates a chunk sequence on the wire.
    return struct.pack(">l", 0)
67
67
def _fileheader(path):
    """Obtain a changegroup chunk header for a named path."""
    # A file section starts with a chunk whose payload is the path itself.
    return chunkheader(len(path)) + path
71
71
def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                # Increase default buffer size because default is usually
                # small (4k is common on Linux).
                fh = open(filename, "wb", 131072)
        else:
            fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, r"wb")
        # From here on, a failure must remove the partially written file.
        cleanup = filename
        for chunk in chunks:
            fh.write(chunk)
        # Fully written: disarm the cleanup before returning.
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
105
105
class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        if alg is None:
            alg = 'UN'
        if alg not in util.compengines.supportedbundletypes:
            raise error.Abort(_('unknown stream compression type: %s')
                              % alg)
        if alg == 'BZ':
            alg = '_truncatedBZ'

        compengine = util.compengines.forbundletype(alg)
        self._stream = compengine.decompressorreader(fh)
        self._type = alg
        self.extras = extras or {}
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None and self._type != 'UN'

    def read(self, l):
        return self._stream.read(l)

    def seek(self, pos):
        return self._stream.seek(pos)

    def tell(self):
        return self._stream.tell()

    def close(self):
        return self._stream.close()

    def _chunklength(self):
        """Read a chunk frame and return the payload length (0 at end)."""
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            # Progress callback, fired once per chunk consumed.
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        """Decode a cg1 delta header; the delta base is implicit (p1 or
        the previously seen node)."""
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        """Read one delta chunk; returns {} at the end of a group."""
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = self.deltaheader.unpack(headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return (node, p1, p2, cs, deltabase, delta, flags)

    def getchunks(self):
        """returns all the chunks contains in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parse the changegroup data, otherwise it will
        block in case of sshrepo because it don't know the end of the stream.
        """
        # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
        # and a list of filelogs. For changegroup 3, we expect 4 parts:
        # changelog, manifestlog, a list of tree manifestlogs, and a list of
        # filelogs.
        #
        # Changelog and manifestlog parts are terminated with empty chunks. The
        # tree and file parts are a list of entry sections. Each entry section
        # is a series of chunks terminating in an empty chunk. The list of these
        # entry sections is terminated in yet another empty chunk, so we know
        # we've reached the end of the tree/file list when we reach an empty
        # chunk that was proceeded by no non-empty chunks.

        parts = 0
        while parts < 2 + self._grouplistcount:
            noentries = True
            while True:
                chunk = getchunk(self)
                if not chunk:
                    # The first two empty chunks represent the end of the
                    # changelog and the manifestlog portions. The remaining
                    # empty chunks represent either A) the end of individual
                    # tree or file entries in the file list, or B) the end of
                    # the entire list. It's the end of the entire list if there
                    # were no entries (i.e. noentries is True).
                    if parts < 2:
                        parts += 1
                    elif noentries:
                        parts += 1
                    break
                noentries = False
                yield chunkheader(len(chunk))
                # Re-emit the payload in <=1MB slices to bound memory use.
                pos = 0
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

    def _unpackmanifests(self, repo, revmap, trp, prog):
        self.callback = prog.increment
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        deltas = self.deltaiter()
        repo.manifestlog.addgroup(deltas, revmap, trp)
        prog.complete()
        self.callback = None

    def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
              expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()

        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        try:
            # The transaction may already carry source information. In this
            # case we use the top level data. We overwrite the argument
            # because we need to use the top level value (if they exist)
            # in this function.
            srctype = tr.hookargs.setdefault('source', srctype)
            url = tr.hookargs.setdefault('url', url)
            repo.hook('prechangegroup',
                      throw=True, **pycompat.strkwargs(tr.hookargs))

            # write changelog data to temp files so concurrent readers
            # will not see an inconsistent view
            cl = repo.changelog
            cl.delayupdate(tr)
            oldheads = set(cl.heads())

            trp = weakref.proxy(tr)
            # pull off the changeset group
            repo.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            progress = repo.ui.makeprogress(_('changesets'), unit=_('chunks'),
                                            total=expectedtotal)
            self.callback = progress.increment

            efiles = set()
            def onchangelog(cl, node):
                efiles.update(cl.readfiles(node))

            self.changelogheader()
            deltas = self.deltaiter()
            cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog)
            efiles = len(efiles)

            if not cgnodes:
                repo.ui.develwarn('applied empty changegroup',
                                  config='warn-empty-changegroup')
            clend = len(cl)
            changesets = clend - clstart
            progress.complete()
            self.callback = None

            # pull off the manifest group
            repo.ui.status(_("adding manifests\n"))
            # We know that we'll never have more manifests than we had
            # changesets.
            progress = repo.ui.makeprogress(_('manifests'), unit=_('chunks'),
                                            total=changesets)
            self._unpackmanifests(repo, revmap, trp, progress)

            needfiles = {}
            if repo.ui.configbool('server', 'validate'):
                cl = repo.changelog
                ml = repo.manifestlog
                # validate incoming csets have their manifests
                for cset in pycompat.xrange(clstart, clend):
                    mfnode = cl.changelogrevision(cset).manifest
                    mfest = ml[mfnode].readdelta()
                    # store file cgnodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            repo.ui.status(_("adding file changes\n"))
            newrevs, newfiles = _addchangegroupfiles(
                repo, self, revmap, trp, efiles, needfiles)
            revisions += newrevs
            files += newfiles

            deltaheads = 0
            if oldheads:
                heads = cl.heads()
                deltaheads = len(heads) - len(oldheads)
                for h in heads:
                    # A new head that closes a branch does not count towards
                    # the reported head delta.
                    if h not in oldheads and repo[h].closesbranch():
                        deltaheads -= 1
            htext = ""
            if deltaheads:
                htext = _(" (%+d heads)") % deltaheads

            repo.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            repo.invalidatevolatilesets()

            if changesets > 0:
                if 'node' not in tr.hookargs:
                    tr.hookargs['node'] = hex(cl.node(clstart))
                    tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                    hookargs = dict(tr.hookargs)
                else:
                    hookargs = dict(tr.hookargs)
                    hookargs['node'] = hex(cl.node(clstart))
                    hookargs['node_last'] = hex(cl.node(clend - 1))
                repo.hook('pretxnchangegroup',
                          throw=True, **pycompat.strkwargs(hookargs))

            added = [cl.node(r) for r in pycompat.xrange(clstart, clend)]
            phaseall = None
            if srctype in ('push', 'serve'):
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if changeset already
                # exists locally as secret
                #
                # We should not use added here but the list of all change in
                # the bundle
                if repo.publishing():
                    targetphase = phaseall = phases.public
                else:
                    # closer target phase computation

                    # Those changesets have been pushed from the
                    # outside, their phases are going to be pushed
                    # alongside. Therefor `targetphase` is
                    # ignored.
                    targetphase = phaseall = phases.draft
            if added:
                phases.registernew(repo, tr, targetphase, added)
            if phaseall is not None:
                phases.advanceboundary(repo, tr, phaseall, cgnodes)

            if changesets > 0:

                def runhooks():
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(repo):
                        return

                    repo.hook("changegroup", **pycompat.strkwargs(hookargs))

                    for n in added:
                        args = hookargs.copy()
                        args['node'] = hex(n)
                        del args['node_last']
                        repo.hook("incoming", **pycompat.strkwargs(args))

                    newheads = [h for h in repo.heads()
                                if h not in oldheads]
                    repo.ui.log("incoming",
                                "%d incoming changes - new heads: %s\n",
                                len(added),
                                ', '.join([hex(c[:6]) for c in newheads]))

                tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        if deltaheads < 0:
            ret = deltaheads - 1
        else:
            ret = deltaheads + 1
        return ret

    def deltaiter(self):
        """
        returns an iterator of the deltas in this changegroup

        Useful for passing to the underlying storage system to be stored.
        """
        chain = None
        for chunkdata in iter(lambda: self.deltachunk(chain), {}):
            # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
            yield chunkdata
            chain = chunkdata[0]
448
448
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        # cg2 carries the delta base explicitly, so prevnode is unused.
        node, p1, p2, deltabase, cs = headertuple
        flags = 0
        return node, p1, p2, deltabase, cs, flags
464
464
class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        # cg3 headers already contain the flags field; pass through as-is.
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog):
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % d)
            dirlog = repo.manifestlog._revlog.dirlog(d)
            deltas = self.deltaiter()
            if not dirlog.addgroup(deltas, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))
491
491
class headerlessfixup(object):
    """File-like wrapper that replays an already-consumed header.

    Serves the bytes of ``h`` first, then falls through to reading
    from the underlying stream ``fh``.
    """
    def __init__(self, fh, h):
        # _h: remaining buffered header bytes; _fh: underlying stream.
        self._h = h
        self._fh = fh

    def read(self, n):
        if self._h:
            d, self._h = self._h[:n], self._h[n:]
            if len(d) < n:
                # Buffer exhausted mid-request; top up from the stream.
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)
503
503
@attr.s(slots=True, frozen=True)
class revisiondeltarequest(object):
    """Describes a request to construct a revision delta.

    Instances are converted into ``revisiondelta`` later.
    """
    # Revision whose delta will be generated.
    node = attr.ib()

    # Linknode value.
    linknode = attr.ib()

    # Parent revisions to record in ``revisiondelta`` instance.
    p1node = attr.ib()
    p2node = attr.ib()

    # Base revision that delta should be generated against. If nullid,
    # the full revision data should be populated. If None, the delta
    # may be generated against any base revision that is an ancestor of
    # this revision. If any other value, the delta should be produced
    # against that revision.
    basenode = attr.ib()

    # Whether this should be marked as an ellipsis revision.
    ellipsis = attr.ib(default=False)
529
529
@attr.s(slots=True, frozen=True)
class revisiondelta(object):
    """Describes a delta entry in a changegroup.

    Captured data is sufficient to serialize the delta into multiple
    formats.

    ``revision`` and ``delta`` are mutually exclusive.
    """
    # 20 byte node of this revision.
    node = attr.ib()
    # 20 byte nodes of the revision's parents.
    p1node = attr.ib()
    p2node = attr.ib()
    # 20 byte node the delta is computed against.
    basenode = attr.ib()
    # 20 byte node of the changeset this revision is linked to.
    linknode = attr.ib()
    # 2 bytes of flags to apply to revision data.
    flags = attr.ib()
    # Size of the base revision text; None when basenode is nullid.
    baserevisionsize = attr.ib()
    # Raw fulltext revision data (set when no delta was produced).
    revision = attr.ib()
    # Delta between basenode and node (set when no fulltext captured).
    delta = attr.ib()
def _revisiondeltatochunks(delta, headerfn):
    """Serialize a revisiondelta to changegroup chunks."""

    # The wire format carries deltas exclusively. A captured fulltext
    # therefore needs a synthesized diff header that rewrites the whole
    # base text: a trivial header when there is no base, or a
    # replace-everything header when one exists.
    if delta.delta is not None:
        prefix, data = b'', delta.delta
    elif delta.basenode == nullid:
        data = delta.revision
        prefix = mdiff.trivialdiffheader(len(data))
    else:
        data = delta.revision
        prefix = mdiff.replacediffheader(delta.baserevisionsize,
                                         len(data))

    meta = headerfn(delta)

    yield chunkheader(len(meta) + len(prefix) + len(data))
    yield meta
    if prefix:
        yield prefix
    yield data
584
584
def _sortnodesnormal(store, nodes, reorder):
    """Sort nodes for changegroup generation and turn into revnums."""
    revnums = [store.rev(n) for n in nodes]

    # Linearizing generaldelta revlogs is both much quicker and
    # produces a much smaller bundle, so do it unless the caller
    # explicitly disabled reordering.
    if reorder or (reorder is None and store._generaldelta):
        return dagop.linearize(set(revnums), store.parentrevs)

    return sorted(revnums)
594
594
def _sortnodesellipsis(store, nodes, cl, lookup):
    """Sort nodes for changegroup generation and turn into revnums."""
    # Ellipses serving mode.
    #
    # Ideally we would build proper ellipsis-ified graphs for
    # non-changelog revlogs. We don't yet, so the manifestlog and
    # filelog DAGs are full of bogus parentage on ellipsis nodes:
    # contents are correct, but the individual DAGs can be badly out of
    # whack (see e.g. 882681bc3166 and its ancestors in the main hg
    # repo).
    #
    # The one invariant that *is* known to hold: the (potentially
    # bogus) DAG shape stays valid if nodes are emitted in the order
    # the changelog introduces them. So order every non-changelog
    # history by changelog revision of the linked changeset.
    def introorder(node):
        return cl.rev(lookup(node))

    return [store.rev(n) for n in sorted(nodes, key=introorder)]
615
615
def _handlerevisiondeltarequest(store, request, prevnode):
    """Obtain a revisiondelta from a revisiondeltarequest"""

    node = request.node
    rev = store.rev(node)

    if request.basenode == nullid:
        # Caller wants a full revision.
        baserev = nullrev
    elif request.basenode is not None:
        # Caller pinned an explicit base revision.
        baserev = store.rev(request.basenode)
    else:
        # We get to choose the base.
        p1, p2 = store.parentrevs(rev)
        deltaparent = store.deltaparent(rev)
        prevrev = store.rev(prevnode)

        if deltaparent == nullrev and store.storedeltachains:
            # Avoid sending full revisions when delta parent is null. Pick
            # prev in that case. It's tempting to pick p1 in this case, as
            # p1 will be smaller in the common case. However, computing a
            # delta against p1 may require resolving the raw text of p1,
            # which could be expensive. The revlog caches should have prev
            # cached, meaning less CPU for changegroup generation. There is
            # likely room to add a flag and/or config option to control
            # this behavior.
            baserev = prevrev
        elif deltaparent == nullrev:
            # revlog is configured to use full snapshot for a reason,
            # stick to full snapshot.
            baserev = nullrev
        elif deltaparent not in (p1, p2, prevrev):
            # Pick prev when we can't be sure remote has the base revision.
            baserev = prevrev
        else:
            baserev = deltaparent

        if baserev != nullrev and not store.candelta(baserev, rev):
            baserev = nullrev

    revision = None
    delta = None
    baserevisionsize = None

    if store.iscensored(baserev) or store.iscensored(rev):
        # Censored content must be shipped as a fulltext (or tombstone),
        # never as a delta.
        try:
            revision = store.revision(node, raw=True)
        except error.CensoredNodeError as e:
            revision = e.tombstone

        if baserev != nullrev:
            baserevisionsize = store.rawsize(baserev)

    elif baserev == nullrev:
        revision = store.revision(node, raw=True)
    else:
        delta = store.revdiff(baserev, rev)

    extraflags = revlog.REVIDX_ELLIPSIS if request.ellipsis else 0

    return revisiondelta(
        node=node,
        p1node=request.p1node,
        p2node=request.p2node,
        linknode=request.linknode,
        basenode=store.node(baserev),
        flags=store.flags(rev) | extraflags,
        baserevisionsize=baserevisionsize,
        revision=revision,
        delta=delta,
    )
686
686
def _makenarrowdeltarequest(cl, store, ischangelog, rev, node, linkrev,
                            linknode, clrevtolocalrev, fullclnodes,
                            precomputedellipsis):
    """Build an ellipsis delta request with parents remapped locally."""
    linkparents = precomputedellipsis[linkrev]

    def local(clrev):
        """Turn a changelog revnum into a local revnum.

        The ellipsis dag is stored as revnums on the changelog,
        but when we're producing ellipsis entries for
        non-changelog revlogs, we need to turn those numbers into
        something local. This does that for us, and during the
        changelog sending phase will also expand the stored
        mappings as needed.
        """
        if clrev == nullrev:
            return nullrev

        if ischangelog:
            return clrev

        # Walk the ellipsis-ized changelog breadth-first looking for a
        # change that has been linked from the current revlog.
        #
        # For a flat manifest revlog only a single step should be
        # necessary as all relevant changelog entries are relevant to
        # the flat manifest.
        #
        # For a filelog or tree manifest dirlog however not every
        # changelog entry will have been relevant, so we need to skip
        # some changelog nodes even after ellipsis-izing.
        frontier = [clrev]
        while frontier:
            p = frontier.pop(0)
            if p in clrevtolocalrev:
                return clrevtolocalrev[p]
            elif p in fullclnodes:
                frontier.extend(pp for pp in cl.parentrevs(p)
                                if pp != nullrev)
            elif p in precomputedellipsis:
                frontier.extend(pp for pp in precomputedellipsis[p]
                                if pp != nullrev)
            else:
                # In this case, we've got an ellipsis with parents
                # outside the current bundle (likely an
                # incremental pull). We "know" that we can use the
                # value of this same revlog at whatever revision
                # is pointed to by linknode. "Know" is in scare
                # quotes because I haven't done enough examination
                # of edge cases to convince myself this is really
                # a fact - it works for all the (admittedly
                # thorough) cases in our testsuite, but I would be
                # somewhat unsurprised to find a case in the wild
                # where this breaks down a bit. That said, I don't
                # know if it would hurt anything.
                for i in pycompat.xrange(rev, 0, -1):
                    if store.linkrev(i) == clrev:
                        return i
                # We failed to resolve a parent for this node, so
                # we crash the changegroup construction.
                raise error.Abort(
                    'unable to resolve parent while packing %r %r'
                    ' for changeset %r' % (store.indexfile, rev, clrev))

        return nullrev

    if not linkparents or (
        store.parentrevs(rev) == (nullrev, nullrev)):
        p1, p2 = nullrev, nullrev
    elif len(linkparents) == 1:
        p1, = sorted(local(p) for p in linkparents)
        p2 = nullrev
    else:
        p1, p2 = sorted(local(p) for p in linkparents)

    p1node, p2node = store.node(p1), store.node(p2)

    # TODO: try and actually send deltas for ellipsis data blocks
    return revisiondeltarequest(
        node=node,
        p1node=p1node,
        p2node=p2node,
        linknode=linknode,
        basenode=nullid,
        ellipsis=True,
    )
773
773
def deltagroup(repo, store, nodes, ischangelog, lookup, forcedeltaparentprev,
               allowreorder,
               units=None,
               ellipses=False, clrevtolocalrev=None, fullclnodes=None,
               precomputedellipsis=None):
    """Calculate deltas for a set of revisions.

    Is a generator of ``revisiondelta`` instances.

    If units is not None, progress detail will be generated, units specifies
    the type of revlog that is touched (changelog, manifest, etc.).
    """
    if not nodes:
        return

    cl = repo.changelog

    if ischangelog:
        # Changelog doesn't benefit from reordering revisions. So send
        # out revisions in store order.
        # TODO the API would be cleaner if this were controlled by the
        # store producing the deltas.
        revs = sorted(cl.rev(n) for n in nodes)
    elif ellipses:
        revs = _sortnodesellipsis(store, nodes, cl, lookup)
    else:
        revs = _sortnodesnormal(store, nodes, allowreorder)

    # Two passes over the revisions: first decide *what* deltas to
    # produce (linknode computation plus shallow-fetch adjustments,
    # yielding a list of request objects), then resolve those requests.

    requests = []

    # Prepend the parent of the first rev so the pairwise walk below
    # always has a predecessor for the first emitted revision.
    revs.insert(0, store.parentrevs(revs[0])[0])

    for idx in pycompat.xrange(len(revs) - 1):
        prev = revs[idx]
        curr = revs[idx + 1]

        node = store.node(curr)
        linknode = lookup(node)
        p1node, p2node = store.parents(node)

        if ellipses:
            linkrev = cl.rev(linknode)
            clrevtolocalrev[linkrev] = curr

            if linknode in fullclnodes:
                # The linked changeset is sent in full, so this node is
                # too; let the resolver pick any delta base it likes.
                requests.append(revisiondeltarequest(
                    node=node,
                    p1node=p1node,
                    p2node=p2node,
                    linknode=linknode,
                    basenode=None,
                ))
            elif linkrev not in precomputedellipsis:
                # Not part of the ellipsis graph; nothing to send.
                pass
            else:
                requests.append(_makenarrowdeltarequest(
                    cl, store, ischangelog, curr, node, linkrev, linknode,
                    clrevtolocalrev, fullclnodes,
                    precomputedellipsis))
        else:
            requests.append(revisiondeltarequest(
                node=node,
                p1node=p1node,
                p2node=p2node,
                linknode=linknode,
                basenode=store.node(prev) if forcedeltaparentprev else None,
            ))

    # The first pass is expected to be fast, so the progress meter only
    # covers the second pass where deltas are actually computed.
    progress = None
    if units is not None:
        progress = repo.ui.makeprogress(_('bundling'), unit=units,
                                        total=len(requests))

    prevnode = store.node(revs[0])
    for idx, request in enumerate(requests):
        if progress:
            progress.update(idx + 1)

        yield _handlerevisiondeltarequest(store, request, prevnode)

        prevnode = request.node

    if progress:
        progress.complete()
866
878
867 class cgpacker(object):
879 class cgpacker(object):
def __init__(self, repo, filematcher, version, allowreorder,
             builddeltaheader, manifestsend,
             forcedeltaparentprev=False,
             bundlecaps=None, ellipses=False,
             shallow=False, ellipsisroots=None, fullnodes=None):
    """Given a source repo, construct a bundler.

    filematcher is a matcher that matches on files to include in the
    changegroup. Used to facilitate sparse changegroups.

    allowreorder controls whether reordering of revisions is allowed.
    This value is used when ``bundle.reorder`` is ``auto`` or isn't
    set.

    forcedeltaparentprev indicates whether delta parents must be against
    the previous revision in a delta group. This should only be used for
    compatibility with changegroup version 1.

    builddeltaheader is a callable that constructs the header for a group
    delta.

    manifestsend is a chunk to send after manifests have been fully emitted.

    ellipses indicates whether ellipsis serving mode is enabled.

    bundlecaps is optional and can be used to specify the set of
    capabilities which can be used to build the bundle. While bundlecaps is
    unused in core Mercurial, extensions rely on this feature to communicate
    capabilities to customize the changegroup packer.

    shallow indicates whether shallow data might be sent. The packer may
    need to pack file contents not introduced by the changes being packed.

    fullnodes is the set of changelog nodes which should not be ellipsis
    nodes. We store this rather than the set of nodes that should be
    ellipsis because for very large histories we expect this to be
    significantly smaller.
    """
    assert filematcher
    self._filematcher = filematcher

    self.version = version
    self._forcedeltaparentprev = forcedeltaparentprev
    self._builddeltaheader = builddeltaheader
    self._manifestsend = manifestsend
    self._ellipses = ellipses

    # Set of capabilities we can use to build the bundle.
    self._bundlecaps = set() if bundlecaps is None else bundlecaps
    self._isshallow = shallow
    self._fullclnodes = fullnodes

    # Maps ellipsis revs to their roots at the changelog level.
    self._precomputedellipsis = ellipsisroots

    # experimental config: bundle.reorder
    reorder = repo.ui.config('bundle', 'reorder')
    if reorder == 'auto':
        self._reorder = allowreorder
    else:
        self._reorder = stringutil.parsebool(reorder)

    self._repo = repo

    # Only chatter about sizes in verbose (but not debug) mode.
    if repo.ui.verbose and not repo.ui.debugflag:
        self._verbosenote = repo.ui.note
    else:
        self._verbosenote = lambda s: None
938
950
939 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
951 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
940 """Yield a sequence of changegroup byte chunks."""
952 """Yield a sequence of changegroup byte chunks."""
941
953
942 repo = self._repo
954 repo = self._repo
943 cl = repo.changelog
955 cl = repo.changelog
944
956
945 self._verbosenote(_('uncompressed size of bundle content:\n'))
957 self._verbosenote(_('uncompressed size of bundle content:\n'))
946 size = 0
958 size = 0
947
959
948 clstate, deltas = self._generatechangelog(cl, clnodes)
960 clstate, deltas = self._generatechangelog(cl, clnodes)
949 for delta in deltas:
961 for delta in deltas:
950 for chunk in _revisiondeltatochunks(delta, self._builddeltaheader):
962 for chunk in _revisiondeltatochunks(delta, self._builddeltaheader):
951 size += len(chunk)
963 size += len(chunk)
952 yield chunk
964 yield chunk
953
965
954 close = closechunk()
966 close = closechunk()
955 size += len(close)
967 size += len(close)
956 yield closechunk()
968 yield closechunk()
957
969
958 self._verbosenote(_('%8.i (changelog)\n') % size)
970 self._verbosenote(_('%8.i (changelog)\n') % size)
959
971
960 clrevorder = clstate['clrevorder']
972 clrevorder = clstate['clrevorder']
961 mfs = clstate['mfs']
973 mfs = clstate['mfs']
962 changedfiles = clstate['changedfiles']
974 changedfiles = clstate['changedfiles']
963
975
964 # We need to make sure that the linkrev in the changegroup refers to
976 # We need to make sure that the linkrev in the changegroup refers to
965 # the first changeset that introduced the manifest or file revision.
977 # the first changeset that introduced the manifest or file revision.
966 # The fastpath is usually safer than the slowpath, because the filelogs
978 # The fastpath is usually safer than the slowpath, because the filelogs
967 # are walked in revlog order.
979 # are walked in revlog order.
968 #
980 #
969 # When taking the slowpath with reorder=None and the manifest revlog
981 # When taking the slowpath with reorder=None and the manifest revlog
970 # uses generaldelta, the manifest may be walked in the "wrong" order.
982 # uses generaldelta, the manifest may be walked in the "wrong" order.
971 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
983 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
972 # cc0ff93d0c0c).
984 # cc0ff93d0c0c).
973 #
985 #
974 # When taking the fastpath, we are only vulnerable to reordering
986 # When taking the fastpath, we are only vulnerable to reordering
975 # of the changelog itself. The changelog never uses generaldelta, so
987 # of the changelog itself. The changelog never uses generaldelta, so
976 # it is only reordered when reorder=True. To handle this case, we
988 # it is only reordered when reorder=True. To handle this case, we
977 # simply take the slowpath, which already has the 'clrevorder' logic.
989 # simply take the slowpath, which already has the 'clrevorder' logic.
978 # This was also fixed in cc0ff93d0c0c.
990 # This was also fixed in cc0ff93d0c0c.
979 fastpathlinkrev = fastpathlinkrev and not self._reorder
991 fastpathlinkrev = fastpathlinkrev and not self._reorder
980 # Treemanifests don't work correctly with fastpathlinkrev
992 # Treemanifests don't work correctly with fastpathlinkrev
981 # either, because we don't discover which directory nodes to
993 # either, because we don't discover which directory nodes to
982 # send along with files. This could probably be fixed.
994 # send along with files. This could probably be fixed.
983 fastpathlinkrev = fastpathlinkrev and (
995 fastpathlinkrev = fastpathlinkrev and (
984 'treemanifest' not in repo.requirements)
996 'treemanifest' not in repo.requirements)
985
997
986 fnodes = {} # needed file nodes
998 fnodes = {} # needed file nodes
987
999
988 size = 0
1000 size = 0
989 it = self.generatemanifests(
1001 it = self.generatemanifests(
990 commonrevs, clrevorder, fastpathlinkrev, mfs, fnodes, source,
1002 commonrevs, clrevorder, fastpathlinkrev, mfs, fnodes, source,
991 clstate['clrevtomanifestrev'])
1003 clstate['clrevtomanifestrev'])
992
1004
993 for dir, deltas in it:
1005 for dir, deltas in it:
994 if dir:
1006 if dir:
995 assert self.version == b'03'
1007 assert self.version == b'03'
996 chunk = _fileheader(dir)
1008 chunk = _fileheader(dir)
997 size += len(chunk)
1009 size += len(chunk)
998 yield chunk
1010 yield chunk
999
1011
1000 for delta in deltas:
1012 for delta in deltas:
1001 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
1013 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
1002 for chunk in chunks:
1014 for chunk in chunks:
1003 size += len(chunk)
1015 size += len(chunk)
1004 yield chunk
1016 yield chunk
1005
1017
1006 close = closechunk()
1018 close = closechunk()
1007 size += len(close)
1019 size += len(close)
1008 yield close
1020 yield close
1009
1021
1010 self._verbosenote(_('%8.i (manifests)\n') % size)
1022 self._verbosenote(_('%8.i (manifests)\n') % size)
1011 yield self._manifestsend
1023 yield self._manifestsend
1012
1024
1013 mfdicts = None
1025 mfdicts = None
1014 if self._ellipses and self._isshallow:
1026 if self._ellipses and self._isshallow:
1015 mfdicts = [(self._repo.manifestlog[n].read(), lr)
1027 mfdicts = [(self._repo.manifestlog[n].read(), lr)
1016 for (n, lr) in mfs.iteritems()]
1028 for (n, lr) in mfs.iteritems()]
1017
1029
1018 mfs.clear()
1030 mfs.clear()
1019 clrevs = set(cl.rev(x) for x in clnodes)
1031 clrevs = set(cl.rev(x) for x in clnodes)
1020
1032
1021 it = self.generatefiles(changedfiles, commonrevs,
1033 it = self.generatefiles(changedfiles, commonrevs,
1022 source, mfdicts, fastpathlinkrev,
1034 source, mfdicts, fastpathlinkrev,
1023 fnodes, clrevs)
1035 fnodes, clrevs)
1024
1036
1025 for path, deltas in it:
1037 for path, deltas in it:
1026 h = _fileheader(path)
1038 h = _fileheader(path)
1027 size = len(h)
1039 size = len(h)
1028 yield h
1040 yield h
1029
1041
1030 for delta in deltas:
1042 for delta in deltas:
1031 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
1043 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
1032 for chunk in chunks:
1044 for chunk in chunks:
1033 size += len(chunk)
1045 size += len(chunk)
1034 yield chunk
1046 yield chunk
1035
1047
1036 close = closechunk()
1048 close = closechunk()
1037 size += len(close)
1049 size += len(close)
1038 yield close
1050 yield close
1039
1051
1040 self._verbosenote(_('%8.i %s\n') % (size, path))
1052 self._verbosenote(_('%8.i %s\n') % (size, path))
1041
1053
1042 yield closechunk()
1054 yield closechunk()
1043
1055
1044 if clnodes:
1056 if clnodes:
1045 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
1057 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
1046
1058
1047 def _generatechangelog(self, cl, nodes):
1059 def _generatechangelog(self, cl, nodes):
1048 """Generate data for changelog chunks.
1060 """Generate data for changelog chunks.
1049
1061
1050 Returns a 2-tuple of a dict containing state and an iterable of
1062 Returns a 2-tuple of a dict containing state and an iterable of
1051 byte chunks. The state will not be fully populated until the
1063 byte chunks. The state will not be fully populated until the
1052 chunk stream has been fully consumed.
1064 chunk stream has been fully consumed.
1053 """
1065 """
1054 clrevorder = {}
1066 clrevorder = {}
1055 mfs = {} # needed manifests
1067 mfs = {} # needed manifests
1056 mfl = self._repo.manifestlog
1068 mfl = self._repo.manifestlog
1057 # TODO violates storage abstraction.
1069 # TODO violates storage abstraction.
1058 mfrevlog = mfl._revlog
1070 mfrevlog = mfl._revlog
1059 changedfiles = set()
1071 changedfiles = set()
1060 clrevtomanifestrev = {}
1072 clrevtomanifestrev = {}
1061
1073
1062 # Callback for the changelog, used to collect changed files and
1074 # Callback for the changelog, used to collect changed files and
1063 # manifest nodes.
1075 # manifest nodes.
1064 # Returns the linkrev node (identity in the changelog case).
1076 # Returns the linkrev node (identity in the changelog case).
1065 def lookupcl(x):
1077 def lookupcl(x):
1066 c = cl.read(x)
1078 c = cl.read(x)
1067 clrevorder[x] = len(clrevorder)
1079 clrevorder[x] = len(clrevorder)
1068
1080
1069 if self._ellipses:
1081 if self._ellipses:
1070 # Only update mfs if x is going to be sent. Otherwise we
1082 # Only update mfs if x is going to be sent. Otherwise we
1071 # end up with bogus linkrevs specified for manifests and
1083 # end up with bogus linkrevs specified for manifests and
1072 # we skip some manifest nodes that we should otherwise
1084 # we skip some manifest nodes that we should otherwise
1073 # have sent.
1085 # have sent.
1074 if (x in self._fullclnodes
1086 if (x in self._fullclnodes
1075 or cl.rev(x) in self._precomputedellipsis):
1087 or cl.rev(x) in self._precomputedellipsis):
1076 n = c[0]
1088 n = c[0]
1077 # Record the first changeset introducing this manifest
1089 # Record the first changeset introducing this manifest
1078 # version.
1090 # version.
1079 mfs.setdefault(n, x)
1091 mfs.setdefault(n, x)
1080 # Set this narrow-specific dict so we have the lowest
1092 # Set this narrow-specific dict so we have the lowest
1081 # manifest revnum to look up for this cl revnum. (Part of
1093 # manifest revnum to look up for this cl revnum. (Part of
1082 # mapping changelog ellipsis parents to manifest ellipsis
1094 # mapping changelog ellipsis parents to manifest ellipsis
1083 # parents)
1095 # parents)
1084 clrevtomanifestrev.setdefault(cl.rev(x), mfrevlog.rev(n))
1096 clrevtomanifestrev.setdefault(cl.rev(x), mfrevlog.rev(n))
1085 # We can't trust the changed files list in the changeset if the
1097 # We can't trust the changed files list in the changeset if the
1086 # client requested a shallow clone.
1098 # client requested a shallow clone.
1087 if self._isshallow:
1099 if self._isshallow:
1088 changedfiles.update(mfl[c[0]].read().keys())
1100 changedfiles.update(mfl[c[0]].read().keys())
1089 else:
1101 else:
1090 changedfiles.update(c[3])
1102 changedfiles.update(c[3])
1091 else:
1103 else:
1092
1104
1093 n = c[0]
1105 n = c[0]
1094 # record the first changeset introducing this manifest version
1106 # record the first changeset introducing this manifest version
1095 mfs.setdefault(n, x)
1107 mfs.setdefault(n, x)
1096 # Record a complete list of potentially-changed files in
1108 # Record a complete list of potentially-changed files in
1097 # this manifest.
1109 # this manifest.
1098 changedfiles.update(c[3])
1110 changedfiles.update(c[3])
1099
1111
1100 return x
1112 return x
1101
1113
1102 # Changelog doesn't benefit from reordering revisions. So send out
1103 # revisions in store order.
1104 revs = sorted(cl.rev(n) for n in nodes)
1105
1106 state = {
1114 state = {
1107 'clrevorder': clrevorder,
1115 'clrevorder': clrevorder,
1108 'mfs': mfs,
1116 'mfs': mfs,
1109 'changedfiles': changedfiles,
1117 'changedfiles': changedfiles,
1110 'clrevtomanifestrev': clrevtomanifestrev,
1118 'clrevtomanifestrev': clrevtomanifestrev,
1111 }
1119 }
1112
1120
1113 gen = deltagroup(
1121 gen = deltagroup(
1114 self._repo, revs, cl, True, lookupcl,
1122 self._repo, cl, nodes, True, lookupcl,
1115 self._forcedeltaparentprev,
1123 self._forcedeltaparentprev,
1124 # Reorder settings are currently ignored for changelog.
1125 True,
1116 ellipses=self._ellipses,
1126 ellipses=self._ellipses,
1117 units=_('changesets'),
1127 units=_('changesets'),
1118 clrevtolocalrev={},
1128 clrevtolocalrev={},
1119 fullclnodes=self._fullclnodes,
1129 fullclnodes=self._fullclnodes,
1120 precomputedellipsis=self._precomputedellipsis)
1130 precomputedellipsis=self._precomputedellipsis)
1121
1131
1122 return state, gen
1132 return state, gen
1123
1133
1124 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
1134 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
1125 fnodes, source, clrevtolocalrev):
1135 fnodes, source, clrevtolocalrev):
1126 """Returns an iterator of changegroup chunks containing manifests.
1136 """Returns an iterator of changegroup chunks containing manifests.
1127
1137
1128 `source` is unused here, but is used by extensions like remotefilelog to
1138 `source` is unused here, but is used by extensions like remotefilelog to
1129 change what is sent based in pulls vs pushes, etc.
1139 change what is sent based in pulls vs pushes, etc.
1130 """
1140 """
1131 repo = self._repo
1141 repo = self._repo
1132 cl = repo.changelog
1133 mfl = repo.manifestlog
1142 mfl = repo.manifestlog
1134 dirlog = mfl._revlog.dirlog
1143 dirlog = mfl._revlog.dirlog
1135 tmfnodes = {'': mfs}
1144 tmfnodes = {'': mfs}
1136
1145
1137 # Callback for the manifest, used to collect linkrevs for filelog
1146 # Callback for the manifest, used to collect linkrevs for filelog
1138 # revisions.
1147 # revisions.
1139 # Returns the linkrev node (collected in lookupcl).
1148 # Returns the linkrev node (collected in lookupcl).
1140 def makelookupmflinknode(dir, nodes):
1149 def makelookupmflinknode(dir, nodes):
1141 if fastpathlinkrev:
1150 if fastpathlinkrev:
1142 assert not dir
1151 assert not dir
1143 return mfs.__getitem__
1152 return mfs.__getitem__
1144
1153
1145 def lookupmflinknode(x):
1154 def lookupmflinknode(x):
1146 """Callback for looking up the linknode for manifests.
1155 """Callback for looking up the linknode for manifests.
1147
1156
1148 Returns the linkrev node for the specified manifest.
1157 Returns the linkrev node for the specified manifest.
1149
1158
1150 SIDE EFFECT:
1159 SIDE EFFECT:
1151
1160
1152 1) fclnodes gets populated with the list of relevant
1161 1) fclnodes gets populated with the list of relevant
1153 file nodes if we're not using fastpathlinkrev
1162 file nodes if we're not using fastpathlinkrev
1154 2) When treemanifests are in use, collects treemanifest nodes
1163 2) When treemanifests are in use, collects treemanifest nodes
1155 to send
1164 to send
1156
1165
1157 Note that this means manifests must be completely sent to
1166 Note that this means manifests must be completely sent to
1158 the client before you can trust the list of files and
1167 the client before you can trust the list of files and
1159 treemanifests to send.
1168 treemanifests to send.
1160 """
1169 """
1161 clnode = nodes[x]
1170 clnode = nodes[x]
1162 mdata = mfl.get(dir, x).readfast(shallow=True)
1171 mdata = mfl.get(dir, x).readfast(shallow=True)
1163 for p, n, fl in mdata.iterentries():
1172 for p, n, fl in mdata.iterentries():
1164 if fl == 't': # subdirectory manifest
1173 if fl == 't': # subdirectory manifest
1165 subdir = dir + p + '/'
1174 subdir = dir + p + '/'
1166 tmfclnodes = tmfnodes.setdefault(subdir, {})
1175 tmfclnodes = tmfnodes.setdefault(subdir, {})
1167 tmfclnode = tmfclnodes.setdefault(n, clnode)
1176 tmfclnode = tmfclnodes.setdefault(n, clnode)
1168 if clrevorder[clnode] < clrevorder[tmfclnode]:
1177 if clrevorder[clnode] < clrevorder[tmfclnode]:
1169 tmfclnodes[n] = clnode
1178 tmfclnodes[n] = clnode
1170 else:
1179 else:
1171 f = dir + p
1180 f = dir + p
1172 fclnodes = fnodes.setdefault(f, {})
1181 fclnodes = fnodes.setdefault(f, {})
1173 fclnode = fclnodes.setdefault(n, clnode)
1182 fclnode = fclnodes.setdefault(n, clnode)
1174 if clrevorder[clnode] < clrevorder[fclnode]:
1183 if clrevorder[clnode] < clrevorder[fclnode]:
1175 fclnodes[n] = clnode
1184 fclnodes[n] = clnode
1176 return clnode
1185 return clnode
1177 return lookupmflinknode
1186 return lookupmflinknode
1178
1187
1179 while tmfnodes:
1188 while tmfnodes:
1180 dir, nodes = tmfnodes.popitem()
1189 dir, nodes = tmfnodes.popitem()
1181 store = dirlog(dir)
1190 store = dirlog(dir)
1182
1191
1183 if not self._filematcher.visitdir(store._dir[:-1] or '.'):
1192 if not self._filematcher.visitdir(store._dir[:-1] or '.'):
1184 prunednodes = []
1193 prunednodes = []
1185 else:
1194 else:
1186 frev, flr = store.rev, store.linkrev
1195 frev, flr = store.rev, store.linkrev
1187 prunednodes = [n for n in nodes
1196 prunednodes = [n for n in nodes
1188 if flr(frev(n)) not in commonrevs]
1197 if flr(frev(n)) not in commonrevs]
1189
1198
1190 if dir and not prunednodes:
1199 if dir and not prunednodes:
1191 continue
1200 continue
1192
1201
1193 lookupfn = makelookupmflinknode(dir, nodes)
1202 lookupfn = makelookupmflinknode(dir, nodes)
1194
1203
1195 if self._ellipses:
1196 revs = _sortnodesellipsis(store, prunednodes, cl,
1197 lookupfn)
1198 else:
1199 revs = _sortnodesnormal(store, prunednodes,
1200 self._reorder)
1201
1202 deltas = deltagroup(
1204 deltas = deltagroup(
1203 self._repo, revs, store, False, lookupfn,
1205 self._repo, store, prunednodes, False, lookupfn,
1204 self._forcedeltaparentprev,
1206 self._forcedeltaparentprev, self._reorder,
1205 ellipses=self._ellipses,
1207 ellipses=self._ellipses,
1206 units=_('manifests'),
1208 units=_('manifests'),
1207 clrevtolocalrev=clrevtolocalrev,
1209 clrevtolocalrev=clrevtolocalrev,
1208 fullclnodes=self._fullclnodes,
1210 fullclnodes=self._fullclnodes,
1209 precomputedellipsis=self._precomputedellipsis)
1211 precomputedellipsis=self._precomputedellipsis)
1210
1212
1211 yield dir, deltas
1213 yield dir, deltas
1212
1214
1213 # The 'source' parameter is useful for extensions
1215 # The 'source' parameter is useful for extensions
1214 def generatefiles(self, changedfiles, commonrevs, source,
1216 def generatefiles(self, changedfiles, commonrevs, source,
1215 mfdicts, fastpathlinkrev, fnodes, clrevs):
1217 mfdicts, fastpathlinkrev, fnodes, clrevs):
1216 changedfiles = list(filter(self._filematcher, changedfiles))
1218 changedfiles = list(filter(self._filematcher, changedfiles))
1217
1219
1218 if not fastpathlinkrev:
1220 if not fastpathlinkrev:
1219 def normallinknodes(unused, fname):
1221 def normallinknodes(unused, fname):
1220 return fnodes.get(fname, {})
1222 return fnodes.get(fname, {})
1221 else:
1223 else:
1222 cln = self._repo.changelog.node
1224 cln = self._repo.changelog.node
1223
1225
1224 def normallinknodes(store, fname):
1226 def normallinknodes(store, fname):
1225 flinkrev = store.linkrev
1227 flinkrev = store.linkrev
1226 fnode = store.node
1228 fnode = store.node
1227 revs = ((r, flinkrev(r)) for r in store)
1229 revs = ((r, flinkrev(r)) for r in store)
1228 return dict((fnode(r), cln(lr))
1230 return dict((fnode(r), cln(lr))
1229 for r, lr in revs if lr in clrevs)
1231 for r, lr in revs if lr in clrevs)
1230
1232
1231 clrevtolocalrev = {}
1233 clrevtolocalrev = {}
1232
1234
1233 if self._isshallow:
1235 if self._isshallow:
1234 # In a shallow clone, the linknodes callback needs to also include
1236 # In a shallow clone, the linknodes callback needs to also include
1235 # those file nodes that are in the manifests we sent but weren't
1237 # those file nodes that are in the manifests we sent but weren't
1236 # introduced by those manifests.
1238 # introduced by those manifests.
1237 commonctxs = [self._repo[c] for c in commonrevs]
1239 commonctxs = [self._repo[c] for c in commonrevs]
1238 clrev = self._repo.changelog.rev
1240 clrev = self._repo.changelog.rev
1239
1241
1240 # Defining this function has a side-effect of overriding the
1242 # Defining this function has a side-effect of overriding the
1241 # function of the same name that was passed in as an argument.
1243 # function of the same name that was passed in as an argument.
1242 # TODO have caller pass in appropriate function.
1244 # TODO have caller pass in appropriate function.
1243 def linknodes(flog, fname):
1245 def linknodes(flog, fname):
1244 for c in commonctxs:
1246 for c in commonctxs:
1245 try:
1247 try:
1246 fnode = c.filenode(fname)
1248 fnode = c.filenode(fname)
1247 clrevtolocalrev[c.rev()] = flog.rev(fnode)
1249 clrevtolocalrev[c.rev()] = flog.rev(fnode)
1248 except error.ManifestLookupError:
1250 except error.ManifestLookupError:
1249 pass
1251 pass
1250 links = normallinknodes(flog, fname)
1252 links = normallinknodes(flog, fname)
1251 if len(links) != len(mfdicts):
1253 if len(links) != len(mfdicts):
1252 for mf, lr in mfdicts:
1254 for mf, lr in mfdicts:
1253 fnode = mf.get(fname, None)
1255 fnode = mf.get(fname, None)
1254 if fnode in links:
1256 if fnode in links:
1255 links[fnode] = min(links[fnode], lr, key=clrev)
1257 links[fnode] = min(links[fnode], lr, key=clrev)
1256 elif fnode:
1258 elif fnode:
1257 links[fnode] = lr
1259 links[fnode] = lr
1258 return links
1260 return links
1259 else:
1261 else:
1260 linknodes = normallinknodes
1262 linknodes = normallinknodes
1261
1263
1262 repo = self._repo
1264 repo = self._repo
1263 cl = repo.changelog
1264 progress = repo.ui.makeprogress(_('bundling'), unit=_('files'),
1265 progress = repo.ui.makeprogress(_('bundling'), unit=_('files'),
1265 total=len(changedfiles))
1266 total=len(changedfiles))
1266 for i, fname in enumerate(sorted(changedfiles)):
1267 for i, fname in enumerate(sorted(changedfiles)):
1267 filerevlog = repo.file(fname)
1268 filerevlog = repo.file(fname)
1268 if not filerevlog:
1269 if not filerevlog:
1269 raise error.Abort(_("empty or missing file data for %s") %
1270 raise error.Abort(_("empty or missing file data for %s") %
1270 fname)
1271 fname)
1271
1272
1272 clrevtolocalrev.clear()
1273 clrevtolocalrev.clear()
1273
1274
1274 linkrevnodes = linknodes(filerevlog, fname)
1275 linkrevnodes = linknodes(filerevlog, fname)
1275 # Lookup for filenodes, we collected the linkrev nodes above in the
1276 # Lookup for filenodes, we collected the linkrev nodes above in the
1276 # fastpath case and with lookupmf in the slowpath case.
1277 # fastpath case and with lookupmf in the slowpath case.
1277 def lookupfilelog(x):
1278 def lookupfilelog(x):
1278 return linkrevnodes[x]
1279 return linkrevnodes[x]
1279
1280
1280 frev, flr = filerevlog.rev, filerevlog.linkrev
1281 frev, flr = filerevlog.rev, filerevlog.linkrev
1281 filenodes = [n for n in linkrevnodes
1282 filenodes = [n for n in linkrevnodes
1282 if flr(frev(n)) not in commonrevs]
1283 if flr(frev(n)) not in commonrevs]
1283
1284
1284 if not filenodes:
1285 if not filenodes:
1285 continue
1286 continue
1286
1287
1287 if self._ellipses:
1288 revs = _sortnodesellipsis(filerevlog, filenodes,
1289 cl, lookupfilelog)
1290 else:
1291 revs = _sortnodesnormal(filerevlog, filenodes,
1292 self._reorder)
1293
1294 progress.update(i + 1, item=fname)
1288 progress.update(i + 1, item=fname)
1295
1289
1296 deltas = deltagroup(
1290 deltas = deltagroup(
1297 self._repo, revs, filerevlog, False, lookupfilelog,
1291 self._repo, filerevlog, filenodes, False, lookupfilelog,
1298 self._forcedeltaparentprev,
1292 self._forcedeltaparentprev, self._reorder,
1299 ellipses=self._ellipses,
1293 ellipses=self._ellipses,
1300 clrevtolocalrev=clrevtolocalrev,
1294 clrevtolocalrev=clrevtolocalrev,
1301 fullclnodes=self._fullclnodes,
1295 fullclnodes=self._fullclnodes,
1302 precomputedellipsis=self._precomputedellipsis)
1296 precomputedellipsis=self._precomputedellipsis)
1303
1297
1304 yield fname, deltas
1298 yield fname, deltas
1305
1299
1306 progress.complete()
1300 progress.complete()
1307
1301
1308 def _makecg1packer(repo, filematcher, bundlecaps, ellipses=False,
1302 def _makecg1packer(repo, filematcher, bundlecaps, ellipses=False,
1309 shallow=False, ellipsisroots=None, fullnodes=None):
1303 shallow=False, ellipsisroots=None, fullnodes=None):
1310 builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack(
1304 builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack(
1311 d.node, d.p1node, d.p2node, d.linknode)
1305 d.node, d.p1node, d.p2node, d.linknode)
1312
1306
1313 return cgpacker(repo, filematcher, b'01',
1307 return cgpacker(repo, filematcher, b'01',
1314 allowreorder=None,
1308 allowreorder=None,
1315 builddeltaheader=builddeltaheader,
1309 builddeltaheader=builddeltaheader,
1316 manifestsend=b'',
1310 manifestsend=b'',
1317 forcedeltaparentprev=True,
1311 forcedeltaparentprev=True,
1318 bundlecaps=bundlecaps,
1312 bundlecaps=bundlecaps,
1319 ellipses=ellipses,
1313 ellipses=ellipses,
1320 shallow=shallow,
1314 shallow=shallow,
1321 ellipsisroots=ellipsisroots,
1315 ellipsisroots=ellipsisroots,
1322 fullnodes=fullnodes)
1316 fullnodes=fullnodes)
1323
1317
1324 def _makecg2packer(repo, filematcher, bundlecaps, ellipses=False,
1318 def _makecg2packer(repo, filematcher, bundlecaps, ellipses=False,
1325 shallow=False, ellipsisroots=None, fullnodes=None):
1319 shallow=False, ellipsisroots=None, fullnodes=None):
1326 builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack(
1320 builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack(
1327 d.node, d.p1node, d.p2node, d.basenode, d.linknode)
1321 d.node, d.p1node, d.p2node, d.basenode, d.linknode)
1328
1322
1329 # Since generaldelta is directly supported by cg2, reordering
1323 # Since generaldelta is directly supported by cg2, reordering
1330 # generally doesn't help, so we disable it by default (treating
1324 # generally doesn't help, so we disable it by default (treating
1331 # bundle.reorder=auto just like bundle.reorder=False).
1325 # bundle.reorder=auto just like bundle.reorder=False).
1332 return cgpacker(repo, filematcher, b'02',
1326 return cgpacker(repo, filematcher, b'02',
1333 allowreorder=False,
1327 allowreorder=False,
1334 builddeltaheader=builddeltaheader,
1328 builddeltaheader=builddeltaheader,
1335 manifestsend=b'',
1329 manifestsend=b'',
1336 bundlecaps=bundlecaps,
1330 bundlecaps=bundlecaps,
1337 ellipses=ellipses,
1331 ellipses=ellipses,
1338 shallow=shallow,
1332 shallow=shallow,
1339 ellipsisroots=ellipsisroots,
1333 ellipsisroots=ellipsisroots,
1340 fullnodes=fullnodes)
1334 fullnodes=fullnodes)
1341
1335
1342 def _makecg3packer(repo, filematcher, bundlecaps, ellipses=False,
1336 def _makecg3packer(repo, filematcher, bundlecaps, ellipses=False,
1343 shallow=False, ellipsisroots=None, fullnodes=None):
1337 shallow=False, ellipsisroots=None, fullnodes=None):
1344 builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
1338 builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
1345 d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags)
1339 d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags)
1346
1340
1347 return cgpacker(repo, filematcher, b'03',
1341 return cgpacker(repo, filematcher, b'03',
1348 allowreorder=False,
1342 allowreorder=False,
1349 builddeltaheader=builddeltaheader,
1343 builddeltaheader=builddeltaheader,
1350 manifestsend=closechunk(),
1344 manifestsend=closechunk(),
1351 bundlecaps=bundlecaps,
1345 bundlecaps=bundlecaps,
1352 ellipses=ellipses,
1346 ellipses=ellipses,
1353 shallow=shallow,
1347 shallow=shallow,
1354 ellipsisroots=ellipsisroots,
1348 ellipsisroots=ellipsisroots,
1355 fullnodes=fullnodes)
1349 fullnodes=fullnodes)
1356
1350
1357 _packermap = {'01': (_makecg1packer, cg1unpacker),
1351 _packermap = {'01': (_makecg1packer, cg1unpacker),
1358 # cg2 adds support for exchanging generaldelta
1352 # cg2 adds support for exchanging generaldelta
1359 '02': (_makecg2packer, cg2unpacker),
1353 '02': (_makecg2packer, cg2unpacker),
1360 # cg3 adds support for exchanging revlog flags and treemanifests
1354 # cg3 adds support for exchanging revlog flags and treemanifests
1361 '03': (_makecg3packer, cg3unpacker),
1355 '03': (_makecg3packer, cg3unpacker),
1362 }
1356 }
1363
1357
1364 def allsupportedversions(repo):
1358 def allsupportedversions(repo):
1365 versions = set(_packermap.keys())
1359 versions = set(_packermap.keys())
1366 if not (repo.ui.configbool('experimental', 'changegroup3') or
1360 if not (repo.ui.configbool('experimental', 'changegroup3') or
1367 repo.ui.configbool('experimental', 'treemanifest') or
1361 repo.ui.configbool('experimental', 'treemanifest') or
1368 'treemanifest' in repo.requirements):
1362 'treemanifest' in repo.requirements):
1369 versions.discard('03')
1363 versions.discard('03')
1370 return versions
1364 return versions
1371
1365
1372 # Changegroup versions that can be applied to the repo
1366 # Changegroup versions that can be applied to the repo
1373 def supportedincomingversions(repo):
1367 def supportedincomingversions(repo):
1374 return allsupportedversions(repo)
1368 return allsupportedversions(repo)
1375
1369
1376 # Changegroup versions that can be created from the repo
1370 # Changegroup versions that can be created from the repo
1377 def supportedoutgoingversions(repo):
1371 def supportedoutgoingversions(repo):
1378 versions = allsupportedversions(repo)
1372 versions = allsupportedversions(repo)
1379 if 'treemanifest' in repo.requirements:
1373 if 'treemanifest' in repo.requirements:
1380 # Versions 01 and 02 support only flat manifests and it's just too
1374 # Versions 01 and 02 support only flat manifests and it's just too
1381 # expensive to convert between the flat manifest and tree manifest on
1375 # expensive to convert between the flat manifest and tree manifest on
1382 # the fly. Since tree manifests are hashed differently, all of history
1376 # the fly. Since tree manifests are hashed differently, all of history
1383 # would have to be converted. Instead, we simply don't even pretend to
1377 # would have to be converted. Instead, we simply don't even pretend to
1384 # support versions 01 and 02.
1378 # support versions 01 and 02.
1385 versions.discard('01')
1379 versions.discard('01')
1386 versions.discard('02')
1380 versions.discard('02')
1387 if repository.NARROW_REQUIREMENT in repo.requirements:
1381 if repository.NARROW_REQUIREMENT in repo.requirements:
1388 # Versions 01 and 02 don't support revlog flags, and we need to
1382 # Versions 01 and 02 don't support revlog flags, and we need to
1389 # support that for stripping and unbundling to work.
1383 # support that for stripping and unbundling to work.
1390 versions.discard('01')
1384 versions.discard('01')
1391 versions.discard('02')
1385 versions.discard('02')
1392 if LFS_REQUIREMENT in repo.requirements:
1386 if LFS_REQUIREMENT in repo.requirements:
1393 # Versions 01 and 02 don't support revlog flags, and we need to
1387 # Versions 01 and 02 don't support revlog flags, and we need to
1394 # mark LFS entries with REVIDX_EXTSTORED.
1388 # mark LFS entries with REVIDX_EXTSTORED.
1395 versions.discard('01')
1389 versions.discard('01')
1396 versions.discard('02')
1390 versions.discard('02')
1397
1391
1398 return versions
1392 return versions
1399
1393
1400 def localversion(repo):
1394 def localversion(repo):
1401 # Finds the best version to use for bundles that are meant to be used
1395 # Finds the best version to use for bundles that are meant to be used
1402 # locally, such as those from strip and shelve, and temporary bundles.
1396 # locally, such as those from strip and shelve, and temporary bundles.
1403 return max(supportedoutgoingversions(repo))
1397 return max(supportedoutgoingversions(repo))
1404
1398
1405 def safeversion(repo):
1399 def safeversion(repo):
1406 # Finds the smallest version that it's safe to assume clients of the repo
1400 # Finds the smallest version that it's safe to assume clients of the repo
1407 # will support. For example, all hg versions that support generaldelta also
1401 # will support. For example, all hg versions that support generaldelta also
1408 # support changegroup 02.
1402 # support changegroup 02.
1409 versions = supportedoutgoingversions(repo)
1403 versions = supportedoutgoingversions(repo)
1410 if 'generaldelta' in repo.requirements:
1404 if 'generaldelta' in repo.requirements:
1411 versions.discard('01')
1405 versions.discard('01')
1412 assert versions
1406 assert versions
1413 return min(versions)
1407 return min(versions)
1414
1408
1415 def getbundler(version, repo, bundlecaps=None, filematcher=None,
1409 def getbundler(version, repo, bundlecaps=None, filematcher=None,
1416 ellipses=False, shallow=False, ellipsisroots=None,
1410 ellipses=False, shallow=False, ellipsisroots=None,
1417 fullnodes=None):
1411 fullnodes=None):
1418 assert version in supportedoutgoingversions(repo)
1412 assert version in supportedoutgoingversions(repo)
1419
1413
1420 if filematcher is None:
1414 if filematcher is None:
1421 filematcher = matchmod.alwaysmatcher(repo.root, '')
1415 filematcher = matchmod.alwaysmatcher(repo.root, '')
1422
1416
1423 if version == '01' and not filematcher.always():
1417 if version == '01' and not filematcher.always():
1424 raise error.ProgrammingError('version 01 changegroups do not support '
1418 raise error.ProgrammingError('version 01 changegroups do not support '
1425 'sparse file matchers')
1419 'sparse file matchers')
1426
1420
1427 if ellipses and version in (b'01', b'02'):
1421 if ellipses and version in (b'01', b'02'):
1428 raise error.Abort(
1422 raise error.Abort(
1429 _('ellipsis nodes require at least cg3 on client and server, '
1423 _('ellipsis nodes require at least cg3 on client and server, '
1430 'but negotiated version %s') % version)
1424 'but negotiated version %s') % version)
1431
1425
1432 # Requested files could include files not in the local store. So
1426 # Requested files could include files not in the local store. So
1433 # filter those out.
1427 # filter those out.
1434 filematcher = matchmod.intersectmatchers(repo.narrowmatch(),
1428 filematcher = matchmod.intersectmatchers(repo.narrowmatch(),
1435 filematcher)
1429 filematcher)
1436
1430
1437 fn = _packermap[version][0]
1431 fn = _packermap[version][0]
1438 return fn(repo, filematcher, bundlecaps, ellipses=ellipses,
1432 return fn(repo, filematcher, bundlecaps, ellipses=ellipses,
1439 shallow=shallow, ellipsisroots=ellipsisroots,
1433 shallow=shallow, ellipsisroots=ellipsisroots,
1440 fullnodes=fullnodes)
1434 fullnodes=fullnodes)
1441
1435
1442 def getunbundler(version, fh, alg, extras=None):
1436 def getunbundler(version, fh, alg, extras=None):
1443 return _packermap[version][1](fh, alg, extras=extras)
1437 return _packermap[version][1](fh, alg, extras=extras)
1444
1438
1445 def _changegroupinfo(repo, nodes, source):
1439 def _changegroupinfo(repo, nodes, source):
1446 if repo.ui.verbose or source == 'bundle':
1440 if repo.ui.verbose or source == 'bundle':
1447 repo.ui.status(_("%d changesets found\n") % len(nodes))
1441 repo.ui.status(_("%d changesets found\n") % len(nodes))
1448 if repo.ui.debugflag:
1442 if repo.ui.debugflag:
1449 repo.ui.debug("list of changesets:\n")
1443 repo.ui.debug("list of changesets:\n")
1450 for node in nodes:
1444 for node in nodes:
1451 repo.ui.debug("%s\n" % hex(node))
1445 repo.ui.debug("%s\n" % hex(node))
1452
1446
def makechangegroup(repo, outgoing, version, source, fastpath=False,
                    bundlecaps=None):
    """Build a changegroup stream for ``outgoing`` and wrap it in an
    unbundler ready for application.

    The ``clcount`` extra records the number of changesets so progress can
    be reported while the bundle is applied.
    """
    stream = makestream(repo, outgoing, version, source,
                        fastpath=fastpath, bundlecaps=bundlecaps)
    extras = {'clcount': len(outgoing.missing)}
    return getunbundler(version, util.chunkbuffer(stream), None, extras)
1459
1453
def makestream(repo, outgoing, version, source, fastpath=False,
               bundlecaps=None, filematcher=None):
    """Return an iterator of changegroup chunks for ``outgoing``.

    Fires the ``preoutgoing`` hook and reports the changeset count before
    generation starts.
    """
    bundler = getbundler(version, repo, bundlecaps=bundlecaps,
                         filematcher=filematcher)

    repo = repo.unfiltered()
    common = outgoing.common
    missing = outgoing.missing
    heads = outgoing.missingheads
    # Take the fast path when explicitly told to, or when all (unfiltered)
    # heads were requested — in that case we know the client will pull every
    # linkrev, so linkrev fixups are unnecessary.
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, missing, source)
    return bundler.generate(common, missing, fastpathlinkrev, source)
1479
1473
def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    """Apply the file-revision portion of a changegroup to the local repo.

    ``source`` yields per-file headers and delta groups; each group is added
    to the corresponding filelog. ``needfiles`` maps filename -> set of
    expected nodes; every received node is checked off, and any node still
    missing afterwards aborts with a verify hint.

    Returns a ``(revisions, files)`` tuple of how many file revisions and
    how many distinct files were added.
    """
    revcount = 0
    filecount = 0
    progress = repo.ui.makeprogress(_('files'), unit=_('files'),
                                    total=expectedfiles)
    # source.filelogheader() returns {} once every file has been consumed.
    for header in iter(source.filelogheader, {}):
        filecount += 1
        fname = header["filename"]
        repo.ui.debug("adding %s revisions\n" % fname)
        progress.increment()
        flog = repo.file(fname)
        oldlen = len(flog)
        try:
            deltas = source.deltaiter()
            if not flog.addgroup(deltas, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revcount += len(flog) - oldlen
        if fname in needfiles:
            pending = needfiles[fname]
            # Cross every newly-added node off the expected set; a node we
            # did not expect means the stream is corrupt.
            for rev in pycompat.xrange(oldlen, len(flog)):
                node = flog.node(rev)
                if node not in pending:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
                pending.remove(node)
            if not pending:
                del needfiles[fname]
    progress.complete()

    # Anything left in needfiles was expected but never received (unless it
    # already exists locally, which flog.rev() confirms).
    for fname, pending in needfiles.iteritems():
        flog = repo.file(fname)
        for node in pending:
            try:
                flog.rev(node)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (fname, hex(node)))

    return revcount, filecount
General Comments 0
You need to be logged in to leave comments. Login now