##// END OF EJS Templates
changegroup: use positive logic for treemanifest changegroup3 logic...
marmoute -
r43329:4bbc9569 default
parent child Browse files
Show More
@@ -1,1441 +1,1451 b''
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import os
10 import os
11 import struct
11 import struct
12 import weakref
12 import weakref
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import (
15 from .node import (
16 hex,
16 hex,
17 nullid,
17 nullid,
18 nullrev,
18 nullrev,
19 short,
19 short,
20 )
20 )
21
21
22 from . import (
22 from . import (
23 error,
23 error,
24 match as matchmod,
24 match as matchmod,
25 mdiff,
25 mdiff,
26 phases,
26 phases,
27 pycompat,
27 pycompat,
28 util,
28 util,
29 )
29 )
30
30
31 from .interfaces import (
31 from .interfaces import (
32 repository,
32 repository,
33 )
33 )
34
34
35 _CHANGEGROUPV1_DELTA_HEADER = struct.Struct("20s20s20s20s")
35 _CHANGEGROUPV1_DELTA_HEADER = struct.Struct("20s20s20s20s")
36 _CHANGEGROUPV2_DELTA_HEADER = struct.Struct("20s20s20s20s20s")
36 _CHANGEGROUPV2_DELTA_HEADER = struct.Struct("20s20s20s20s20s")
37 _CHANGEGROUPV3_DELTA_HEADER = struct.Struct(">20s20s20s20s20sH")
37 _CHANGEGROUPV3_DELTA_HEADER = struct.Struct(">20s20s20s20s20sH")
38
38
39 LFS_REQUIREMENT = 'lfs'
39 LFS_REQUIREMENT = 'lfs'
40
40
41 readexactly = util.readexactly
41 readexactly = util.readexactly
42
42
def getchunk(stream):
    """return the next chunk from stream as a string

    A chunk is a 4-byte big-endian length prefix (which counts its own
    4 bytes) followed by the payload. A stored length of 0 marks an
    empty "close" chunk and is returned as "". Any other length <= 4
    is impossible in a well-formed stream and aborts.
    """
    d = readexactly(stream, 4)
    l = struct.unpack(">l", d)[0]
    if l <= 4:
        if l:
            raise error.Abort(_("invalid chunk length %d") % l)
        return ""
    # Subtract the 4 bytes of the length field already consumed.
    return readexactly(stream, l - 4)
52
52
def chunkheader(length):
    """return a changegroup chunk header (string)

    The on-wire length includes the 4 bytes of the length field itself,
    so a payload of ``length`` bytes is announced as ``length + 4``.
    """
    return struct.pack(">l", length + 4)
56
56
def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk

    A stored length of 0 (not 4) is the sentinel that terminates a
    group; see getchunk(), which maps it back to "".
    """
    return struct.pack(">l", 0)
60
60
def _fileheader(path):
    """Obtain a changegroup chunk header for a named path.

    Emits the path itself as a chunk: length prefix (via chunkheader,
    which accounts for the 4 length bytes) followed by the raw path.
    """
    return chunkheader(len(path)) + path
64
64
def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    # While ``cleanup`` is non-None, a failure before all chunks are
    # written removes the partial file in the ``finally`` block.
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                # Increase default buffer size because default is usually
                # small (4k is common on Linux).
                fh = open(filename, "wb", 131072)
        else:
            fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, r"wb")
        cleanup = filename
        for c in chunks:
            fh.write(c)
        # All chunks written successfully: keep the file.
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
98
98
class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        if alg is None:
            alg = 'UN'
        if alg not in util.compengines.supportedbundletypes:
            raise error.Abort(_('unknown stream compression type: %s')
                              % alg)
        if alg == 'BZ':
            alg = '_truncatedBZ'

        compengine = util.compengines.forbundletype(alg)
        self._stream = compengine.decompressorreader(fh)
        self._type = alg
        self.extras = extras or {}
        # Progress callback invoked once per chunk read; see _chunklength.
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None and self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        """Read a chunk length prefix; return the payload size (0 at end)."""
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        # The stored length counts the 4 length bytes themselves.
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        # cg1 has no explicit delta base: deltas chain against the
        # previous node in the stream, or p1 for the first entry.
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        """Read one delta chunk; {} at end of the current group."""
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = self.deltaheader.unpack(headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return (node, p1, p2, cs, deltabase, delta, flags)

    def getchunks(self):
        """returns all the chunks contains in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parse the changegroup data, otherwise it will
        block in case of sshrepo because it don't know the end of the stream.
        """
        # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
        # and a list of filelogs. For changegroup 3, we expect 4 parts:
        # changelog, manifestlog, a list of tree manifestlogs, and a list of
        # filelogs.
        #
        # Changelog and manifestlog parts are terminated with empty chunks. The
        # tree and file parts are a list of entry sections. Each entry section
        # is a series of chunks terminating in an empty chunk. The list of these
        # entry sections is terminated in yet another empty chunk, so we know
        # we've reached the end of the tree/file list when we reach an empty
        # chunk that was proceeded by no non-empty chunks.

        parts = 0
        while parts < 2 + self._grouplistcount:
            noentries = True
            while True:
                chunk = getchunk(self)
                if not chunk:
                    # The first two empty chunks represent the end of the
                    # changelog and the manifestlog portions. The remaining
                    # empty chunks represent either A) the end of individual
                    # tree or file entries in the file list, or B) the end of
                    # the entire list. It's the end of the entire list if there
                    # were no entries (i.e. noentries is True).
                    if parts < 2:
                        parts += 1
                    elif noentries:
                        parts += 1
                    break
                noentries = False
                yield chunkheader(len(chunk))
                pos = 0
                # Re-emit payloads in at most 1MB slices.
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

    def _unpackmanifests(self, repo, revmap, trp, prog):
        self.callback = prog.increment
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        deltas = self.deltaiter()
        repo.manifestlog.getstorage(b'').addgroup(deltas, revmap, trp)
        prog.complete()
        self.callback = None

    def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
              expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = 0

        try:
            # The transaction may already carry source information. In this
            # case we use the top level data. We overwrite the argument
            # because we need to use the top level value (if they exist)
            # in this function.
            srctype = tr.hookargs.setdefault('source', srctype)
            tr.hookargs.setdefault('url', url)
            repo.hook('prechangegroup',
                      throw=True, **pycompat.strkwargs(tr.hookargs))

            # write changelog data to temp files so concurrent readers
            # will not see an inconsistent view
            cl = repo.changelog
            cl.delayupdate(tr)
            oldheads = set(cl.heads())

            trp = weakref.proxy(tr)
            # pull off the changeset group
            repo.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            progress = repo.ui.makeprogress(_('changesets'), unit=_('chunks'),
                                            total=expectedtotal)
            self.callback = progress.increment

            efiles = set()
            def onchangelog(cl, node):
                efiles.update(cl.readfiles(node))

            self.changelogheader()
            deltas = self.deltaiter()
            cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog)
            efiles = len(efiles)

            if not cgnodes:
                repo.ui.develwarn('applied empty changelog from changegroup',
                                  config='warn-empty-changegroup')
            clend = len(cl)
            changesets = clend - clstart
            progress.complete()
            self.callback = None

            # pull off the manifest group
            repo.ui.status(_("adding manifests\n"))
            # We know that we'll never have more manifests than we had
            # changesets.
            progress = repo.ui.makeprogress(_('manifests'), unit=_('chunks'),
                                            total=changesets)
            self._unpackmanifests(repo, revmap, trp, progress)

            needfiles = {}
            if repo.ui.configbool('server', 'validate'):
                cl = repo.changelog
                ml = repo.manifestlog
                # validate incoming csets have their manifests
                for cset in pycompat.xrange(clstart, clend):
                    mfnode = cl.changelogrevision(cset).manifest
                    mfest = ml[mfnode].readdelta()
                    # store file cgnodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            repo.ui.status(_("adding file changes\n"))
            newrevs, newfiles = _addchangegroupfiles(
                repo, self, revmap, trp, efiles, needfiles)

            # making sure the value exists
            tr.changes.setdefault('changegroup-count-changesets', 0)
            tr.changes.setdefault('changegroup-count-revisions', 0)
            tr.changes.setdefault('changegroup-count-files', 0)
            tr.changes.setdefault('changegroup-count-heads', 0)

            # some code use bundle operation for internal purpose. They usually
            # set `ui.quiet` to do this outside of user sight. Size the report
            # of such operation now happens at the end of the transaction, that
            # ui.quiet has not direct effect on the output.
            #
            # To preserve this intend use an inelegant hack, we fail to report
            # the change if `quiet` is set. We should probably move to
            # something better, but this is a good first step to allow the "end
            # of transaction report" to pass tests.
            if not repo.ui.quiet:
                tr.changes['changegroup-count-changesets'] += changesets
                tr.changes['changegroup-count-revisions'] += newrevs
                tr.changes['changegroup-count-files'] += newfiles

            deltaheads = 0
            if oldheads:
                heads = cl.heads()
                deltaheads += len(heads) - len(oldheads)
                for h in heads:
                    # A new head that closes its branch does not count.
                    if h not in oldheads and repo[h].closesbranch():
                        deltaheads -= 1

            # see previous comment about checking ui.quiet
            if not repo.ui.quiet:
                tr.changes['changegroup-count-heads'] += deltaheads
            repo.invalidatevolatilesets()

            if changesets > 0:
                if 'node' not in tr.hookargs:
                    tr.hookargs['node'] = hex(cl.node(clstart))
                    tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                    hookargs = dict(tr.hookargs)
                else:
                    hookargs = dict(tr.hookargs)
                    hookargs['node'] = hex(cl.node(clstart))
                    hookargs['node_last'] = hex(cl.node(clend - 1))
                repo.hook('pretxnchangegroup',
                          throw=True, **pycompat.strkwargs(hookargs))

            added = [cl.node(r) for r in pycompat.xrange(clstart, clend)]
            phaseall = None
            if srctype in ('push', 'serve'):
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if changeset already
                # exists locally as secret
                #
                # We should not use added here but the list of all change in
                # the bundle
                if repo.publishing():
                    targetphase = phaseall = phases.public
                else:
                    # closer target phase computation

                    # Those changesets have been pushed from the
                    # outside, their phases are going to be pushed
                    # alongside. Therefor `targetphase` is
                    # ignored.
                    targetphase = phaseall = phases.draft
            if added:
                phases.registernew(repo, tr, targetphase, added)
            if phaseall is not None:
                phases.advanceboundary(repo, tr, phaseall, cgnodes)

            if changesets > 0:

                def runhooks():
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(repo):
                        return

                    repo.hook("changegroup", **pycompat.strkwargs(hookargs))

                    for n in added:
                        args = hookargs.copy()
                        args['node'] = hex(n)
                        del args['node_last']
                        repo.hook("incoming", **pycompat.strkwargs(args))

                    newheads = [h for h in repo.heads()
                                if h not in oldheads]
                    repo.ui.log("incoming",
                                "%d incoming changes - new heads: %s\n",
                                len(added),
                                ', '.join([hex(c[:6]) for c in newheads]))

                tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        if deltaheads < 0:
            ret = deltaheads - 1
        else:
            ret = deltaheads + 1
        return ret

    def deltaiter(self):
        """
        returns an iterator of the deltas in this changegroup

        Useful for passing to the underlying storage system to be stored.
        """
        chain = None
        for chunkdata in iter(lambda: self.deltachunk(chain), {}):
            # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
            yield chunkdata
            chain = chunkdata[0]
456
456
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        # Unlike cg1, the delta base is carried explicitly in the header,
        # so prevnode is not consulted.
        node, p1, p2, deltabase, cs = headertuple
        flags = 0
        return node, p1, p2, deltabase, cs, flags
472
472
class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        # The header already contains base and flags; pass through as-is.
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog):
        # Consume the root manifest group first, then any per-directory
        # (tree manifest) groups that follow it.
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % d)
            deltas = self.deltaiter()
            if not repo.manifestlog.getstorage(d).addgroup(deltas, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))
498
498
class headerlessfixup(object):
    """File-like wrapper that replays an already-consumed header.

    ``h`` is a chunk of bytes that was read off ``fh`` before this object
    was constructed; reads are satisfied from that buffer first, then fall
    through to the underlying stream.
    """
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        buffered = self._h
        if not buffered:
            return readexactly(self._fh, n)
        piece = buffered[:n]
        self._h = buffered[n:]
        if len(piece) < n:
            # Header exhausted mid-read; top up from the real stream.
            piece += readexactly(self._fh, n - len(piece))
        return piece
510
510
def _revisiondeltatochunks(delta, headerfn):
    """Serialize a revisiondelta to changegroup chunks."""

    # The captured revision delta may be encoded as a delta against
    # a base revision or as a full revision. The changegroup format
    # requires that everything on the wire be deltas. So for full
    # revisions, we need to invent a header that says to rewrite
    # data.

    if delta.delta is not None:
        data = delta.delta
        prefix = b''
    else:
        data = delta.revision
        if delta.basenode == nullid:
            prefix = mdiff.trivialdiffheader(len(data))
        else:
            prefix = mdiff.replacediffheader(delta.baserevisionsize,
                                             len(data))

    meta = headerfn(delta)

    yield chunkheader(len(meta) + len(prefix) + len(data))
    yield meta
    if prefix:
        yield prefix
    yield data
537
537
538 def _sortnodesellipsis(store, nodes, cl, lookup):
538 def _sortnodesellipsis(store, nodes, cl, lookup):
539 """Sort nodes for changegroup generation."""
539 """Sort nodes for changegroup generation."""
540 # Ellipses serving mode.
540 # Ellipses serving mode.
541 #
541 #
542 # In a perfect world, we'd generate better ellipsis-ified graphs
542 # In a perfect world, we'd generate better ellipsis-ified graphs
543 # for non-changelog revlogs. In practice, we haven't started doing
543 # for non-changelog revlogs. In practice, we haven't started doing
544 # that yet, so the resulting DAGs for the manifestlog and filelogs
544 # that yet, so the resulting DAGs for the manifestlog and filelogs
545 # are actually full of bogus parentage on all the ellipsis
545 # are actually full of bogus parentage on all the ellipsis
546 # nodes. This has the side effect that, while the contents are
546 # nodes. This has the side effect that, while the contents are
547 # correct, the individual DAGs might be completely out of whack in
547 # correct, the individual DAGs might be completely out of whack in
548 # a case like 882681bc3166 and its ancestors (back about 10
548 # a case like 882681bc3166 and its ancestors (back about 10
549 # revisions or so) in the main hg repo.
549 # revisions or so) in the main hg repo.
550 #
550 #
551 # The one invariant we *know* holds is that the new (potentially
551 # The one invariant we *know* holds is that the new (potentially
552 # bogus) DAG shape will be valid if we order the nodes in the
552 # bogus) DAG shape will be valid if we order the nodes in the
553 # order that they're introduced in dramatis personae by the
553 # order that they're introduced in dramatis personae by the
554 # changelog, so what we do is we sort the non-changelog histories
554 # changelog, so what we do is we sort the non-changelog histories
555 # by the order in which they are used by the changelog.
555 # by the order in which they are used by the changelog.
556 key = lambda n: cl.rev(lookup(n))
556 key = lambda n: cl.rev(lookup(n))
557 return sorted(nodes, key=key)
557 return sorted(nodes, key=key)
558
558
def _resolvenarrowrevisioninfo(cl, store, ischangelog, rev, linkrev,
                               linknode, clrevtolocalrev, fullclnodes,
                               precomputedellipsis):
    """Compute the (p1node, p2node, linknode) for an ellipsis revision."""
    linkparents = precomputedellipsis[linkrev]
    def local(clrev):
        """Turn a changelog revnum into a local revnum.

        The ellipsis dag is stored as revnums on the changelog,
        but when we're producing ellipsis entries for
        non-changelog revlogs, we need to turn those numbers into
        something local. This does that for us, and during the
        changelog sending phase will also expand the stored
        mappings as needed.
        """
        if clrev == nullrev:
            return nullrev

        if ischangelog:
            return clrev

        # Walk the ellipsis-ized changelog breadth-first looking for a
        # change that has been linked from the current revlog.
        #
        # For a flat manifest revlog only a single step should be necessary
        # as all relevant changelog entries are relevant to the flat
        # manifest.
        #
        # For a filelog or tree manifest dirlog however not every changelog
        # entry will have been relevant, so we need to skip some changelog
        # nodes even after ellipsis-izing.
        pending = [clrev]
        while pending:
            candidate = pending.pop(0)
            if candidate in clrevtolocalrev:
                return clrevtolocalrev[candidate]
            elif candidate in fullclnodes:
                pending.extend(pp for pp in cl.parentrevs(candidate)
                               if pp != nullrev)
            elif candidate in precomputedellipsis:
                pending.extend(pp for pp in precomputedellipsis[candidate]
                               if pp != nullrev)
            else:
                # In this case, we've got an ellipsis with parents
                # outside the current bundle (likely an
                # incremental pull). We "know" that we can use the
                # value of this same revlog at whatever revision
                # is pointed to by linknode. "Know" is in scare
                # quotes because I haven't done enough examination
                # of edge cases to convince myself this is really
                # a fact - it works for all the (admittedly
                # thorough) cases in our testsuite, but I would be
                # somewhat unsurprised to find a case in the wild
                # where this breaks down a bit. That said, I don't
                # know if it would hurt anything.
                for i in pycompat.xrange(rev, 0, -1):
                    if store.linkrev(i) == clrev:
                        return i
                # We failed to resolve a parent for this node, so
                # we crash the changegroup construction.
                raise error.Abort(
                    'unable to resolve parent while packing %r %r'
                    ' for changeset %r' % (store.indexfile, rev, clrev))

        return nullrev

    if not linkparents or (
        store.parentrevs(rev) == (nullrev, nullrev)):
        p1, p2 = nullrev, nullrev
    elif len(linkparents) == 1:
        p1, = sorted(local(p) for p in linkparents)
        p2 = nullrev
    else:
        p1, p2 = sorted(local(p) for p in linkparents)

    p1node, p2node = store.node(p1), store.node(p2)

    return p1node, p2node, linknode
637
637
def deltagroup(repo, store, nodes, ischangelog, lookup, forcedeltaparentprev,
               topic=None,
               ellipses=False, clrevtolocalrev=None, fullclnodes=None,
               precomputedellipsis=None):
    """Calculate deltas for a set of revisions.

    Is a generator of ``revisiondelta`` instances.

    If topic is not None, progress detail will be generated using this
    topic name (e.g. changesets, manifests, etc).
    """
    if not nodes:
        return

    cl = repo.changelog

    if ischangelog:
        # `hg log` shows changesets in storage order. To preserve order
        # across clones, send out changesets in storage order.
        nodesorder = 'storage'
    elif ellipses:
        nodes = _sortnodesellipsis(store, nodes, cl, lookup)
        nodesorder = 'nodes'
    else:
        nodesorder = None

    # Perform ellipses filtering and revision massaging. We do this before
    # emitrevisions() because a) filtering out revisions creates less work
    # for emitrevisions() b) dropping revisions would break emitrevisions()'s
    # assumptions about delta choices and we would possibly send a delta
    # referencing a missing base revision.
    #
    # Also, calling lookup() has side-effects with regards to populating
    # data structures. If we don't call lookup() for each node or if we call
    # lookup() after the first pass through each node, things can break -
    # possibly intermittently depending on the python hash seed! For that
    # reason, we store a mapping of all linknodes during the initial node
    # pass rather than use lookup() on the output side.
    if ellipses:
        survivors = []
        adjustedparents = {}
        linknodes = {}

        for node in nodes:
            rev = store.rev(node)
            linknode = lookup(node)
            linkrev = cl.rev(linknode)
            clrevtolocalrev[linkrev] = rev

            if linknode in fullclnodes:
                # The corresponding changeset was a full changeset and is
                # being sent unaltered.
                linknodes[node] = linknode
            elif linkrev not in precomputedellipsis:
                # The corresponding changeset wasn't in the set computed
                # as relevant to us; drop the node outright.
                continue
            else:
                # We could probably do this later and avoid the dict
                # holding state. But it likely doesn't matter.
                p1node, p2node, linknode = _resolvenarrowrevisioninfo(
                    cl, store, ischangelog, rev, linkrev, linknode,
                    clrevtolocalrev, fullclnodes, precomputedellipsis)

                adjustedparents[node] = (p1node, p2node)
                linknodes[node] = linknode

            survivors.append(node)

        nodes = survivors

    # We expect the first pass to be fast, so we only engage the progress
    # meter for constructing the revision deltas.
    progress = None
    if topic is not None:
        progress = repo.ui.makeprogress(topic, unit=_('chunks'),
                                        total=len(nodes))

    configtarget = repo.ui.config('devel', 'bundle.delta')
    if configtarget not in ('', 'p1', 'full'):
        msg = _("""config "devel.bundle.delta" as unknown value: %s""")
        repo.ui.warn(msg % configtarget)

    deltamode = repository.CG_DELTAMODE_STD
    if forcedeltaparentprev:
        deltamode = repository.CG_DELTAMODE_PREV
    elif configtarget == 'p1':
        deltamode = repository.CG_DELTAMODE_P1
    elif configtarget == 'full':
        deltamode = repository.CG_DELTAMODE_FULL

    revisions = store.emitrevisions(
        nodes,
        nodesorder=nodesorder,
        revisiondata=True,
        assumehaveparentrevisions=not ellipses,
        deltamode=deltamode)

    for i, revision in enumerate(revisions):
        if progress:
            progress.update(i + 1)

        if ellipses:
            linknode = linknodes[revision.node]

            if revision.node in adjustedparents:
                p1node, p2node = adjustedparents[revision.node]
                revision.p1node = p1node
                revision.p2node = p2node
                revision.flags |= repository.REVISION_FLAG_ELLIPSIS
        else:
            linknode = lookup(revision.node)

        revision.linknode = linknode
        yield revision

    if progress:
        progress.complete()
759
759
760 class cgpacker(object):
760 class cgpacker(object):
761 def __init__(self, repo, oldmatcher, matcher, version,
761 def __init__(self, repo, oldmatcher, matcher, version,
762 builddeltaheader, manifestsend,
762 builddeltaheader, manifestsend,
763 forcedeltaparentprev=False,
763 forcedeltaparentprev=False,
764 bundlecaps=None, ellipses=False,
764 bundlecaps=None, ellipses=False,
765 shallow=False, ellipsisroots=None, fullnodes=None):
765 shallow=False, ellipsisroots=None, fullnodes=None):
766 """Given a source repo, construct a bundler.
766 """Given a source repo, construct a bundler.
767
767
768 oldmatcher is a matcher that matches on files the client already has.
768 oldmatcher is a matcher that matches on files the client already has.
769 These will not be included in the changegroup.
769 These will not be included in the changegroup.
770
770
771 matcher is a matcher that matches on files to include in the
771 matcher is a matcher that matches on files to include in the
772 changegroup. Used to facilitate sparse changegroups.
772 changegroup. Used to facilitate sparse changegroups.
773
773
774 forcedeltaparentprev indicates whether delta parents must be against
774 forcedeltaparentprev indicates whether delta parents must be against
775 the previous revision in a delta group. This should only be used for
775 the previous revision in a delta group. This should only be used for
776 compatibility with changegroup version 1.
776 compatibility with changegroup version 1.
777
777
778 builddeltaheader is a callable that constructs the header for a group
778 builddeltaheader is a callable that constructs the header for a group
779 delta.
779 delta.
780
780
781 manifestsend is a chunk to send after manifests have been fully emitted.
781 manifestsend is a chunk to send after manifests have been fully emitted.
782
782
783 ellipses indicates whether ellipsis serving mode is enabled.
783 ellipses indicates whether ellipsis serving mode is enabled.
784
784
785 bundlecaps is optional and can be used to specify the set of
785 bundlecaps is optional and can be used to specify the set of
786 capabilities which can be used to build the bundle. While bundlecaps is
786 capabilities which can be used to build the bundle. While bundlecaps is
787 unused in core Mercurial, extensions rely on this feature to communicate
787 unused in core Mercurial, extensions rely on this feature to communicate
788 capabilities to customize the changegroup packer.
788 capabilities to customize the changegroup packer.
789
789
790 shallow indicates whether shallow data might be sent. The packer may
790 shallow indicates whether shallow data might be sent. The packer may
791 need to pack file contents not introduced by the changes being packed.
791 need to pack file contents not introduced by the changes being packed.
792
792
793 fullnodes is the set of changelog nodes which should not be ellipsis
793 fullnodes is the set of changelog nodes which should not be ellipsis
794 nodes. We store this rather than the set of nodes that should be
794 nodes. We store this rather than the set of nodes that should be
795 ellipsis because for very large histories we expect this to be
795 ellipsis because for very large histories we expect this to be
796 significantly smaller.
796 significantly smaller.
797 """
797 """
798 assert oldmatcher
798 assert oldmatcher
799 assert matcher
799 assert matcher
800 self._oldmatcher = oldmatcher
800 self._oldmatcher = oldmatcher
801 self._matcher = matcher
801 self._matcher = matcher
802
802
803 self.version = version
803 self.version = version
804 self._forcedeltaparentprev = forcedeltaparentprev
804 self._forcedeltaparentprev = forcedeltaparentprev
805 self._builddeltaheader = builddeltaheader
805 self._builddeltaheader = builddeltaheader
806 self._manifestsend = manifestsend
806 self._manifestsend = manifestsend
807 self._ellipses = ellipses
807 self._ellipses = ellipses
808
808
809 # Set of capabilities we can use to build the bundle.
809 # Set of capabilities we can use to build the bundle.
810 if bundlecaps is None:
810 if bundlecaps is None:
811 bundlecaps = set()
811 bundlecaps = set()
812 self._bundlecaps = bundlecaps
812 self._bundlecaps = bundlecaps
813 self._isshallow = shallow
813 self._isshallow = shallow
814 self._fullclnodes = fullnodes
814 self._fullclnodes = fullnodes
815
815
816 # Maps ellipsis revs to their roots at the changelog level.
816 # Maps ellipsis revs to their roots at the changelog level.
817 self._precomputedellipsis = ellipsisroots
817 self._precomputedellipsis = ellipsisroots
818
818
819 self._repo = repo
819 self._repo = repo
820
820
821 if self._repo.ui.verbose and not self._repo.ui.debugflag:
821 if self._repo.ui.verbose and not self._repo.ui.debugflag:
822 self._verbosenote = self._repo.ui.note
822 self._verbosenote = self._repo.ui.note
823 else:
823 else:
824 self._verbosenote = lambda s: None
824 self._verbosenote = lambda s: None
825
825
826 def generate(self, commonrevs, clnodes, fastpathlinkrev, source,
826 def generate(self, commonrevs, clnodes, fastpathlinkrev, source,
827 changelog=True):
827 changelog=True):
828 """Yield a sequence of changegroup byte chunks.
828 """Yield a sequence of changegroup byte chunks.
829 If changelog is False, changelog data won't be added to changegroup
829 If changelog is False, changelog data won't be added to changegroup
830 """
830 """
831
831
832 repo = self._repo
832 repo = self._repo
833 cl = repo.changelog
833 cl = repo.changelog
834
834
835 self._verbosenote(_('uncompressed size of bundle content:\n'))
835 self._verbosenote(_('uncompressed size of bundle content:\n'))
836 size = 0
836 size = 0
837
837
838 clstate, deltas = self._generatechangelog(cl, clnodes,
838 clstate, deltas = self._generatechangelog(cl, clnodes,
839 generate=changelog)
839 generate=changelog)
840 for delta in deltas:
840 for delta in deltas:
841 for chunk in _revisiondeltatochunks(delta,
841 for chunk in _revisiondeltatochunks(delta,
842 self._builddeltaheader):
842 self._builddeltaheader):
843 size += len(chunk)
843 size += len(chunk)
844 yield chunk
844 yield chunk
845
845
846 close = closechunk()
846 close = closechunk()
847 size += len(close)
847 size += len(close)
848 yield closechunk()
848 yield closechunk()
849
849
850 self._verbosenote(_('%8.i (changelog)\n') % size)
850 self._verbosenote(_('%8.i (changelog)\n') % size)
851
851
852 clrevorder = clstate['clrevorder']
852 clrevorder = clstate['clrevorder']
853 manifests = clstate['manifests']
853 manifests = clstate['manifests']
854 changedfiles = clstate['changedfiles']
854 changedfiles = clstate['changedfiles']
855
855
856 # We need to make sure that the linkrev in the changegroup refers to
856 # We need to make sure that the linkrev in the changegroup refers to
857 # the first changeset that introduced the manifest or file revision.
857 # the first changeset that introduced the manifest or file revision.
858 # The fastpath is usually safer than the slowpath, because the filelogs
858 # The fastpath is usually safer than the slowpath, because the filelogs
859 # are walked in revlog order.
859 # are walked in revlog order.
860 #
860 #
861 # When taking the slowpath when the manifest revlog uses generaldelta,
861 # When taking the slowpath when the manifest revlog uses generaldelta,
862 # the manifest may be walked in the "wrong" order. Without 'clrevorder',
862 # the manifest may be walked in the "wrong" order. Without 'clrevorder',
863 # we would get an incorrect linkrev (see fix in cc0ff93d0c0c).
863 # we would get an incorrect linkrev (see fix in cc0ff93d0c0c).
864 #
864 #
865 # When taking the fastpath, we are only vulnerable to reordering
865 # When taking the fastpath, we are only vulnerable to reordering
866 # of the changelog itself. The changelog never uses generaldelta and is
866 # of the changelog itself. The changelog never uses generaldelta and is
867 # never reordered. To handle this case, we simply take the slowpath,
867 # never reordered. To handle this case, we simply take the slowpath,
868 # which already has the 'clrevorder' logic. This was also fixed in
868 # which already has the 'clrevorder' logic. This was also fixed in
869 # cc0ff93d0c0c.
869 # cc0ff93d0c0c.
870
870
871 # Treemanifests don't work correctly with fastpathlinkrev
871 # Treemanifests don't work correctly with fastpathlinkrev
872 # either, because we don't discover which directory nodes to
872 # either, because we don't discover which directory nodes to
873 # send along with files. This could probably be fixed.
873 # send along with files. This could probably be fixed.
874 fastpathlinkrev = fastpathlinkrev and (
874 fastpathlinkrev = fastpathlinkrev and (
875 'treemanifest' not in repo.requirements)
875 'treemanifest' not in repo.requirements)
876
876
877 fnodes = {} # needed file nodes
877 fnodes = {} # needed file nodes
878
878
879 size = 0
879 size = 0
880 it = self.generatemanifests(
880 it = self.generatemanifests(
881 commonrevs, clrevorder, fastpathlinkrev, manifests, fnodes, source,
881 commonrevs, clrevorder, fastpathlinkrev, manifests, fnodes, source,
882 clstate['clrevtomanifestrev'])
882 clstate['clrevtomanifestrev'])
883
883
884 for tree, deltas in it:
884 for tree, deltas in it:
885 if tree:
885 if tree:
886 assert self.version == b'03'
886 assert self.version == b'03'
887 chunk = _fileheader(tree)
887 chunk = _fileheader(tree)
888 size += len(chunk)
888 size += len(chunk)
889 yield chunk
889 yield chunk
890
890
891 for delta in deltas:
891 for delta in deltas:
892 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
892 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
893 for chunk in chunks:
893 for chunk in chunks:
894 size += len(chunk)
894 size += len(chunk)
895 yield chunk
895 yield chunk
896
896
897 close = closechunk()
897 close = closechunk()
898 size += len(close)
898 size += len(close)
899 yield close
899 yield close
900
900
901 self._verbosenote(_('%8.i (manifests)\n') % size)
901 self._verbosenote(_('%8.i (manifests)\n') % size)
902 yield self._manifestsend
902 yield self._manifestsend
903
903
904 mfdicts = None
904 mfdicts = None
905 if self._ellipses and self._isshallow:
905 if self._ellipses and self._isshallow:
906 mfdicts = [(self._repo.manifestlog[n].read(), lr)
906 mfdicts = [(self._repo.manifestlog[n].read(), lr)
907 for (n, lr) in manifests.iteritems()]
907 for (n, lr) in manifests.iteritems()]
908
908
909 manifests.clear()
909 manifests.clear()
910 clrevs = set(cl.rev(x) for x in clnodes)
910 clrevs = set(cl.rev(x) for x in clnodes)
911
911
912 it = self.generatefiles(changedfiles, commonrevs,
912 it = self.generatefiles(changedfiles, commonrevs,
913 source, mfdicts, fastpathlinkrev,
913 source, mfdicts, fastpathlinkrev,
914 fnodes, clrevs)
914 fnodes, clrevs)
915
915
916 for path, deltas in it:
916 for path, deltas in it:
917 h = _fileheader(path)
917 h = _fileheader(path)
918 size = len(h)
918 size = len(h)
919 yield h
919 yield h
920
920
921 for delta in deltas:
921 for delta in deltas:
922 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
922 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
923 for chunk in chunks:
923 for chunk in chunks:
924 size += len(chunk)
924 size += len(chunk)
925 yield chunk
925 yield chunk
926
926
927 close = closechunk()
927 close = closechunk()
928 size += len(close)
928 size += len(close)
929 yield close
929 yield close
930
930
931 self._verbosenote(_('%8.i %s\n') % (size, path))
931 self._verbosenote(_('%8.i %s\n') % (size, path))
932
932
933 yield closechunk()
933 yield closechunk()
934
934
935 if clnodes:
935 if clnodes:
936 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
936 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
937
937
    def _generatechangelog(self, cl, nodes, generate=True):
        """Generate data for changelog chunks.

        Returns a 2-tuple of a dict containing state and an iterable of
        byte chunks. The state will not be fully populated until the
        chunk stream has been fully consumed.

        if generate is False, the state will be fully populated and no chunk
        stream will be yielded
        """
        # Shared accumulators; they are exposed through ``state`` and are
        # mutated by the lookupcl() callback as the chunk stream is consumed.
        clrevorder = {}
        manifests = {}
        mfl = self._repo.manifestlog
        changedfiles = set()
        clrevtomanifestrev = {}

        state = {
            'clrevorder': clrevorder,
            'manifests': manifests,
            'changedfiles': changedfiles,
            'clrevtomanifestrev': clrevtomanifestrev,
        }

        if not (generate or self._ellipses):
            # No chunks will be emitted, so populate the state eagerly here.
            # sort the nodes in storage order
            nodes = sorted(nodes, key=cl.rev)
            for node in nodes:
                c = cl.changelogrevision(node)
                clrevorder[node] = len(clrevorder)
                # record the first changeset introducing this manifest version
                manifests.setdefault(c.manifest, node)
                # Record a complete list of potentially-changed files in
                # this manifest.
                changedfiles.update(c.files)

            return state, ()

        # Callback for the changelog, used to collect changed files and
        # manifest nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.changelogrevision(x)
            clrevorder[x] = len(clrevorder)

            if self._ellipses:
                # Only update manifests if x is going to be sent. Otherwise we
                # end up with bogus linkrevs specified for manifests and
                # we skip some manifest nodes that we should otherwise
                # have sent.
                if (x in self._fullclnodes
                    or cl.rev(x) in self._precomputedellipsis):

                    manifestnode = c.manifest
                    # Record the first changeset introducing this manifest
                    # version.
                    manifests.setdefault(manifestnode, x)
                    # Set this narrow-specific dict so we have the lowest
                    # manifest revnum to look up for this cl revnum. (Part of
                    # mapping changelog ellipsis parents to manifest ellipsis
                    # parents)
                    clrevtomanifestrev.setdefault(
                        cl.rev(x), mfl.rev(manifestnode))
                # We can't trust the changed files list in the changeset if the
                # client requested a shallow clone.
                if self._isshallow:
                    changedfiles.update(mfl[c.manifest].read().keys())
                else:
                    changedfiles.update(c.files)
            else:
                # record the first changeset introducing this manifest version
                manifests.setdefault(c.manifest, x)
                # Record a complete list of potentially-changed files in
                # this manifest.
                changedfiles.update(c.files)

            return x

        gen = deltagroup(
            self._repo, cl, nodes, True, lookupcl,
            self._forcedeltaparentprev,
            ellipses=self._ellipses,
            topic=_('changesets'),
            clrevtolocalrev={},
            fullclnodes=self._fullclnodes,
            precomputedellipsis=self._precomputedellipsis)

        return state, gen
1025
1025
    def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev,
                          manifests, fnodes, source, clrevtolocalrev):
        """Returns an iterator of changegroup chunks containing manifests.

        `source` is unused here, but is used by extensions like remotefilelog to
        change what is sent based in pulls vs pushes, etc.
        """
        repo = self._repo
        mfl = repo.manifestlog
        # Worklist mapping tree path -> {manifest node -> changelog node}.
        # Starts with the root manifests; subdirectory manifests are added
        # to it by lookupmflinknode() as tree entries are discovered.
        tmfnodes = {'': manifests}

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def makelookupmflinknode(tree, nodes):
            if fastpathlinkrev:
                # Fast path only applies to the root manifest.
                assert not tree
                return manifests.__getitem__

            def lookupmflinknode(x):
                """Callback for looking up the linknode for manifests.

                Returns the linkrev node for the specified manifest.

                SIDE EFFECT:

                1) fclnodes gets populated with the list of relevant
                file nodes if we're not using fastpathlinkrev
                2) When treemanifests are in use, collects treemanifest nodes
                to send

                Note that this means manifests must be completely sent to
                the client before you can trust the list of files and
                treemanifests to send.
                """
                clnode = nodes[x]
                mdata = mfl.get(tree, x).readfast(shallow=True)
                for p, n, fl in mdata.iterentries():
                    if fl == 't': # subdirectory manifest
                        subtree = tree + p + '/'
                        tmfclnodes = tmfnodes.setdefault(subtree, {})
                        tmfclnode = tmfclnodes.setdefault(n, clnode)
                        # Keep the earliest introducing changeset as linknode.
                        if clrevorder[clnode] < clrevorder[tmfclnode]:
                            tmfclnodes[n] = clnode
                    else:
                        f = tree + p
                        fclnodes = fnodes.setdefault(f, {})
                        fclnode = fclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            fclnodes[n] = clnode
                return clnode
            return lookupmflinknode

        while tmfnodes:
            tree, nodes = tmfnodes.popitem()

            should_visit = self._matcher.visitdir(tree[:-1])
            if tree and not should_visit:
                continue

            store = mfl.getstorage(tree)

            if not should_visit:
                # No nodes to send because this directory is out of
                # the client's view of the repository (probably
                # because of narrow clones). Do this even for the root
                # directory (tree=='')
                prunednodes = []
            else:
                # Avoid sending any manifest nodes we can prove the
                # client already has by checking linkrevs. See the
                # related comment in generatefiles().
                prunednodes = self._prunemanifests(store, nodes, commonrevs)

            if tree and not prunednodes:
                continue

            lookupfn = makelookupmflinknode(tree, nodes)

            deltas = deltagroup(
                self._repo, store, prunednodes, False, lookupfn,
                self._forcedeltaparentprev,
                ellipses=self._ellipses,
                topic=_('manifests'),
                clrevtolocalrev=clrevtolocalrev,
                fullclnodes=self._fullclnodes,
                precomputedellipsis=self._precomputedellipsis)

            if not self._oldmatcher.visitdir(store.tree[:-1]):
                yield tree, deltas
            else:
                # 'deltas' is a generator and we need to consume it even if
                # we are not going to send it because a side-effect is that
                # it updates tmfnodes (via lookupfn)
                for d in deltas:
                    pass
            if not tree:
                yield tree, []
1124
1124
1125 def _prunemanifests(self, store, nodes, commonrevs):
1125 def _prunemanifests(self, store, nodes, commonrevs):
1126 if not self._ellipses:
1126 if not self._ellipses:
1127 # In non-ellipses case and large repositories, it is better to
1127 # In non-ellipses case and large repositories, it is better to
1128 # prevent calling of store.rev and store.linkrev on a lot of
1128 # prevent calling of store.rev and store.linkrev on a lot of
1129 # nodes as compared to sending some extra data
1129 # nodes as compared to sending some extra data
1130 return nodes.copy()
1130 return nodes.copy()
1131 # This is split out as a separate method to allow filtering
1131 # This is split out as a separate method to allow filtering
1132 # commonrevs in extension code.
1132 # commonrevs in extension code.
1133 #
1133 #
1134 # TODO(augie): this shouldn't be required, instead we should
1134 # TODO(augie): this shouldn't be required, instead we should
1135 # make filtering of revisions to send delegated to the store
1135 # make filtering of revisions to send delegated to the store
1136 # layer.
1136 # layer.
1137 frev, flr = store.rev, store.linkrev
1137 frev, flr = store.rev, store.linkrev
1138 return [n for n in nodes if flr(frev(n)) not in commonrevs]
1138 return [n for n in nodes if flr(frev(n)) not in commonrevs]
1139
1139
    # The 'source' parameter is useful for extensions
    def generatefiles(self, changedfiles, commonrevs, source,
                      mfdicts, fastpathlinkrev, fnodes, clrevs):
        """Yield (filename, deltas) pairs for the changegroup's file data.

        Only files selected by the new matcher and not already covered by
        the old matcher are emitted (narrow-widening support).
        """
        changedfiles = [f for f in changedfiles
                        if self._matcher(f) and not self._oldmatcher(f)]

        if not fastpathlinkrev:
            # Slow path: linkrev nodes were collected in fnodes while
            # generating manifests.
            def normallinknodes(unused, fname):
                return fnodes.get(fname, {})
        else:
            cln = self._repo.changelog.node

            # Fast path: derive linkrev nodes directly from the filelog's
            # stored linkrevs, restricted to the outgoing changelog revs.
            def normallinknodes(store, fname):
                flinkrev = store.linkrev
                fnode = store.node
                revs = ((r, flinkrev(r)) for r in store)
                return dict((fnode(r), cln(lr))
                            for r, lr in revs if lr in clrevs)

        clrevtolocalrev = {}

        if self._isshallow:
            # In a shallow clone, the linknodes callback needs to also include
            # those file nodes that are in the manifests we sent but weren't
            # introduced by those manifests.
            commonctxs = [self._repo[c] for c in commonrevs]
            clrev = self._repo.changelog.rev

            def linknodes(flog, fname):
                for c in commonctxs:
                    try:
                        fnode = c.filenode(fname)
                        clrevtolocalrev[c.rev()] = flog.rev(fnode)
                    except error.ManifestLookupError:
                        # file absent from this common context; skip it
                        pass
                links = normallinknodes(flog, fname)
                if len(links) != len(mfdicts):
                    for mf, lr in mfdicts:
                        fnode = mf.get(fname, None)
                        if fnode in links:
                            links[fnode] = min(links[fnode], lr, key=clrev)
                        elif fnode:
                            links[fnode] = lr
                return links
        else:
            linknodes = normallinknodes

        repo = self._repo
        progress = repo.ui.makeprogress(_('files'), unit=_('files'),
                                        total=len(changedfiles))
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise error.Abort(_("empty or missing file data for %s") %
                                  fname)

            clrevtolocalrev.clear()

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            frev, flr = filerevlog.rev, filerevlog.linkrev
            # Skip sending any filenode we know the client already
            # has. This avoids over-sending files relatively
            # inexpensively, so it's not a problem if we under-filter
            # here.
            filenodes = [n for n in linkrevnodes
                         if flr(frev(n)) not in commonrevs]

            if not filenodes:
                continue

            progress.update(i + 1, item=fname)

            deltas = deltagroup(
                self._repo, filerevlog, filenodes, False, lookupfilelog,
                self._forcedeltaparentprev,
                ellipses=self._ellipses,
                clrevtolocalrev=clrevtolocalrev,
                fullclnodes=self._fullclnodes,
                precomputedellipsis=self._precomputedellipsis)

            yield fname, deltas

        progress.complete()
1228
1228
def _makecg1packer(repo, oldmatcher, matcher, bundlecaps,
                   ellipses=False, shallow=False, ellipsisroots=None,
                   fullnodes=None):
    """Construct a changegroup version '01' packer."""
    def builddeltaheader(d):
        # cg1 headers carry no explicit delta base node.
        return _CHANGEGROUPV1_DELTA_HEADER.pack(
            d.node, d.p1node, d.p2node, d.linknode)

    return cgpacker(repo, oldmatcher, matcher, b'01',
                    builddeltaheader=builddeltaheader,
                    manifestsend=b'',
                    forcedeltaparentprev=True,
                    bundlecaps=bundlecaps,
                    ellipses=ellipses,
                    shallow=shallow,
                    ellipsisroots=ellipsisroots,
                    fullnodes=fullnodes)
1244
1244
def _makecg2packer(repo, oldmatcher, matcher, bundlecaps,
                   ellipses=False, shallow=False, ellipsisroots=None,
                   fullnodes=None):
    """Construct a changegroup version '02' packer."""
    def builddeltaheader(d):
        # cg2 adds the explicit delta base node to the header.
        return _CHANGEGROUPV2_DELTA_HEADER.pack(
            d.node, d.p1node, d.p2node, d.basenode, d.linknode)

    return cgpacker(repo, oldmatcher, matcher, b'02',
                    builddeltaheader=builddeltaheader,
                    manifestsend=b'',
                    bundlecaps=bundlecaps,
                    ellipses=ellipses,
                    shallow=shallow,
                    ellipsisroots=ellipsisroots,
                    fullnodes=fullnodes)
1259
1259
def _makecg3packer(repo, oldmatcher, matcher, bundlecaps,
                   ellipses=False, shallow=False, ellipsisroots=None,
                   fullnodes=None):
    """Construct a changegroup version '03' packer."""
    def builddeltaheader(d):
        # cg3 adds revlog flags to the cg2-style header.
        return _CHANGEGROUPV3_DELTA_HEADER.pack(
            d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags)

    return cgpacker(repo, oldmatcher, matcher, b'03',
                    builddeltaheader=builddeltaheader,
                    manifestsend=closechunk(),
                    bundlecaps=bundlecaps,
                    ellipses=ellipses,
                    shallow=shallow,
                    ellipsisroots=ellipsisroots,
                    fullnodes=fullnodes)
1274
1274
# Maps changegroup version name to a (packer factory, unpacker class) pair.
_packermap = {'01': (_makecg1packer, cg1unpacker),
              # cg2 adds support for exchanging generaldelta
              '02': (_makecg2packer, cg2unpacker),
              # cg3 adds support for exchanging revlog flags and treemanifests
              '03': (_makecg3packer, cg3unpacker),
}
1281
1281
def allsupportedversions(repo):
    """Return the set of all changegroup versions known for this repo."""
    versions = set(_packermap.keys())
    needv03 = False
    if (repo.ui.configbool('experimental', 'changegroup3') or
        repo.ui.configbool('experimental', 'treemanifest') or
        'treemanifest' in repo.requirements):
        # we keep version 03 because we need it to exchange treemanifest data
        #
        # we also keep versions 01 and 02, because it is possible for a repo
        # to contain both normal and tree manifests at the same time, so
        # using an older version to pull data is viable
        #
        # (or even to push a subset of history)
        needv03 = True
    if not needv03:
        versions.discard('03')
    return versions
1289
1299
# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    # Currently identical to the full known set: every known version can
    # be applied locally.
    return allsupportedversions(repo)
1293
1303
# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    """Return the changegroup versions this repository can produce."""
    versions = allsupportedversions(repo)
    # Several repository requirements depend on features only changegroup
    # 03 provides; record whether any of them applies and discard the
    # older versions once at the end.
    needsflags = False
    if 'treemanifest' in repo.requirements:
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        needsflags = True
    if repository.NARROW_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # support that for stripping and unbundling to work.
        needsflags = True
    if LFS_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # mark LFS entries with REVIDX_EXTSTORED.
        needsflags = True
    if needsflags:
        versions.discard('01')
        versions.discard('02')
    return versions
1317
1327
def localversion(repo):
    """Return the best version for locally-consumed bundles.

    Used for bundles that are meant to be used locally, such as those from
    strip and shelve, and temporary bundles.
    """
    return max(supportedoutgoingversions(repo))
1322
1332
def safeversion(repo):
    # Finds the smallest version that it's safe to assume clients of the repo
    # will support. For example, all hg versions that support generaldelta also
    # support changegroup 02.
    versions = supportedoutgoingversions(repo)
    if 'generaldelta' in repo.requirements:
        versions.discard('01')
    # the discard above must never empty the set; min() below relies on it
    assert versions
    return min(versions)
1332
1342
def getbundler(version, repo, bundlecaps=None, oldmatcher=None,
               matcher=None, ellipses=False, shallow=False,
               ellipsisroots=None, fullnodes=None):
    """Build and return a changegroup packer for ``version``.

    Raises ProgrammingError for version '01' with a sparse matcher, and
    Abort when ellipsis nodes are requested with a pre-cg3 version.
    """
    assert version in supportedoutgoingversions(repo)

    # Default to "send everything new" semantics.
    if matcher is None:
        matcher = matchmod.always()
    if oldmatcher is None:
        oldmatcher = matchmod.never()

    if version == '01' and not matcher.always():
        raise error.ProgrammingError('version 01 changegroups do not support '
                                     'sparse file matchers')

    if ellipses and version in (b'01', b'02'):
        raise error.Abort(
            _('ellipsis nodes require at least cg3 on client and server, '
              'but negotiated version %s') % version)

    # Requested files could include files not in the local store. So
    # filter those out.
    matcher = repo.narrowmatch(matcher)

    fn = _packermap[version][0]
    return fn(repo, oldmatcher, matcher, bundlecaps, ellipses=ellipses,
              shallow=shallow, ellipsisroots=ellipsisroots,
              fullnodes=fullnodes)
1360
1370
def getunbundler(version, fh, alg, extras=None):
    """Instantiate the unpacker registered for ``version``."""
    packerfn, unpackercls = _packermap[version]
    return unpackercls(fh, alg, extras=extras)
1363
1373
1364 def _changegroupinfo(repo, nodes, source):
1374 def _changegroupinfo(repo, nodes, source):
1365 if repo.ui.verbose or source == 'bundle':
1375 if repo.ui.verbose or source == 'bundle':
1366 repo.ui.status(_("%d changesets found\n") % len(nodes))
1376 repo.ui.status(_("%d changesets found\n") % len(nodes))
1367 if repo.ui.debugflag:
1377 if repo.ui.debugflag:
1368 repo.ui.debug("list of changesets:\n")
1378 repo.ui.debug("list of changesets:\n")
1369 for node in nodes:
1379 for node in nodes:
1370 repo.ui.debug("%s\n" % hex(node))
1380 repo.ui.debug("%s\n" % hex(node))
1371
1381
def makechangegroup(repo, outgoing, version, source, fastpath=False,
                    bundlecaps=None):
    """Generate a changegroup stream and wrap it in an unbundler object."""
    cgstream = makestream(repo, outgoing, version, source,
                          fastpath=fastpath, bundlecaps=bundlecaps)
    return getunbundler(version, util.chunkbuffer(cgstream), None,
                        {'clcount': len(outgoing.missing) })
1378
1388
def makestream(repo, outgoing, version, source, fastpath=False,
               bundlecaps=None, matcher=None):
    """Return a changegroup chunk stream for the ``outgoing`` revisions."""
    bundler = getbundler(version, repo, bundlecaps=bundlecaps,
                         matcher=matcher)

    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered
    # heads have been requested (since we then know that all linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
1398
1408
def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    """Consume the filelog section of a changegroup ``source``.

    Applies each per-file delta group to the matching filelog, checking
    off nodes recorded in ``needfiles`` (a dict of filename -> set of
    expected nodes).  Returns ``(revisions, files)`` counters.  Raises
    ``error.Abort`` on empty groups, censored delta bases, spurious
    entries, or nodes still missing after the group is applied.
    """
    revisions = 0
    files = 0
    progress = repo.ui.makeprogress(_('files'), unit=_('files'),
                                    total=expectedfiles)
    # filelogheader() returns {} at the end of the file section, which
    # terminates this iter() sentinel loop.
    for chunkdata in iter(source.filelogheader, {}):
        files += 1
        fname = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % fname)
        progress.increment()
        filelog = repo.file(fname)
        oldlen = len(filelog)
        try:
            if not filelog.addgroup(source.deltaiter(), revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(filelog) - oldlen
        wanted = needfiles.get(fname)
        if wanted is not None:
            for idx in pycompat.xrange(oldlen, len(filelog)):
                node = filelog.node(idx)
                if node not in wanted:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
                wanted.remove(node)
            if not wanted:
                del needfiles[fname]
    progress.complete()

    # Whatever is still listed in needfiles was not delivered by this
    # group; it must already exist locally or the bundle was incomplete.
    for fname, wanted in needfiles.iteritems():
        filelog = repo.file(fname)
        for node in wanted:
            try:
                filelog.rev(node)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (fname, hex(node)))

    return revisions, files
General Comments 0
You need to be logged in to leave comments. Login now