##// END OF EJS Templates
tests: fix differing output between py2 and py3...
Raphaël Gomès -
r47263:c3c7a86e default
parent child Browse files
Show More
@@ -1,1710 +1,1710 b''
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import os
10 import os
11 import struct
11 import struct
12 import weakref
12 import weakref
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import (
15 from .node import (
16 hex,
16 hex,
17 nullid,
17 nullid,
18 nullrev,
18 nullrev,
19 short,
19 short,
20 )
20 )
21 from .pycompat import open
21 from .pycompat import open
22
22
23 from . import (
23 from . import (
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 mdiff,
26 mdiff,
27 phases,
27 phases,
28 pycompat,
28 pycompat,
29 requirements,
29 requirements,
30 scmutil,
30 scmutil,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import repository
34 from .interfaces import repository
35
35
36 _CHANGEGROUPV1_DELTA_HEADER = struct.Struct(b"20s20s20s20s")
36 _CHANGEGROUPV1_DELTA_HEADER = struct.Struct(b"20s20s20s20s")
37 _CHANGEGROUPV2_DELTA_HEADER = struct.Struct(b"20s20s20s20s20s")
37 _CHANGEGROUPV2_DELTA_HEADER = struct.Struct(b"20s20s20s20s20s")
38 _CHANGEGROUPV3_DELTA_HEADER = struct.Struct(b">20s20s20s20s20sH")
38 _CHANGEGROUPV3_DELTA_HEADER = struct.Struct(b">20s20s20s20s20sH")
39
39
40 LFS_REQUIREMENT = b'lfs'
40 LFS_REQUIREMENT = b'lfs'
41
41
42 readexactly = util.readexactly
42 readexactly = util.readexactly
43
43
44
44
def getchunk(stream):
    """Return the next chunk from ``stream`` as a byte string.

    An empty bytestring marks the end of the chunk sequence; a length
    below the 4-byte header size (other than 0) is a protocol error.
    """
    header = readexactly(stream, 4)
    length = struct.unpack(b">l", header)[0]
    if length <= 4:
        if length:
            raise error.Abort(_(b"invalid chunk length %d") % length)
        return b""
    # The on-wire length includes the 4-byte header we already consumed.
    return readexactly(stream, length - 4)
54
54
55
55
def chunkheader(length):
    """Return a changegroup chunk header (string).

    The encoded length covers the 4 header bytes themselves in addition
    to the payload.
    """
    return struct.pack(b">l", 4 + length)
59
59
60
60
def closechunk():
    """Return a changegroup chunk header (string) for a zero-length chunk.

    A zero length terminates a chunk sequence on the wire.
    """
    return struct.pack(b">l", 0)
64
64
65
65
def _fileheader(path):
    """Obtain a changegroup chunk header for a named path."""
    # The chunk consists solely of the (bytes) path; frame it with the
    # standard length-prefixed header.
    header = chunkheader(len(path))
    return header + path
69
69
70
70
def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, b"wb")
            else:
                # Larger-than-default buffer: bundles are written
                # sequentially and the usual default (4k on Linux) is small.
                fh = open(filename, b"wb", 131072)
        else:
            fd, filename = pycompat.mkstemp(prefix=b"hg-bundle-", suffix=b".hg")
            fh = os.fdopen(fd, "wb")
        cleanup = filename
        for chunk in chunks:
            fh.write(chunk)
        # All chunks written successfully; disarm the cleanup so the
        # finally clause keeps the file.
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            # Partial write: remove the incomplete file.
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
104
104
105
105
class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """

    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = b'01'
    _grouplistcount = 1  # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        if alg is None:
            alg = b'UN'
        if alg not in util.compengines.supportedbundletypes:
            raise error.Abort(_(b'unknown stream compression type: %s') % alg)
        if alg == b'BZ':
            alg = b'_truncatedBZ'

        compengine = util.compengines.forbundletype(alg)
        self._stream = compengine.decompressorreader(fh)
        self._type = alg
        self.extras = extras or {}
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None and self._type != b'UN'

    def read(self, l):
        return self._stream.read(l)

    def seek(self, pos):
        return self._stream.seek(pos)

    def tell(self):
        return self._stream.tell()

    def close(self):
        return self._stream.close()

    def _chunklength(self):
        """Read the next chunk header and return its payload length.

        Returns 0 for the empty (terminator) chunk. Fires the progress
        callback, if any, once per non-empty chunk.
        """
        d = readexactly(self._stream, 4)
        l = struct.unpack(b">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_(b"invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {b'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        """Decode a cg1 delta header tuple.

        cg1 has no explicit delta base: deltas are against the previous
        revision in the stream, or against p1 for the first one.
        """
        node, p1, p2, cs = headertuple
        deltabase = p1 if prevnode is None else prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        """Read one delta chunk; returns {} at the end of the group."""
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = self.deltaheader.unpack(headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return (node, p1, p2, cs, deltabase, delta, flags)

    def getchunks(self):
        """returns all the chunks contained in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parses the changegroup data, otherwise it
        would block in case of sshrepo because it doesn't know the end of the
        stream.
        """
        # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
        # and a list of filelogs. For changegroup 3, we expect 4 parts:
        # changelog, manifestlog, a list of tree manifestlogs, and a list of
        # filelogs.
        #
        # Changelog and manifestlog parts are terminated with empty chunks.
        # The tree and file parts are a list of entry sections. Each entry
        # section is a series of chunks terminating in an empty chunk. The
        # list of these entry sections is terminated in yet another empty
        # chunk, so we know we've reached the end of the tree/file list when
        # we reach an empty chunk that was preceded by no non-empty chunks.

        parts = 0
        while parts < 2 + self._grouplistcount:
            noentries = True
            while True:
                chunk = getchunk(self)
                if not chunk:
                    # The first two empty chunks represent the end of the
                    # changelog and the manifestlog portions. The remaining
                    # empty chunks represent either A) the end of individual
                    # tree or file entries in the file list, or B) the end of
                    # the entire list. It's the end of the entire list if
                    # there were no entries (i.e. noentries is True).
                    if parts < 2:
                        parts += 1
                    elif noentries:
                        parts += 1
                    break
                noentries = False
                yield chunkheader(len(chunk))
                # Re-emit the payload in <=1MiB slices to bound memory use.
                pos = 0
                while pos < len(chunk):
                    end = pos + 2 ** 20
                    yield chunk[pos:end]
                    pos = end
            yield closechunk()

    def _unpackmanifests(self, repo, revmap, trp, prog):
        """Consume the manifest group and add it to the repository."""
        self.callback = prog.increment
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        deltas = self.deltaiter()
        repo.manifestlog.getstorage(b'').addgroup(deltas, revmap, trp)
        prog.complete()
        self.callback = None

    def apply(
        self,
        repo,
        tr,
        srctype,
        url,
        targetphase=phases.draft,
        expectedtotal=None,
    ):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()

        def csmap(x):
            repo.ui.debug(b"add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        try:
            # The transaction may already carry source information. In this
            # case we use the top level data. We overwrite the argument
            # because we need to use the top level value (if they exist)
            # in this function.
            srctype = tr.hookargs.setdefault(b'source', srctype)
            tr.hookargs.setdefault(b'url', url)
            repo.hook(
                b'prechangegroup', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

            # write changelog data to temp files so concurrent readers
            # will not see an inconsistent view
            cl = repo.changelog
            cl.delayupdate(tr)
            oldheads = set(cl.heads())

            trp = weakref.proxy(tr)
            # pull off the changeset group
            repo.ui.status(_(b"adding changesets\n"))
            clstart = len(cl)
            progress = repo.ui.makeprogress(
                _(b'changesets'), unit=_(b'chunks'), total=expectedtotal
            )
            self.callback = progress.increment

            efilesset = set()
            duprevs = []

            def ondupchangelog(cl, rev):
                if rev < clstart:
                    duprevs.append(rev)

            def onchangelog(cl, rev):
                ctx = cl.changelogrevision(rev)
                efilesset.update(ctx.files)
                repo.register_changeset(rev, ctx)

            self.changelogheader()
            deltas = self.deltaiter()
            if not cl.addgroup(
                deltas,
                csmap,
                trp,
                alwayscache=True,
                addrevisioncb=onchangelog,
                duplicaterevisioncb=ondupchangelog,
            ):
                repo.ui.develwarn(
                    b'applied empty changelog from changegroup',
                    config=b'warn-empty-changegroup',
                )
            efiles = len(efilesset)
            clend = len(cl)
            changesets = clend - clstart
            progress.complete()
            del deltas
            # TODO Python 2.7 removal
            # del efilesset
            efilesset = None
            self.callback = None

            # pull off the manifest group
            repo.ui.status(_(b"adding manifests\n"))
            # We know that we'll never have more manifests than we had
            # changesets.
            progress = repo.ui.makeprogress(
                _(b'manifests'), unit=_(b'chunks'), total=changesets
            )
            self._unpackmanifests(repo, revmap, trp, progress)

            needfiles = {}
            if repo.ui.configbool(b'server', b'validate'):
                cl = repo.changelog
                ml = repo.manifestlog
                # validate incoming csets have their manifests
                for cset in pycompat.xrange(clstart, clend):
                    mfnode = cl.changelogrevision(cset).manifest
                    mfest = ml[mfnode].readdelta()
                    # store file nodes we must see
                    for f, n in pycompat.iteritems(mfest):
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            repo.ui.status(_(b"adding file changes\n"))
            newrevs, newfiles = _addchangegroupfiles(
                repo, self, revmap, trp, efiles, needfiles
            )

            # making sure the value exists
            tr.changes.setdefault(b'changegroup-count-changesets', 0)
            tr.changes.setdefault(b'changegroup-count-revisions', 0)
            tr.changes.setdefault(b'changegroup-count-files', 0)
            tr.changes.setdefault(b'changegroup-count-heads', 0)

            # some code use bundle operation for internal purpose. They usually
            # set `ui.quiet` to do this outside of user sight. Size the report
            # of such operation now happens at the end of the transaction, that
            # ui.quiet has not direct effect on the output.
            #
            # To preserve this intend use an inelegant hack, we fail to report
            # the change if `quiet` is set. We should probably move to
            # something better, but this is a good first step to allow the "end
            # of transaction report" to pass tests.
            if not repo.ui.quiet:
                tr.changes[b'changegroup-count-changesets'] += changesets
                tr.changes[b'changegroup-count-revisions'] += newrevs
                tr.changes[b'changegroup-count-files'] += newfiles

            deltaheads = 0
            if oldheads:
                heads = cl.heads()
                deltaheads += len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and repo[h].closesbranch():
                        deltaheads -= 1

            # see previous comment about checking ui.quiet
            if not repo.ui.quiet:
                tr.changes[b'changegroup-count-heads'] += deltaheads
            repo.invalidatevolatilesets()

            if changesets > 0:
                if b'node' not in tr.hookargs:
                    tr.hookargs[b'node'] = hex(cl.node(clstart))
                    tr.hookargs[b'node_last'] = hex(cl.node(clend - 1))
                    hookargs = dict(tr.hookargs)
                else:
                    hookargs = dict(tr.hookargs)
                    hookargs[b'node'] = hex(cl.node(clstart))
                    hookargs[b'node_last'] = hex(cl.node(clend - 1))
                repo.hook(
                    b'pretxnchangegroup',
                    throw=True,
                    **pycompat.strkwargs(hookargs)
                )

            added = pycompat.xrange(clstart, clend)
            phaseall = None
            if srctype in (b'push', b'serve'):
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if changeset already
                # exists locally as secret
                #
                # We should not use added here but the list of all change in
                # the bundle
                if repo.publishing():
                    targetphase = phaseall = phases.public
                else:
                    # closer target phase computation

                    # Those changesets have been pushed from the
                    # outside, their phases are going to be pushed
                    # alongside. Therefor `targetphase` is
                    # ignored.
                    targetphase = phaseall = phases.draft
            if added:
                phases.registernew(repo, tr, targetphase, added)
            if phaseall is not None:
                if duprevs:
                    duprevs.extend(added)
                else:
                    duprevs = added
                phases.advanceboundary(repo, tr, phaseall, [], revs=duprevs)
                duprevs = []

            if changesets > 0:

                def runhooks(unused_success):
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(repo):
                        return

                    repo.hook(b"changegroup", **pycompat.strkwargs(hookargs))

                    for rev in added:
                        args = hookargs.copy()
                        args[b'node'] = hex(cl.node(rev))
                        del args[b'node_last']
                        repo.hook(b"incoming", **pycompat.strkwargs(args))

                    newheads = [h for h in repo.heads() if h not in oldheads]
                    repo.ui.log(
                        b"incoming",
                        b"%d incoming changes - new heads: %s\n",
                        len(added),
                        b', '.join([hex(c[:6]) for c in newheads]),
                    )

                tr.addpostclose(
                    b'changegroup-runhooks-%020i' % clstart,
                    lambda tr: repo._afterlock(runhooks),
                )
        finally:
            repo.ui.flush()
        # never return 0 here:
        if deltaheads < 0:
            ret = deltaheads - 1
        else:
            ret = deltaheads + 1
        return ret

    def deltaiter(self):
        """
        returns an iterator of the deltas in this changegroup

        Useful for passing to the underlying storage system to be stored.
        """
        chain = None
        for chunkdata in iter(lambda: self.deltachunk(chain), {}):
            # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
            yield chunkdata
            chain = chunkdata[0]
507
507
508
508
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """

    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = b'02'

    def _deltaheader(self, headertuple, prevnode):
        # cg2 carries the delta base explicitly on the wire, so prevnode
        # is not consulted and no revlog flags exist yet.
        node, p1, p2, deltabase, cs = headertuple
        return node, p1, p2, deltabase, cs, 0
525
525
526
526
class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """

    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = b'03'
    _grouplistcount = 2  # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        # cg3 carries the revlog flags directly in the header, so the
        # unpacked tuple already has the (node, p1, p2, deltabase, cs,
        # flags) shape callers expect.
        return headertuple

    def _unpackmanifests(self, repo, revmap, trp, prog):
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata[b"filename"]
            repo.ui.debug(b"adding %s revisions\n" % d)
            deltas = self.deltaiter()
            storage = repo.manifestlog.getstorage(d)
            if not storage.addgroup(deltas, revmap, trp):
                raise error.Abort(_(b"received dir revlog group is empty"))
553
553
554
554
class headerlessfixup(object):
    """File-like wrapper that re-prepends already-consumed header bytes.

    Serves the bytes of ``h`` first, then falls through to reading
    from the underlying file object ``fh``.
    """

    def __init__(self, fh, h):
        self._h = h
        self._fh = fh

    def read(self, n):
        buffered = self._h
        if not buffered:
            return readexactly(self._fh, n)
        d = buffered[:n]
        self._h = buffered[n:]
        if len(d) < n:
            # Header exhausted mid-request; top up from the stream.
            d += readexactly(self._fh, n - len(d))
        return d
567
567
568
568
def _revisiondeltatochunks(delta, headerfn):
    """Serialize a revisiondelta to changegroup chunks.

    Yields the length header, the delta header built by ``headerfn``,
    an optional diff-header prefix, and the payload itself.
    """

    # The captured revision delta may be encoded as a delta against
    # a base revision or as a full revision. The changegroup format
    # requires that everything on the wire be deltas. So for full
    # revisions, we need to invent a header that says to rewrite
    # data.
    if delta.delta is not None:
        data = delta.delta
        prefix = b''
    elif delta.basenode == nullid:
        # Full revision: fabricate a trivial diff covering all of it.
        data = delta.revision
        prefix = mdiff.trivialdiffheader(len(data))
    else:
        data = delta.revision
        prefix = mdiff.replacediffheader(delta.baserevisionsize, len(data))

    meta = headerfn(delta)

    yield chunkheader(len(meta) + len(prefix) + len(data))
    yield meta
    if prefix:
        yield prefix
    yield data
594
594
595
595
596 def _sortnodesellipsis(store, nodes, cl, lookup):
596 def _sortnodesellipsis(store, nodes, cl, lookup):
597 """Sort nodes for changegroup generation."""
597 """Sort nodes for changegroup generation."""
598 # Ellipses serving mode.
598 # Ellipses serving mode.
599 #
599 #
600 # In a perfect world, we'd generate better ellipsis-ified graphs
600 # In a perfect world, we'd generate better ellipsis-ified graphs
601 # for non-changelog revlogs. In practice, we haven't started doing
601 # for non-changelog revlogs. In practice, we haven't started doing
602 # that yet, so the resulting DAGs for the manifestlog and filelogs
602 # that yet, so the resulting DAGs for the manifestlog and filelogs
603 # are actually full of bogus parentage on all the ellipsis
603 # are actually full of bogus parentage on all the ellipsis
604 # nodes. This has the side effect that, while the contents are
604 # nodes. This has the side effect that, while the contents are
605 # correct, the individual DAGs might be completely out of whack in
605 # correct, the individual DAGs might be completely out of whack in
606 # a case like 882681bc3166 and its ancestors (back about 10
606 # a case like 882681bc3166 and its ancestors (back about 10
607 # revisions or so) in the main hg repo.
607 # revisions or so) in the main hg repo.
608 #
608 #
609 # The one invariant we *know* holds is that the new (potentially
609 # The one invariant we *know* holds is that the new (potentially
610 # bogus) DAG shape will be valid if we order the nodes in the
610 # bogus) DAG shape will be valid if we order the nodes in the
611 # order that they're introduced in dramatis personae by the
611 # order that they're introduced in dramatis personae by the
612 # changelog, so what we do is we sort the non-changelog histories
612 # changelog, so what we do is we sort the non-changelog histories
613 # by the order in which they are used by the changelog.
613 # by the order in which they are used by the changelog.
614 key = lambda n: cl.rev(lookup(n))
614 key = lambda n: cl.rev(lookup(n))
615 return sorted(nodes, key=key)
615 return sorted(nodes, key=key)
616
616
617
617
def _resolvenarrowrevisioninfo(
    cl,
    store,
    ischangelog,
    rev,
    linkrev,
    linknode,
    clrevtolocalrev,
    fullclnodes,
    precomputedellipsis,
):
    """Resolve adjusted parents for a revision in an ellipsis changegroup.

    Returns a ``(p1node, p2node, linknode)`` tuple for ``rev`` of
    ``store``, mapping the linkrev's ellipsis parents (from
    ``precomputedellipsis``) back into local revisions of ``store``.
    """
    linkparents = precomputedellipsis[linkrev]

    def local(clrev):
        """Turn a changelog revnum into a local revnum.

        The ellipsis dag is stored as revnums on the changelog,
        but when we're producing ellipsis entries for
        non-changelog revlogs, we need to turn those numbers into
        something local. This does that for us, and during the
        changelog sending phase will also expand the stored
        mappings as needed.
        """
        if clrev == nullrev:
            return nullrev

        if ischangelog:
            return clrev

        # Walk the ellipsis-ized changelog breadth-first looking for a
        # change that has been linked from the current revlog.
        #
        # For a flat manifest revlog only a single step should be necessary
        # as all relevant changelog entries are relevant to the flat
        # manifest.
        #
        # For a filelog or tree manifest dirlog however not every changelog
        # entry will have been relevant, so we need to skip some changelog
        # nodes even after ellipsis-izing.
        walk = [clrev]
        while walk:
            p = walk[0]
            walk = walk[1:]
            if p in clrevtolocalrev:
                return clrevtolocalrev[p]
            elif p in fullclnodes:
                walk.extend([pp for pp in cl.parentrevs(p) if pp != nullrev])
            elif p in precomputedellipsis:
                walk.extend(
                    [pp for pp in precomputedellipsis[p] if pp != nullrev]
                )
            else:
                # In this case, we've got an ellipsis with parents
                # outside the current bundle (likely an
                # incremental pull). We "know" that we can use the
                # value of this same revlog at whatever revision
                # is pointed to by linknode. "Know" is in scare
                # quotes because I haven't done enough examination
                # of edge cases to convince myself this is really
                # a fact - it works for all the (admittedly
                # thorough) cases in our testsuite, but I would be
                # somewhat unsurprised to find a case in the wild
                # where this breaks down a bit. That said, I don't
                # know if it would hurt anything.
                for i in pycompat.xrange(rev, 0, -1):
                    if store.linkrev(i) == clrev:
                        return i
                # We failed to resolve a parent for this node, so
                # we crash the changegroup construction.
                #
                # %s (not %r) for the filename so the message renders
                # identically on py2 and py3 (bytes repr differs).
                raise error.Abort(
                    b"unable to resolve parent while packing '%s' %r"
                    b' for changeset %r' % (store.indexfile, rev, clrev)
                )

        return nullrev

    if not linkparents or (store.parentrevs(rev) == (nullrev, nullrev)):
        p1, p2 = nullrev, nullrev
    elif len(linkparents) == 1:
        (p1,) = sorted(local(p) for p in linkparents)
        p2 = nullrev
    else:
        p1, p2 = sorted(local(p) for p in linkparents)

    p1node, p2node = store.node(p1), store.node(p2)

    return p1node, p2node, linknode
705
705
706
706
def deltagroup(
    repo,
    store,
    nodes,
    ischangelog,
    lookup,
    forcedeltaparentprev,
    topic=None,
    ellipses=False,
    clrevtolocalrev=None,
    fullclnodes=None,
    precomputedellipsis=None,
):
    """Calculate deltas for a set of revisions.

    Is a generator of ``revisiondelta`` instances.

    If topic is not None, progress detail will be generated using this
    topic name (e.g. changesets, manifests, etc).
    """
    if not nodes:
        return

    cl = repo.changelog

    if ischangelog:
        # `hg log` shows changesets in storage order. To preserve order
        # across clones, send out changesets in storage order.
        nodesorder = b'storage'
    elif ellipses:
        nodes = _sortnodesellipsis(store, nodes, cl, lookup)
        nodesorder = b'nodes'
    else:
        nodesorder = None

    # Perform ellipses filtering and revision massaging. We do this before
    # emitrevisions() because a) filtering out revisions creates less work
    # for emitrevisions() b) dropping revisions would break emitrevisions()'s
    # assumptions about delta choices and we would possibly send a delta
    # referencing a missing base revision.
    #
    # Also, calling lookup() has side-effects with regards to populating
    # data structures. If we don't call lookup() for each node or if we call
    # lookup() after the first pass through each node, things can break -
    # possibly intermittently depending on the python hash seed! For that
    # reason, we store a mapping of all linknodes during the initial node
    # pass rather than use lookup() on the output side.
    if ellipses:
        filtered = []
        adjustedparents = {}
        linknodes = {}

        for node in nodes:
            rev = store.rev(node)
            linknode = lookup(node)
            linkrev = cl.rev(linknode)
            clrevtolocalrev[linkrev] = rev

            # If linknode is in fullclnodes, it means the corresponding
            # changeset was a full changeset and is being sent unaltered.
            if linknode in fullclnodes:
                linknodes[node] = linknode

            # If the corresponding changeset wasn't in the set computed
            # as relevant to us, it should be dropped outright.
            elif linkrev not in precomputedellipsis:
                continue

            else:
                # We could probably do this later and avoid the dict
                # holding state. But it likely doesn't matter.
                p1node, p2node, linknode = _resolvenarrowrevisioninfo(
                    cl,
                    store,
                    ischangelog,
                    rev,
                    linkrev,
                    linknode,
                    clrevtolocalrev,
                    fullclnodes,
                    precomputedellipsis,
                )

                adjustedparents[node] = (p1node, p2node)
                linknodes[node] = linknode

            filtered.append(node)

        nodes = filtered

    # We expect the first pass to be fast, so we only engage the progress
    # meter for constructing the revision deltas.
    progress = None
    if topic is not None:
        progress = repo.ui.makeprogress(
            topic, unit=_(b'chunks'), total=len(nodes)
        )

    configtarget = repo.ui.config(b'devel', b'bundle.delta')
    if configtarget not in (b'', b'p1', b'full'):
        msg = _("""config "devel.bundle.delta" as unknown value: %s""")
        repo.ui.warn(msg % configtarget)

    # Changegroup v1 requires deltas against the previous revision;
    # otherwise honor the devel config override, defaulting to the
    # storage layer's standard delta selection.
    deltamode = repository.CG_DELTAMODE_STD
    if forcedeltaparentprev:
        deltamode = repository.CG_DELTAMODE_PREV
    elif configtarget == b'p1':
        deltamode = repository.CG_DELTAMODE_P1
    elif configtarget == b'full':
        deltamode = repository.CG_DELTAMODE_FULL

    revisions = store.emitrevisions(
        nodes,
        nodesorder=nodesorder,
        revisiondata=True,
        assumehaveparentrevisions=not ellipses,
        deltamode=deltamode,
    )

    for i, revision in enumerate(revisions):
        if progress:
            progress.update(i + 1)

        if ellipses:
            linknode = linknodes[revision.node]

            if revision.node in adjustedparents:
                p1node, p2node = adjustedparents[revision.node]
                revision.p1node = p1node
                revision.p2node = p2node
                revision.flags |= repository.REVISION_FLAG_ELLIPSIS

        else:
            linknode = lookup(revision.node)

        revision.linknode = linknode
        yield revision

    if progress:
        progress.complete()
847
847
848
848
849 class cgpacker(object):
849 class cgpacker(object):
850 def __init__(
850 def __init__(
851 self,
851 self,
852 repo,
852 repo,
853 oldmatcher,
853 oldmatcher,
854 matcher,
854 matcher,
855 version,
855 version,
856 builddeltaheader,
856 builddeltaheader,
857 manifestsend,
857 manifestsend,
858 forcedeltaparentprev=False,
858 forcedeltaparentprev=False,
859 bundlecaps=None,
859 bundlecaps=None,
860 ellipses=False,
860 ellipses=False,
861 shallow=False,
861 shallow=False,
862 ellipsisroots=None,
862 ellipsisroots=None,
863 fullnodes=None,
863 fullnodes=None,
864 ):
864 ):
865 """Given a source repo, construct a bundler.
865 """Given a source repo, construct a bundler.
866
866
867 oldmatcher is a matcher that matches on files the client already has.
867 oldmatcher is a matcher that matches on files the client already has.
868 These will not be included in the changegroup.
868 These will not be included in the changegroup.
869
869
870 matcher is a matcher that matches on files to include in the
870 matcher is a matcher that matches on files to include in the
871 changegroup. Used to facilitate sparse changegroups.
871 changegroup. Used to facilitate sparse changegroups.
872
872
873 forcedeltaparentprev indicates whether delta parents must be against
873 forcedeltaparentprev indicates whether delta parents must be against
874 the previous revision in a delta group. This should only be used for
874 the previous revision in a delta group. This should only be used for
875 compatibility with changegroup version 1.
875 compatibility with changegroup version 1.
876
876
877 builddeltaheader is a callable that constructs the header for a group
877 builddeltaheader is a callable that constructs the header for a group
878 delta.
878 delta.
879
879
880 manifestsend is a chunk to send after manifests have been fully emitted.
880 manifestsend is a chunk to send after manifests have been fully emitted.
881
881
882 ellipses indicates whether ellipsis serving mode is enabled.
882 ellipses indicates whether ellipsis serving mode is enabled.
883
883
884 bundlecaps is optional and can be used to specify the set of
884 bundlecaps is optional and can be used to specify the set of
885 capabilities which can be used to build the bundle. While bundlecaps is
885 capabilities which can be used to build the bundle. While bundlecaps is
886 unused in core Mercurial, extensions rely on this feature to communicate
886 unused in core Mercurial, extensions rely on this feature to communicate
887 capabilities to customize the changegroup packer.
887 capabilities to customize the changegroup packer.
888
888
889 shallow indicates whether shallow data might be sent. The packer may
889 shallow indicates whether shallow data might be sent. The packer may
890 need to pack file contents not introduced by the changes being packed.
890 need to pack file contents not introduced by the changes being packed.
891
891
892 fullnodes is the set of changelog nodes which should not be ellipsis
892 fullnodes is the set of changelog nodes which should not be ellipsis
893 nodes. We store this rather than the set of nodes that should be
893 nodes. We store this rather than the set of nodes that should be
894 ellipsis because for very large histories we expect this to be
894 ellipsis because for very large histories we expect this to be
895 significantly smaller.
895 significantly smaller.
896 """
896 """
897 assert oldmatcher
897 assert oldmatcher
898 assert matcher
898 assert matcher
899 self._oldmatcher = oldmatcher
899 self._oldmatcher = oldmatcher
900 self._matcher = matcher
900 self._matcher = matcher
901
901
902 self.version = version
902 self.version = version
903 self._forcedeltaparentprev = forcedeltaparentprev
903 self._forcedeltaparentprev = forcedeltaparentprev
904 self._builddeltaheader = builddeltaheader
904 self._builddeltaheader = builddeltaheader
905 self._manifestsend = manifestsend
905 self._manifestsend = manifestsend
906 self._ellipses = ellipses
906 self._ellipses = ellipses
907
907
908 # Set of capabilities we can use to build the bundle.
908 # Set of capabilities we can use to build the bundle.
909 if bundlecaps is None:
909 if bundlecaps is None:
910 bundlecaps = set()
910 bundlecaps = set()
911 self._bundlecaps = bundlecaps
911 self._bundlecaps = bundlecaps
912 self._isshallow = shallow
912 self._isshallow = shallow
913 self._fullclnodes = fullnodes
913 self._fullclnodes = fullnodes
914
914
915 # Maps ellipsis revs to their roots at the changelog level.
915 # Maps ellipsis revs to their roots at the changelog level.
916 self._precomputedellipsis = ellipsisroots
916 self._precomputedellipsis = ellipsisroots
917
917
918 self._repo = repo
918 self._repo = repo
919
919
920 if self._repo.ui.verbose and not self._repo.ui.debugflag:
920 if self._repo.ui.verbose and not self._repo.ui.debugflag:
921 self._verbosenote = self._repo.ui.note
921 self._verbosenote = self._repo.ui.note
922 else:
922 else:
923 self._verbosenote = lambda s: None
923 self._verbosenote = lambda s: None
924
924
925 def generate(
925 def generate(
926 self, commonrevs, clnodes, fastpathlinkrev, source, changelog=True
926 self, commonrevs, clnodes, fastpathlinkrev, source, changelog=True
927 ):
927 ):
928 """Yield a sequence of changegroup byte chunks.
928 """Yield a sequence of changegroup byte chunks.
929 If changelog is False, changelog data won't be added to changegroup
929 If changelog is False, changelog data won't be added to changegroup
930 """
930 """
931
931
932 repo = self._repo
932 repo = self._repo
933 cl = repo.changelog
933 cl = repo.changelog
934
934
935 self._verbosenote(_(b'uncompressed size of bundle content:\n'))
935 self._verbosenote(_(b'uncompressed size of bundle content:\n'))
936 size = 0
936 size = 0
937
937
938 clstate, deltas = self._generatechangelog(
938 clstate, deltas = self._generatechangelog(
939 cl, clnodes, generate=changelog
939 cl, clnodes, generate=changelog
940 )
940 )
941 for delta in deltas:
941 for delta in deltas:
942 for chunk in _revisiondeltatochunks(delta, self._builddeltaheader):
942 for chunk in _revisiondeltatochunks(delta, self._builddeltaheader):
943 size += len(chunk)
943 size += len(chunk)
944 yield chunk
944 yield chunk
945
945
946 close = closechunk()
946 close = closechunk()
947 size += len(close)
947 size += len(close)
948 yield closechunk()
948 yield closechunk()
949
949
950 self._verbosenote(_(b'%8.i (changelog)\n') % size)
950 self._verbosenote(_(b'%8.i (changelog)\n') % size)
951
951
952 clrevorder = clstate[b'clrevorder']
952 clrevorder = clstate[b'clrevorder']
953 manifests = clstate[b'manifests']
953 manifests = clstate[b'manifests']
954 changedfiles = clstate[b'changedfiles']
954 changedfiles = clstate[b'changedfiles']
955
955
956 # We need to make sure that the linkrev in the changegroup refers to
956 # We need to make sure that the linkrev in the changegroup refers to
957 # the first changeset that introduced the manifest or file revision.
957 # the first changeset that introduced the manifest or file revision.
958 # The fastpath is usually safer than the slowpath, because the filelogs
958 # The fastpath is usually safer than the slowpath, because the filelogs
959 # are walked in revlog order.
959 # are walked in revlog order.
960 #
960 #
961 # When taking the slowpath when the manifest revlog uses generaldelta,
961 # When taking the slowpath when the manifest revlog uses generaldelta,
962 # the manifest may be walked in the "wrong" order. Without 'clrevorder',
962 # the manifest may be walked in the "wrong" order. Without 'clrevorder',
963 # we would get an incorrect linkrev (see fix in cc0ff93d0c0c).
963 # we would get an incorrect linkrev (see fix in cc0ff93d0c0c).
964 #
964 #
965 # When taking the fastpath, we are only vulnerable to reordering
965 # When taking the fastpath, we are only vulnerable to reordering
966 # of the changelog itself. The changelog never uses generaldelta and is
966 # of the changelog itself. The changelog never uses generaldelta and is
967 # never reordered. To handle this case, we simply take the slowpath,
967 # never reordered. To handle this case, we simply take the slowpath,
968 # which already has the 'clrevorder' logic. This was also fixed in
968 # which already has the 'clrevorder' logic. This was also fixed in
969 # cc0ff93d0c0c.
969 # cc0ff93d0c0c.
970
970
971 # Treemanifests don't work correctly with fastpathlinkrev
971 # Treemanifests don't work correctly with fastpathlinkrev
972 # either, because we don't discover which directory nodes to
972 # either, because we don't discover which directory nodes to
973 # send along with files. This could probably be fixed.
973 # send along with files. This could probably be fixed.
974 fastpathlinkrev = fastpathlinkrev and not scmutil.istreemanifest(repo)
974 fastpathlinkrev = fastpathlinkrev and not scmutil.istreemanifest(repo)
975
975
976 fnodes = {} # needed file nodes
976 fnodes = {} # needed file nodes
977
977
978 size = 0
978 size = 0
979 it = self.generatemanifests(
979 it = self.generatemanifests(
980 commonrevs,
980 commonrevs,
981 clrevorder,
981 clrevorder,
982 fastpathlinkrev,
982 fastpathlinkrev,
983 manifests,
983 manifests,
984 fnodes,
984 fnodes,
985 source,
985 source,
986 clstate[b'clrevtomanifestrev'],
986 clstate[b'clrevtomanifestrev'],
987 )
987 )
988
988
989 for tree, deltas in it:
989 for tree, deltas in it:
990 if tree:
990 if tree:
991 assert self.version == b'03'
991 assert self.version == b'03'
992 chunk = _fileheader(tree)
992 chunk = _fileheader(tree)
993 size += len(chunk)
993 size += len(chunk)
994 yield chunk
994 yield chunk
995
995
996 for delta in deltas:
996 for delta in deltas:
997 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
997 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
998 for chunk in chunks:
998 for chunk in chunks:
999 size += len(chunk)
999 size += len(chunk)
1000 yield chunk
1000 yield chunk
1001
1001
1002 close = closechunk()
1002 close = closechunk()
1003 size += len(close)
1003 size += len(close)
1004 yield close
1004 yield close
1005
1005
1006 self._verbosenote(_(b'%8.i (manifests)\n') % size)
1006 self._verbosenote(_(b'%8.i (manifests)\n') % size)
1007 yield self._manifestsend
1007 yield self._manifestsend
1008
1008
1009 mfdicts = None
1009 mfdicts = None
1010 if self._ellipses and self._isshallow:
1010 if self._ellipses and self._isshallow:
1011 mfdicts = [
1011 mfdicts = [
1012 (self._repo.manifestlog[n].read(), lr)
1012 (self._repo.manifestlog[n].read(), lr)
1013 for (n, lr) in pycompat.iteritems(manifests)
1013 for (n, lr) in pycompat.iteritems(manifests)
1014 ]
1014 ]
1015
1015
1016 manifests.clear()
1016 manifests.clear()
1017 clrevs = {cl.rev(x) for x in clnodes}
1017 clrevs = {cl.rev(x) for x in clnodes}
1018
1018
1019 it = self.generatefiles(
1019 it = self.generatefiles(
1020 changedfiles,
1020 changedfiles,
1021 commonrevs,
1021 commonrevs,
1022 source,
1022 source,
1023 mfdicts,
1023 mfdicts,
1024 fastpathlinkrev,
1024 fastpathlinkrev,
1025 fnodes,
1025 fnodes,
1026 clrevs,
1026 clrevs,
1027 )
1027 )
1028
1028
1029 for path, deltas in it:
1029 for path, deltas in it:
1030 h = _fileheader(path)
1030 h = _fileheader(path)
1031 size = len(h)
1031 size = len(h)
1032 yield h
1032 yield h
1033
1033
1034 for delta in deltas:
1034 for delta in deltas:
1035 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
1035 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
1036 for chunk in chunks:
1036 for chunk in chunks:
1037 size += len(chunk)
1037 size += len(chunk)
1038 yield chunk
1038 yield chunk
1039
1039
1040 close = closechunk()
1040 close = closechunk()
1041 size += len(close)
1041 size += len(close)
1042 yield close
1042 yield close
1043
1043
1044 self._verbosenote(_(b'%8.i %s\n') % (size, path))
1044 self._verbosenote(_(b'%8.i %s\n') % (size, path))
1045
1045
1046 yield closechunk()
1046 yield closechunk()
1047
1047
1048 if clnodes:
1048 if clnodes:
1049 repo.hook(b'outgoing', node=hex(clnodes[0]), source=source)
1049 repo.hook(b'outgoing', node=hex(clnodes[0]), source=source)
1050
1050
1051 def _generatechangelog(self, cl, nodes, generate=True):
1051 def _generatechangelog(self, cl, nodes, generate=True):
1052 """Generate data for changelog chunks.
1052 """Generate data for changelog chunks.
1053
1053
1054 Returns a 2-tuple of a dict containing state and an iterable of
1054 Returns a 2-tuple of a dict containing state and an iterable of
1055 byte chunks. The state will not be fully populated until the
1055 byte chunks. The state will not be fully populated until the
1056 chunk stream has been fully consumed.
1056 chunk stream has been fully consumed.
1057
1057
1058 if generate is False, the state will be fully populated and no chunk
1058 if generate is False, the state will be fully populated and no chunk
1059 stream will be yielded
1059 stream will be yielded
1060 """
1060 """
1061 clrevorder = {}
1061 clrevorder = {}
1062 manifests = {}
1062 manifests = {}
1063 mfl = self._repo.manifestlog
1063 mfl = self._repo.manifestlog
1064 changedfiles = set()
1064 changedfiles = set()
1065 clrevtomanifestrev = {}
1065 clrevtomanifestrev = {}
1066
1066
1067 state = {
1067 state = {
1068 b'clrevorder': clrevorder,
1068 b'clrevorder': clrevorder,
1069 b'manifests': manifests,
1069 b'manifests': manifests,
1070 b'changedfiles': changedfiles,
1070 b'changedfiles': changedfiles,
1071 b'clrevtomanifestrev': clrevtomanifestrev,
1071 b'clrevtomanifestrev': clrevtomanifestrev,
1072 }
1072 }
1073
1073
1074 if not (generate or self._ellipses):
1074 if not (generate or self._ellipses):
1075 # sort the nodes in storage order
1075 # sort the nodes in storage order
1076 nodes = sorted(nodes, key=cl.rev)
1076 nodes = sorted(nodes, key=cl.rev)
1077 for node in nodes:
1077 for node in nodes:
1078 c = cl.changelogrevision(node)
1078 c = cl.changelogrevision(node)
1079 clrevorder[node] = len(clrevorder)
1079 clrevorder[node] = len(clrevorder)
1080 # record the first changeset introducing this manifest version
1080 # record the first changeset introducing this manifest version
1081 manifests.setdefault(c.manifest, node)
1081 manifests.setdefault(c.manifest, node)
1082 # Record a complete list of potentially-changed files in
1082 # Record a complete list of potentially-changed files in
1083 # this manifest.
1083 # this manifest.
1084 changedfiles.update(c.files)
1084 changedfiles.update(c.files)
1085
1085
1086 return state, ()
1086 return state, ()
1087
1087
1088 # Callback for the changelog, used to collect changed files and
1088 # Callback for the changelog, used to collect changed files and
1089 # manifest nodes.
1089 # manifest nodes.
1090 # Returns the linkrev node (identity in the changelog case).
1090 # Returns the linkrev node (identity in the changelog case).
1091 def lookupcl(x):
1091 def lookupcl(x):
1092 c = cl.changelogrevision(x)
1092 c = cl.changelogrevision(x)
1093 clrevorder[x] = len(clrevorder)
1093 clrevorder[x] = len(clrevorder)
1094
1094
1095 if self._ellipses:
1095 if self._ellipses:
1096 # Only update manifests if x is going to be sent. Otherwise we
1096 # Only update manifests if x is going to be sent. Otherwise we
1097 # end up with bogus linkrevs specified for manifests and
1097 # end up with bogus linkrevs specified for manifests and
1098 # we skip some manifest nodes that we should otherwise
1098 # we skip some manifest nodes that we should otherwise
1099 # have sent.
1099 # have sent.
1100 if (
1100 if (
1101 x in self._fullclnodes
1101 x in self._fullclnodes
1102 or cl.rev(x) in self._precomputedellipsis
1102 or cl.rev(x) in self._precomputedellipsis
1103 ):
1103 ):
1104
1104
1105 manifestnode = c.manifest
1105 manifestnode = c.manifest
1106 # Record the first changeset introducing this manifest
1106 # Record the first changeset introducing this manifest
1107 # version.
1107 # version.
1108 manifests.setdefault(manifestnode, x)
1108 manifests.setdefault(manifestnode, x)
1109 # Set this narrow-specific dict so we have the lowest
1109 # Set this narrow-specific dict so we have the lowest
1110 # manifest revnum to look up for this cl revnum. (Part of
1110 # manifest revnum to look up for this cl revnum. (Part of
1111 # mapping changelog ellipsis parents to manifest ellipsis
1111 # mapping changelog ellipsis parents to manifest ellipsis
1112 # parents)
1112 # parents)
1113 clrevtomanifestrev.setdefault(
1113 clrevtomanifestrev.setdefault(
1114 cl.rev(x), mfl.rev(manifestnode)
1114 cl.rev(x), mfl.rev(manifestnode)
1115 )
1115 )
1116 # We can't trust the changed files list in the changeset if the
1116 # We can't trust the changed files list in the changeset if the
1117 # client requested a shallow clone.
1117 # client requested a shallow clone.
1118 if self._isshallow:
1118 if self._isshallow:
1119 changedfiles.update(mfl[c.manifest].read().keys())
1119 changedfiles.update(mfl[c.manifest].read().keys())
1120 else:
1120 else:
1121 changedfiles.update(c.files)
1121 changedfiles.update(c.files)
1122 else:
1122 else:
1123 # record the first changeset introducing this manifest version
1123 # record the first changeset introducing this manifest version
1124 manifests.setdefault(c.manifest, x)
1124 manifests.setdefault(c.manifest, x)
1125 # Record a complete list of potentially-changed files in
1125 # Record a complete list of potentially-changed files in
1126 # this manifest.
1126 # this manifest.
1127 changedfiles.update(c.files)
1127 changedfiles.update(c.files)
1128
1128
1129 return x
1129 return x
1130
1130
1131 gen = deltagroup(
1131 gen = deltagroup(
1132 self._repo,
1132 self._repo,
1133 cl,
1133 cl,
1134 nodes,
1134 nodes,
1135 True,
1135 True,
1136 lookupcl,
1136 lookupcl,
1137 self._forcedeltaparentprev,
1137 self._forcedeltaparentprev,
1138 ellipses=self._ellipses,
1138 ellipses=self._ellipses,
1139 topic=_(b'changesets'),
1139 topic=_(b'changesets'),
1140 clrevtolocalrev={},
1140 clrevtolocalrev={},
1141 fullclnodes=self._fullclnodes,
1141 fullclnodes=self._fullclnodes,
1142 precomputedellipsis=self._precomputedellipsis,
1142 precomputedellipsis=self._precomputedellipsis,
1143 )
1143 )
1144
1144
1145 return state, gen
1145 return state, gen
1146
1146
1147 def generatemanifests(
1147 def generatemanifests(
1148 self,
1148 self,
1149 commonrevs,
1149 commonrevs,
1150 clrevorder,
1150 clrevorder,
1151 fastpathlinkrev,
1151 fastpathlinkrev,
1152 manifests,
1152 manifests,
1153 fnodes,
1153 fnodes,
1154 source,
1154 source,
1155 clrevtolocalrev,
1155 clrevtolocalrev,
1156 ):
1156 ):
1157 """Returns an iterator of changegroup chunks containing manifests.
1157 """Returns an iterator of changegroup chunks containing manifests.
1158
1158
1159 `source` is unused here, but is used by extensions like remotefilelog to
1159 `source` is unused here, but is used by extensions like remotefilelog to
1160 change what is sent based in pulls vs pushes, etc.
1160 change what is sent based in pulls vs pushes, etc.
1161 """
1161 """
1162 repo = self._repo
1162 repo = self._repo
1163 mfl = repo.manifestlog
1163 mfl = repo.manifestlog
1164 tmfnodes = {b'': manifests}
1164 tmfnodes = {b'': manifests}
1165
1165
1166 # Callback for the manifest, used to collect linkrevs for filelog
1166 # Callback for the manifest, used to collect linkrevs for filelog
1167 # revisions.
1167 # revisions.
1168 # Returns the linkrev node (collected in lookupcl).
1168 # Returns the linkrev node (collected in lookupcl).
1169 def makelookupmflinknode(tree, nodes):
1169 def makelookupmflinknode(tree, nodes):
1170 if fastpathlinkrev:
1170 if fastpathlinkrev:
1171 assert not tree
1171 assert not tree
1172 return (
1172 return (
1173 manifests.__getitem__
1173 manifests.__getitem__
1174 ) # pytype: disable=unsupported-operands
1174 ) # pytype: disable=unsupported-operands
1175
1175
1176 def lookupmflinknode(x):
1176 def lookupmflinknode(x):
1177 """Callback for looking up the linknode for manifests.
1177 """Callback for looking up the linknode for manifests.
1178
1178
1179 Returns the linkrev node for the specified manifest.
1179 Returns the linkrev node for the specified manifest.
1180
1180
1181 SIDE EFFECT:
1181 SIDE EFFECT:
1182
1182
1183 1) fclnodes gets populated with the list of relevant
1183 1) fclnodes gets populated with the list of relevant
1184 file nodes if we're not using fastpathlinkrev
1184 file nodes if we're not using fastpathlinkrev
1185 2) When treemanifests are in use, collects treemanifest nodes
1185 2) When treemanifests are in use, collects treemanifest nodes
1186 to send
1186 to send
1187
1187
1188 Note that this means manifests must be completely sent to
1188 Note that this means manifests must be completely sent to
1189 the client before you can trust the list of files and
1189 the client before you can trust the list of files and
1190 treemanifests to send.
1190 treemanifests to send.
1191 """
1191 """
1192 clnode = nodes[x]
1192 clnode = nodes[x]
1193 mdata = mfl.get(tree, x).readfast(shallow=True)
1193 mdata = mfl.get(tree, x).readfast(shallow=True)
1194 for p, n, fl in mdata.iterentries():
1194 for p, n, fl in mdata.iterentries():
1195 if fl == b't': # subdirectory manifest
1195 if fl == b't': # subdirectory manifest
1196 subtree = tree + p + b'/'
1196 subtree = tree + p + b'/'
1197 tmfclnodes = tmfnodes.setdefault(subtree, {})
1197 tmfclnodes = tmfnodes.setdefault(subtree, {})
1198 tmfclnode = tmfclnodes.setdefault(n, clnode)
1198 tmfclnode = tmfclnodes.setdefault(n, clnode)
1199 if clrevorder[clnode] < clrevorder[tmfclnode]:
1199 if clrevorder[clnode] < clrevorder[tmfclnode]:
1200 tmfclnodes[n] = clnode
1200 tmfclnodes[n] = clnode
1201 else:
1201 else:
1202 f = tree + p
1202 f = tree + p
1203 fclnodes = fnodes.setdefault(f, {})
1203 fclnodes = fnodes.setdefault(f, {})
1204 fclnode = fclnodes.setdefault(n, clnode)
1204 fclnode = fclnodes.setdefault(n, clnode)
1205 if clrevorder[clnode] < clrevorder[fclnode]:
1205 if clrevorder[clnode] < clrevorder[fclnode]:
1206 fclnodes[n] = clnode
1206 fclnodes[n] = clnode
1207 return clnode
1207 return clnode
1208
1208
1209 return lookupmflinknode
1209 return lookupmflinknode
1210
1210
1211 while tmfnodes:
1211 while tmfnodes:
1212 tree, nodes = tmfnodes.popitem()
1212 tree, nodes = tmfnodes.popitem()
1213
1213
1214 should_visit = self._matcher.visitdir(tree[:-1])
1214 should_visit = self._matcher.visitdir(tree[:-1])
1215 if tree and not should_visit:
1215 if tree and not should_visit:
1216 continue
1216 continue
1217
1217
1218 store = mfl.getstorage(tree)
1218 store = mfl.getstorage(tree)
1219
1219
1220 if not should_visit:
1220 if not should_visit:
1221 # No nodes to send because this directory is out of
1221 # No nodes to send because this directory is out of
1222 # the client's view of the repository (probably
1222 # the client's view of the repository (probably
1223 # because of narrow clones). Do this even for the root
1223 # because of narrow clones). Do this even for the root
1224 # directory (tree=='')
1224 # directory (tree=='')
1225 prunednodes = []
1225 prunednodes = []
1226 else:
1226 else:
1227 # Avoid sending any manifest nodes we can prove the
1227 # Avoid sending any manifest nodes we can prove the
1228 # client already has by checking linkrevs. See the
1228 # client already has by checking linkrevs. See the
1229 # related comment in generatefiles().
1229 # related comment in generatefiles().
1230 prunednodes = self._prunemanifests(store, nodes, commonrevs)
1230 prunednodes = self._prunemanifests(store, nodes, commonrevs)
1231
1231
1232 if tree and not prunednodes:
1232 if tree and not prunednodes:
1233 continue
1233 continue
1234
1234
1235 lookupfn = makelookupmflinknode(tree, nodes)
1235 lookupfn = makelookupmflinknode(tree, nodes)
1236
1236
1237 deltas = deltagroup(
1237 deltas = deltagroup(
1238 self._repo,
1238 self._repo,
1239 store,
1239 store,
1240 prunednodes,
1240 prunednodes,
1241 False,
1241 False,
1242 lookupfn,
1242 lookupfn,
1243 self._forcedeltaparentprev,
1243 self._forcedeltaparentprev,
1244 ellipses=self._ellipses,
1244 ellipses=self._ellipses,
1245 topic=_(b'manifests'),
1245 topic=_(b'manifests'),
1246 clrevtolocalrev=clrevtolocalrev,
1246 clrevtolocalrev=clrevtolocalrev,
1247 fullclnodes=self._fullclnodes,
1247 fullclnodes=self._fullclnodes,
1248 precomputedellipsis=self._precomputedellipsis,
1248 precomputedellipsis=self._precomputedellipsis,
1249 )
1249 )
1250
1250
1251 if not self._oldmatcher.visitdir(store.tree[:-1]):
1251 if not self._oldmatcher.visitdir(store.tree[:-1]):
1252 yield tree, deltas
1252 yield tree, deltas
1253 else:
1253 else:
1254 # 'deltas' is a generator and we need to consume it even if
1254 # 'deltas' is a generator and we need to consume it even if
1255 # we are not going to send it because a side-effect is that
1255 # we are not going to send it because a side-effect is that
1256 # it updates tmdnodes (via lookupfn)
1256 # it updates tmdnodes (via lookupfn)
1257 for d in deltas:
1257 for d in deltas:
1258 pass
1258 pass
1259 if not tree:
1259 if not tree:
1260 yield tree, []
1260 yield tree, []
1261
1261
1262 def _prunemanifests(self, store, nodes, commonrevs):
1262 def _prunemanifests(self, store, nodes, commonrevs):
1263 if not self._ellipses:
1263 if not self._ellipses:
1264 # In non-ellipses case and large repositories, it is better to
1264 # In non-ellipses case and large repositories, it is better to
1265 # prevent calling of store.rev and store.linkrev on a lot of
1265 # prevent calling of store.rev and store.linkrev on a lot of
1266 # nodes as compared to sending some extra data
1266 # nodes as compared to sending some extra data
1267 return nodes.copy()
1267 return nodes.copy()
1268 # This is split out as a separate method to allow filtering
1268 # This is split out as a separate method to allow filtering
1269 # commonrevs in extension code.
1269 # commonrevs in extension code.
1270 #
1270 #
1271 # TODO(augie): this shouldn't be required, instead we should
1271 # TODO(augie): this shouldn't be required, instead we should
1272 # make filtering of revisions to send delegated to the store
1272 # make filtering of revisions to send delegated to the store
1273 # layer.
1273 # layer.
1274 frev, flr = store.rev, store.linkrev
1274 frev, flr = store.rev, store.linkrev
1275 return [n for n in nodes if flr(frev(n)) not in commonrevs]
1275 return [n for n in nodes if flr(frev(n)) not in commonrevs]
1276
1276
1277 # The 'source' parameter is useful for extensions
1277 # The 'source' parameter is useful for extensions
1278 def generatefiles(
1278 def generatefiles(
1279 self,
1279 self,
1280 changedfiles,
1280 changedfiles,
1281 commonrevs,
1281 commonrevs,
1282 source,
1282 source,
1283 mfdicts,
1283 mfdicts,
1284 fastpathlinkrev,
1284 fastpathlinkrev,
1285 fnodes,
1285 fnodes,
1286 clrevs,
1286 clrevs,
1287 ):
1287 ):
1288 changedfiles = [
1288 changedfiles = [
1289 f
1289 f
1290 for f in changedfiles
1290 for f in changedfiles
1291 if self._matcher(f) and not self._oldmatcher(f)
1291 if self._matcher(f) and not self._oldmatcher(f)
1292 ]
1292 ]
1293
1293
1294 if not fastpathlinkrev:
1294 if not fastpathlinkrev:
1295
1295
1296 def normallinknodes(unused, fname):
1296 def normallinknodes(unused, fname):
1297 return fnodes.get(fname, {})
1297 return fnodes.get(fname, {})
1298
1298
1299 else:
1299 else:
1300 cln = self._repo.changelog.node
1300 cln = self._repo.changelog.node
1301
1301
1302 def normallinknodes(store, fname):
1302 def normallinknodes(store, fname):
1303 flinkrev = store.linkrev
1303 flinkrev = store.linkrev
1304 fnode = store.node
1304 fnode = store.node
1305 revs = ((r, flinkrev(r)) for r in store)
1305 revs = ((r, flinkrev(r)) for r in store)
1306 return {fnode(r): cln(lr) for r, lr in revs if lr in clrevs}
1306 return {fnode(r): cln(lr) for r, lr in revs if lr in clrevs}
1307
1307
1308 clrevtolocalrev = {}
1308 clrevtolocalrev = {}
1309
1309
1310 if self._isshallow:
1310 if self._isshallow:
1311 # In a shallow clone, the linknodes callback needs to also include
1311 # In a shallow clone, the linknodes callback needs to also include
1312 # those file nodes that are in the manifests we sent but weren't
1312 # those file nodes that are in the manifests we sent but weren't
1313 # introduced by those manifests.
1313 # introduced by those manifests.
1314 commonctxs = [self._repo[c] for c in commonrevs]
1314 commonctxs = [self._repo[c] for c in commonrevs]
1315 clrev = self._repo.changelog.rev
1315 clrev = self._repo.changelog.rev
1316
1316
1317 def linknodes(flog, fname):
1317 def linknodes(flog, fname):
1318 for c in commonctxs:
1318 for c in commonctxs:
1319 try:
1319 try:
1320 fnode = c.filenode(fname)
1320 fnode = c.filenode(fname)
1321 clrevtolocalrev[c.rev()] = flog.rev(fnode)
1321 clrevtolocalrev[c.rev()] = flog.rev(fnode)
1322 except error.ManifestLookupError:
1322 except error.ManifestLookupError:
1323 pass
1323 pass
1324 links = normallinknodes(flog, fname)
1324 links = normallinknodes(flog, fname)
1325 if len(links) != len(mfdicts):
1325 if len(links) != len(mfdicts):
1326 for mf, lr in mfdicts:
1326 for mf, lr in mfdicts:
1327 fnode = mf.get(fname, None)
1327 fnode = mf.get(fname, None)
1328 if fnode in links:
1328 if fnode in links:
1329 links[fnode] = min(links[fnode], lr, key=clrev)
1329 links[fnode] = min(links[fnode], lr, key=clrev)
1330 elif fnode:
1330 elif fnode:
1331 links[fnode] = lr
1331 links[fnode] = lr
1332 return links
1332 return links
1333
1333
1334 else:
1334 else:
1335 linknodes = normallinknodes
1335 linknodes = normallinknodes
1336
1336
1337 repo = self._repo
1337 repo = self._repo
1338 progress = repo.ui.makeprogress(
1338 progress = repo.ui.makeprogress(
1339 _(b'files'), unit=_(b'files'), total=len(changedfiles)
1339 _(b'files'), unit=_(b'files'), total=len(changedfiles)
1340 )
1340 )
1341 for i, fname in enumerate(sorted(changedfiles)):
1341 for i, fname in enumerate(sorted(changedfiles)):
1342 filerevlog = repo.file(fname)
1342 filerevlog = repo.file(fname)
1343 if not filerevlog:
1343 if not filerevlog:
1344 raise error.Abort(
1344 raise error.Abort(
1345 _(b"empty or missing file data for %s") % fname
1345 _(b"empty or missing file data for %s") % fname
1346 )
1346 )
1347
1347
1348 clrevtolocalrev.clear()
1348 clrevtolocalrev.clear()
1349
1349
1350 linkrevnodes = linknodes(filerevlog, fname)
1350 linkrevnodes = linknodes(filerevlog, fname)
1351 # Lookup for filenodes, we collected the linkrev nodes above in the
1351 # Lookup for filenodes, we collected the linkrev nodes above in the
1352 # fastpath case and with lookupmf in the slowpath case.
1352 # fastpath case and with lookupmf in the slowpath case.
1353 def lookupfilelog(x):
1353 def lookupfilelog(x):
1354 return linkrevnodes[x]
1354 return linkrevnodes[x]
1355
1355
1356 frev, flr = filerevlog.rev, filerevlog.linkrev
1356 frev, flr = filerevlog.rev, filerevlog.linkrev
1357 # Skip sending any filenode we know the client already
1357 # Skip sending any filenode we know the client already
1358 # has. This avoids over-sending files relatively
1358 # has. This avoids over-sending files relatively
1359 # inexpensively, so it's not a problem if we under-filter
1359 # inexpensively, so it's not a problem if we under-filter
1360 # here.
1360 # here.
1361 filenodes = [
1361 filenodes = [
1362 n for n in linkrevnodes if flr(frev(n)) not in commonrevs
1362 n for n in linkrevnodes if flr(frev(n)) not in commonrevs
1363 ]
1363 ]
1364
1364
1365 if not filenodes:
1365 if not filenodes:
1366 continue
1366 continue
1367
1367
1368 progress.update(i + 1, item=fname)
1368 progress.update(i + 1, item=fname)
1369
1369
1370 deltas = deltagroup(
1370 deltas = deltagroup(
1371 self._repo,
1371 self._repo,
1372 filerevlog,
1372 filerevlog,
1373 filenodes,
1373 filenodes,
1374 False,
1374 False,
1375 lookupfilelog,
1375 lookupfilelog,
1376 self._forcedeltaparentprev,
1376 self._forcedeltaparentprev,
1377 ellipses=self._ellipses,
1377 ellipses=self._ellipses,
1378 clrevtolocalrev=clrevtolocalrev,
1378 clrevtolocalrev=clrevtolocalrev,
1379 fullclnodes=self._fullclnodes,
1379 fullclnodes=self._fullclnodes,
1380 precomputedellipsis=self._precomputedellipsis,
1380 precomputedellipsis=self._precomputedellipsis,
1381 )
1381 )
1382
1382
1383 yield fname, deltas
1383 yield fname, deltas
1384
1384
1385 progress.complete()
1385 progress.complete()
1386
1386
1387
1387
def _makecg1packer(
    repo,
    oldmatcher,
    matcher,
    bundlecaps,
    ellipses=False,
    shallow=False,
    ellipsisroots=None,
    fullnodes=None,
):
    """Construct a changegroup version 01 packer.

    cg1 deltas are always against the previous revision, so the delta
    header carries no explicit base node and ``forcedeltaparentprev``
    is set.
    """
    builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack(
        d.node, d.p1node, d.p2node, d.linknode
    )

    return cgpacker(
        repo,
        oldmatcher,
        matcher,
        b'01',
        builddeltaheader=builddeltaheader,
        manifestsend=b'',
        forcedeltaparentprev=True,
        bundlecaps=bundlecaps,
        ellipses=ellipses,
        shallow=shallow,
        ellipsisroots=ellipsisroots,
        fullnodes=fullnodes,
    )
1416
1416
1417
1417
def _makecg2packer(
    repo,
    oldmatcher,
    matcher,
    bundlecaps,
    ellipses=False,
    shallow=False,
    ellipsisroots=None,
    fullnodes=None,
):
    """Construct a changegroup version 02 packer.

    cg2 adds an explicit delta base node to the header (generaldelta
    support), so deltas need not be against the previous revision.
    """
    builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack(
        d.node, d.p1node, d.p2node, d.basenode, d.linknode
    )

    return cgpacker(
        repo,
        oldmatcher,
        matcher,
        b'02',
        builddeltaheader=builddeltaheader,
        manifestsend=b'',
        bundlecaps=bundlecaps,
        ellipses=ellipses,
        shallow=shallow,
        ellipsisroots=ellipsisroots,
        fullnodes=fullnodes,
    )
1445
1445
1446
1446
def _makecg3packer(
    repo,
    oldmatcher,
    matcher,
    bundlecaps,
    ellipses=False,
    shallow=False,
    ellipsisroots=None,
    fullnodes=None,
):
    """Construct a changegroup version 03 packer.

    cg3 extends cg2 with revlog flags in the delta header and a close
    chunk after the manifest section (used for tree manifests).
    """
    builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
        d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags
    )

    return cgpacker(
        repo,
        oldmatcher,
        matcher,
        b'03',
        builddeltaheader=builddeltaheader,
        manifestsend=closechunk(),
        bundlecaps=bundlecaps,
        ellipses=ellipses,
        shallow=shallow,
        ellipsisroots=ellipsisroots,
        fullnodes=fullnodes,
    )
1474
1474
1475
1475
# Maps changegroup version -> (packer factory, unpacker class).
_packermap = {
    b'01': (_makecg1packer, cg1unpacker),
    # cg2 adds support for exchanging generaldelta
    b'02': (_makecg2packer, cg2unpacker),
    # cg3 adds support for exchanging revlog flags and treemanifests
    b'03': (_makecg3packer, cg3unpacker),
}
1483
1483
1484
1484
def allsupportedversions(repo):
    """Return the set of changegroup versions this repo can exchange."""
    versions = set(_packermap.keys())
    needv03 = False
    if (
        repo.ui.configbool(b'experimental', b'changegroup3')
        or repo.ui.configbool(b'experimental', b'treemanifest')
        or scmutil.istreemanifest(repo)
    ):
        # we keep version 03 because we need to exchange treemanifest data
        #
        # we also keep version 01 and 02, because it is possible for repo to
        # contain both normal and tree manifest at the same time. so using
        # older version to pull data is viable
        #
        # (or even to push subset of history)
        needv03 = True
    if b'exp-sidedata-flag' in repo.requirements:
        needv03 = True
        # don't attempt to use 01/02 until we do sidedata cleaning
        versions.discard(b'01')
        versions.discard(b'02')
    if not needv03:
        versions.discard(b'03')
    return versions
1509
1509
1510
1510
# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    return allsupportedversions(repo)
1514
1514
1515
1515
# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    versions = allsupportedversions(repo)
    if scmutil.istreemanifest(repo):
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        versions.discard(b'01')
        versions.discard(b'02')
    if requirements.NARROW_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # support that for stripping and unbundling to work.
        versions.discard(b'01')
        versions.discard(b'02')
    if LFS_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # mark LFS entries with REVIDX_EXTSTORED.
        versions.discard(b'01')
        versions.discard(b'02')

    return versions
1539
1539
1540
1540
def localversion(repo):
    # Finds the best version to use for bundles that are meant to be used
    # locally, such as those from strip and shelve, and temporary bundles.
    return max(supportedoutgoingversions(repo))
1545
1545
1546
1546
def safeversion(repo):
    # Finds the smallest version that it's safe to assume clients of the repo
    # will support. For example, all hg versions that support generaldelta also
    # support changegroup 02.
    versions = supportedoutgoingversions(repo)
    if b'generaldelta' in repo.requirements:
        versions.discard(b'01')
    assert versions
    return min(versions)
1556
1556
1557
1557
def getbundler(
    version,
    repo,
    bundlecaps=None,
    oldmatcher=None,
    matcher=None,
    ellipses=False,
    shallow=False,
    ellipsisroots=None,
    fullnodes=None,
):
    """Return a changegroup packer for ``version``.

    Raises ProgrammingError for version 01 with a sparse matcher, and
    Abort when ellipsis nodes are requested with a pre-cg3 version.
    """
    assert version in supportedoutgoingversions(repo)

    if matcher is None:
        matcher = matchmod.always()
    if oldmatcher is None:
        oldmatcher = matchmod.never()

    if version == b'01' and not matcher.always():
        raise error.ProgrammingError(
            b'version 01 changegroups do not support sparse file matchers'
        )

    if ellipses and version in (b'01', b'02'):
        raise error.Abort(
            _(
                b'ellipsis nodes require at least cg3 on client and server, '
                b'but negotiated version %s'
            )
            % version
        )

    # Requested files could include files not in the local store. So
    # filter those out.
    matcher = repo.narrowmatch(matcher)

    fn = _packermap[version][0]
    return fn(
        repo,
        oldmatcher,
        matcher,
        bundlecaps,
        ellipses=ellipses,
        shallow=shallow,
        ellipsisroots=ellipsisroots,
        fullnodes=fullnodes,
    )
1605
1605
1606
1606
def getunbundler(version, fh, alg, extras=None):
    """Instantiate the changegroup unpacker for ``version`` over stream
    ``fh`` using compression algorithm ``alg``."""
    unpackercls = _packermap[version][1]
    return unpackercls(fh, alg, extras=extras)
1609
1609
1610
1610
def _changegroupinfo(repo, nodes, source):
    """Report outgoing changeset counts to the user.

    The count is shown when verbose or when producing a bundle; in debug
    mode every changeset node is listed as well.
    """
    ui = repo.ui
    if ui.verbose or source == b'bundle':
        ui.status(_(b"%d changesets found\n") % len(nodes))
    if ui.debugflag:
        ui.debug(b"list of changesets:\n")
        for node in nodes:
            ui.debug(b"%s\n" % hex(node))
1618
1618
1619
1619
def makechangegroup(
    repo, outgoing, version, source, fastpath=False, bundlecaps=None
):
    """Produce a changegroup unbundler for the ``outgoing`` set.

    Builds the raw changegroup stream via makestream() and wraps it in
    the matching unpacker so callers can consume it directly.
    """
    stream = makestream(
        repo,
        outgoing,
        version,
        source,
        fastpath=fastpath,
        bundlecaps=bundlecaps,
    )
    extras = {b'clcount': len(outgoing.missing)}
    return getunbundler(version, util.chunkbuffer(stream), None, extras)
1637
1637
1638
1638
def makestream(
    repo,
    outgoing,
    version,
    source,
    fastpath=False,
    bundlecaps=None,
    matcher=None,
):
    """Generate a changegroup data stream for the ``outgoing`` set.

    Runs the ``preoutgoing`` hook, reports progress, and delegates the
    actual packing to the version-appropriate bundler.
    """
    bundler = getbundler(version, repo, bundlecaps=bundlecaps, matcher=matcher)

    repo = repo.unfiltered()
    common = outgoing.common
    missing = outgoing.missing
    heads = outgoing.ancestorsof
    heads.sort()
    # Take the fast path when explicitly told to, or when all unfiltered
    # heads have been requested (then we know every linkrev will be pulled
    # by the client).
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads())
    )

    repo.hook(b'preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, missing, source)
    return bundler.generate(common, missing, fastpathlinkrev, source)
1665
1665
1666
1666
def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    """Apply the file revlog groups read from ``source``.

    Returns a ``(revisions, files)`` tuple counting what was added.
    ``needfiles`` maps filename -> set of nodes that must arrive; any
    node still missing afterwards aborts with a verify hint, and any
    unexpected node aborts as a spurious entry.
    """
    revisions = 0
    files = 0
    progress = repo.ui.makeprogress(
        _(b'files'), unit=_(b'files'), total=expectedfiles
    )
    # source.filelogheader() returns {} once all file groups are consumed.
    for chunkdata in iter(source.filelogheader, {}):
        files += 1
        fname = chunkdata[b"filename"]
        repo.ui.debug(b"adding %s revisions\n" % fname)
        progress.increment()
        filelog = repo.file(fname)
        oldlen = len(filelog)
        try:
            deltas = source.deltaiter()
            if not filelog.addgroup(deltas, revmap, trp):
                raise error.Abort(_(b"received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_(b"received delta base is censored: %s") % e)
        revisions += len(filelog) - oldlen
        if fname in needfiles:
            needs = needfiles[fname]
            # Each newly added node must be one we were expecting.
            for rev in pycompat.xrange(oldlen, len(filelog)):
                node = filelog.node(rev)
                if node not in needs:
                    raise error.Abort(
                        _(b"received spurious file revlog entry")
                    )
                needs.remove(node)
            if not needs:
                del needfiles[fname]
    progress.complete()

    # Verify every still-needed node is resolvable in its filelog.
    for fname, needs in pycompat.iteritems(needfiles):
        filelog = repo.file(fname)
        for node in needs:
            try:
                filelog.rev(node)
            except error.LookupError:
                raise error.Abort(
                    _(b'missing file data for %s:%s - run hg verify')
                    % (fname, hex(node))
                )

    return revisions, files
@@ -1,154 +1,154 b''
1 $ . "$TESTDIR/narrow-library.sh"
1 $ . "$TESTDIR/narrow-library.sh"
2
2
3 $ hg init master
3 $ hg init master
4 $ cd master
4 $ cd master
5 $ cat >> .hg/hgrc <<EOF
5 $ cat >> .hg/hgrc <<EOF
6 > [narrow]
6 > [narrow]
7 > serveellipses=True
7 > serveellipses=True
8 > EOF
8 > EOF
9 $ for x in `$TESTDIR/seq.py 10`
9 $ for x in `$TESTDIR/seq.py 10`
10 > do
10 > do
11 > echo $x > "f$x"
11 > echo $x > "f$x"
12 > hg add "f$x"
12 > hg add "f$x"
13 > hg commit -m "Commit f$x"
13 > hg commit -m "Commit f$x"
14 > done
14 > done
15 $ cd ..
15 $ cd ..
16
16
17 narrow clone a couple files, f2 and f8
17 narrow clone a couple files, f2 and f8
18
18
19 $ hg clone --narrow ssh://user@dummy/master narrow --include "f2" --include "f8"
19 $ hg clone --narrow ssh://user@dummy/master narrow --include "f2" --include "f8"
20 requesting all changes
20 requesting all changes
21 adding changesets
21 adding changesets
22 adding manifests
22 adding manifests
23 adding file changes
23 adding file changes
24 added 5 changesets with 2 changes to 2 files
24 added 5 changesets with 2 changes to 2 files
25 new changesets *:* (glob)
25 new changesets *:* (glob)
26 updating to branch default
26 updating to branch default
27 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
27 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
28 $ cd narrow
28 $ cd narrow
29 $ ls -A
29 $ ls -A
30 .hg
30 .hg
31 f2
31 f2
32 f8
32 f8
33 $ cat f2 f8
33 $ cat f2 f8
34 2
34 2
35 8
35 8
36
36
37 $ cd ..
37 $ cd ..
38
38
39 change every upstream file twice
39 change every upstream file twice
40
40
41 $ cd master
41 $ cd master
42 $ for x in `$TESTDIR/seq.py 10`
42 $ for x in `$TESTDIR/seq.py 10`
43 > do
43 > do
44 > echo "update#1 $x" >> "f$x"
44 > echo "update#1 $x" >> "f$x"
45 > hg commit -m "Update#1 to f$x" "f$x"
45 > hg commit -m "Update#1 to f$x" "f$x"
46 > done
46 > done
47 $ for x in `$TESTDIR/seq.py 10`
47 $ for x in `$TESTDIR/seq.py 10`
48 > do
48 > do
49 > echo "update#2 $x" >> "f$x"
49 > echo "update#2 $x" >> "f$x"
50 > hg commit -m "Update#2 to f$x" "f$x"
50 > hg commit -m "Update#2 to f$x" "f$x"
51 > done
51 > done
52 $ cd ..
52 $ cd ..
53
53
54 look for incoming changes
54 look for incoming changes
55
55
56 $ cd narrow
56 $ cd narrow
57 $ hg incoming --limit 3
57 $ hg incoming --limit 3
58 comparing with ssh://user@dummy/master
58 comparing with ssh://user@dummy/master
59 searching for changes
59 searching for changes
60 changeset: 5:ddc055582556
60 changeset: 5:ddc055582556
61 user: test
61 user: test
62 date: Thu Jan 01 00:00:00 1970 +0000
62 date: Thu Jan 01 00:00:00 1970 +0000
63 summary: Update#1 to f1
63 summary: Update#1 to f1
64
64
65 changeset: 6:f66eb5ad621d
65 changeset: 6:f66eb5ad621d
66 user: test
66 user: test
67 date: Thu Jan 01 00:00:00 1970 +0000
67 date: Thu Jan 01 00:00:00 1970 +0000
68 summary: Update#1 to f2
68 summary: Update#1 to f2
69
69
70 changeset: 7:c42ecff04e99
70 changeset: 7:c42ecff04e99
71 user: test
71 user: test
72 date: Thu Jan 01 00:00:00 1970 +0000
72 date: Thu Jan 01 00:00:00 1970 +0000
73 summary: Update#1 to f3
73 summary: Update#1 to f3
74
74
75
75
76 Interrupting the pull is safe
76 Interrupting the pull is safe
77 $ hg --config hooks.pretxnchangegroup.bad=false pull -q
77 $ hg --config hooks.pretxnchangegroup.bad=false pull -q
78 transaction abort!
78 transaction abort!
79 rollback completed
79 rollback completed
80 abort: pretxnchangegroup.bad hook exited with status 1
80 abort: pretxnchangegroup.bad hook exited with status 1
81 [40]
81 [40]
82 $ hg id
82 $ hg id
83 223311e70a6f tip
83 223311e70a6f tip
84
84
85 pull new changes down to the narrow clone. Should get 8 new changesets: 4
85 pull new changes down to the narrow clone. Should get 8 new changesets: 4
86 relevant to the narrow spec, and 4 ellipsis nodes gluing them all together.
86 relevant to the narrow spec, and 4 ellipsis nodes gluing them all together.
87
87
88 $ hg pull
88 $ hg pull
89 pulling from ssh://user@dummy/master
89 pulling from ssh://user@dummy/master
90 searching for changes
90 searching for changes
91 adding changesets
91 adding changesets
92 adding manifests
92 adding manifests
93 adding file changes
93 adding file changes
94 added 9 changesets with 4 changes to 2 files
94 added 9 changesets with 4 changes to 2 files
95 new changesets *:* (glob)
95 new changesets *:* (glob)
96 (run 'hg update' to get a working copy)
96 (run 'hg update' to get a working copy)
97 $ hg log -T '{rev}: {desc}\n'
97 $ hg log -T '{rev}: {desc}\n'
98 13: Update#2 to f10
98 13: Update#2 to f10
99 12: Update#2 to f8
99 12: Update#2 to f8
100 11: Update#2 to f7
100 11: Update#2 to f7
101 10: Update#2 to f2
101 10: Update#2 to f2
102 9: Update#2 to f1
102 9: Update#2 to f1
103 8: Update#1 to f8
103 8: Update#1 to f8
104 7: Update#1 to f7
104 7: Update#1 to f7
105 6: Update#1 to f2
105 6: Update#1 to f2
106 5: Update#1 to f1
106 5: Update#1 to f1
107 4: Commit f10
107 4: Commit f10
108 3: Commit f8
108 3: Commit f8
109 2: Commit f7
109 2: Commit f7
110 1: Commit f2
110 1: Commit f2
111 0: Commit f1
111 0: Commit f1
112 $ hg update tip
112 $ hg update tip
113 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
113 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
114
114
115 add a change and push it
115 add a change and push it
116
116
117 $ echo "update#3 2" >> f2
117 $ echo "update#3 2" >> f2
118 $ hg commit -m "Update#3 to f2" f2
118 $ hg commit -m "Update#3 to f2" f2
119 $ hg log f2 -T '{rev}: {desc}\n'
119 $ hg log f2 -T '{rev}: {desc}\n'
120 14: Update#3 to f2
120 14: Update#3 to f2
121 10: Update#2 to f2
121 10: Update#2 to f2
122 6: Update#1 to f2
122 6: Update#1 to f2
123 1: Commit f2
123 1: Commit f2
124 $ hg push
124 $ hg push
125 pushing to ssh://user@dummy/master
125 pushing to ssh://user@dummy/master
126 searching for changes
126 searching for changes
127 remote: adding changesets
127 remote: adding changesets
128 remote: adding manifests
128 remote: adding manifests
129 remote: adding file changes
129 remote: adding file changes
130 remote: added 1 changesets with 1 changes to 1 files
130 remote: added 1 changesets with 1 changes to 1 files
131 $ cd ..
131 $ cd ..
132
132
133 $ cd master
133 $ cd master
134 $ hg log f2 -T '{rev}: {desc}\n'
134 $ hg log f2 -T '{rev}: {desc}\n'
135 30: Update#3 to f2
135 30: Update#3 to f2
136 21: Update#2 to f2
136 21: Update#2 to f2
137 11: Update#1 to f2
137 11: Update#1 to f2
138 1: Commit f2
138 1: Commit f2
139 $ hg log -l 3 -T '{rev}: {desc}\n'
139 $ hg log -l 3 -T '{rev}: {desc}\n'
140 30: Update#3 to f2
140 30: Update#3 to f2
141 29: Update#2 to f10
141 29: Update#2 to f10
142 28: Update#2 to f9
142 28: Update#2 to f9
143
143
144 Can pull into repo with a single commit
144 Can pull into repo with a single commit
145
145
146 $ cd ..
146 $ cd ..
147 $ hg clone -q --narrow ssh://user@dummy/master narrow2 --include "f1" -r 0
147 $ hg clone -q --narrow ssh://user@dummy/master narrow2 --include "f1" -r 0
148 $ cd narrow2
148 $ cd narrow2
149 $ hg pull -q -r 1
149 $ hg pull -q -r 1
150 remote: abort: unexpected error: unable to resolve parent while packing b'00manifest.i' 1 for changeset 0
150 remote: abort: unexpected error: unable to resolve parent while packing '00manifest.i' 1 for changeset 0
151 transaction abort!
151 transaction abort!
152 rollback completed
152 rollback completed
153 abort: pull failed on remote
153 abort: pull failed on remote
154 [255]
154 [255]
General Comments 0
You need to be logged in to leave comments. Login now