changegroup: capture revision delta in a data structure...

Gregory Szorc
r38929:23d582ca default
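This change introduces an attrs-decorated `revisiondelta` class so that a packer captures everything about a delta entry once and can serialize it into any changegroup wire version afterwards. As a rough sketch of the idea, not code from this commit: `cg2entry` below is a hypothetical helper, and the sketch assumes the `attrs` package is importable as `attr` (Mercurial vendors it under `.thirdparty`). It shows how such an object could be flattened into a cg2-style entry using the `_CHANGEGROUPV2_DELTA_HEADER` format and the length-prefix framing defined in this file.

    import struct

    import attr

    _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"

    @attr.s(slots=True, frozen=True)
    class revisiondelta(object):
        node = attr.ib()
        p1node = attr.ib()
        p2node = attr.ib()
        basenode = attr.ib()
        linknode = attr.ib()
        flags = attr.ib()
        deltachunks = attr.ib()

    def cg2entry(delta):
        # Hypothetical serializer: pack the five 20-byte nodes (cg2 has
        # no flags field; that arrives with cg3), then frame header plus
        # delta data with a length prefix that counts its own 4 bytes,
        # mirroring chunkheader() in the file below.
        header = struct.pack(_CHANGEGROUPV2_DELTA_HEADER,
                             delta.node, delta.p1node, delta.p2node,
                             delta.basenode, delta.linknode)
        data = header + b''.join(delta.deltachunks)
        return struct.pack(">l", len(data) + 4) + data

A cg3 serializer would differ only in using _CHANGEGROUPV3_DELTA_HEADER and packing delta.flags as the trailing unsigned short.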
@@ -1,1328 +1,1368 b''
# changegroup.py - Mercurial changegroup manipulation functions
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import os
import struct
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    nullrev,
    short,
)

+from .thirdparty import (
+    attr,
+)
+
from . import (
    dagutil,
    error,
    manifest,
    match as matchmod,
    mdiff,
    phases,
    pycompat,
    repository,
    revlog,
    util,
)

from .utils import (
    stringutil,
)

_CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
_CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
_CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"

LFS_REQUIREMENT = 'lfs'

readexactly = util.readexactly

def getchunk(stream):
    """return the next chunk from stream as a string"""
    d = readexactly(stream, 4)
    l = struct.unpack(">l", d)[0]
    if l <= 4:
        if l:
            raise error.Abort(_("invalid chunk length %d") % l)
        return ""
    return readexactly(stream, l - 4)

def chunkheader(length):
    """return a changegroup chunk header (string)"""
    return struct.pack(">l", length + 4)

def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    return struct.pack(">l", 0)

def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                # Increase default buffer size because default is usually
                # small (4k is common on Linux).
                fh = open(filename, "wb", 131072)
        else:
            fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, r"wb")
        cleanup = filename
        for c in chunks:
            fh.write(c)
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)

class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        if alg is None:
            alg = 'UN'
        if alg not in util.compengines.supportedbundletypes:
            raise error.Abort(_('unknown stream compression type: %s')
                             % alg)
        if alg == 'BZ':
            alg = '_truncatedBZ'

        compengine = util.compengines.forbundletype(alg)
        self._stream = compengine.decompressorreader(fh)
        self._type = alg
        self.extras = extras or {}
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None and self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return (node, p1, p2, cs, deltabase, delta, flags)

    def getchunks(self):
197 """returns all the chunks contains in the bundle
201 """returns all the chunks contains in the bundle
198
202
199 Used when you need to forward the binary stream to a file or another
203 Used when you need to forward the binary stream to a file or another
200 network API. To do so, it parse the changegroup data, otherwise it will
204 network API. To do so, it parse the changegroup data, otherwise it will
201 block in case of sshrepo because it don't know the end of the stream.
205 block in case of sshrepo because it don't know the end of the stream.
202 """
206 """
203 # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
207 # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
204 # and a list of filelogs. For changegroup 3, we expect 4 parts:
208 # and a list of filelogs. For changegroup 3, we expect 4 parts:
205 # changelog, manifestlog, a list of tree manifestlogs, and a list of
209 # changelog, manifestlog, a list of tree manifestlogs, and a list of
206 # filelogs.
210 # filelogs.
207 #
211 #
208 # Changelog and manifestlog parts are terminated with empty chunks. The
212 # Changelog and manifestlog parts are terminated with empty chunks. The
209 # tree and file parts are a list of entry sections. Each entry section
213 # tree and file parts are a list of entry sections. Each entry section
210 # is a series of chunks terminating in an empty chunk. The list of these
214 # is a series of chunks terminating in an empty chunk. The list of these
211 # entry sections is terminated in yet another empty chunk, so we know
215 # entry sections is terminated in yet another empty chunk, so we know
212 # we've reached the end of the tree/file list when we reach an empty
216 # we've reached the end of the tree/file list when we reach an empty
213 # chunk that was proceeded by no non-empty chunks.
217 # chunk that was proceeded by no non-empty chunks.

        parts = 0
        while parts < 2 + self._grouplistcount:
            noentries = True
            while True:
                chunk = getchunk(self)
                if not chunk:
                    # The first two empty chunks represent the end of the
                    # changelog and the manifestlog portions. The remaining
                    # empty chunks represent either A) the end of individual
                    # tree or file entries in the file list, or B) the end of
                    # the entire list. It's the end of the entire list if there
                    # were no entries (i.e. noentries is True).
                    if parts < 2:
                        parts += 1
                    elif noentries:
                        parts += 1
                    break
                noentries = False
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

    def _unpackmanifests(self, repo, revmap, trp, prog):
        self.callback = prog.increment
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        deltas = self.deltaiter()
        repo.manifestlog.addgroup(deltas, revmap, trp)
        prog.complete()
        self.callback = None

    def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
              expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        try:
            # The transaction may already carry source information. In this
            # case we use the top level data. We overwrite the argument
            # because we need to use the top level value (if they exist)
            # in this function.
            srctype = tr.hookargs.setdefault('source', srctype)
            url = tr.hookargs.setdefault('url', url)
            repo.hook('prechangegroup',
                      throw=True, **pycompat.strkwargs(tr.hookargs))

            # write changelog data to temp files so concurrent readers
            # will not see an inconsistent view
            cl = repo.changelog
            cl.delayupdate(tr)
            oldheads = set(cl.heads())

            trp = weakref.proxy(tr)
            # pull off the changeset group
            repo.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            progress = repo.ui.makeprogress(_('changesets'), unit=_('chunks'),
                                            total=expectedtotal)
            self.callback = progress.increment

            efiles = set()
            def onchangelog(cl, node):
                efiles.update(cl.readfiles(node))

            self.changelogheader()
            deltas = self.deltaiter()
            cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog)
            efiles = len(efiles)

            if not cgnodes:
                repo.ui.develwarn('applied empty changegroup',
                                  config='warn-empty-changegroup')
            clend = len(cl)
            changesets = clend - clstart
            progress.complete()
            self.callback = None

            # pull off the manifest group
            repo.ui.status(_("adding manifests\n"))
            # We know that we'll never have more manifests than we had
            # changesets.
            progress = repo.ui.makeprogress(_('manifests'), unit=_('chunks'),
                                            total=changesets)
            self._unpackmanifests(repo, revmap, trp, progress)

            needfiles = {}
            if repo.ui.configbool('server', 'validate'):
                cl = repo.changelog
                ml = repo.manifestlog
                # validate incoming csets have their manifests
                for cset in pycompat.xrange(clstart, clend):
                    mfnode = cl.changelogrevision(cset).manifest
                    mfest = ml[mfnode].readdelta()
                    # store file cgnodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            repo.ui.status(_("adding file changes\n"))
            newrevs, newfiles = _addchangegroupfiles(
                repo, self, revmap, trp, efiles, needfiles)
            revisions += newrevs
            files += newfiles

            deltaheads = 0
            if oldheads:
                heads = cl.heads()
                deltaheads = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and repo[h].closesbranch():
                        deltaheads -= 1
            htext = ""
            if deltaheads:
                htext = _(" (%+d heads)") % deltaheads

            repo.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            repo.invalidatevolatilesets()

            if changesets > 0:
                if 'node' not in tr.hookargs:
                    tr.hookargs['node'] = hex(cl.node(clstart))
                    tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                    hookargs = dict(tr.hookargs)
                else:
                    hookargs = dict(tr.hookargs)
                    hookargs['node'] = hex(cl.node(clstart))
                    hookargs['node_last'] = hex(cl.node(clend - 1))
                repo.hook('pretxnchangegroup',
                          throw=True, **pycompat.strkwargs(hookargs))

            added = [cl.node(r) for r in pycompat.xrange(clstart, clend)]
            phaseall = None
            if srctype in ('push', 'serve'):
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if changeset already
                # exists locally as secret
                #
                # We should not use added here but the list of all changes in
                # the bundle
                if repo.publishing():
                    targetphase = phaseall = phases.public
                else:
                    # closer target phase computation

                    # Those changesets have been pushed from the
                    # outside, their phases are going to be pushed
                    # alongside. Therefore `targetphase` is
                    # ignored.
                    targetphase = phaseall = phases.draft
            if added:
                phases.registernew(repo, tr, targetphase, added)
            if phaseall is not None:
                phases.advanceboundary(repo, tr, phaseall, cgnodes)

            if changesets > 0:

                def runhooks():
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(repo):
                        return

                    repo.hook("changegroup", **pycompat.strkwargs(hookargs))

                    for n in added:
                        args = hookargs.copy()
                        args['node'] = hex(n)
                        del args['node_last']
                        repo.hook("incoming", **pycompat.strkwargs(args))

                    newheads = [h for h in repo.heads()
                                if h not in oldheads]
                    repo.ui.log("incoming",
                                "%d incoming changes - new heads: %s\n",
                                len(added),
                                ', '.join([hex(c[:6]) for c in newheads]))

                tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        if deltaheads < 0:
            ret = deltaheads - 1
        else:
            ret = deltaheads + 1
        return ret

    def deltaiter(self):
        """
        returns an iterator of the deltas in this changegroup

        Useful for passing to the underlying storage system to be stored.
        """
        chain = None
        for chunkdata in iter(lambda: self.deltachunk(chain), {}):
            # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
            yield chunkdata
            chain = chunkdata[0]

class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs = headertuple
        flags = 0
        return node, p1, p2, deltabase, cs, flags

class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog):
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % d)
            dirlog = repo.manifestlog._revlog.dirlog(d)
            deltas = self.deltaiter()
            if not dirlog.addgroup(deltas, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))

class headerlessfixup(object):
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        if self._h:
            d, self._h = self._h[:n], self._h[n:]
            if len(d) < n:
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)

+@attr.s(slots=True, frozen=True)
+class revisiondelta(object):
+    """Describes a delta entry in a changegroup.
+
+    Captured data is sufficient to serialize the delta into multiple
+    formats.
+    """
+    # 20 byte node of this revision.
+    node = attr.ib()
+    # 20 byte nodes of parent revisions.
+    p1node = attr.ib()
+    p2node = attr.ib()
+    # 20 byte node of the revision this delta is against.
+    basenode = attr.ib()
+    # 20 byte node of changeset revision this delta is associated with.
+    linknode = attr.ib()
+    # 2 bytes of flags to apply to revision data.
+    flags = attr.ib()
+    # Iterable of chunks holding raw delta data.
+    deltachunks = attr.ib()

class cg1packer(object):
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    version = '01'
    def __init__(self, repo, filematcher, bundlecaps=None):
        """Given a source repo, construct a bundler.

        filematcher is a matcher that matches on files to include in the
        changegroup. Used to facilitate sparse changegroups.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle. While bundlecaps is
        unused in core Mercurial, extensions rely on this feature to communicate
        capabilities to customize the changegroup packer.
        """
        assert filematcher
        self._filematcher = filematcher

        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        # experimental config: bundle.reorder
        reorder = repo.ui.config('bundle', 'reorder')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = stringutil.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None

    def close(self):
        # Ellipses serving mode.
        getattr(self, 'clrev_to_localrev', {}).clear()
        if getattr(self, 'next_clrev_to_localrev', {}):
            self.clrev_to_localrev = self.next_clrev_to_localrev
            del self.next_clrev_to_localrev
        self.changelog_done = True

        return closechunk()

    def fileheader(self, fname):
        return chunkheader(len(fname)) + fname

    # Extracted both for clarity and for overriding in extensions.
    def _sortgroup(self, store, nodelist, lookup):
        """Sort nodes for change group and turn them into revnums."""
        # Ellipses serving mode.
        #
        # In a perfect world, we'd generate better ellipsis-ified graphs
        # for non-changelog revlogs. In practice, we haven't started doing
        # that yet, so the resulting DAGs for the manifestlog and filelogs
        # are actually full of bogus parentage on all the ellipsis
        # nodes. This has the side effect that, while the contents are
        # correct, the individual DAGs might be completely out of whack in
        # a case like 882681bc3166 and its ancestors (back about 10
        # revisions or so) in the main hg repo.
        #
        # The one invariant we *know* holds is that the new (potentially
        # bogus) DAG shape will be valid if we order the nodes in the
        # order that they're introduced in dramatis personae by the
        # changelog, so what we do is we sort the non-changelog histories
        # by the order in which they are used by the changelog.
        if util.safehasattr(self, 'full_nodes') and self.clnode_to_rev:
            key = lambda n: self.clnode_to_rev[lookup(n)]
            return [store.rev(n) for n in sorted(nodelist, key=key)]

        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (store._generaldelta and self._reorder is None) or self._reorder:
            dag = dagutil.revlogdag(store)
            return dag.linearize(set(store.rev(n) for n in nodelist))
        else:
            return sorted([store.rev(n) for n in nodelist])

    def group(self, nodelist, store, lookup, units=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated; units specifies
        the type of revlog that is touched (changelog, manifest, etc.).
589 """
613 """
590 # if we don't have any revisions touched by these changesets, bail
614 # if we don't have any revisions touched by these changesets, bail
591 if len(nodelist) == 0:
615 if len(nodelist) == 0:
592 yield self.close()
616 yield self.close()
593 return
617 return
594
618
595 revs = self._sortgroup(store, nodelist, lookup)
619 revs = self._sortgroup(store, nodelist, lookup)
596
620
597 # add the parent of the first rev
621 # add the parent of the first rev
598 p = store.parentrevs(revs[0])[0]
622 p = store.parentrevs(revs[0])[0]
599 revs.insert(0, p)
623 revs.insert(0, p)
600
624
601 # build deltas
625 # build deltas
602 progress = None
626 progress = None
603 if units is not None:
627 if units is not None:
604 progress = self._repo.ui.makeprogress(_('bundling'), unit=units,
628 progress = self._repo.ui.makeprogress(_('bundling'), unit=units,
605 total=(len(revs) - 1))
629 total=(len(revs) - 1))
606 for r in pycompat.xrange(len(revs) - 1):
630 for r in pycompat.xrange(len(revs) - 1):
607 if progress:
631 if progress:
608 progress.update(r + 1)
632 progress.update(r + 1)
609 prev, curr = revs[r], revs[r + 1]
633 prev, curr = revs[r], revs[r + 1]
610 linknode = lookup(store.node(curr))
634 linknode = lookup(store.node(curr))
611 for c in self.revchunk(store, curr, prev, linknode):
635 for c in self.revchunk(store, curr, prev, linknode):
612 yield c
636 yield c
613
637
614 if progress:
638 if progress:
615 progress.complete()
639 progress.complete()
616 yield self.close()
640 yield self.close()
617
641
618 # filter any nodes that claim to be part of the known set
642 # filter any nodes that claim to be part of the known set
619 def prune(self, store, missing, commonrevs):
643 def prune(self, store, missing, commonrevs):
620 # TODO this violates storage abstraction for manifests.
644 # TODO this violates storage abstraction for manifests.
621 if isinstance(store, manifest.manifestrevlog):
645 if isinstance(store, manifest.manifestrevlog):
622 if not self._filematcher.visitdir(store._dir[:-1] or '.'):
646 if not self._filematcher.visitdir(store._dir[:-1] or '.'):
623 return []
647 return []
624
648
625 rr, rl = store.rev, store.linkrev
649 rr, rl = store.rev, store.linkrev
626 return [n for n in missing if rl(rr(n)) not in commonrevs]
650 return [n for n in missing if rl(rr(n)) not in commonrevs]
627
651
628 def _packmanifests(self, dir, mfnodes, lookuplinknode):
652 def _packmanifests(self, dir, mfnodes, lookuplinknode):
629 """Pack flat manifests into a changegroup stream."""
653 """Pack flat manifests into a changegroup stream."""
630 assert not dir
654 assert not dir
631 for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
655 for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
632 lookuplinknode, units=_('manifests')):
656 lookuplinknode, units=_('manifests')):
633 yield chunk
657 yield chunk
634
658
635 def _manifestsdone(self):
659 def _manifestsdone(self):
636 return ''
660 return ''
637
661
638 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
662 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
639 '''yield a sequence of changegroup chunks (strings)'''
663 '''yield a sequence of changegroup chunks (strings)'''
640 repo = self._repo
664 repo = self._repo
641 cl = repo.changelog
665 cl = repo.changelog
642
666
643 clrevorder = {}
667 clrevorder = {}
644 mfs = {} # needed manifests
668 mfs = {} # needed manifests
645 fnodes = {} # needed file nodes
669 fnodes = {} # needed file nodes
646 mfl = repo.manifestlog
670 mfl = repo.manifestlog
647 # TODO violates storage abstraction.
671 # TODO violates storage abstraction.
648 mfrevlog = mfl._revlog
672 mfrevlog = mfl._revlog
649 changedfiles = set()
673 changedfiles = set()
650
674
651 ellipsesmode = util.safehasattr(self, 'full_nodes')
675 ellipsesmode = util.safehasattr(self, 'full_nodes')
652
676
653 # Callback for the changelog, used to collect changed files and
677 # Callback for the changelog, used to collect changed files and
654 # manifest nodes.
678 # manifest nodes.
655 # Returns the linkrev node (identity in the changelog case).
679 # Returns the linkrev node (identity in the changelog case).
656 def lookupcl(x):
680 def lookupcl(x):
657 c = cl.read(x)
681 c = cl.read(x)
658 clrevorder[x] = len(clrevorder)
682 clrevorder[x] = len(clrevorder)
659
683
660 if ellipsesmode:
684 if ellipsesmode:
661 # Only update mfs if x is going to be sent. Otherwise we
685 # Only update mfs if x is going to be sent. Otherwise we
662 # end up with bogus linkrevs specified for manifests and
686 # end up with bogus linkrevs specified for manifests and
663 # we skip some manifest nodes that we should otherwise
687 # we skip some manifest nodes that we should otherwise
664 # have sent.
688 # have sent.
665 if (x in self.full_nodes
689 if (x in self.full_nodes
666 or cl.rev(x) in self.precomputed_ellipsis):
690 or cl.rev(x) in self.precomputed_ellipsis):
667 n = c[0]
691 n = c[0]
668 # Record the first changeset introducing this manifest
692 # Record the first changeset introducing this manifest
669 # version.
693 # version.
670 mfs.setdefault(n, x)
694 mfs.setdefault(n, x)
671 # Set this narrow-specific dict so we have the lowest
695 # Set this narrow-specific dict so we have the lowest
672 # manifest revnum to look up for this cl revnum. (Part of
696 # manifest revnum to look up for this cl revnum. (Part of
673 # mapping changelog ellipsis parents to manifest ellipsis
697 # mapping changelog ellipsis parents to manifest ellipsis
674 # parents)
698 # parents)
675 self.next_clrev_to_localrev.setdefault(cl.rev(x),
699 self.next_clrev_to_localrev.setdefault(cl.rev(x),
676 mfrevlog.rev(n))
700 mfrevlog.rev(n))
677 # We can't trust the changed files list in the changeset if the
701 # We can't trust the changed files list in the changeset if the
678 # client requested a shallow clone.
702 # client requested a shallow clone.
679 if self.is_shallow:
703 if self.is_shallow:
680 changedfiles.update(mfl[c[0]].read().keys())
704 changedfiles.update(mfl[c[0]].read().keys())
681 else:
705 else:
682 changedfiles.update(c[3])
706 changedfiles.update(c[3])
683 else:
707 else:
684
708
685 n = c[0]
709 n = c[0]
686 # record the first changeset introducing this manifest version
710 # record the first changeset introducing this manifest version
687 mfs.setdefault(n, x)
711 mfs.setdefault(n, x)
688 # Record a complete list of potentially-changed files in
712 # Record a complete list of potentially-changed files in
689 # this manifest.
713 # this manifest.
690 changedfiles.update(c[3])
714 changedfiles.update(c[3])
691
715
692 return x
716 return x
693
717
694 self._verbosenote(_('uncompressed size of bundle content:\n'))
718 self._verbosenote(_('uncompressed size of bundle content:\n'))
695 size = 0
719 size = 0
696 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
720 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
697 size += len(chunk)
721 size += len(chunk)
698 yield chunk
722 yield chunk
699 self._verbosenote(_('%8.i (changelog)\n') % size)
723 self._verbosenote(_('%8.i (changelog)\n') % size)
700
724
701 # We need to make sure that the linkrev in the changegroup refers to
725 # We need to make sure that the linkrev in the changegroup refers to
702 # the first changeset that introduced the manifest or file revision.
726 # the first changeset that introduced the manifest or file revision.
703 # The fastpath is usually safer than the slowpath, because the filelogs
727 # The fastpath is usually safer than the slowpath, because the filelogs
704 # are walked in revlog order.
728 # are walked in revlog order.
705 #
729 #
706 # When taking the slowpath with reorder=None and the manifest revlog
730 # When taking the slowpath with reorder=None and the manifest revlog
707 # uses generaldelta, the manifest may be walked in the "wrong" order.
731 # uses generaldelta, the manifest may be walked in the "wrong" order.
708 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
732 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
709 # cc0ff93d0c0c).
733 # cc0ff93d0c0c).
710 #
734 #
711 # When taking the fastpath, we are only vulnerable to reordering
735 # When taking the fastpath, we are only vulnerable to reordering
712 # of the changelog itself. The changelog never uses generaldelta, so
736 # of the changelog itself. The changelog never uses generaldelta, so
713 # it is only reordered when reorder=True. To handle this case, we
737 # it is only reordered when reorder=True. To handle this case, we
714 # simply take the slowpath, which already has the 'clrevorder' logic.
738 # simply take the slowpath, which already has the 'clrevorder' logic.
715 # This was also fixed in cc0ff93d0c0c.
739 # This was also fixed in cc0ff93d0c0c.
716 fastpathlinkrev = fastpathlinkrev and not self._reorder
740 fastpathlinkrev = fastpathlinkrev and not self._reorder
717 # Treemanifests don't work correctly with fastpathlinkrev
741 # Treemanifests don't work correctly with fastpathlinkrev
718 # either, because we don't discover which directory nodes to
742 # either, because we don't discover which directory nodes to
719 # send along with files. This could probably be fixed.
743 # send along with files. This could probably be fixed.
720 fastpathlinkrev = fastpathlinkrev and (
744 fastpathlinkrev = fastpathlinkrev and (
721 'treemanifest' not in repo.requirements)
745 'treemanifest' not in repo.requirements)
722
746
723 for chunk in self.generatemanifests(commonrevs, clrevorder,
747 for chunk in self.generatemanifests(commonrevs, clrevorder,
724 fastpathlinkrev, mfs, fnodes, source):
748 fastpathlinkrev, mfs, fnodes, source):
725 yield chunk
749 yield chunk
726
750
727 if ellipsesmode:
751 if ellipsesmode:
728 mfdicts = None
752 mfdicts = None
729 if self.is_shallow:
753 if self.is_shallow:
730 mfdicts = [(self._repo.manifestlog[n].read(), lr)
754 mfdicts = [(self._repo.manifestlog[n].read(), lr)
731 for (n, lr) in mfs.iteritems()]
755 for (n, lr) in mfs.iteritems()]
732
756
733 mfs.clear()
757 mfs.clear()
734 clrevs = set(cl.rev(x) for x in clnodes)
758 clrevs = set(cl.rev(x) for x in clnodes)
735
759
736 if not fastpathlinkrev:
760 if not fastpathlinkrev:
737 def linknodes(unused, fname):
761 def linknodes(unused, fname):
738 return fnodes.get(fname, {})
762 return fnodes.get(fname, {})
739 else:
763 else:
740 cln = cl.node
764 cln = cl.node
741 def linknodes(filerevlog, fname):
765 def linknodes(filerevlog, fname):
742 llr = filerevlog.linkrev
766 llr = filerevlog.linkrev
743 fln = filerevlog.node
767 fln = filerevlog.node
744 revs = ((r, llr(r)) for r in filerevlog)
768 revs = ((r, llr(r)) for r in filerevlog)
745 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
769 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
746
770
747 if ellipsesmode:
771 if ellipsesmode:
748 # We need to pass the mfdicts variable down into
772 # We need to pass the mfdicts variable down into
749 # generatefiles(), but more than one command might have
773 # generatefiles(), but more than one command might have
750 # wrapped generatefiles so we can't modify the function
774 # wrapped generatefiles so we can't modify the function
751 # signature. Instead, we pass the data to ourselves using an
775 # signature. Instead, we pass the data to ourselves using an
752 # instance attribute. I'm sorry.
776 # instance attribute. I'm sorry.
753 self._mfdicts = mfdicts
777 self._mfdicts = mfdicts
754
778
755 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
779 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
756 source):
780 source):
757 yield chunk
781 yield chunk
758
782
759 yield self.close()
783 yield self.close()
760
784
761 if clnodes:
785 if clnodes:
762 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
786 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
763
787
764 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
788 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
765 fnodes, source):
789 fnodes, source):
766 """Returns an iterator of changegroup chunks containing manifests.
790 """Returns an iterator of changegroup chunks containing manifests.
767
791
768 `source` is unused here, but is used by extensions like remotefilelog to
792 `source` is unused here, but is used by extensions like remotefilelog to
        change what is sent based on pulls vs pushes, etc.
770 """
794 """
771 repo = self._repo
795 repo = self._repo
772 mfl = repo.manifestlog
796 mfl = repo.manifestlog
773 dirlog = mfl._revlog.dirlog
797 dirlog = mfl._revlog.dirlog
774 tmfnodes = {'': mfs}
798 tmfnodes = {'': mfs}
775
799
776 # Callback for the manifest, used to collect linkrevs for filelog
800 # Callback for the manifest, used to collect linkrevs for filelog
777 # revisions.
801 # revisions.
778 # Returns the linkrev node (collected in lookupcl).
802 # Returns the linkrev node (collected in lookupcl).
779 def makelookupmflinknode(dir, nodes):
803 def makelookupmflinknode(dir, nodes):
780 if fastpathlinkrev:
804 if fastpathlinkrev:
781 assert not dir
805 assert not dir
782 return mfs.__getitem__
806 return mfs.__getitem__
783
807
784 def lookupmflinknode(x):
808 def lookupmflinknode(x):
785 """Callback for looking up the linknode for manifests.
809 """Callback for looking up the linknode for manifests.
786
810
787 Returns the linkrev node for the specified manifest.
811 Returns the linkrev node for the specified manifest.
788
812
789 SIDE EFFECT:
813 SIDE EFFECT:
790
814
791 1) fclnodes gets populated with the list of relevant
815 1) fclnodes gets populated with the list of relevant
792 file nodes if we're not using fastpathlinkrev
816 file nodes if we're not using fastpathlinkrev
793 2) When treemanifests are in use, collects treemanifest nodes
817 2) When treemanifests are in use, collects treemanifest nodes
794 to send
818 to send
795
819
796 Note that this means manifests must be completely sent to
820 Note that this means manifests must be completely sent to
797 the client before you can trust the list of files and
821 the client before you can trust the list of files and
798 treemanifests to send.
822 treemanifests to send.
799 """
823 """
800 clnode = nodes[x]
824 clnode = nodes[x]
801 mdata = mfl.get(dir, x).readfast(shallow=True)
825 mdata = mfl.get(dir, x).readfast(shallow=True)
802 for p, n, fl in mdata.iterentries():
826 for p, n, fl in mdata.iterentries():
803 if fl == 't': # subdirectory manifest
827 if fl == 't': # subdirectory manifest
804 subdir = dir + p + '/'
828 subdir = dir + p + '/'
805 tmfclnodes = tmfnodes.setdefault(subdir, {})
829 tmfclnodes = tmfnodes.setdefault(subdir, {})
806 tmfclnode = tmfclnodes.setdefault(n, clnode)
830 tmfclnode = tmfclnodes.setdefault(n, clnode)
807 if clrevorder[clnode] < clrevorder[tmfclnode]:
831 if clrevorder[clnode] < clrevorder[tmfclnode]:
808 tmfclnodes[n] = clnode
832 tmfclnodes[n] = clnode
809 else:
833 else:
810 f = dir + p
834 f = dir + p
811 fclnodes = fnodes.setdefault(f, {})
835 fclnodes = fnodes.setdefault(f, {})
812 fclnode = fclnodes.setdefault(n, clnode)
836 fclnode = fclnodes.setdefault(n, clnode)
813 if clrevorder[clnode] < clrevorder[fclnode]:
837 if clrevorder[clnode] < clrevorder[fclnode]:
814 fclnodes[n] = clnode
838 fclnodes[n] = clnode
815 return clnode
839 return clnode
816 return lookupmflinknode
840 return lookupmflinknode
817
841
         size = 0
         while tmfnodes:
             dir, nodes = tmfnodes.popitem()
             prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
             if not dir or prunednodes:
                 for x in self._packmanifests(dir, prunednodes,
                                              makelookupmflinknode(dir, nodes)):
                     size += len(x)
                     yield x
         self._verbosenote(_('%8.i (manifests)\n') % size)
         yield self._manifestsdone()
 
     # The 'source' parameter is useful for extensions
     def generatefiles(self, changedfiles, linknodes, commonrevs, source):
         changedfiles = list(filter(self._filematcher, changedfiles))
 
         if getattr(self, 'is_shallow', False):
             # See comment in generate() for why this sadness is a thing.
             mfdicts = self._mfdicts
             del self._mfdicts
             # In a shallow clone, the linknodes callback needs to also include
             # those file nodes that are in the manifests we sent but weren't
             # introduced by those manifests.
             commonctxs = [self._repo[c] for c in commonrevs]
             oldlinknodes = linknodes
             clrev = self._repo.changelog.rev
 
             # Defining this function has a side-effect of overriding the
             # function of the same name that was passed in as an argument.
             # TODO have caller pass in appropriate function.
             def linknodes(flog, fname):
                 for c in commonctxs:
                     try:
                         fnode = c.filenode(fname)
                         self.clrev_to_localrev[c.rev()] = flog.rev(fnode)
                     except error.ManifestLookupError:
                         pass
                 links = oldlinknodes(flog, fname)
                 if len(links) != len(mfdicts):
                     for mf, lr in mfdicts:
                         fnode = mf.get(fname, None)
                         if fnode in links:
                             links[fnode] = min(links[fnode], lr, key=clrev)
                         elif fnode:
                             links[fnode] = lr
                 return links
 
         return self._generatefiles(changedfiles, linknodes, commonrevs, source)
 
     def _generatefiles(self, changedfiles, linknodes, commonrevs, source):
         repo = self._repo
         progress = repo.ui.makeprogress(_('bundling'), unit=_('files'),
                                         total=len(changedfiles))
         for i, fname in enumerate(sorted(changedfiles)):
             filerevlog = repo.file(fname)
             if not filerevlog:
                 raise error.Abort(_("empty or missing file data for %s") %
                                   fname)
 
             linkrevnodes = linknodes(filerevlog, fname)
             # Look up filenodes: we collected the linkrev nodes above in the
             # fastpath case and with lookupmf in the slowpath case.
             def lookupfilelog(x):
                 return linkrevnodes[x]
 
             filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
             if filenodes:
                 progress.update(i + 1, item=fname)
                 h = self.fileheader(fname)
                 size = len(h)
                 yield h
                 for chunk in self.group(filenodes, filerevlog, lookupfilelog):
                     size += len(chunk)
                     yield chunk
                 self._verbosenote(_('%8.i %s\n') % (size, fname))
         progress.complete()
 
     def deltaparent(self, store, rev, p1, p2, prev):
         if not store.candelta(prev, rev):
             raise error.ProgrammingError('cg1 should not be used in this case')
         return prev
 
     def revchunk(self, store, rev, prev, linknode):
         if util.safehasattr(self, 'full_nodes'):
-            fn = self._revchunknarrow
+            fn = self._revisiondeltanarrow
         else:
-            fn = self._revchunknormal
+            fn = self._revisiondeltanormal
+
+        delta = fn(store, rev, prev, linknode)
+        if not delta:
+            return
 
-        return fn(store, rev, prev, linknode)
+        meta = self.builddeltaheader(delta.node, delta.p1node, delta.p2node,
+                                     delta.basenode, delta.linknode,
+                                     delta.flags)
+        l = len(meta) + sum(len(x) for x in delta.deltachunks)
 
-    def _revchunknormal(self, store, rev, prev, linknode):
+        yield chunkheader(l)
+        yield meta
+        for x in delta.deltachunks:
+            yield x
+
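# Aside: revchunk() above consumes a revisiondelta object. That class is
# introduced elsewhere in this changeset; judging from the attributes
# accessed here (node, p1node, p2node, basenode, linknode, flags,
# deltachunks), it is presumably an attrs-based value object along these
# lines (a sketch, not the authoritative definition):

from .thirdparty import attr

@attr.s(slots=True, frozen=True)
class revisiondelta(object):
    # 20 byte node of this revision.
    node = attr.ib()
    # 20 byte nodes of the parent revisions.
    p1node = attr.ib()
    p2node = attr.ib()
    # 20 byte node the delta is computed against.
    basenode = attr.ib()
    # 20 byte node of the changeset this revision is linked to.
    linknode = attr.ib()
    # 2 bytes of flags to apply to the revision data.
    flags = attr.ib()
    # Iterable of chunks holding the raw delta data.
    deltachunks = attr.ib()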
+    def _revisiondeltanormal(self, store, rev, prev, linknode):
         node = store.node(rev)
         p1, p2 = store.parentrevs(rev)
         base = self.deltaparent(store, rev, p1, p2, prev)
 
         prefix = ''
         if store.iscensored(base) or store.iscensored(rev):
             try:
                 delta = store.revision(node, raw=True)
             except error.CensoredNodeError as e:
                 delta = e.tombstone
             if base == nullrev:
                 prefix = mdiff.trivialdiffheader(len(delta))
             else:
                 baselen = store.rawsize(base)
                 prefix = mdiff.replacediffheader(baselen, len(delta))
         elif base == nullrev:
             delta = store.revision(node, raw=True)
             prefix = mdiff.trivialdiffheader(len(delta))
         else:
             delta = store.revdiff(base, rev)
         p1n, p2n = store.parents(node)
-        basenode = store.node(base)
-        flags = store.flags(rev)
-        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
-        meta += prefix
-        l = len(meta) + len(delta)
-        yield chunkheader(l)
-        yield meta
-        yield delta
-
-    def _revchunknarrow(self, store, rev, prev, linknode):
+
+        return revisiondelta(
+            node=node,
+            p1node=p1n,
+            p2node=p2n,
+            basenode=store.node(base),
+            linknode=linknode,
+            flags=store.flags(rev),
+            deltachunks=(prefix, delta),
+        )
+
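# Aside: when no real delta is available (null base, or censored data), the
# code above ships the full revision text in a delta slot by prepending a
# synthetic hunk header. A sketch of the likely shape of the two mdiff
# helpers used for that, as packed big-endian (start, end, newlength)
# triples:

import struct

def trivialdiffheader(length):
    # "Replace bytes 0..0 with `length` new bytes": the hunk payload is the
    # complete revision text, applied against an empty base.
    return struct.pack(">lll", 0, 0, length)

def replacediffheader(oldlen, newlen):
    # "Replace bytes 0..oldlen with `newlen` new bytes": the payload fully
    # supersedes the (censored) base text.
    return struct.pack(">lll", 0, oldlen, newlen)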
+    def _revisiondeltanarrow(self, store, rev, prev, linknode):
         # build up some mapping information that's useful later. See
         # the local() nested function below.
         if not self.changelog_done:
             self.clnode_to_rev[linknode] = rev
             linkrev = rev
             self.clrev_to_localrev[linkrev] = rev
         else:
             linkrev = self.clnode_to_rev[linknode]
             self.clrev_to_localrev[linkrev] = rev
 
         # This is a node to send in full, because the changeset it
         # corresponds to was a full changeset.
         if linknode in self.full_nodes:
-            for x in self._revchunknormal(store, rev, prev, linknode):
-                yield x
-            return
+            return self._revisiondeltanormal(store, rev, prev, linknode)
 
         # At this point, a node can either be one we should skip or an
         # ellipsis. If it's not an ellipsis, bail immediately.
         if linkrev not in self.precomputed_ellipsis:
             return
 
         linkparents = self.precomputed_ellipsis[linkrev]
         def local(clrev):
             """Turn a changelog revnum into a local revnum.
 
             The ellipsis dag is stored as revnums on the changelog,
             but when we're producing ellipsis entries for
             non-changelog revlogs, we need to turn those numbers into
             something local. This does that for us, and during the
             changelog sending phase will also expand the stored
             mappings as needed.
             """
             if clrev == nullrev:
                 return nullrev
 
             if not self.changelog_done:
                 # If we're doing the changelog, it's possible that we
                 # have a parent that is already on the client, and we
                 # need to store some extra mapping information so that
                 # our contained ellipsis nodes will be able to resolve
                 # their parents.
                 if clrev not in self.clrev_to_localrev:
                     clnode = store.node(clrev)
                     self.clnode_to_rev[clnode] = clrev
                 return clrev
 
             # Walk the ellipsis-ized changelog breadth-first looking for a
             # change that has been linked from the current revlog.
             #
             # For a flat manifest revlog only a single step should be necessary
             # as all relevant changelog entries are relevant to the flat
             # manifest.
             #
             # For a filelog or tree manifest dirlog however not every changelog
             # entry will have been relevant, so we need to skip some changelog
             # nodes even after ellipsis-izing.
             walk = [clrev]
             while walk:
                 p = walk[0]
                 walk = walk[1:]
                 if p in self.clrev_to_localrev:
                     return self.clrev_to_localrev[p]
                 elif p in self.full_nodes:
                     walk.extend([pp for pp in self._repo.changelog.parentrevs(p)
                                  if pp != nullrev])
                 elif p in self.precomputed_ellipsis:
                     walk.extend([pp for pp in self.precomputed_ellipsis[p]
                                  if pp != nullrev])
                 else:
                     # In this case, we've got an ellipsis with parents
                     # outside the current bundle (likely an
                     # incremental pull). We "know" that we can use the
                     # value of this same revlog at whatever revision
                     # is pointed to by linknode. "Know" is in scare
                     # quotes because I haven't done enough examination
                     # of edge cases to convince myself this is really
                     # a fact - it works for all the (admittedly
                     # thorough) cases in our testsuite, but I would be
                     # somewhat unsurprised to find a case in the wild
                     # where this breaks down a bit. That said, I don't
                     # know if it would hurt anything.
                     for i in pycompat.xrange(rev, 0, -1):
                         if store.linkrev(i) == clrev:
                             return i
                     # We failed to resolve a parent for this node, so
                     # we crash the changegroup construction.
                     raise error.Abort(
                         'unable to resolve parent while packing %r %r'
                         ' for changeset %r' % (store.indexfile, rev, clrev))
 
             return nullrev
 
         if not linkparents or (
                 store.parentrevs(rev) == (nullrev, nullrev)):
             p1, p2 = nullrev, nullrev
         elif len(linkparents) == 1:
             p1, = sorted(local(p) for p in linkparents)
             p2 = nullrev
         else:
             p1, p2 = sorted(local(p) for p in linkparents)
 
         n = store.node(rev)
         p1n, p2n = store.node(p1), store.node(p2)
         flags = store.flags(rev)
         flags |= revlog.REVIDX_ELLIPSIS
-        meta = self.builddeltaheader(
-            n, p1n, p2n, nullid, linknode, flags)
+
         # TODO: try and actually send deltas for ellipsis data blocks
         data = store.revision(n)
         diffheader = mdiff.trivialdiffheader(len(data))
-        l = len(meta) + len(diffheader) + len(data)
-        yield ''.join((chunkheader(l),
-                       meta,
-                       diffheader,
-                       data))
+
+        return revisiondelta(
+            node=n,
+            p1node=p1n,
+            p2node=p2n,
+            basenode=nullid,
+            linknode=linknode,
+            flags=flags,
+            deltachunks=(diffheader, data),
+        )
 
     def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
         # do nothing with basenode, it is implicitly the previous one in HG10
         # do nothing with flags, it is implicitly 0 for cg1 and cg2
         return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
 
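# Aside: the three delta header formats declared at the top of this module
# have fixed sizes that the unpackers rely on when slicing chunks; this is
# easy to verify:

import struct

assert struct.calcsize("20s20s20s20s") == 80        # cg1: node, p1, p2, link
assert struct.calcsize("20s20s20s20s20s") == 100    # cg2: adds explicit base
assert struct.calcsize(">20s20s20s20s20sH") == 102  # cg3: adds 16-bit flags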
 class cg2packer(cg1packer):
     version = '02'
     deltaheader = _CHANGEGROUPV2_DELTA_HEADER
 
     def __init__(self, repo, filematcher, bundlecaps=None):
         super(cg2packer, self).__init__(repo, filematcher,
                                         bundlecaps=bundlecaps)
 
         if self._reorder is None:
             # Since generaldelta is directly supported by cg2, reordering
             # generally doesn't help, so we disable it by default (treating
             # bundle.reorder=auto just like bundle.reorder=False).
             self._reorder = False
 
     def deltaparent(self, store, rev, p1, p2, prev):
         # Narrow ellipses mode.
         if util.safehasattr(self, 'full_nodes'):
             # TODO: send better deltas when in narrow mode.
             #
             # changegroup.group() loops over revisions to send,
             # including revisions we'll skip. What this means is that
             # `prev` will be a potentially useless delta base for all
             # ellipsis nodes, as the client likely won't have it. In
             # the future we should do bookkeeping about which nodes
             # have been sent to the client, and try to be
             # significantly smarter about delta bases. This is
             # slightly tricky because this same code has to work for
             # all revlogs, and we don't have the linkrev/linknode here.
             return p1
 
         dp = store.deltaparent(rev)
         if dp == nullrev and store.storedeltachains:
             # Avoid sending full revisions when delta parent is null. Pick prev
             # in that case. It's tempting to pick p1 in this case, as p1 will
             # be smaller in the common case. However, computing a delta against
             # p1 may require resolving the raw text of p1, which could be
             # expensive. The revlog caches should have prev cached, meaning
             # less CPU for changegroup generation. There is likely room to add
             # a flag and/or config option to control this behavior.
             base = prev
         elif dp == nullrev:
             # revlog is configured to use full snapshot for a reason,
             # stick to full snapshot.
             base = nullrev
         elif dp not in (p1, p2, prev):
             # Pick prev when we can't be sure remote has the base revision.
             return prev
         else:
             base = dp
         if base != nullrev and not store.candelta(base, rev):
             base = nullrev
         return base
 
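# Aside: the delta-base choice above, summarized (dp is the revision's delta
# parent in storage):
#
#   dp == nullrev and store keeps delta chains -> prev (likely cached, cheap)
#   dp == nullrev otherwise                    -> nullrev (send full revision)
#   dp not in (p1, p2, prev)                   -> prev (remote may lack dp)
#   otherwise                                  -> dp
#
# with a final downgrade to nullrev whenever candelta(base, rev) rejects the
# chosen base.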
     def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
         # Do nothing with flags, it is implicitly 0 in cg1 and cg2
         return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
 
 class cg3packer(cg2packer):
     version = '03'
     deltaheader = _CHANGEGROUPV3_DELTA_HEADER
 
     def _packmanifests(self, dir, mfnodes, lookuplinknode):
         if dir:
             yield self.fileheader(dir)
 
         dirlog = self._repo.manifestlog._revlog.dirlog(dir)
         for chunk in self.group(mfnodes, dirlog, lookuplinknode,
                                 units=_('manifests')):
             yield chunk
 
     def _manifestsdone(self):
         return self.close()
 
     def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
         return struct.pack(
             self.deltaheader, node, p1n, p2n, basenode, linknode, flags)
 
 _packermap = {'01': (cg1packer, cg1unpacker),
               # cg2 adds support for exchanging generaldelta
               '02': (cg2packer, cg2unpacker),
               # cg3 adds support for exchanging revlog flags and treemanifests
               '03': (cg3packer, cg3unpacker),
              }
 
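# Aside: a quick sketch of how _packermap ties wire versions to classes;
# getbundler() and getunbundler() below simply index into the tuple:

packercls, unpackercls = _packermap['02']
assert packercls is cg2packer
assert unpackercls is cg2unpacker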
 def allsupportedversions(repo):
     versions = set(_packermap.keys())
     if not (repo.ui.configbool('experimental', 'changegroup3') or
             repo.ui.configbool('experimental', 'treemanifest') or
             'treemanifest' in repo.requirements):
         versions.discard('03')
     return versions
 
 # Changegroup versions that can be applied to the repo
 def supportedincomingversions(repo):
     return allsupportedversions(repo)
 
 # Changegroup versions that can be created from the repo
 def supportedoutgoingversions(repo):
     versions = allsupportedversions(repo)
     if 'treemanifest' in repo.requirements:
         # Versions 01 and 02 support only flat manifests and it's just too
         # expensive to convert between the flat manifest and tree manifest on
         # the fly. Since tree manifests are hashed differently, all of history
         # would have to be converted. Instead, we simply don't even pretend to
         # support versions 01 and 02.
         versions.discard('01')
         versions.discard('02')
     if repository.NARROW_REQUIREMENT in repo.requirements:
         # Versions 01 and 02 don't support revlog flags, and we need to
         # support that for stripping and unbundling to work.
         versions.discard('01')
         versions.discard('02')
     if LFS_REQUIREMENT in repo.requirements:
         # Versions 01 and 02 don't support revlog flags, and we need to
         # mark LFS entries with REVIDX_EXTSTORED.
         versions.discard('01')
         versions.discard('02')
 
     return versions
 
 def localversion(repo):
     # Finds the best version to use for bundles that are meant to be used
     # locally, such as those from strip and shelve, and temporary bundles.
     return max(supportedoutgoingversions(repo))
 
 def safeversion(repo):
     # Finds the smallest version that it's safe to assume clients of the repo
     # will support. For example, all hg versions that support generaldelta also
     # support changegroup 02.
     versions = supportedoutgoingversions(repo)
     if 'generaldelta' in repo.requirements:
         versions.discard('01')
     assert versions
     return min(versions)
 
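# Aside: how the version helpers compose for a hypothetical repository whose
# requirements include 'generaldelta' but not 'treemanifest', with the
# changegroup3 config left off (the values below are assumptions for the
# example, not computed output):

versions = {'01', '02'}       # allsupportedversions(): '03' was discarded
versions.discard('01')        # safeversion(): generaldelta implies cg2
assert min(versions) == '02'  # safeversion() result
assert max(versions) == '02'  # localversion() picks the maximum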
 def getbundler(version, repo, bundlecaps=None, filematcher=None):
     assert version in supportedoutgoingversions(repo)
 
     if filematcher is None:
         filematcher = matchmod.alwaysmatcher(repo.root, '')
 
     if version == '01' and not filematcher.always():
         raise error.ProgrammingError('version 01 changegroups do not support '
                                      'sparse file matchers')
 
     # Requested files could include files not in the local store. So
     # filter those out.
     filematcher = matchmod.intersectmatchers(repo.narrowmatch(),
                                              filematcher)
 
     return _packermap[version][0](repo, filematcher=filematcher,
                                   bundlecaps=bundlecaps)
 
 def getunbundler(version, fh, alg, extras=None):
     return _packermap[version][1](fh, alg, extras=extras)
 
 def _changegroupinfo(repo, nodes, source):
     if repo.ui.verbose or source == 'bundle':
         repo.ui.status(_("%d changesets found\n") % len(nodes))
     if repo.ui.debugflag:
         repo.ui.debug("list of changesets:\n")
         for node in nodes:
             repo.ui.debug("%s\n" % hex(node))
 
 def makechangegroup(repo, outgoing, version, source, fastpath=False,
                     bundlecaps=None):
     cgstream = makestream(repo, outgoing, version, source,
                           fastpath=fastpath, bundlecaps=bundlecaps)
     return getunbundler(version, util.chunkbuffer(cgstream), None,
                         {'clcount': len(outgoing.missing) })
 
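# Aside: a hedged usage sketch. Given an existing repo and an outgoing set
# (both assumed to be supplied by the caller, e.g. by discovery code), a raw
# changegroup stream can be written out with the writechunks() helper from
# this module:
#
#     chunks = makestream(repo, outgoing, '02', 'bundle')
#     writechunks(repo.ui, chunks, 'example.cg2', vfs=None)
#
# makechangegroup() above instead feeds the stream back through an unpacker,
# which is the form local consumers such as strip and shelve expect.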
 def makestream(repo, outgoing, version, source, fastpath=False,
                bundlecaps=None, filematcher=None):
     bundler = getbundler(version, repo, bundlecaps=bundlecaps,
                          filematcher=filematcher)
 
     repo = repo.unfiltered()
     commonrevs = outgoing.common
     csets = outgoing.missing
     heads = outgoing.missingheads
     # We go through the fast path if we get told to, or if all (unfiltered)
     # heads have been requested (since we then know that all linkrevs will
     # be pulled by the client).
     heads.sort()
     fastpathlinkrev = fastpath or (
         repo.filtername is None and heads == sorted(repo.heads()))
 
     repo.hook('preoutgoing', throw=True, source=source)
     _changegroupinfo(repo, csets, source)
     return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
 
 def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
     revisions = 0
     files = 0
     progress = repo.ui.makeprogress(_('files'), unit=_('files'),
                                     total=expectedfiles)
     for chunkdata in iter(source.filelogheader, {}):
         files += 1
         f = chunkdata["filename"]
         repo.ui.debug("adding %s revisions\n" % f)
         progress.increment()
         fl = repo.file(f)
         o = len(fl)
         try:
             deltas = source.deltaiter()
             if not fl.addgroup(deltas, revmap, trp):
                 raise error.Abort(_("received file revlog group is empty"))
         except error.CensoredBaseError as e:
             raise error.Abort(_("received delta base is censored: %s") % e)
         revisions += len(fl) - o
         if f in needfiles:
             needs = needfiles[f]
             for new in pycompat.xrange(o, len(fl)):
                 n = fl.node(new)
                 if n in needs:
                     needs.remove(n)
                 else:
                     raise error.Abort(
                         _("received spurious file revlog entry"))
             if not needs:
                 del needfiles[f]
     progress.complete()
 
     for f, needs in needfiles.iteritems():
         fl = repo.file(f)
         for n in needs:
             try:
                 fl.rev(n)
             except error.LookupError:
                 raise error.Abort(
                     _('missing file data for %s:%s - run hg verify') %
                     (f, hex(n)))
 
     return revisions, files
 
 def _packellipsischangegroup(repo, common, match, relevant_nodes,
                              ellipsisroots, visitnodes, depth, source, version):
     if version in ('01', '02'):
         raise error.Abort(
             'ellipsis nodes require at least cg3 on client and server, '
             'but negotiated version %s' % version)
     # We wrap cg1packer.revchunk, using a side channel to pass
     # relevant_nodes into that area. Then if linknode isn't in the
     # set, we know we have an ellipsis node and we should defer
     # sending that node's data. We override close() to detect
     # pending ellipsis nodes and flush them.
     packer = getbundler(version, repo, filematcher=match)
     # Give the packer the list of nodes which should not be
     # ellipsis nodes. We store this rather than the set of nodes
     # that should be an ellipsis because for very large histories
     # we expect this to be significantly smaller.
     packer.full_nodes = relevant_nodes
     # Maps ellipsis revs to their roots at the changelog level.
     packer.precomputed_ellipsis = ellipsisroots
     # Maps CL revs to per-revlog revisions. Cleared in close() at
     # the end of each group.
     packer.clrev_to_localrev = {}
     packer.next_clrev_to_localrev = {}
     # Maps changelog nodes to changelog revs. Filled in once
     # during changelog stage and then left unmodified.
     packer.clnode_to_rev = {}
     packer.changelog_done = False
     # If true, informs the packer that it is serving shallow content and might
     # need to pack file contents not introduced by the changes being packed.
     packer.is_shallow = depth is not None
 
     return packer.generate(common, visitnodes, False, source)