# NOTE(review): scraper residue from a repository-browser diff view; preserved
# as comments so the file stays valid Python.
# Commit: changegroup: rename dir to tree to avoid shadowing a built-in
# Author: Gregory Szorc
# Revision: r39269:8b9b93bf (default branch)
# changegroup.py - Mercurial changegroup manipulation functions
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import os
import struct
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    nullrev,
    short,
)

from .thirdparty import (
    attr,
)

from . import (
    dagop,
    error,
    match as matchmod,
    mdiff,
    phases,
    pycompat,
    repository,
    util,
)

from .utils import (
    interfaceutil,
    stringutil,
)
# Per-revision delta headers for each changegroup wire-format version:
# cg1: node, p1, p2, linknode (delta base is implicit - see cg1unpacker).
_CHANGEGROUPV1_DELTA_HEADER = struct.Struct("20s20s20s20s")
# cg2 adds an explicit delta base node (generaldelta support).
_CHANGEGROUPV2_DELTA_HEADER = struct.Struct("20s20s20s20s20s")
# cg3 adds a 16-bit revlog flags field after the nodes.
_CHANGEGROUPV3_DELTA_HEADER = struct.Struct(">20s20s20s20s20sH")

LFS_REQUIREMENT = 'lfs'

# Local alias to avoid repeated attribute lookups on a hot path.
readexactly = util.readexactly
49
49
def getchunk(stream):
    """return the next chunk from stream as a string

    A chunk is a big-endian 4-byte length (which includes the 4 length
    bytes themselves) followed by the payload. A length <= 4 therefore
    carries no payload; a length of exactly 0 is the terminator and an
    empty string is returned for it.
    """
    d = readexactly(stream, 4)
    l = struct.unpack(">l", d)[0]
    if l <= 4:
        if l:
            # Lengths 1-4 (and negatives) cannot frame a valid chunk.
            raise error.Abort(_("invalid chunk length %d") % l)
        return ""
    return readexactly(stream, l - 4)
59
59
def chunkheader(length):
    """return a changegroup chunk header (string)

    The on-the-wire length includes the 4 header bytes themselves, so a
    payload of ``length`` bytes is framed as ``length + 4``.
    """
    return struct.pack(">l", length + 4)
63
63
def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk

    A zero length terminates a chunk sequence (see getchunk()).
    """
    return struct.pack(">l", 0)
67
67
def _fileheader(path):
    """Obtain a changegroup chunk header for a named path.

    Frames ``path`` itself as a chunk: length header followed by the
    path bytes.
    """
    return chunkheader(len(path)) + path
71
71
def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.

    On any failure the partially written file is unlinked; on success
    ``cleanup`` is reset so the finally block keeps the file.
    """
    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                # Increase default buffer size because default is usually
                # small (4k is common on Linux).
                fh = open(filename, "wb", 131072)
        else:
            fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, r"wb")
            cleanup = filename
        for c in chunks:
            fh.write(c)
        # All chunks written successfully - do not delete the file.
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
105
105
class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        if alg is None:
            alg = 'UN'
        if alg not in util.compengines.supportedbundletypes:
            raise error.Abort(_('unknown stream compression type: %s')
                              % alg)
        if alg == 'BZ':
            alg = '_truncatedBZ'

        compengine = util.compengines.forbundletype(alg)
        self._stream = compengine.decompressorreader(fh)
        self._type = alg
        self.extras = extras or {}
        # Invoked (when set) once per chunk read, for progress reporting.
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None and self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        """Read a chunk length header and return the payload length."""
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        # cg1 has no explicit delta base: the base is the previous node
        # in the stream, or p1 for the first delta of a group.
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        """Read one delta chunk; returns {} at the end of a group."""
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = self.deltaheader.unpack(headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return (node, p1, p2, cs, deltabase, delta, flags)

    def getchunks(self):
        """returns all the chunks contains in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parse the changegroup data, otherwise it will
        block in case of sshrepo because it don't know the end of the stream.
        """
        # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
        # and a list of filelogs. For changegroup 3, we expect 4 parts:
        # changelog, manifestlog, a list of tree manifestlogs, and a list of
        # filelogs.
        #
        # Changelog and manifestlog parts are terminated with empty chunks. The
        # tree and file parts are a list of entry sections. Each entry section
        # is a series of chunks terminating in an empty chunk. The list of these
        # entry sections is terminated in yet another empty chunk, so we know
        # we've reached the end of the tree/file list when we reach an empty
        # chunk that was proceeded by no non-empty chunks.

        parts = 0
        while parts < 2 + self._grouplistcount:
            noentries = True
            while True:
                chunk = getchunk(self)
                if not chunk:
                    # The first two empty chunks represent the end of the
                    # changelog and the manifestlog portions. The remaining
                    # empty chunks represent either A) the end of individual
                    # tree or file entries in the file list, or B) the end of
                    # the entire list. It's the end of the entire list if there
                    # were no entries (i.e. noentries is True).
                    if parts < 2:
                        parts += 1
                    elif noentries:
                        parts += 1
                    break
                noentries = False
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    # Re-emit payloads in at most 1MB pieces.
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

    def _unpackmanifests(self, repo, revmap, trp, prog):
        self.callback = prog.increment
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        deltas = self.deltaiter()
        repo.manifestlog.addgroup(deltas, revmap, trp)
        prog.complete()
        self.callback = None

    def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
              expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        try:
            # The transaction may already carry source information. In this
            # case we use the top level data. We overwrite the argument
            # because we need to use the top level value (if they exist)
            # in this function.
            srctype = tr.hookargs.setdefault('source', srctype)
            url = tr.hookargs.setdefault('url', url)
            repo.hook('prechangegroup',
                      throw=True, **pycompat.strkwargs(tr.hookargs))

            # write changelog data to temp files so concurrent readers
            # will not see an inconsistent view
            cl = repo.changelog
            cl.delayupdate(tr)
            oldheads = set(cl.heads())

            trp = weakref.proxy(tr)
            # pull off the changeset group
            repo.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            progress = repo.ui.makeprogress(_('changesets'), unit=_('chunks'),
                                            total=expectedtotal)
            self.callback = progress.increment

            efiles = set()
            def onchangelog(cl, node):
                efiles.update(cl.readfiles(node))

            self.changelogheader()
            deltas = self.deltaiter()
            cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog)
            efiles = len(efiles)

            if not cgnodes:
                repo.ui.develwarn('applied empty changegroup',
                                  config='warn-empty-changegroup')
            clend = len(cl)
            changesets = clend - clstart
            progress.complete()
            self.callback = None

            # pull off the manifest group
            repo.ui.status(_("adding manifests\n"))
            # We know that we'll never have more manifests than we had
            # changesets.
            progress = repo.ui.makeprogress(_('manifests'), unit=_('chunks'),
                                            total=changesets)
            self._unpackmanifests(repo, revmap, trp, progress)

            needfiles = {}
            if repo.ui.configbool('server', 'validate'):
                cl = repo.changelog
                ml = repo.manifestlog
                # validate incoming csets have their manifests
                for cset in pycompat.xrange(clstart, clend):
                    mfnode = cl.changelogrevision(cset).manifest
                    mfest = ml[mfnode].readdelta()
                    # store file cgnodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            repo.ui.status(_("adding file changes\n"))
            newrevs, newfiles = _addchangegroupfiles(
                repo, self, revmap, trp, efiles, needfiles)
            revisions += newrevs
            files += newfiles

            deltaheads = 0
            if oldheads:
                heads = cl.heads()
                deltaheads = len(heads) - len(oldheads)
                for h in heads:
                    # Closed new heads do not count as added heads.
                    if h not in oldheads and repo[h].closesbranch():
                        deltaheads -= 1
            htext = ""
            if deltaheads:
                htext = _(" (%+d heads)") % deltaheads

            repo.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            repo.invalidatevolatilesets()

            if changesets > 0:
                if 'node' not in tr.hookargs:
                    tr.hookargs['node'] = hex(cl.node(clstart))
                    tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                    hookargs = dict(tr.hookargs)
                else:
                    hookargs = dict(tr.hookargs)
                    hookargs['node'] = hex(cl.node(clstart))
                    hookargs['node_last'] = hex(cl.node(clend - 1))
                repo.hook('pretxnchangegroup',
                          throw=True, **pycompat.strkwargs(hookargs))

            added = [cl.node(r) for r in pycompat.xrange(clstart, clend)]
            phaseall = None
            if srctype in ('push', 'serve'):
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if changeset already
                # exists locally as secret
                #
                # We should not use added here but the list of all change in
                # the bundle
                if repo.publishing():
                    targetphase = phaseall = phases.public
                else:
                    # closer target phase computation

                    # Those changesets have been pushed from the
                    # outside, their phases are going to be pushed
                    # alongside. Therefor `targetphase` is
                    # ignored.
                    targetphase = phaseall = phases.draft
            if added:
                phases.registernew(repo, tr, targetphase, added)
            if phaseall is not None:
                phases.advanceboundary(repo, tr, phaseall, cgnodes)

            if changesets > 0:

                def runhooks():
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(repo):
                        return

                    repo.hook("changegroup", **pycompat.strkwargs(hookargs))

                    for n in added:
                        args = hookargs.copy()
                        args['node'] = hex(n)
                        del args['node_last']
                        repo.hook("incoming", **pycompat.strkwargs(args))

                    newheads = [h for h in repo.heads()
                                if h not in oldheads]
                    repo.ui.log("incoming",
                                "%d incoming changes - new heads: %s\n",
                                len(added),
                                ', '.join([hex(c[:6]) for c in newheads]))

                tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        if deltaheads < 0:
            ret = deltaheads - 1
        else:
            ret = deltaheads + 1
        return ret

    def deltaiter(self):
        """
        returns an iterator of the deltas in this changegroup

        Useful for passing to the underlying storage system to be stored.
        """
        chain = None
        for chunkdata in iter(lambda: self.deltachunk(chain), {}):
            # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
            yield chunkdata
            chain = chunkdata[0]
448
448
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        # cg2 carries an explicit delta base, so prevnode is unused.
        node, p1, p2, deltabase, cs = headertuple
        flags = 0
        return node, p1, p2, deltabase, cs, flags
464
464
class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        # cg3 adds a flags field to the cg2 header.
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog):
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % d)
            dirlog = repo.manifestlog._revlog.dirlog(d)
            deltas = self.deltaiter()
            if not dirlog.addgroup(deltas, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))
491
491
class headerlessfixup(object):
    """File-like wrapper that replays already-consumed header bytes.

    ``h`` is a prefix that was read from ``fh`` before handing it here
    (e.g. to sniff the bundle type); read() serves those bytes first and
    then continues from the underlying stream.
    """
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        if self._h:
            d, self._h = self._h[:n], self._h[n:]
            if len(d) < n:
                # Buffered prefix exhausted mid-read; top up from the stream.
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)
503
503
@interfaceutil.implementer(repository.irevisiondeltarequest)
@attr.s(slots=True, frozen=True)
class revisiondeltarequest(object):
    # Node whose revision data is requested.
    node = attr.ib()
    # Changelog node this revision is linked to.
    linknode = attr.ib()
    # First parent node.
    p1node = attr.ib()
    # Second parent node.
    p2node = attr.ib()
    # Node the emitted delta should be based on.
    basenode = attr.ib()
    # Whether this request is part of an ellipsis (shallow) changegroup.
    ellipsis = attr.ib(default=False)
513
513
def _revisiondeltatochunks(delta, headerfn):
    """Serialize a revisiondelta to changegroup chunks.

    Yields the length chunk header, the delta header produced by
    ``headerfn``, an optional synthesized diff prefix, and the payload.
    """
    # The changegroup wire format only carries deltas. When the captured
    # data is a full revision rather than a delta, synthesize a diff
    # header that rewrites the entire base text.
    if delta.delta is not None:
        prefix = b''
        data = delta.delta
    else:
        data = delta.revision
        if delta.basenode == nullid:
            # Full revision against the null base: a trivial diff header
            # that inserts everything.
            prefix = mdiff.trivialdiffheader(len(data))
        else:
            # Full revision against a real base: replace the whole base.
            prefix = mdiff.replacediffheader(delta.baserevisionsize,
                                             len(data))

    meta = headerfn(delta)

    yield chunkheader(len(meta) + len(prefix) + len(data))
    yield meta
    if prefix:
        yield prefix
    yield data
540
540
541 def _sortnodesnormal(store, nodes, reorder):
541 def _sortnodesnormal(store, nodes, reorder):
542 """Sort nodes for changegroup generation and turn into revnums."""
542 """Sort nodes for changegroup generation and turn into revnums."""
543 # for generaldelta revlogs, we linearize the revs; this will both be
543 # for generaldelta revlogs, we linearize the revs; this will both be
544 # much quicker and generate a much smaller bundle
544 # much quicker and generate a much smaller bundle
545 if (store._generaldelta and reorder is None) or reorder:
545 if (store._generaldelta and reorder is None) or reorder:
546 revs = set(store.rev(n) for n in nodes)
546 revs = set(store.rev(n) for n in nodes)
547 return dagop.linearize(revs, store.parentrevs)
547 return dagop.linearize(revs, store.parentrevs)
548 else:
548 else:
549 return sorted([store.rev(n) for n in nodes])
549 return sorted([store.rev(n) for n in nodes])
550
550
551 def _sortnodesellipsis(store, nodes, cl, lookup):
551 def _sortnodesellipsis(store, nodes, cl, lookup):
552 """Sort nodes for changegroup generation and turn into revnums."""
552 """Sort nodes for changegroup generation and turn into revnums."""
553 # Ellipses serving mode.
553 # Ellipses serving mode.
554 #
554 #
555 # In a perfect world, we'd generate better ellipsis-ified graphs
555 # In a perfect world, we'd generate better ellipsis-ified graphs
556 # for non-changelog revlogs. In practice, we haven't started doing
556 # for non-changelog revlogs. In practice, we haven't started doing
557 # that yet, so the resulting DAGs for the manifestlog and filelogs
557 # that yet, so the resulting DAGs for the manifestlog and filelogs
558 # are actually full of bogus parentage on all the ellipsis
558 # are actually full of bogus parentage on all the ellipsis
559 # nodes. This has the side effect that, while the contents are
559 # nodes. This has the side effect that, while the contents are
560 # correct, the individual DAGs might be completely out of whack in
560 # correct, the individual DAGs might be completely out of whack in
561 # a case like 882681bc3166 and its ancestors (back about 10
561 # a case like 882681bc3166 and its ancestors (back about 10
562 # revisions or so) in the main hg repo.
562 # revisions or so) in the main hg repo.
563 #
563 #
564 # The one invariant we *know* holds is that the new (potentially
564 # The one invariant we *know* holds is that the new (potentially
565 # bogus) DAG shape will be valid if we order the nodes in the
565 # bogus) DAG shape will be valid if we order the nodes in the
566 # order that they're introduced in dramatis personae by the
566 # order that they're introduced in dramatis personae by the
567 # changelog, so what we do is we sort the non-changelog histories
567 # changelog, so what we do is we sort the non-changelog histories
568 # by the order in which they are used by the changelog.
568 # by the order in which they are used by the changelog.
569 key = lambda n: cl.rev(lookup(n))
569 key = lambda n: cl.rev(lookup(n))
570 return [store.rev(n) for n in sorted(nodes, key=key)]
570 return [store.rev(n) for n in sorted(nodes, key=key)]
571
571
def _makenarrowdeltarequest(cl, store, ischangelog, rev, node, linkrev,
                            linknode, clrevtolocalrev, fullclnodes,
                            precomputedellipsis):
    """Build a revisiondeltarequest for an ellipsis (narrow) revision.

    Resolves the ellipsis parents recorded for ``linkrev`` (changelog
    revnums) into revnums local to ``store``, then returns a full-revision
    request (basenode=nullid) flagged as an ellipsis.
    """
    linkparents = precomputedellipsis[linkrev]
    def local(clrev):
        """Turn a changelog revnum into a local revnum.

        The ellipsis dag is stored as revnums on the changelog,
        but when we're producing ellipsis entries for
        non-changelog revlogs, we need to turn those numbers into
        something local. This does that for us, and during the
        changelog sending phase will also expand the stored
        mappings as needed.
        """
        if clrev == nullrev:
            return nullrev

        if ischangelog:
            # Changelog revnums are already "local".
            return clrev

        # Walk the ellipsis-ized changelog breadth-first looking for a
        # change that has been linked from the current revlog.
        #
        # For a flat manifest revlog only a single step should be necessary
        # as all relevant changelog entries are relevant to the flat
        # manifest.
        #
        # For a filelog or tree manifest dirlog however not every changelog
        # entry will have been relevant, so we need to skip some changelog
        # nodes even after ellipsis-izing.
        walk = [clrev]
        while walk:
            p = walk[0]
            walk = walk[1:]
            if p in clrevtolocalrev:
                return clrevtolocalrev[p]
            elif p in fullclnodes:
                # Full changeset: continue through its real parents.
                walk.extend([pp for pp in cl.parentrevs(p)
                             if pp != nullrev])
            elif p in precomputedellipsis:
                # Ellipsis changeset: continue through its ellipsis parents.
                walk.extend([pp for pp in precomputedellipsis[p]
                             if pp != nullrev])
            else:
                # In this case, we've got an ellipsis with parents
                # outside the current bundle (likely an
                # incremental pull). We "know" that we can use the
                # value of this same revlog at whatever revision
                # is pointed to by linknode. "Know" is in scare
                # quotes because I haven't done enough examination
                # of edge cases to convince myself this is really
                # a fact - it works for all the (admittedly
                # thorough) cases in our testsuite, but I would be
                # somewhat unsurprised to find a case in the wild
                # where this breaks down a bit. That said, I don't
                # know if it would hurt anything.
                for i in pycompat.xrange(rev, 0, -1):
                    if store.linkrev(i) == clrev:
                        return i
                # We failed to resolve a parent for this node, so
                # we crash the changegroup construction.
                raise error.Abort(
                    'unable to resolve parent while packing %r %r'
                    ' for changeset %r' % (store.indexfile, rev, clrev))

        # BFS exhausted without a match: treat the parent as absent.
        return nullrev

    # Map the (0, 1 or 2) ellipsis parents to local parent revnums.
    if not linkparents or (
        store.parentrevs(rev) == (nullrev, nullrev)):
        p1, p2 = nullrev, nullrev
    elif len(linkparents) == 1:
        p1, = sorted(local(p) for p in linkparents)
        p2 = nullrev
    else:
        p1, p2 = sorted(local(p) for p in linkparents)

    p1node, p2node = store.node(p1), store.node(p2)

    # TODO: try and actually send deltas for ellipsis data blocks
    return revisiondeltarequest(
        node=node,
        p1node=p1node,
        p2node=p2node,
        linknode=linknode,
        basenode=nullid,
        ellipsis=True,
    )
658
658
def deltagroup(repo, store, nodes, ischangelog, lookup, forcedeltaparentprev,
               allowreorder,
               units=None,
               ellipses=False, clrevtolocalrev=None, fullclnodes=None,
               precomputedellipsis=None):
    """Calculate deltas for a set of revisions.

    Is a generator of ``revisiondelta`` instances.

    ``lookup`` maps a node in ``store`` to its linknode (and may have
    side effects, e.g. the changelog callback collecting state).

    If units is not None, progress detail will be generated, units specifies
    the type of revlog that is touched (changelog, manifest, etc.).
    """
    if not nodes:
        return

    # We perform two passes over the revisions whose data we will emit.
    #
    # In the first pass, we obtain information about the deltas that will
    # be generated. This involves computing linknodes and adjusting the
    # request to take shallow fetching into account. The end result of
    # this pass is a list of "request" objects stating which deltas
    # to obtain.
    #
    # The second pass is simply resolving the requested deltas.

    cl = repo.changelog

    if ischangelog:
        # Changelog doesn't benefit from reordering revisions. So send
        # out revisions in store order.
        # TODO the API would be cleaner if this were controlled by the
        # store producing the deltas.
        revs = sorted(cl.rev(n) for n in nodes)
    elif ellipses:
        revs = _sortnodesellipsis(store, nodes, cl, lookup)
    else:
        revs = _sortnodesnormal(store, nodes, allowreorder)

    # In the first pass, collect info about the deltas we'll be
    # generating.
    requests = []

    # Add the parent of the first rev so each iteration below has a
    # (prev, curr) pair; only ``curr`` entries generate requests.
    revs.insert(0, store.parentrevs(revs[0])[0])

    for i in pycompat.xrange(len(revs) - 1):
        prev = revs[i]
        curr = revs[i + 1]

        node = store.node(curr)
        linknode = lookup(node)
        p1node, p2node = store.parents(node)

        if ellipses:
            linkrev = cl.rev(linknode)
            clrevtolocalrev[linkrev] = curr

            # This is a node to send in full, because the changeset it
            # corresponds to was a full changeset.
            if linknode in fullclnodes:
                requests.append(revisiondeltarequest(
                    node=node,
                    p1node=p1node,
                    p2node=p2node,
                    linknode=linknode,
                    basenode=None,
                ))

            elif linkrev not in precomputedellipsis:
                # Linked changeset is neither full nor an ellipsis rev,
                # so this revision is not sent at all.
                pass
            else:
                requests.append(_makenarrowdeltarequest(
                    cl, store, ischangelog, curr, node, linkrev, linknode,
                    clrevtolocalrev, fullclnodes,
                    precomputedellipsis))
        else:
            # Non-ellipsis mode: optionally pin the delta base to the
            # previous rev in the group (cg1 compatibility).
            requests.append(revisiondeltarequest(
                node=node,
                p1node=p1node,
                p2node=p2node,
                linknode=linknode,
                basenode=store.node(prev) if forcedeltaparentprev else None,
            ))

    # We expect the first pass to be fast, so we only engage the progress
    # meter for constructing the revision deltas.
    progress = None
    if units is not None:
        progress = repo.ui.makeprogress(_('bundling'), unit=units,
                                        total=len(requests))

    for i, delta in enumerate(store.emitrevisiondeltas(requests)):
        if progress:
            progress.update(i + 1)

        yield delta

    if progress:
        progress.complete()
758
758
759 class cgpacker(object):
759 class cgpacker(object):
760 def __init__(self, repo, filematcher, version, allowreorder,
760 def __init__(self, repo, filematcher, version, allowreorder,
761 builddeltaheader, manifestsend,
761 builddeltaheader, manifestsend,
762 forcedeltaparentprev=False,
762 forcedeltaparentprev=False,
763 bundlecaps=None, ellipses=False,
763 bundlecaps=None, ellipses=False,
764 shallow=False, ellipsisroots=None, fullnodes=None):
764 shallow=False, ellipsisroots=None, fullnodes=None):
765 """Given a source repo, construct a bundler.
765 """Given a source repo, construct a bundler.
766
766
767 filematcher is a matcher that matches on files to include in the
767 filematcher is a matcher that matches on files to include in the
768 changegroup. Used to facilitate sparse changegroups.
768 changegroup. Used to facilitate sparse changegroups.
769
769
770 allowreorder controls whether reordering of revisions is allowed.
770 allowreorder controls whether reordering of revisions is allowed.
771 This value is used when ``bundle.reorder`` is ``auto`` or isn't
771 This value is used when ``bundle.reorder`` is ``auto`` or isn't
772 set.
772 set.
773
773
774 forcedeltaparentprev indicates whether delta parents must be against
774 forcedeltaparentprev indicates whether delta parents must be against
775 the previous revision in a delta group. This should only be used for
775 the previous revision in a delta group. This should only be used for
776 compatibility with changegroup version 1.
776 compatibility with changegroup version 1.
777
777
778 builddeltaheader is a callable that constructs the header for a group
778 builddeltaheader is a callable that constructs the header for a group
779 delta.
779 delta.
780
780
781 manifestsend is a chunk to send after manifests have been fully emitted.
781 manifestsend is a chunk to send after manifests have been fully emitted.
782
782
783 ellipses indicates whether ellipsis serving mode is enabled.
783 ellipses indicates whether ellipsis serving mode is enabled.
784
784
785 bundlecaps is optional and can be used to specify the set of
785 bundlecaps is optional and can be used to specify the set of
786 capabilities which can be used to build the bundle. While bundlecaps is
786 capabilities which can be used to build the bundle. While bundlecaps is
787 unused in core Mercurial, extensions rely on this feature to communicate
787 unused in core Mercurial, extensions rely on this feature to communicate
788 capabilities to customize the changegroup packer.
788 capabilities to customize the changegroup packer.
789
789
790 shallow indicates whether shallow data might be sent. The packer may
790 shallow indicates whether shallow data might be sent. The packer may
791 need to pack file contents not introduced by the changes being packed.
791 need to pack file contents not introduced by the changes being packed.
792
792
793 fullnodes is the set of changelog nodes which should not be ellipsis
793 fullnodes is the set of changelog nodes which should not be ellipsis
794 nodes. We store this rather than the set of nodes that should be
794 nodes. We store this rather than the set of nodes that should be
795 ellipsis because for very large histories we expect this to be
795 ellipsis because for very large histories we expect this to be
796 significantly smaller.
796 significantly smaller.
797 """
797 """
798 assert filematcher
798 assert filematcher
799 self._filematcher = filematcher
799 self._filematcher = filematcher
800
800
801 self.version = version
801 self.version = version
802 self._forcedeltaparentprev = forcedeltaparentprev
802 self._forcedeltaparentprev = forcedeltaparentprev
803 self._builddeltaheader = builddeltaheader
803 self._builddeltaheader = builddeltaheader
804 self._manifestsend = manifestsend
804 self._manifestsend = manifestsend
805 self._ellipses = ellipses
805 self._ellipses = ellipses
806
806
807 # Set of capabilities we can use to build the bundle.
807 # Set of capabilities we can use to build the bundle.
808 if bundlecaps is None:
808 if bundlecaps is None:
809 bundlecaps = set()
809 bundlecaps = set()
810 self._bundlecaps = bundlecaps
810 self._bundlecaps = bundlecaps
811 self._isshallow = shallow
811 self._isshallow = shallow
812 self._fullclnodes = fullnodes
812 self._fullclnodes = fullnodes
813
813
814 # Maps ellipsis revs to their roots at the changelog level.
814 # Maps ellipsis revs to their roots at the changelog level.
815 self._precomputedellipsis = ellipsisroots
815 self._precomputedellipsis = ellipsisroots
816
816
817 # experimental config: bundle.reorder
817 # experimental config: bundle.reorder
818 reorder = repo.ui.config('bundle', 'reorder')
818 reorder = repo.ui.config('bundle', 'reorder')
819 if reorder == 'auto':
819 if reorder == 'auto':
820 self._reorder = allowreorder
820 self._reorder = allowreorder
821 else:
821 else:
822 self._reorder = stringutil.parsebool(reorder)
822 self._reorder = stringutil.parsebool(reorder)
823
823
824 self._repo = repo
824 self._repo = repo
825
825
826 if self._repo.ui.verbose and not self._repo.ui.debugflag:
826 if self._repo.ui.verbose and not self._repo.ui.debugflag:
827 self._verbosenote = self._repo.ui.note
827 self._verbosenote = self._repo.ui.note
828 else:
828 else:
829 self._verbosenote = lambda s: None
829 self._verbosenote = lambda s: None
830
830
    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        """Yield a sequence of changegroup byte chunks.

        Emits the changelog group, then the manifest groups (including
        per-tree groups for changegroup3), then one group per changed
        file, each terminated by a close chunk.
        """

        repo = self._repo
        cl = repo.changelog

        self._verbosenote(_('uncompressed size of bundle content:\n'))
        size = 0

        clstate, deltas = self._generatechangelog(cl, clnodes)
        for delta in deltas:
            for chunk in _revisiondeltatochunks(delta, self._builddeltaheader):
                size += len(chunk)
                yield chunk

        close = closechunk()
        size += len(close)
        yield closechunk()

        self._verbosenote(_('%8.i (changelog)\n') % size)

        # State collected as a side effect of consuming the changelog
        # delta stream above.
        clrevorder = clstate['clrevorder']
        mfs = clstate['mfs']
        changedfiles = clstate['changedfiles']

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the filelogs
        # are walked in revlog order.
        #
        # When taking the slowpath with reorder=None and the manifest revlog
        # uses generaldelta, the manifest may be walked in the "wrong" order.
        # Without 'clrevorder', we would get an incorrect linkrev (see fix in
        # cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta, so
        # it is only reordered when reorder=True. To handle this case, we
        # simply take the slowpath, which already has the 'clrevorder' logic.
        # This was also fixed in cc0ff93d0c0c.
        fastpathlinkrev = fastpathlinkrev and not self._reorder
        # Treemanifests don't work correctly with fastpathlinkrev
        # either, because we don't discover which directory nodes to
        # send along with files. This could probably be fixed.
        fastpathlinkrev = fastpathlinkrev and (
            'treemanifest' not in repo.requirements)

        fnodes = {}  # needed file nodes

        size = 0
        it = self.generatemanifests(
            commonrevs, clrevorder, fastpathlinkrev, mfs, fnodes, source,
            clstate['clrevtomanifestrev'])

        # A truthy ``tree`` names a tree manifest (directory) group, which
        # only changegroup3 can carry - hence the version assert.
        for tree, deltas in it:
            if tree:
                assert self.version == b'03'
                chunk = _fileheader(tree)
                size += len(chunk)
                yield chunk

            for delta in deltas:
                chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
                for chunk in chunks:
                    size += len(chunk)
                    yield chunk

            close = closechunk()
            size += len(close)
            yield close

        self._verbosenote(_('%8.i (manifests)\n') % size)
        yield self._manifestsend

        mfdicts = None
        if self._ellipses and self._isshallow:
            mfdicts = [(self._repo.manifestlog[n].read(), lr)
                       for (n, lr) in mfs.iteritems()]

        mfs.clear()
        clrevs = set(cl.rev(x) for x in clnodes)

        it = self.generatefiles(changedfiles, commonrevs,
                                source, mfdicts, fastpathlinkrev,
                                fnodes, clrevs)

        for path, deltas in it:
            h = _fileheader(path)
            size = len(h)
            yield h

            for delta in deltas:
                chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
                for chunk in chunks:
                    size += len(chunk)
                    yield chunk

            close = closechunk()
            size += len(close)
            yield close

            self._verbosenote(_('%8.i %s\n') % (size, path))

        # Final close chunk terminates the file section and the stream.
        yield closechunk()

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)
938
938
939 def _generatechangelog(self, cl, nodes):
939 def _generatechangelog(self, cl, nodes):
940 """Generate data for changelog chunks.
940 """Generate data for changelog chunks.
941
941
942 Returns a 2-tuple of a dict containing state and an iterable of
942 Returns a 2-tuple of a dict containing state and an iterable of
943 byte chunks. The state will not be fully populated until the
943 byte chunks. The state will not be fully populated until the
944 chunk stream has been fully consumed.
944 chunk stream has been fully consumed.
945 """
945 """
946 clrevorder = {}
946 clrevorder = {}
947 mfs = {} # needed manifests
947 mfs = {} # needed manifests
948 mfl = self._repo.manifestlog
948 mfl = self._repo.manifestlog
949 # TODO violates storage abstraction.
949 # TODO violates storage abstraction.
950 mfrevlog = mfl._revlog
950 mfrevlog = mfl._revlog
951 changedfiles = set()
951 changedfiles = set()
952 clrevtomanifestrev = {}
952 clrevtomanifestrev = {}
953
953
954 # Callback for the changelog, used to collect changed files and
954 # Callback for the changelog, used to collect changed files and
955 # manifest nodes.
955 # manifest nodes.
956 # Returns the linkrev node (identity in the changelog case).
956 # Returns the linkrev node (identity in the changelog case).
957 def lookupcl(x):
957 def lookupcl(x):
958 c = cl.read(x)
958 c = cl.read(x)
959 clrevorder[x] = len(clrevorder)
959 clrevorder[x] = len(clrevorder)
960
960
961 if self._ellipses:
961 if self._ellipses:
962 # Only update mfs if x is going to be sent. Otherwise we
962 # Only update mfs if x is going to be sent. Otherwise we
963 # end up with bogus linkrevs specified for manifests and
963 # end up with bogus linkrevs specified for manifests and
964 # we skip some manifest nodes that we should otherwise
964 # we skip some manifest nodes that we should otherwise
965 # have sent.
965 # have sent.
966 if (x in self._fullclnodes
966 if (x in self._fullclnodes
967 or cl.rev(x) in self._precomputedellipsis):
967 or cl.rev(x) in self._precomputedellipsis):
968 n = c[0]
968 n = c[0]
969 # Record the first changeset introducing this manifest
969 # Record the first changeset introducing this manifest
970 # version.
970 # version.
971 mfs.setdefault(n, x)
971 mfs.setdefault(n, x)
972 # Set this narrow-specific dict so we have the lowest
972 # Set this narrow-specific dict so we have the lowest
973 # manifest revnum to look up for this cl revnum. (Part of
973 # manifest revnum to look up for this cl revnum. (Part of
974 # mapping changelog ellipsis parents to manifest ellipsis
974 # mapping changelog ellipsis parents to manifest ellipsis
975 # parents)
975 # parents)
976 clrevtomanifestrev.setdefault(cl.rev(x), mfrevlog.rev(n))
976 clrevtomanifestrev.setdefault(cl.rev(x), mfrevlog.rev(n))
977 # We can't trust the changed files list in the changeset if the
977 # We can't trust the changed files list in the changeset if the
978 # client requested a shallow clone.
978 # client requested a shallow clone.
979 if self._isshallow:
979 if self._isshallow:
980 changedfiles.update(mfl[c[0]].read().keys())
980 changedfiles.update(mfl[c[0]].read().keys())
981 else:
981 else:
982 changedfiles.update(c[3])
982 changedfiles.update(c[3])
983 else:
983 else:
984
984
985 n = c[0]
985 n = c[0]
986 # record the first changeset introducing this manifest version
986 # record the first changeset introducing this manifest version
987 mfs.setdefault(n, x)
987 mfs.setdefault(n, x)
988 # Record a complete list of potentially-changed files in
988 # Record a complete list of potentially-changed files in
989 # this manifest.
989 # this manifest.
990 changedfiles.update(c[3])
990 changedfiles.update(c[3])
991
991
992 return x
992 return x
993
993
994 state = {
994 state = {
995 'clrevorder': clrevorder,
995 'clrevorder': clrevorder,
996 'mfs': mfs,
996 'mfs': mfs,
997 'changedfiles': changedfiles,
997 'changedfiles': changedfiles,
998 'clrevtomanifestrev': clrevtomanifestrev,
998 'clrevtomanifestrev': clrevtomanifestrev,
999 }
999 }
1000
1000
1001 gen = deltagroup(
1001 gen = deltagroup(
1002 self._repo, cl, nodes, True, lookupcl,
1002 self._repo, cl, nodes, True, lookupcl,
1003 self._forcedeltaparentprev,
1003 self._forcedeltaparentprev,
1004 # Reorder settings are currently ignored for changelog.
1004 # Reorder settings are currently ignored for changelog.
1005 True,
1005 True,
1006 ellipses=self._ellipses,
1006 ellipses=self._ellipses,
1007 units=_('changesets'),
1007 units=_('changesets'),
1008 clrevtolocalrev={},
1008 clrevtolocalrev={},
1009 fullclnodes=self._fullclnodes,
1009 fullclnodes=self._fullclnodes,
1010 precomputedellipsis=self._precomputedellipsis)
1010 precomputedellipsis=self._precomputedellipsis)
1011
1011
1012 return state, gen
1012 return state, gen
1013
1013
    def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
                          fnodes, source, clrevtolocalrev):
        """Returns an iterator of changegroup chunks containing manifests.

        `source` is unused here, but is used by extensions like remotefilelog to
        change what is sent based in pulls vs pushes, etc.
        """
        repo = self._repo
        mfl = repo.manifestlog
        # NOTE(review): reaches through manifestlog to its revlog for
        # per-directory storage access; violates the storage abstraction.
        dirlog = mfl._revlog.dirlog
        # Work queue of tree path -> {manifest node: linknode}; '' is the
        # root manifest. Entries for subdirectories are added as a side
        # effect of lookupmflinknode() below while the root is emitted.
        tmfnodes = {'': mfs}

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def makelookupmflinknode(tree, nodes):
            if fastpathlinkrev:
                # The fast path only ever applies to the root manifest.
                assert not tree
                return mfs.__getitem__

            def lookupmflinknode(x):
                """Callback for looking up the linknode for manifests.

                Returns the linkrev node for the specified manifest.

                SIDE EFFECT:

                1) fclnodes gets populated with the list of relevant
                   file nodes if we're not using fastpathlinkrev
                2) When treemanifests are in use, collects treemanifest nodes
                   to send

                Note that this means manifests must be completely sent to
                the client before you can trust the list of files and
                treemanifests to send.
                """
                clnode = nodes[x]
                mdata = mfl.get(tree, x).readfast(shallow=True)
                for p, n, fl in mdata.iterentries():
                    if fl == 't': # subdirectory manifest
                        subtree = tree + p + '/'
                        tmfclnodes = tmfnodes.setdefault(subtree, {})
                        tmfclnode = tmfclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[tmfclnode]:
                            # Keep the earliest changeset introducing n.
                            tmfclnodes[n] = clnode
                    else:
                        f = tree + p
                        fclnodes = fnodes.setdefault(f, {})
                        fclnode = fclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            fclnodes[n] = clnode
                return clnode
            return lookupmflinknode

        while tmfnodes:
            tree, nodes = tmfnodes.popitem()
            store = dirlog(tree)

            if not self._filematcher.visitdir(store._dir[:-1] or '.'):
                # Narrow clients are not interested in this directory.
                prunednodes = []
            else:
                # Drop nodes the receiver already has (linkrev in common).
                frev, flr = store.rev, store.linkrev
                prunednodes = [n for n in nodes
                               if flr(frev(n)) not in commonrevs]

            if tree and not prunednodes:
                continue

            lookupfn = makelookupmflinknode(tree, nodes)

            deltas = deltagroup(
                self._repo, store, prunednodes, False, lookupfn,
                self._forcedeltaparentprev, self._reorder,
                ellipses=self._ellipses,
                units=_('manifests'),
                clrevtolocalrev=clrevtolocalrev,
                fullclnodes=self._fullclnodes,
                precomputedellipsis=self._precomputedellipsis)

            yield tree, deltas
1094
1094
    # The 'source' parameter is useful for extensions
    def generatefiles(self, changedfiles, commonrevs, source,
                      mfdicts, fastpathlinkrev, fnodes, clrevs):
        """Yield (filename, deltas) pairs for files touched by the outgoing
        changesets, restricted to paths accepted by the file matcher."""
        changedfiles = list(filter(self._filematcher, changedfiles))

        if not fastpathlinkrev:
            def normallinknodes(unused, fname):
                # Linkrev nodes were already collected while the manifests
                # were generated (see generatemanifests).
                return fnodes.get(fname, {})
        else:
            cln = self._repo.changelog.node

            def normallinknodes(store, fname):
                # Fast path: derive linknodes directly from the filelog's
                # linkrevs, keeping only revisions linked to sent changesets.
                flinkrev = store.linkrev
                fnode = store.node
                revs = ((r, flinkrev(r)) for r in store)
                return dict((fnode(r), cln(lr))
                            for r, lr in revs if lr in clrevs)

        clrevtolocalrev = {}

        if self._isshallow:
            # In a shallow clone, the linknodes callback needs to also include
            # those file nodes that are in the manifests we sent but weren't
            # introduced by those manifests.
            commonctxs = [self._repo[c] for c in commonrevs]
            clrev = self._repo.changelog.rev

            # Defining this function has a side-effect of overriding the
            # function of the same name that was passed in as an argument.
            # TODO have caller pass in appropriate function.
            def linknodes(flog, fname):
                for c in commonctxs:
                    try:
                        fnode = c.filenode(fname)
                        clrevtolocalrev[c.rev()] = flog.rev(fnode)
                    except error.ManifestLookupError:
                        # File absent from this common changeset; skip it.
                        pass
                links = normallinknodes(flog, fname)
                if len(links) != len(mfdicts):
                    for mf, lr in mfdicts:
                        fnode = mf.get(fname, None)
                        if fnode in links:
                            # Prefer the oldest known linkrev for the node.
                            links[fnode] = min(links[fnode], lr, key=clrev)
                        elif fnode:
                            links[fnode] = lr
                return links
        else:
            linknodes = normallinknodes

        repo = self._repo
        progress = repo.ui.makeprogress(_('bundling'), unit=_('files'),
                                        total=len(changedfiles))
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise error.Abort(_("empty or missing file data for %s") %
                                  fname)

            clrevtolocalrev.clear()

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            frev, flr = filerevlog.rev, filerevlog.linkrev
            # Skip file revisions the receiver already has.
            filenodes = [n for n in linkrevnodes
                         if flr(frev(n)) not in commonrevs]

            if not filenodes:
                continue

            progress.update(i + 1, item=fname)

            deltas = deltagroup(
                self._repo, filerevlog, filenodes, False, lookupfilelog,
                self._forcedeltaparentprev, self._reorder,
                ellipses=self._ellipses,
                clrevtolocalrev=clrevtolocalrev,
                fullclnodes=self._fullclnodes,
                precomputedellipsis=self._precomputedellipsis)

            yield fname, deltas

        progress.complete()
1181
1181
def _makecg1packer(repo, filematcher, bundlecaps, ellipses=False,
                   shallow=False, ellipsisroots=None, fullnodes=None):
    """Build a cgpacker emitting version 01 changegroups."""
    def builddeltaheader(d):
        # cg1 headers carry no explicit delta base node: deltas are always
        # against the previous revision (forcedeltaparentprev below).
        return _CHANGEGROUPV1_DELTA_HEADER.pack(
            d.node, d.p1node, d.p2node, d.linknode)

    return cgpacker(repo, filematcher, b'01',
                    allowreorder=None,
                    builddeltaheader=builddeltaheader,
                    manifestsend=b'',
                    forcedeltaparentprev=True,
                    bundlecaps=bundlecaps,
                    ellipses=ellipses,
                    shallow=shallow,
                    ellipsisroots=ellipsisroots,
                    fullnodes=fullnodes)
1197
1197
def _makecg2packer(repo, filematcher, bundlecaps, ellipses=False,
                   shallow=False, ellipsisroots=None, fullnodes=None):
    """Build a cgpacker emitting version 02 changegroups."""
    def builddeltaheader(d):
        # Unlike cg1, cg2 headers carry an explicit delta base node.
        return _CHANGEGROUPV2_DELTA_HEADER.pack(
            d.node, d.p1node, d.p2node, d.basenode, d.linknode)

    # Since generaldelta is directly supported by cg2, reordering
    # generally doesn't help, so we disable it by default (treating
    # bundle.reorder=auto just like bundle.reorder=False).
    return cgpacker(repo, filematcher, b'02',
                    allowreorder=False,
                    builddeltaheader=builddeltaheader,
                    manifestsend=b'',
                    bundlecaps=bundlecaps,
                    ellipses=ellipses,
                    shallow=shallow,
                    ellipsisroots=ellipsisroots,
                    fullnodes=fullnodes)
1215
1215
def _makecg3packer(repo, filematcher, bundlecaps, ellipses=False,
                   shallow=False, ellipsisroots=None, fullnodes=None):
    """Build a cgpacker emitting version 03 changegroups."""
    def builddeltaheader(d):
        # cg3 extends the cg2 header with a 16-bit revlog flags field.
        return _CHANGEGROUPV3_DELTA_HEADER.pack(
            d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags)

    return cgpacker(repo, filematcher, b'03',
                    allowreorder=False,
                    builddeltaheader=builddeltaheader,
                    # cg3 terminates the manifest section with a close chunk
                    # so sub-manifest groups can follow.
                    manifestsend=closechunk(),
                    bundlecaps=bundlecaps,
                    ellipses=ellipses,
                    shallow=shallow,
                    ellipsisroots=ellipsisroots,
                    fullnodes=fullnodes)
1230
1230
# Maps changegroup version -> (packer factory, unpacker class).
_packermap = {'01': (_makecg1packer, cg1unpacker),
              # cg2 adds support for exchanging generaldelta
              '02': (_makecg2packer, cg2unpacker),
              # cg3 adds support for exchanging revlog flags and treemanifests
              '03': (_makecg3packer, cg3unpacker),
}
1237
1237
def allsupportedversions(repo):
    """Return the set of all changegroup versions this code knows about,
    pruned of experimental ones the repo has not opted into."""
    versions = set(_packermap.keys())
    # '03' is gated behind explicit opt-in or an existing treemanifest repo.
    needv03 = (repo.ui.configbool('experimental', 'changegroup3')
               or repo.ui.configbool('experimental', 'treemanifest')
               or 'treemanifest' in repo.requirements)
    if not needv03:
        versions.discard('03')
    return versions
1245
1245
# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    # Incoming bundles are not restricted beyond the full supported set:
    # any known version can be applied locally (contrast with
    # supportedoutgoingversions, which prunes by repo requirements).
    return allsupportedversions(repo)
1249
1249
# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    """Return the set of changegroup versions this repo may generate."""
    versions = allsupportedversions(repo)
    if 'treemanifest' in repo.requirements:
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        versions -= {'01', '02'}
    if repository.NARROW_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # support that for stripping and unbundling to work.
        versions -= {'01', '02'}
    if LFS_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # mark LFS entries with REVIDX_EXTSTORED.
        versions -= {'01', '02'}

    return versions
1273
1273
def localversion(repo):
    """Return the best changegroup version for local-use bundles.

    Used for bundles that are meant to be used locally, such as those from
    strip and shelve, and temporary bundles.
    """
    candidates = supportedoutgoingversions(repo)
    return max(candidates)
1278
1278
def safeversion(repo):
    """Return the smallest version it's safe to assume clients support.

    For example, all hg versions that support generaldelta also support
    changegroup 02.
    """
    versions = supportedoutgoingversions(repo)
    if 'generaldelta' in repo.requirements:
        versions.discard('01')
    # At least one version must survive the pruning above.
    assert versions
    return min(versions)
1288
1288
def getbundler(version, repo, bundlecaps=None, filematcher=None,
               ellipses=False, shallow=False, ellipsisroots=None,
               fullnodes=None):
    """Instantiate the changegroup packer registered for ``version``."""
    assert version in supportedoutgoingversions(repo)

    if filematcher is None:
        # Default to matching every file in the repository.
        filematcher = matchmod.alwaysmatcher(repo.root, '')

    if version == '01' and not filematcher.always():
        raise error.ProgrammingError('version 01 changegroups do not support '
                                     'sparse file matchers')

    if ellipses and version in (b'01', b'02'):
        raise error.Abort(
            _('ellipsis nodes require at least cg3 on client and server, '
              'but negotiated version %s') % version)

    # Requested files could include files not in the local store. So
    # filter those out.
    filematcher = matchmod.intersectmatchers(repo.narrowmatch(),
                                             filematcher)

    makepacker = _packermap[version][0]
    return makepacker(repo, filematcher, bundlecaps, ellipses=ellipses,
                      shallow=shallow, ellipsisroots=ellipsisroots,
                      fullnodes=fullnodes)
1315
1315
def getunbundler(version, fh, alg, extras=None):
    # Instantiate the unpacker class registered for this changegroup
    # version; ``fh`` is the stream and ``alg`` the compression algorithm.
    return _packermap[version][1](fh, alg, extras=extras)
1318
1318
def _changegroupinfo(repo, nodes, source):
    """Report how many changesets are bundled; list them when debugging."""
    ui = repo.ui
    if ui.verbose or source == 'bundle':
        ui.status(_("%d changesets found\n") % len(nodes))
    if ui.debugflag:
        ui.debug("list of changesets:\n")
        for n in nodes:
            ui.debug("%s\n" % hex(n))
1326
1326
def makechangegroup(repo, outgoing, version, source, fastpath=False,
                    bundlecaps=None):
    """Return an unbundler wrapping a freshly generated changegroup stream."""
    cgstream = makestream(repo, outgoing, version, source,
                          fastpath=fastpath, bundlecaps=bundlecaps)
    extras = {'clcount': len(outgoing.missing)}
    return getunbundler(version, util.chunkbuffer(cgstream), None, extras)
1333
1333
def makestream(repo, outgoing, version, source, fastpath=False,
               bundlecaps=None, filematcher=None):
    """Generate the raw changegroup data stream for ``outgoing`` changesets."""
    bundler = getbundler(version, repo, bundlecaps=bundlecaps,
                         filematcher=filematcher)

    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered
    # heads have been requested (since we then know there all linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath
    if not fastpathlinkrev:
        fastpathlinkrev = (repo.filtername is None
                           and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
1353
1353
def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    """Apply the file portion of a changegroup stream to the repo.

    ``revmap`` resolves changelog nodes for linkrevs, ``trp`` is the
    transaction proxy, ``expectedfiles`` is the file count announced by the
    stream, and ``needfiles`` maps filenames to the set of file nodes the
    incoming changesets require. Returns a (revisions, files) count tuple.
    """
    revisions = 0
    files = 0
    progress = repo.ui.makeprogress(_('files'), unit=_('files'),
                                    total=expectedfiles)
    # An empty header dict marks the end of the file section of the stream.
    for chunkdata in iter(source.filelogheader, {}):
        files += 1
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        progress.increment()
        fl = repo.file(f)
        # Remember the pre-add length so we can count added revisions and
        # verify only newly added nodes below.
        o = len(fl)
        try:
            deltas = source.deltaiter()
            if not fl.addgroup(deltas, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        if f in needfiles:
            needs = needfiles[f]
            for new in pycompat.xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    # A node we never asked for indicates a bogus stream.
                    raise error.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    progress.complete()

    # Anything left in needfiles was expected but not received; verify it
    # is at least already present locally before declaring success.
    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files
General Comments 0
You need to be logged in to leave comments. Login now