##// END OF EJS Templates
changegroup: port to emitrevisions() (issue5976)...
Gregory Szorc -
r39901:31b7e8e7 default
parent child Browse files
Show More
@@ -1,1389 +1,1381 b''
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import os
10 import os
11 import struct
11 import struct
12 import weakref
12 import weakref
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import (
15 from .node import (
16 hex,
16 hex,
17 nullid,
17 nullid,
18 nullrev,
18 nullrev,
19 short,
19 short,
20 )
20 )
21
21
22 from .thirdparty import (
22 from .thirdparty import (
23 attr,
23 attr,
24 )
24 )
25
25
26 from . import (
26 from . import (
27 dagop,
28 error,
27 error,
29 match as matchmod,
28 match as matchmod,
30 mdiff,
29 mdiff,
31 phases,
30 phases,
32 pycompat,
31 pycompat,
33 repository,
32 repository,
33 revlog,
34 util,
34 util,
35 )
35 )
36
36
37 from .utils import (
37 from .utils import (
38 interfaceutil,
38 interfaceutil,
39 )
39 )
40
40
# Framing structs for the per-revision delta headers of each changegroup
# wire version.  cg1 carries node/p1/p2/linknode; cg2 appends the delta
# base node; cg3 additionally appends a big-endian 16-bit flags field
# (see the matching _deltaheader() implementations below).
_CHANGEGROUPV1_DELTA_HEADER = struct.Struct("20s20s20s20s")
_CHANGEGROUPV2_DELTA_HEADER = struct.Struct("20s20s20s20s20s")
_CHANGEGROUPV3_DELTA_HEADER = struct.Struct(">20s20s20s20s20sH")

# Repository requirement string for the LFS extension.
LFS_REQUIREMENT = 'lfs'

# Module-level alias so hot read loops avoid the attribute lookup.
readexactly = util.readexactly
def getchunk(stream):
    """Read the next framed chunk from ``stream`` and return its payload.

    The frame is a 4-byte big-endian length (which includes the 4 header
    bytes themselves) followed by the payload.  A zero length marks a
    terminator chunk and yields an empty string; any other length that
    cannot cover the header is rejected.
    """
    header = readexactly(stream, 4)
    length = struct.unpack(">l", header)[0]
    if length > 4:
        return readexactly(stream, length - 4)
    if length:
        raise error.Abort(_("invalid chunk length %d") % length)
    return ""
58
58
def chunkheader(length):
    """Return the 4-byte framing header for a payload of ``length`` bytes.

    The encoded value includes the header's own four bytes, matching
    what getchunk() expects on the read side.
    """
    return struct.pack(">l", 4 + length)
62
62
def closechunk():
    """Return the terminator frame: a header announcing a zero-length chunk."""
    return struct.pack(">l", 0)
66
66
def _fileheader(path):
    """Return the changegroup chunk header announcing entries for ``path``."""
    header = chunkheader(len(path))
    return header + path
70
70
def writechunks(ui, chunks, filename, vfs=None):
    """Write ``chunks`` to a file and return the filename used.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created; on failure
    before all chunks are written, that temporary file is removed again.
    """
    fh = None
    # Path of a partially written temporary file to delete on failure.
    cleanup = None
    try:
        if not filename:
            fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, r"wb")
            cleanup = filename
        elif vfs:
            fh = vfs.open(filename, "wb")
        else:
            # Use a generous buffer size; the platform default (commonly
            # 4k on Linux) is too small for bundle-sized writes.
            fh = open(filename, "wb", 131072)
        for chunk in chunks:
            fh.write(chunk)
        # All chunks written successfully - keep the file.
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
104
104
class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        # ``alg`` names the bundle compression engine; None means
        # uncompressed ('UN').
        if alg is None:
            alg = 'UN'
        if alg not in util.compengines.supportedbundletypes:
            raise error.Abort(_('unknown stream compression type: %s')
                              % alg)
        if alg == 'BZ':
            alg = '_truncatedBZ'

        compengine = util.compengines.forbundletype(alg)
        self._stream = compengine.decompressorreader(fh)
        self._type = alg
        self.extras = extras or {}
        # Optional zero-argument progress callback, invoked once per
        # non-empty chunk read by _chunklength().
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None and self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        """Read a chunk's 4-byte length prefix and return its payload size.

        Returns 0 for a terminator chunk; aborts on a corrupt length.
        """
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        """Decode a cg1 delta header tuple.

        cg1 has no explicit delta base: deltas are against p1 for the
        first chunk and against the previous chunk's node afterwards.
        """
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        """Read the next delta chunk.

        Returns {} at the end of the current group, otherwise a
        (node, p1, p2, cs, deltabase, delta, flags) tuple.
        """
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = self.deltaheader.unpack(headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return (node, p1, p2, cs, deltabase, delta, flags)

    def getchunks(self):
        """returns all the chunks contains in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parse the changegroup data, otherwise it will
        block in case of sshrepo because it don't know the end of the stream.
        """
        # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
        # and a list of filelogs. For changegroup 3, we expect 4 parts:
        # changelog, manifestlog, a list of tree manifestlogs, and a list of
        # filelogs.
        #
        # Changelog and manifestlog parts are terminated with empty chunks. The
        # tree and file parts are a list of entry sections. Each entry section
        # is a series of chunks terminating in an empty chunk. The list of these
        # entry sections is terminated in yet another empty chunk, so we know
        # we've reached the end of the tree/file list when we reach an empty
        # chunk that was proceeded by no non-empty chunks.

        parts = 0
        while parts < 2 + self._grouplistcount:
            noentries = True
            while True:
                chunk = getchunk(self)
                if not chunk:
                    # The first two empty chunks represent the end of the
                    # changelog and the manifestlog portions. The remaining
                    # empty chunks represent either A) the end of individual
                    # tree or file entries in the file list, or B) the end of
                    # the entire list. It's the end of the entire list if there
                    # were no entries (i.e. noentries is True).
                    if parts < 2:
                        parts += 1
                    elif noentries:
                        parts += 1
                    break
                noentries = False
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

    def _unpackmanifests(self, repo, revmap, trp, prog):
        """Read the manifest group and add it to the repo's manifest storage."""
        self.callback = prog.increment
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        deltas = self.deltaiter()
        repo.manifestlog.getstorage(b'').addgroup(deltas, revmap, trp)
        prog.complete()
        self.callback = None

    def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
              expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        try:
            # The transaction may already carry source information. In this
            # case we use the top level data. We overwrite the argument
            # because we need to use the top level value (if they exist)
            # in this function.
            srctype = tr.hookargs.setdefault('source', srctype)
            url = tr.hookargs.setdefault('url', url)
            repo.hook('prechangegroup',
                      throw=True, **pycompat.strkwargs(tr.hookargs))

            # write changelog data to temp files so concurrent readers
            # will not see an inconsistent view
            cl = repo.changelog
            cl.delayupdate(tr)
            oldheads = set(cl.heads())

            trp = weakref.proxy(tr)
            # pull off the changeset group
            repo.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            progress = repo.ui.makeprogress(_('changesets'), unit=_('chunks'),
                                            total=expectedtotal)
            self.callback = progress.increment

            efiles = set()
            def onchangelog(cl, node):
                efiles.update(cl.readfiles(node))

            self.changelogheader()
            deltas = self.deltaiter()
            cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog)
            efiles = len(efiles)

            if not cgnodes:
                repo.ui.develwarn('applied empty changelog from changegroup',
                                  config='warn-empty-changegroup')
            clend = len(cl)
            changesets = clend - clstart
            progress.complete()
            self.callback = None

            # pull off the manifest group
            repo.ui.status(_("adding manifests\n"))
            # We know that we'll never have more manifests than we had
            # changesets.
            progress = repo.ui.makeprogress(_('manifests'), unit=_('chunks'),
                                            total=changesets)
            self._unpackmanifests(repo, revmap, trp, progress)

            needfiles = {}
            if repo.ui.configbool('server', 'validate'):
                cl = repo.changelog
                ml = repo.manifestlog
                # validate incoming csets have their manifests
                for cset in pycompat.xrange(clstart, clend):
                    mfnode = cl.changelogrevision(cset).manifest
                    mfest = ml[mfnode].readdelta()
                    # store file cgnodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            repo.ui.status(_("adding file changes\n"))
            newrevs, newfiles = _addchangegroupfiles(
                repo, self, revmap, trp, efiles, needfiles)
            revisions += newrevs
            files += newfiles

            deltaheads = 0
            if oldheads:
                heads = cl.heads()
                deltaheads = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and repo[h].closesbranch():
                        deltaheads -= 1
            htext = ""
            if deltaheads:
                htext = _(" (%+d heads)") % deltaheads

            repo.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            repo.invalidatevolatilesets()

            if changesets > 0:
                if 'node' not in tr.hookargs:
                    tr.hookargs['node'] = hex(cl.node(clstart))
                    tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                    hookargs = dict(tr.hookargs)
                else:
                    hookargs = dict(tr.hookargs)
                    hookargs['node'] = hex(cl.node(clstart))
                    hookargs['node_last'] = hex(cl.node(clend - 1))
                repo.hook('pretxnchangegroup',
                          throw=True, **pycompat.strkwargs(hookargs))

            added = [cl.node(r) for r in pycompat.xrange(clstart, clend)]
            phaseall = None
            if srctype in ('push', 'serve'):
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if changeset already
                # exists locally as secret
                #
                # We should not use added here but the list of all change in
                # the bundle
                if repo.publishing():
                    targetphase = phaseall = phases.public
                else:
                    # closer target phase computation

                    # Those changesets have been pushed from the
                    # outside, their phases are going to be pushed
                    # alongside. Therefor `targetphase` is
                    # ignored.
                    targetphase = phaseall = phases.draft
            if added:
                phases.registernew(repo, tr, targetphase, added)
            if phaseall is not None:
                phases.advanceboundary(repo, tr, phaseall, cgnodes)

            if changesets > 0:

                def runhooks():
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(repo):
                        return

                    repo.hook("changegroup", **pycompat.strkwargs(hookargs))

                    for n in added:
                        args = hookargs.copy()
                        args['node'] = hex(n)
                        del args['node_last']
                        repo.hook("incoming", **pycompat.strkwargs(args))

                    newheads = [h for h in repo.heads()
                                if h not in oldheads]
                    repo.ui.log("incoming",
                                "%d incoming changes - new heads: %s\n",
                                len(added),
                                ', '.join([hex(c[:6]) for c in newheads]))

                tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        if deltaheads < 0:
            ret = deltaheads - 1
        else:
            ret = deltaheads + 1
        return ret

    def deltaiter(self):
        """
        returns an iterator of the deltas in this changegroup

        Useful for passing to the underlying storage system to be stored.
        """
        chain = None
        for chunkdata in iter(lambda: self.deltachunk(chain), {}):
            # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
            yield chunkdata
            chain = chunkdata[0]
447
447
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        # The delta base is carried explicitly in the header, so
        # ``prevnode`` is ignored; cg2 has no per-revision flags.
        node, p1, p2, deltabase, cs = headertuple
        return node, p1, p2, deltabase, cs, 0
463
463
class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        # cg3 headers carry both the delta base and the revlog flags
        # directly, so no computation is needed.
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog):
        """Unpack root manifests, then any per-directory manifest groups."""
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % d)
            deltas = self.deltaiter()
            if not repo.manifestlog.getstorage(d).addgroup(deltas, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))
489
489
class headerlessfixup(object):
    """File-like wrapper that re-prepends already-consumed header bytes.

    Reads are served from the buffered header first; once it is
    exhausted, reads fall through to the underlying file handle.
    """
    def __init__(self, fh, h):
        # ``h`` holds the bytes already read off ``fh``.
        self._h = h
        self._fh = fh
    def read(self, n):
        if not self._h:
            return readexactly(self._fh, n)
        buffered = self._h[:n]
        self._h = self._h[n:]
        if len(buffered) < n:
            buffered += readexactly(self._fh, n - len(buffered))
        return buffered
501
501
@interfaceutil.implementer(repository.irevisiondeltarequest)
@attr.s(slots=True, frozen=True)
class revisiondeltarequest(object):
    """Immutable value object describing a requested revision delta.

    Implements ``repository.irevisiondeltarequest``; frozen/slotted so
    instances are hashable and cheap.
    """
    # Node whose delta is being requested.
    node = attr.ib()
    # Node this revision is linked to (presumably the changelog node;
    # confirm against irevisiondeltarequest).
    linknode = attr.ib()
    # Parent nodes of the requested revision.
    p1node = attr.ib()
    p2node = attr.ib()
    # Node the delta should be generated against.
    basenode = attr.ib()
    # NOTE(review): looks like this flags ellipsis (narrow/shallow)
    # serving - confirm against callers.
    ellipsis = attr.ib(default=False)
511
511
512 def _revisiondeltatochunks(delta, headerfn):
512 def _revisiondeltatochunks(delta, headerfn):
513 """Serialize a revisiondelta to changegroup chunks."""
513 """Serialize a revisiondelta to changegroup chunks."""
514
514
515 # The captured revision delta may be encoded as a delta against
515 # The captured revision delta may be encoded as a delta against
516 # a base revision or as a full revision. The changegroup format
516 # a base revision or as a full revision. The changegroup format
517 # requires that everything on the wire be deltas. So for full
517 # requires that everything on the wire be deltas. So for full
518 # revisions, we need to invent a header that says to rewrite
518 # revisions, we need to invent a header that says to rewrite
519 # data.
519 # data.
520
520
521 if delta.delta is not None:
521 if delta.delta is not None:
522 prefix, data = b'', delta.delta
522 prefix, data = b'', delta.delta
523 elif delta.basenode == nullid:
523 elif delta.basenode == nullid:
524 data = delta.revision
524 data = delta.revision
525 prefix = mdiff.trivialdiffheader(len(data))
525 prefix = mdiff.trivialdiffheader(len(data))
526 else:
526 else:
527 data = delta.revision
527 data = delta.revision
528 prefix = mdiff.replacediffheader(delta.baserevisionsize,
528 prefix = mdiff.replacediffheader(delta.baserevisionsize,
529 len(data))
529 len(data))
530
530
531 meta = headerfn(delta)
531 meta = headerfn(delta)
532
532
533 yield chunkheader(len(meta) + len(prefix) + len(data))
533 yield chunkheader(len(meta) + len(prefix) + len(data))
534 yield meta
534 yield meta
535 if prefix:
535 if prefix:
536 yield prefix
536 yield prefix
537 yield data
537 yield data
538
538
539 def _sortnodesnormal(store, nodes):
540 """Sort nodes for changegroup generation and turn into revnums."""
541 # for generaldelta revlogs, we linearize the revs; this will both be
542 # much quicker and generate a much smaller bundle
543 if store._generaldelta:
544 revs = set(store.rev(n) for n in nodes)
545 return dagop.linearize(revs, store.parentrevs)
546 else:
547 return sorted([store.rev(n) for n in nodes])
548
549 def _sortnodesellipsis(store, nodes, cl, lookup):
539 def _sortnodesellipsis(store, nodes, cl, lookup):
550 """Sort nodes for changegroup generation and turn into revnums."""
540 """Sort nodes for changegroup generation."""
551 # Ellipses serving mode.
541 # Ellipses serving mode.
552 #
542 #
553 # In a perfect world, we'd generate better ellipsis-ified graphs
543 # In a perfect world, we'd generate better ellipsis-ified graphs
554 # for non-changelog revlogs. In practice, we haven't started doing
544 # for non-changelog revlogs. In practice, we haven't started doing
555 # that yet, so the resulting DAGs for the manifestlog and filelogs
545 # that yet, so the resulting DAGs for the manifestlog and filelogs
556 # are actually full of bogus parentage on all the ellipsis
546 # are actually full of bogus parentage on all the ellipsis
557 # nodes. This has the side effect that, while the contents are
547 # nodes. This has the side effect that, while the contents are
558 # correct, the individual DAGs might be completely out of whack in
548 # correct, the individual DAGs might be completely out of whack in
559 # a case like 882681bc3166 and its ancestors (back about 10
549 # a case like 882681bc3166 and its ancestors (back about 10
560 # revisions or so) in the main hg repo.
550 # revisions or so) in the main hg repo.
561 #
551 #
562 # The one invariant we *know* holds is that the new (potentially
552 # The one invariant we *know* holds is that the new (potentially
563 # bogus) DAG shape will be valid if we order the nodes in the
553 # bogus) DAG shape will be valid if we order the nodes in the
564 # order that they're introduced in dramatis personae by the
554 # order that they're introduced in dramatis personae by the
565 # changelog, so what we do is we sort the non-changelog histories
555 # changelog, so what we do is we sort the non-changelog histories
566 # by the order in which they are used by the changelog.
556 # by the order in which they are used by the changelog.
567 key = lambda n: cl.rev(lookup(n))
557 key = lambda n: cl.rev(lookup(n))
568 return [store.rev(n) for n in sorted(nodes, key=key)]
558 return sorted(nodes, key=key)
569
559
570 def _makenarrowdeltarequest(cl, store, ischangelog, rev, node, linkrev,
560 def _resolvenarrowrevisioninfo(cl, store, ischangelog, rev, linkrev,
571 linknode, clrevtolocalrev, fullclnodes,
561 linknode, clrevtolocalrev, fullclnodes,
572 precomputedellipsis):
562 precomputedellipsis):
573 linkparents = precomputedellipsis[linkrev]
563 linkparents = precomputedellipsis[linkrev]
574 def local(clrev):
564 def local(clrev):
575 """Turn a changelog revnum into a local revnum.
565 """Turn a changelog revnum into a local revnum.
576
566
577 The ellipsis dag is stored as revnums on the changelog,
567 The ellipsis dag is stored as revnums on the changelog,
578 but when we're producing ellipsis entries for
568 but when we're producing ellipsis entries for
579 non-changelog revlogs, we need to turn those numbers into
569 non-changelog revlogs, we need to turn those numbers into
580 something local. This does that for us, and during the
570 something local. This does that for us, and during the
581 changelog sending phase will also expand the stored
571 changelog sending phase will also expand the stored
582 mappings as needed.
572 mappings as needed.
583 """
573 """
584 if clrev == nullrev:
574 if clrev == nullrev:
585 return nullrev
575 return nullrev
586
576
587 if ischangelog:
577 if ischangelog:
588 return clrev
578 return clrev
589
579
590 # Walk the ellipsis-ized changelog breadth-first looking for a
580 # Walk the ellipsis-ized changelog breadth-first looking for a
591 # change that has been linked from the current revlog.
581 # change that has been linked from the current revlog.
592 #
582 #
593 # For a flat manifest revlog only a single step should be necessary
583 # For a flat manifest revlog only a single step should be necessary
594 # as all relevant changelog entries are relevant to the flat
584 # as all relevant changelog entries are relevant to the flat
595 # manifest.
585 # manifest.
596 #
586 #
597 # For a filelog or tree manifest dirlog however not every changelog
587 # For a filelog or tree manifest dirlog however not every changelog
598 # entry will have been relevant, so we need to skip some changelog
588 # entry will have been relevant, so we need to skip some changelog
599 # nodes even after ellipsis-izing.
589 # nodes even after ellipsis-izing.
600 walk = [clrev]
590 walk = [clrev]
601 while walk:
591 while walk:
602 p = walk[0]
592 p = walk[0]
603 walk = walk[1:]
593 walk = walk[1:]
604 if p in clrevtolocalrev:
594 if p in clrevtolocalrev:
605 return clrevtolocalrev[p]
595 return clrevtolocalrev[p]
606 elif p in fullclnodes:
596 elif p in fullclnodes:
607 walk.extend([pp for pp in cl.parentrevs(p)
597 walk.extend([pp for pp in cl.parentrevs(p)
608 if pp != nullrev])
598 if pp != nullrev])
609 elif p in precomputedellipsis:
599 elif p in precomputedellipsis:
610 walk.extend([pp for pp in precomputedellipsis[p]
600 walk.extend([pp for pp in precomputedellipsis[p]
611 if pp != nullrev])
601 if pp != nullrev])
612 else:
602 else:
613 # In this case, we've got an ellipsis with parents
603 # In this case, we've got an ellipsis with parents
614 # outside the current bundle (likely an
604 # outside the current bundle (likely an
615 # incremental pull). We "know" that we can use the
605 # incremental pull). We "know" that we can use the
616 # value of this same revlog at whatever revision
606 # value of this same revlog at whatever revision
617 # is pointed to by linknode. "Know" is in scare
607 # is pointed to by linknode. "Know" is in scare
618 # quotes because I haven't done enough examination
608 # quotes because I haven't done enough examination
619 # of edge cases to convince myself this is really
609 # of edge cases to convince myself this is really
620 # a fact - it works for all the (admittedly
610 # a fact - it works for all the (admittedly
621 # thorough) cases in our testsuite, but I would be
611 # thorough) cases in our testsuite, but I would be
622 # somewhat unsurprised to find a case in the wild
612 # somewhat unsurprised to find a case in the wild
623 # where this breaks down a bit. That said, I don't
613 # where this breaks down a bit. That said, I don't
624 # know if it would hurt anything.
614 # know if it would hurt anything.
625 for i in pycompat.xrange(rev, 0, -1):
615 for i in pycompat.xrange(rev, 0, -1):
626 if store.linkrev(i) == clrev:
616 if store.linkrev(i) == clrev:
627 return i
617 return i
628 # We failed to resolve a parent for this node, so
618 # We failed to resolve a parent for this node, so
629 # we crash the changegroup construction.
619 # we crash the changegroup construction.
630 raise error.Abort(
620 raise error.Abort(
631 'unable to resolve parent while packing %r %r'
621 'unable to resolve parent while packing %r %r'
632 ' for changeset %r' % (store.indexfile, rev, clrev))
622 ' for changeset %r' % (store.indexfile, rev, clrev))
633
623
634 return nullrev
624 return nullrev
635
625
636 if not linkparents or (
626 if not linkparents or (
637 store.parentrevs(rev) == (nullrev, nullrev)):
627 store.parentrevs(rev) == (nullrev, nullrev)):
638 p1, p2 = nullrev, nullrev
628 p1, p2 = nullrev, nullrev
639 elif len(linkparents) == 1:
629 elif len(linkparents) == 1:
640 p1, = sorted(local(p) for p in linkparents)
630 p1, = sorted(local(p) for p in linkparents)
641 p2 = nullrev
631 p2 = nullrev
642 else:
632 else:
643 p1, p2 = sorted(local(p) for p in linkparents)
633 p1, p2 = sorted(local(p) for p in linkparents)
644
634
645 p1node, p2node = store.node(p1), store.node(p2)
635 p1node, p2node = store.node(p1), store.node(p2)
646
636
647 # TODO: try and actually send deltas for ellipsis data blocks
637 return p1node, p2node, linknode
648 return revisiondeltarequest(
649 node=node,
650 p1node=p1node,
651 p2node=p2node,
652 linknode=linknode,
653 basenode=nullid,
654 ellipsis=True,
655 )
656
638
657 def deltagroup(repo, store, nodes, ischangelog, lookup, forcedeltaparentprev,
639 def deltagroup(repo, store, nodes, ischangelog, lookup, forcedeltaparentprev,
658 topic=None,
640 topic=None,
659 ellipses=False, clrevtolocalrev=None, fullclnodes=None,
641 ellipses=False, clrevtolocalrev=None, fullclnodes=None,
660 precomputedellipsis=None):
642 precomputedellipsis=None):
661 """Calculate deltas for a set of revisions.
643 """Calculate deltas for a set of revisions.
662
644
663 Is a generator of ``revisiondelta`` instances.
645 Is a generator of ``revisiondelta`` instances.
664
646
665 If topic is not None, progress detail will be generated using this
647 If topic is not None, progress detail will be generated using this
666 topic name (e.g. changesets, manifests, etc).
648 topic name (e.g. changesets, manifests, etc).
667 """
649 """
668 if not nodes:
650 if not nodes:
669 return
651 return
670
652
671 # We perform two passes over the revisions whose data we will emit.
672 #
673 # In the first pass, we obtain information about the deltas that will
674 # be generated. This involves computing linknodes and adjusting the
675 # request to take shallow fetching into account. The end result of
676 # this pass is a list of "request" objects stating which deltas
677 # to obtain.
678 #
679 # The second pass is simply resolving the requested deltas.
680
681 cl = repo.changelog
653 cl = repo.changelog
682
654
683 if ischangelog:
655 if ischangelog:
684 # Changelog doesn't benefit from reordering revisions. So send
656 # `hg log` shows changesets in storage order. To preserve order
685 # out revisions in store order.
657 # across clones, send out changesets in storage order.
686 # TODO the API would be cleaner if this were controlled by the
658 nodesorder = 'storage'
687 # store producing the deltas.
688 revs = sorted(cl.rev(n) for n in nodes)
689 elif ellipses:
659 elif ellipses:
690 revs = _sortnodesellipsis(store, nodes, cl, lookup)
660 nodes = _sortnodesellipsis(store, nodes, cl, lookup)
661 nodesorder = 'nodes'
691 else:
662 else:
692 revs = _sortnodesnormal(store, nodes)
663 nodesorder = None
693
664
694 # In the first pass, collect info about the deltas we'll be
665 # Perform ellipses filtering and revision massaging. We do this before
695 # generating.
666 # emitrevisions() because a) filtering out revisions creates less work
696 requests = []
667 # for emitrevisions() b) dropping revisions would break emitrevisions()'s
697
668 # assumptions about delta choices and we would possibly send a delta
698 # Add the parent of the first rev.
669 # referencing a missing base revision.
699 revs.insert(0, store.parentrevs(revs[0])[0])
670 #
671 # Also, calling lookup() has side-effects with regards to populating
672 # data structures. If we don't call lookup() for each node or if we call
673 # lookup() after the first pass through each node, things can break -
674 # possibly intermittently depending on the python hash seed! For that
675 # reason, we store a mapping of all linknodes during the initial node
676 # pass rather than use lookup() on the output side.
677 if ellipses:
678 filtered = []
679 adjustedparents = {}
680 linknodes = {}
700
681
701 for i in pycompat.xrange(len(revs) - 1):
682 for node in nodes:
702 prev = revs[i]
683 rev = store.rev(node)
703 curr = revs[i + 1]
684 linknode = lookup(node)
704
705 node = store.node(curr)
706 linknode = lookup(node)
707 p1node, p2node = store.parents(node)
708
709 if ellipses:
710 linkrev = cl.rev(linknode)
685 linkrev = cl.rev(linknode)
711 clrevtolocalrev[linkrev] = curr
686 clrevtolocalrev[linkrev] = rev
712
687
713 # This is a node to send in full, because the changeset it
688 # If linknode is in fullclnodes, it means the corresponding
714 # corresponds to was a full changeset.
689 # changeset was a full changeset and is being sent unaltered.
715 if linknode in fullclnodes:
690 if linknode in fullclnodes:
716 requests.append(revisiondeltarequest(
691 linknodes[node] = linknode
717 node=node,
718 p1node=p1node,
719 p2node=p2node,
720 linknode=linknode,
721 basenode=None,
722 ))
723
692
693 # If the corresponding changeset wasn't in the set computed
694 # as relevant to us, it should be dropped outright.
724 elif linkrev not in precomputedellipsis:
695 elif linkrev not in precomputedellipsis:
725 pass
696 continue
697
726 else:
698 else:
727 requests.append(_makenarrowdeltarequest(
699 # We could probably do this later and avoid the dict
728 cl, store, ischangelog, curr, node, linkrev, linknode,
700 # holding state. But it likely doesn't matter.
729 clrevtolocalrev, fullclnodes,
701 p1node, p2node, linknode = _resolvenarrowrevisioninfo(
730 precomputedellipsis))
702 cl, store, ischangelog, rev, linkrev, linknode,
731 else:
703 clrevtolocalrev, fullclnodes, precomputedellipsis)
732 requests.append(revisiondeltarequest(
704
733 node=node,
705 adjustedparents[node] = (p1node, p2node)
734 p1node=p1node,
706 linknodes[node] = linknode
735 p2node=p2node,
707
736 linknode=linknode,
708 filtered.append(node)
737 basenode=store.node(prev) if forcedeltaparentprev else None,
709
738 ))
710 nodes = filtered
739
711
740 # We expect the first pass to be fast, so we only engage the progress
712 # We expect the first pass to be fast, so we only engage the progress
741 # meter for constructing the revision deltas.
713 # meter for constructing the revision deltas.
742 progress = None
714 progress = None
743 if topic is not None:
715 if topic is not None:
744 progress = repo.ui.makeprogress(topic, unit=_('chunks'),
716 progress = repo.ui.makeprogress(topic, unit=_('chunks'),
745 total=len(requests))
717 total=len(nodes))
746
718
747 for i, delta in enumerate(store.emitrevisiondeltas(requests)):
719 revisions = store.emitrevisions(
720 nodes,
721 nodesorder=nodesorder,
722 revisiondata=True,
723 assumehaveparentrevisions=not ellipses,
724 deltaprevious=forcedeltaparentprev)
725
726 for i, revision in enumerate(revisions):
748 if progress:
727 if progress:
749 progress.update(i + 1)
728 progress.update(i + 1)
750
729
751 yield delta
730 if ellipses:
731 linknode = linknodes[revision.node]
732
733 if revision.node in adjustedparents:
734 p1node, p2node = adjustedparents[revision.node]
735 revision.p1node = p1node
736 revision.p2node = p2node
737 revision.flags |= revlog.REVIDX_ELLIPSIS
738
739 else:
740 linknode = lookup(revision.node)
741
742 revision.linknode = linknode
743 yield revision
752
744
753 if progress:
745 if progress:
754 progress.complete()
746 progress.complete()
755
747
756 class cgpacker(object):
748 class cgpacker(object):
757 def __init__(self, repo, filematcher, version,
749 def __init__(self, repo, filematcher, version,
758 builddeltaheader, manifestsend,
750 builddeltaheader, manifestsend,
759 forcedeltaparentprev=False,
751 forcedeltaparentprev=False,
760 bundlecaps=None, ellipses=False,
752 bundlecaps=None, ellipses=False,
761 shallow=False, ellipsisroots=None, fullnodes=None):
753 shallow=False, ellipsisroots=None, fullnodes=None):
762 """Given a source repo, construct a bundler.
754 """Given a source repo, construct a bundler.
763
755
764 filematcher is a matcher that matches on files to include in the
756 filematcher is a matcher that matches on files to include in the
765 changegroup. Used to facilitate sparse changegroups.
757 changegroup. Used to facilitate sparse changegroups.
766
758
767 forcedeltaparentprev indicates whether delta parents must be against
759 forcedeltaparentprev indicates whether delta parents must be against
768 the previous revision in a delta group. This should only be used for
760 the previous revision in a delta group. This should only be used for
769 compatibility with changegroup version 1.
761 compatibility with changegroup version 1.
770
762
771 builddeltaheader is a callable that constructs the header for a group
763 builddeltaheader is a callable that constructs the header for a group
772 delta.
764 delta.
773
765
774 manifestsend is a chunk to send after manifests have been fully emitted.
766 manifestsend is a chunk to send after manifests have been fully emitted.
775
767
776 ellipses indicates whether ellipsis serving mode is enabled.
768 ellipses indicates whether ellipsis serving mode is enabled.
777
769
778 bundlecaps is optional and can be used to specify the set of
770 bundlecaps is optional and can be used to specify the set of
779 capabilities which can be used to build the bundle. While bundlecaps is
771 capabilities which can be used to build the bundle. While bundlecaps is
780 unused in core Mercurial, extensions rely on this feature to communicate
772 unused in core Mercurial, extensions rely on this feature to communicate
781 capabilities to customize the changegroup packer.
773 capabilities to customize the changegroup packer.
782
774
783 shallow indicates whether shallow data might be sent. The packer may
775 shallow indicates whether shallow data might be sent. The packer may
784 need to pack file contents not introduced by the changes being packed.
776 need to pack file contents not introduced by the changes being packed.
785
777
786 fullnodes is the set of changelog nodes which should not be ellipsis
778 fullnodes is the set of changelog nodes which should not be ellipsis
787 nodes. We store this rather than the set of nodes that should be
779 nodes. We store this rather than the set of nodes that should be
788 ellipsis because for very large histories we expect this to be
780 ellipsis because for very large histories we expect this to be
789 significantly smaller.
781 significantly smaller.
790 """
782 """
791 assert filematcher
783 assert filematcher
792 self._filematcher = filematcher
784 self._filematcher = filematcher
793
785
794 self.version = version
786 self.version = version
795 self._forcedeltaparentprev = forcedeltaparentprev
787 self._forcedeltaparentprev = forcedeltaparentprev
796 self._builddeltaheader = builddeltaheader
788 self._builddeltaheader = builddeltaheader
797 self._manifestsend = manifestsend
789 self._manifestsend = manifestsend
798 self._ellipses = ellipses
790 self._ellipses = ellipses
799
791
800 # Set of capabilities we can use to build the bundle.
792 # Set of capabilities we can use to build the bundle.
801 if bundlecaps is None:
793 if bundlecaps is None:
802 bundlecaps = set()
794 bundlecaps = set()
803 self._bundlecaps = bundlecaps
795 self._bundlecaps = bundlecaps
804 self._isshallow = shallow
796 self._isshallow = shallow
805 self._fullclnodes = fullnodes
797 self._fullclnodes = fullnodes
806
798
807 # Maps ellipsis revs to their roots at the changelog level.
799 # Maps ellipsis revs to their roots at the changelog level.
808 self._precomputedellipsis = ellipsisroots
800 self._precomputedellipsis = ellipsisroots
809
801
810 self._repo = repo
802 self._repo = repo
811
803
812 if self._repo.ui.verbose and not self._repo.ui.debugflag:
804 if self._repo.ui.verbose and not self._repo.ui.debugflag:
813 self._verbosenote = self._repo.ui.note
805 self._verbosenote = self._repo.ui.note
814 else:
806 else:
815 self._verbosenote = lambda s: None
807 self._verbosenote = lambda s: None
816
808
817 def generate(self, commonrevs, clnodes, fastpathlinkrev, source,
809 def generate(self, commonrevs, clnodes, fastpathlinkrev, source,
818 changelog=True):
810 changelog=True):
819 """Yield a sequence of changegroup byte chunks.
811 """Yield a sequence of changegroup byte chunks.
820 If changelog is False, changelog data won't be added to changegroup
812 If changelog is False, changelog data won't be added to changegroup
821 """
813 """
822
814
823 repo = self._repo
815 repo = self._repo
824 cl = repo.changelog
816 cl = repo.changelog
825
817
826 self._verbosenote(_('uncompressed size of bundle content:\n'))
818 self._verbosenote(_('uncompressed size of bundle content:\n'))
827 size = 0
819 size = 0
828
820
829 clstate, deltas = self._generatechangelog(cl, clnodes)
821 clstate, deltas = self._generatechangelog(cl, clnodes)
830 for delta in deltas:
822 for delta in deltas:
831 if changelog:
823 if changelog:
832 for chunk in _revisiondeltatochunks(delta,
824 for chunk in _revisiondeltatochunks(delta,
833 self._builddeltaheader):
825 self._builddeltaheader):
834 size += len(chunk)
826 size += len(chunk)
835 yield chunk
827 yield chunk
836
828
837 close = closechunk()
829 close = closechunk()
838 size += len(close)
830 size += len(close)
839 yield closechunk()
831 yield closechunk()
840
832
841 self._verbosenote(_('%8.i (changelog)\n') % size)
833 self._verbosenote(_('%8.i (changelog)\n') % size)
842
834
843 clrevorder = clstate['clrevorder']
835 clrevorder = clstate['clrevorder']
844 manifests = clstate['manifests']
836 manifests = clstate['manifests']
845 changedfiles = clstate['changedfiles']
837 changedfiles = clstate['changedfiles']
846
838
847 # We need to make sure that the linkrev in the changegroup refers to
839 # We need to make sure that the linkrev in the changegroup refers to
848 # the first changeset that introduced the manifest or file revision.
840 # the first changeset that introduced the manifest or file revision.
849 # The fastpath is usually safer than the slowpath, because the filelogs
841 # The fastpath is usually safer than the slowpath, because the filelogs
850 # are walked in revlog order.
842 # are walked in revlog order.
851 #
843 #
852 # When taking the slowpath when the manifest revlog uses generaldelta,
844 # When taking the slowpath when the manifest revlog uses generaldelta,
853 # the manifest may be walked in the "wrong" order. Without 'clrevorder',
845 # the manifest may be walked in the "wrong" order. Without 'clrevorder',
854 # we would get an incorrect linkrev (see fix in cc0ff93d0c0c).
846 # we would get an incorrect linkrev (see fix in cc0ff93d0c0c).
855 #
847 #
856 # When taking the fastpath, we are only vulnerable to reordering
848 # When taking the fastpath, we are only vulnerable to reordering
857 # of the changelog itself. The changelog never uses generaldelta and is
849 # of the changelog itself. The changelog never uses generaldelta and is
858 # never reordered. To handle this case, we simply take the slowpath,
850 # never reordered. To handle this case, we simply take the slowpath,
859 # which already has the 'clrevorder' logic. This was also fixed in
851 # which already has the 'clrevorder' logic. This was also fixed in
860 # cc0ff93d0c0c.
852 # cc0ff93d0c0c.
861
853
862 # Treemanifests don't work correctly with fastpathlinkrev
854 # Treemanifests don't work correctly with fastpathlinkrev
863 # either, because we don't discover which directory nodes to
855 # either, because we don't discover which directory nodes to
864 # send along with files. This could probably be fixed.
856 # send along with files. This could probably be fixed.
865 fastpathlinkrev = fastpathlinkrev and (
857 fastpathlinkrev = fastpathlinkrev and (
866 'treemanifest' not in repo.requirements)
858 'treemanifest' not in repo.requirements)
867
859
868 fnodes = {} # needed file nodes
860 fnodes = {} # needed file nodes
869
861
870 size = 0
862 size = 0
871 it = self.generatemanifests(
863 it = self.generatemanifests(
872 commonrevs, clrevorder, fastpathlinkrev, manifests, fnodes, source,
864 commonrevs, clrevorder, fastpathlinkrev, manifests, fnodes, source,
873 clstate['clrevtomanifestrev'])
865 clstate['clrevtomanifestrev'])
874
866
875 for tree, deltas in it:
867 for tree, deltas in it:
876 if tree:
868 if tree:
877 assert self.version == b'03'
869 assert self.version == b'03'
878 chunk = _fileheader(tree)
870 chunk = _fileheader(tree)
879 size += len(chunk)
871 size += len(chunk)
880 yield chunk
872 yield chunk
881
873
882 for delta in deltas:
874 for delta in deltas:
883 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
875 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
884 for chunk in chunks:
876 for chunk in chunks:
885 size += len(chunk)
877 size += len(chunk)
886 yield chunk
878 yield chunk
887
879
888 close = closechunk()
880 close = closechunk()
889 size += len(close)
881 size += len(close)
890 yield close
882 yield close
891
883
892 self._verbosenote(_('%8.i (manifests)\n') % size)
884 self._verbosenote(_('%8.i (manifests)\n') % size)
893 yield self._manifestsend
885 yield self._manifestsend
894
886
895 mfdicts = None
887 mfdicts = None
896 if self._ellipses and self._isshallow:
888 if self._ellipses and self._isshallow:
897 mfdicts = [(self._repo.manifestlog[n].read(), lr)
889 mfdicts = [(self._repo.manifestlog[n].read(), lr)
898 for (n, lr) in manifests.iteritems()]
890 for (n, lr) in manifests.iteritems()]
899
891
900 manifests.clear()
892 manifests.clear()
901 clrevs = set(cl.rev(x) for x in clnodes)
893 clrevs = set(cl.rev(x) for x in clnodes)
902
894
903 it = self.generatefiles(changedfiles, commonrevs,
895 it = self.generatefiles(changedfiles, commonrevs,
904 source, mfdicts, fastpathlinkrev,
896 source, mfdicts, fastpathlinkrev,
905 fnodes, clrevs)
897 fnodes, clrevs)
906
898
907 for path, deltas in it:
899 for path, deltas in it:
908 h = _fileheader(path)
900 h = _fileheader(path)
909 size = len(h)
901 size = len(h)
910 yield h
902 yield h
911
903
912 for delta in deltas:
904 for delta in deltas:
913 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
905 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
914 for chunk in chunks:
906 for chunk in chunks:
915 size += len(chunk)
907 size += len(chunk)
916 yield chunk
908 yield chunk
917
909
918 close = closechunk()
910 close = closechunk()
919 size += len(close)
911 size += len(close)
920 yield close
912 yield close
921
913
922 self._verbosenote(_('%8.i %s\n') % (size, path))
914 self._verbosenote(_('%8.i %s\n') % (size, path))
923
915
924 yield closechunk()
916 yield closechunk()
925
917
926 if clnodes:
918 if clnodes:
927 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
919 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
928
920
929 def _generatechangelog(self, cl, nodes):
921 def _generatechangelog(self, cl, nodes):
930 """Generate data for changelog chunks.
922 """Generate data for changelog chunks.
931
923
932 Returns a 2-tuple of a dict containing state and an iterable of
924 Returns a 2-tuple of a dict containing state and an iterable of
933 byte chunks. The state will not be fully populated until the
925 byte chunks. The state will not be fully populated until the
934 chunk stream has been fully consumed.
926 chunk stream has been fully consumed.
935 """
927 """
936 clrevorder = {}
928 clrevorder = {}
937 manifests = {}
929 manifests = {}
938 mfl = self._repo.manifestlog
930 mfl = self._repo.manifestlog
939 changedfiles = set()
931 changedfiles = set()
940 clrevtomanifestrev = {}
932 clrevtomanifestrev = {}
941
933
942 # Callback for the changelog, used to collect changed files and
934 # Callback for the changelog, used to collect changed files and
943 # manifest nodes.
935 # manifest nodes.
944 # Returns the linkrev node (identity in the changelog case).
936 # Returns the linkrev node (identity in the changelog case).
945 def lookupcl(x):
937 def lookupcl(x):
946 c = cl.changelogrevision(x)
938 c = cl.changelogrevision(x)
947 clrevorder[x] = len(clrevorder)
939 clrevorder[x] = len(clrevorder)
948
940
949 if self._ellipses:
941 if self._ellipses:
950 # Only update manifests if x is going to be sent. Otherwise we
942 # Only update manifests if x is going to be sent. Otherwise we
951 # end up with bogus linkrevs specified for manifests and
943 # end up with bogus linkrevs specified for manifests and
952 # we skip some manifest nodes that we should otherwise
944 # we skip some manifest nodes that we should otherwise
953 # have sent.
945 # have sent.
954 if (x in self._fullclnodes
946 if (x in self._fullclnodes
955 or cl.rev(x) in self._precomputedellipsis):
947 or cl.rev(x) in self._precomputedellipsis):
956
948
957 manifestnode = c.manifest
949 manifestnode = c.manifest
958 # Record the first changeset introducing this manifest
950 # Record the first changeset introducing this manifest
959 # version.
951 # version.
960 manifests.setdefault(manifestnode, x)
952 manifests.setdefault(manifestnode, x)
961 # Set this narrow-specific dict so we have the lowest
953 # Set this narrow-specific dict so we have the lowest
962 # manifest revnum to look up for this cl revnum. (Part of
954 # manifest revnum to look up for this cl revnum. (Part of
963 # mapping changelog ellipsis parents to manifest ellipsis
955 # mapping changelog ellipsis parents to manifest ellipsis
964 # parents)
956 # parents)
965 clrevtomanifestrev.setdefault(
957 clrevtomanifestrev.setdefault(
966 cl.rev(x), mfl.rev(manifestnode))
958 cl.rev(x), mfl.rev(manifestnode))
967 # We can't trust the changed files list in the changeset if the
959 # We can't trust the changed files list in the changeset if the
968 # client requested a shallow clone.
960 # client requested a shallow clone.
969 if self._isshallow:
961 if self._isshallow:
970 changedfiles.update(mfl[c.manifest].read().keys())
962 changedfiles.update(mfl[c.manifest].read().keys())
971 else:
963 else:
972 changedfiles.update(c.files)
964 changedfiles.update(c.files)
973 else:
965 else:
974 # record the first changeset introducing this manifest version
966 # record the first changeset introducing this manifest version
975 manifests.setdefault(c.manifest, x)
967 manifests.setdefault(c.manifest, x)
976 # Record a complete list of potentially-changed files in
968 # Record a complete list of potentially-changed files in
977 # this manifest.
969 # this manifest.
978 changedfiles.update(c.files)
970 changedfiles.update(c.files)
979
971
980 return x
972 return x
981
973
982 state = {
974 state = {
983 'clrevorder': clrevorder,
975 'clrevorder': clrevorder,
984 'manifests': manifests,
976 'manifests': manifests,
985 'changedfiles': changedfiles,
977 'changedfiles': changedfiles,
986 'clrevtomanifestrev': clrevtomanifestrev,
978 'clrevtomanifestrev': clrevtomanifestrev,
987 }
979 }
988
980
989 gen = deltagroup(
981 gen = deltagroup(
990 self._repo, cl, nodes, True, lookupcl,
982 self._repo, cl, nodes, True, lookupcl,
991 self._forcedeltaparentprev,
983 self._forcedeltaparentprev,
992 ellipses=self._ellipses,
984 ellipses=self._ellipses,
993 topic=_('changesets'),
985 topic=_('changesets'),
994 clrevtolocalrev={},
986 clrevtolocalrev={},
995 fullclnodes=self._fullclnodes,
987 fullclnodes=self._fullclnodes,
996 precomputedellipsis=self._precomputedellipsis)
988 precomputedellipsis=self._precomputedellipsis)
997
989
998 return state, gen
990 return state, gen
999
991
1000 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev,
992 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev,
1001 manifests, fnodes, source, clrevtolocalrev):
993 manifests, fnodes, source, clrevtolocalrev):
1002 """Returns an iterator of changegroup chunks containing manifests.
994 """Returns an iterator of changegroup chunks containing manifests.
1003
995
1004 `source` is unused here, but is used by extensions like remotefilelog to
996 `source` is unused here, but is used by extensions like remotefilelog to
1005 change what is sent based in pulls vs pushes, etc.
997 change what is sent based in pulls vs pushes, etc.
1006 """
998 """
1007 repo = self._repo
999 repo = self._repo
1008 mfl = repo.manifestlog
1000 mfl = repo.manifestlog
1009 tmfnodes = {'': manifests}
1001 tmfnodes = {'': manifests}
1010
1002
1011 # Callback for the manifest, used to collect linkrevs for filelog
1003 # Callback for the manifest, used to collect linkrevs for filelog
1012 # revisions.
1004 # revisions.
1013 # Returns the linkrev node (collected in lookupcl).
1005 # Returns the linkrev node (collected in lookupcl).
1014 def makelookupmflinknode(tree, nodes):
1006 def makelookupmflinknode(tree, nodes):
1015 if fastpathlinkrev:
1007 if fastpathlinkrev:
1016 assert not tree
1008 assert not tree
1017 return manifests.__getitem__
1009 return manifests.__getitem__
1018
1010
1019 def lookupmflinknode(x):
1011 def lookupmflinknode(x):
1020 """Callback for looking up the linknode for manifests.
1012 """Callback for looking up the linknode for manifests.
1021
1013
1022 Returns the linkrev node for the specified manifest.
1014 Returns the linkrev node for the specified manifest.
1023
1015
1024 SIDE EFFECT:
1016 SIDE EFFECT:
1025
1017
1026 1) fclnodes gets populated with the list of relevant
1018 1) fclnodes gets populated with the list of relevant
1027 file nodes if we're not using fastpathlinkrev
1019 file nodes if we're not using fastpathlinkrev
1028 2) When treemanifests are in use, collects treemanifest nodes
1020 2) When treemanifests are in use, collects treemanifest nodes
1029 to send
1021 to send
1030
1022
1031 Note that this means manifests must be completely sent to
1023 Note that this means manifests must be completely sent to
1032 the client before you can trust the list of files and
1024 the client before you can trust the list of files and
1033 treemanifests to send.
1025 treemanifests to send.
1034 """
1026 """
1035 clnode = nodes[x]
1027 clnode = nodes[x]
1036 mdata = mfl.get(tree, x).readfast(shallow=True)
1028 mdata = mfl.get(tree, x).readfast(shallow=True)
1037 for p, n, fl in mdata.iterentries():
1029 for p, n, fl in mdata.iterentries():
1038 if fl == 't': # subdirectory manifest
1030 if fl == 't': # subdirectory manifest
1039 subtree = tree + p + '/'
1031 subtree = tree + p + '/'
1040 tmfclnodes = tmfnodes.setdefault(subtree, {})
1032 tmfclnodes = tmfnodes.setdefault(subtree, {})
1041 tmfclnode = tmfclnodes.setdefault(n, clnode)
1033 tmfclnode = tmfclnodes.setdefault(n, clnode)
1042 if clrevorder[clnode] < clrevorder[tmfclnode]:
1034 if clrevorder[clnode] < clrevorder[tmfclnode]:
1043 tmfclnodes[n] = clnode
1035 tmfclnodes[n] = clnode
1044 else:
1036 else:
1045 f = tree + p
1037 f = tree + p
1046 fclnodes = fnodes.setdefault(f, {})
1038 fclnodes = fnodes.setdefault(f, {})
1047 fclnode = fclnodes.setdefault(n, clnode)
1039 fclnode = fclnodes.setdefault(n, clnode)
1048 if clrevorder[clnode] < clrevorder[fclnode]:
1040 if clrevorder[clnode] < clrevorder[fclnode]:
1049 fclnodes[n] = clnode
1041 fclnodes[n] = clnode
1050 return clnode
1042 return clnode
1051 return lookupmflinknode
1043 return lookupmflinknode
1052
1044
1053 while tmfnodes:
1045 while tmfnodes:
1054 tree, nodes = tmfnodes.popitem()
1046 tree, nodes = tmfnodes.popitem()
1055 store = mfl.getstorage(tree)
1047 store = mfl.getstorage(tree)
1056
1048
1057 if not self._filematcher.visitdir(store.tree[:-1] or '.'):
1049 if not self._filematcher.visitdir(store.tree[:-1] or '.'):
1058 # No nodes to send because this directory is out of
1050 # No nodes to send because this directory is out of
1059 # the client's view of the repository (probably
1051 # the client's view of the repository (probably
1060 # because of narrow clones).
1052 # because of narrow clones).
1061 prunednodes = []
1053 prunednodes = []
1062 else:
1054 else:
1063 # Avoid sending any manifest nodes we can prove the
1055 # Avoid sending any manifest nodes we can prove the
1064 # client already has by checking linkrevs. See the
1056 # client already has by checking linkrevs. See the
1065 # related comment in generatefiles().
1057 # related comment in generatefiles().
1066 prunednodes = self._prunemanifests(store, nodes, commonrevs)
1058 prunednodes = self._prunemanifests(store, nodes, commonrevs)
1067 if tree and not prunednodes:
1059 if tree and not prunednodes:
1068 continue
1060 continue
1069
1061
1070 lookupfn = makelookupmflinknode(tree, nodes)
1062 lookupfn = makelookupmflinknode(tree, nodes)
1071
1063
1072 deltas = deltagroup(
1064 deltas = deltagroup(
1073 self._repo, store, prunednodes, False, lookupfn,
1065 self._repo, store, prunednodes, False, lookupfn,
1074 self._forcedeltaparentprev,
1066 self._forcedeltaparentprev,
1075 ellipses=self._ellipses,
1067 ellipses=self._ellipses,
1076 topic=_('manifests'),
1068 topic=_('manifests'),
1077 clrevtolocalrev=clrevtolocalrev,
1069 clrevtolocalrev=clrevtolocalrev,
1078 fullclnodes=self._fullclnodes,
1070 fullclnodes=self._fullclnodes,
1079 precomputedellipsis=self._precomputedellipsis)
1071 precomputedellipsis=self._precomputedellipsis)
1080
1072
1081 yield tree, deltas
1073 yield tree, deltas
1082
1074
1083 def _prunemanifests(self, store, nodes, commonrevs):
1075 def _prunemanifests(self, store, nodes, commonrevs):
1084 # This is split out as a separate method to allow filtering
1076 # This is split out as a separate method to allow filtering
1085 # commonrevs in extension code.
1077 # commonrevs in extension code.
1086 #
1078 #
1087 # TODO(augie): this shouldn't be required, instead we should
1079 # TODO(augie): this shouldn't be required, instead we should
1088 # make filtering of revisions to send delegated to the store
1080 # make filtering of revisions to send delegated to the store
1089 # layer.
1081 # layer.
1090 frev, flr = store.rev, store.linkrev
1082 frev, flr = store.rev, store.linkrev
1091 return [n for n in nodes if flr(frev(n)) not in commonrevs]
1083 return [n for n in nodes if flr(frev(n)) not in commonrevs]
1092
1084
1093 # The 'source' parameter is useful for extensions
1085 # The 'source' parameter is useful for extensions
1094 def generatefiles(self, changedfiles, commonrevs, source,
1086 def generatefiles(self, changedfiles, commonrevs, source,
1095 mfdicts, fastpathlinkrev, fnodes, clrevs):
1087 mfdicts, fastpathlinkrev, fnodes, clrevs):
1096 changedfiles = list(filter(self._filematcher, changedfiles))
1088 changedfiles = list(filter(self._filematcher, changedfiles))
1097
1089
1098 if not fastpathlinkrev:
1090 if not fastpathlinkrev:
1099 def normallinknodes(unused, fname):
1091 def normallinknodes(unused, fname):
1100 return fnodes.get(fname, {})
1092 return fnodes.get(fname, {})
1101 else:
1093 else:
1102 cln = self._repo.changelog.node
1094 cln = self._repo.changelog.node
1103
1095
1104 def normallinknodes(store, fname):
1096 def normallinknodes(store, fname):
1105 flinkrev = store.linkrev
1097 flinkrev = store.linkrev
1106 fnode = store.node
1098 fnode = store.node
1107 revs = ((r, flinkrev(r)) for r in store)
1099 revs = ((r, flinkrev(r)) for r in store)
1108 return dict((fnode(r), cln(lr))
1100 return dict((fnode(r), cln(lr))
1109 for r, lr in revs if lr in clrevs)
1101 for r, lr in revs if lr in clrevs)
1110
1102
1111 clrevtolocalrev = {}
1103 clrevtolocalrev = {}
1112
1104
1113 if self._isshallow:
1105 if self._isshallow:
1114 # In a shallow clone, the linknodes callback needs to also include
1106 # In a shallow clone, the linknodes callback needs to also include
1115 # those file nodes that are in the manifests we sent but weren't
1107 # those file nodes that are in the manifests we sent but weren't
1116 # introduced by those manifests.
1108 # introduced by those manifests.
1117 commonctxs = [self._repo[c] for c in commonrevs]
1109 commonctxs = [self._repo[c] for c in commonrevs]
1118 clrev = self._repo.changelog.rev
1110 clrev = self._repo.changelog.rev
1119
1111
1120 def linknodes(flog, fname):
1112 def linknodes(flog, fname):
1121 for c in commonctxs:
1113 for c in commonctxs:
1122 try:
1114 try:
1123 fnode = c.filenode(fname)
1115 fnode = c.filenode(fname)
1124 clrevtolocalrev[c.rev()] = flog.rev(fnode)
1116 clrevtolocalrev[c.rev()] = flog.rev(fnode)
1125 except error.ManifestLookupError:
1117 except error.ManifestLookupError:
1126 pass
1118 pass
1127 links = normallinknodes(flog, fname)
1119 links = normallinknodes(flog, fname)
1128 if len(links) != len(mfdicts):
1120 if len(links) != len(mfdicts):
1129 for mf, lr in mfdicts:
1121 for mf, lr in mfdicts:
1130 fnode = mf.get(fname, None)
1122 fnode = mf.get(fname, None)
1131 if fnode in links:
1123 if fnode in links:
1132 links[fnode] = min(links[fnode], lr, key=clrev)
1124 links[fnode] = min(links[fnode], lr, key=clrev)
1133 elif fnode:
1125 elif fnode:
1134 links[fnode] = lr
1126 links[fnode] = lr
1135 return links
1127 return links
1136 else:
1128 else:
1137 linknodes = normallinknodes
1129 linknodes = normallinknodes
1138
1130
1139 repo = self._repo
1131 repo = self._repo
1140 progress = repo.ui.makeprogress(_('files'), unit=_('files'),
1132 progress = repo.ui.makeprogress(_('files'), unit=_('files'),
1141 total=len(changedfiles))
1133 total=len(changedfiles))
1142 for i, fname in enumerate(sorted(changedfiles)):
1134 for i, fname in enumerate(sorted(changedfiles)):
1143 filerevlog = repo.file(fname)
1135 filerevlog = repo.file(fname)
1144 if not filerevlog:
1136 if not filerevlog:
1145 raise error.Abort(_("empty or missing file data for %s") %
1137 raise error.Abort(_("empty or missing file data for %s") %
1146 fname)
1138 fname)
1147
1139
1148 clrevtolocalrev.clear()
1140 clrevtolocalrev.clear()
1149
1141
1150 linkrevnodes = linknodes(filerevlog, fname)
1142 linkrevnodes = linknodes(filerevlog, fname)
1151 # Lookup for filenodes, we collected the linkrev nodes above in the
1143 # Lookup for filenodes, we collected the linkrev nodes above in the
1152 # fastpath case and with lookupmf in the slowpath case.
1144 # fastpath case and with lookupmf in the slowpath case.
1153 def lookupfilelog(x):
1145 def lookupfilelog(x):
1154 return linkrevnodes[x]
1146 return linkrevnodes[x]
1155
1147
1156 frev, flr = filerevlog.rev, filerevlog.linkrev
1148 frev, flr = filerevlog.rev, filerevlog.linkrev
1157 # Skip sending any filenode we know the client already
1149 # Skip sending any filenode we know the client already
1158 # has. This avoids over-sending files relatively
1150 # has. This avoids over-sending files relatively
1159 # inexpensively, so it's not a problem if we under-filter
1151 # inexpensively, so it's not a problem if we under-filter
1160 # here.
1152 # here.
1161 filenodes = [n for n in linkrevnodes
1153 filenodes = [n for n in linkrevnodes
1162 if flr(frev(n)) not in commonrevs]
1154 if flr(frev(n)) not in commonrevs]
1163
1155
1164 if not filenodes:
1156 if not filenodes:
1165 continue
1157 continue
1166
1158
1167 progress.update(i + 1, item=fname)
1159 progress.update(i + 1, item=fname)
1168
1160
1169 deltas = deltagroup(
1161 deltas = deltagroup(
1170 self._repo, filerevlog, filenodes, False, lookupfilelog,
1162 self._repo, filerevlog, filenodes, False, lookupfilelog,
1171 self._forcedeltaparentprev,
1163 self._forcedeltaparentprev,
1172 ellipses=self._ellipses,
1164 ellipses=self._ellipses,
1173 clrevtolocalrev=clrevtolocalrev,
1165 clrevtolocalrev=clrevtolocalrev,
1174 fullclnodes=self._fullclnodes,
1166 fullclnodes=self._fullclnodes,
1175 precomputedellipsis=self._precomputedellipsis)
1167 precomputedellipsis=self._precomputedellipsis)
1176
1168
1177 yield fname, deltas
1169 yield fname, deltas
1178
1170
1179 progress.complete()
1171 progress.complete()
1180
1172
def _makecg1packer(repo, filematcher, bundlecaps, ellipses=False,
                   shallow=False, ellipsisroots=None, fullnodes=None):
    """Construct a changegroup packer for version '01' bundles."""
    # cg1 delta headers have no explicit delta base: the base is always
    # the previous revision, hence forcedeltaparentprev=True below.
    builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack(
        d.node, d.p1node, d.p2node, d.linknode)

    return cgpacker(repo, filematcher, b'01',
                    builddeltaheader=builddeltaheader,
                    manifestsend=b'',
                    forcedeltaparentprev=True,
                    bundlecaps=bundlecaps,
                    ellipses=ellipses,
                    shallow=shallow,
                    ellipsisroots=ellipsisroots,
                    fullnodes=fullnodes)
1195
1187
def _makecg2packer(repo, filematcher, bundlecaps, ellipses=False,
                   shallow=False, ellipsisroots=None, fullnodes=None):
    """Construct a changegroup packer for version '02' bundles."""
    # cg2 headers carry an explicit delta base node (generaldelta).
    builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack(
        d.node, d.p1node, d.p2node, d.basenode, d.linknode)

    return cgpacker(repo, filematcher, b'02',
                    builddeltaheader=builddeltaheader,
                    manifestsend=b'',
                    bundlecaps=bundlecaps,
                    ellipses=ellipses,
                    shallow=shallow,
                    ellipsisroots=ellipsisroots,
                    fullnodes=fullnodes)
1209
1201
def _makecg3packer(repo, filematcher, bundlecaps, ellipses=False,
                   shallow=False, ellipsisroots=None, fullnodes=None):
    """Construct a changegroup packer for version '03' bundles."""
    # cg3 headers additionally carry per-revision flags (e.g. for
    # treemanifests and censored revisions).
    builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
        d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags)

    return cgpacker(repo, filematcher, b'03',
                    builddeltaheader=builddeltaheader,
                    manifestsend=closechunk(),
                    bundlecaps=bundlecaps,
                    ellipses=ellipses,
                    shallow=shallow,
                    ellipsisroots=ellipsisroots,
                    fullnodes=fullnodes)
1223
1215
# Map of changegroup version -> (packer factory, unpacker class).
_packermap = {'01': (_makecg1packer, cg1unpacker),
              # cg2 adds support for exchanging generaldelta
              '02': (_makecg2packer, cg2unpacker),
              # cg3 adds support for exchanging revlog flags and treemanifests
              '03': (_makecg3packer, cg3unpacker),
             }
1230
1222
def allsupportedversions(repo):
    """Return the set of all changegroup versions this repo could use.

    Version '03' is only offered when explicitly enabled via config or
    when the repo already requires treemanifests.
    """
    versions = set(_packermap.keys())
    if not (repo.ui.configbool('experimental', 'changegroup3') or
            repo.ui.configbool('experimental', 'treemanifest') or
            'treemanifest' in repo.requirements):
        versions.discard('03')
    return versions
1238
1230
# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    """Return changegroup versions that can be applied to ``repo``."""
    return allsupportedversions(repo)
1242
1234
# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    """Return changegroup versions that can be created from ``repo``."""
    versions = allsupportedversions(repo)
    if 'treemanifest' in repo.requirements:
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        versions.discard('01')
        versions.discard('02')
    if repository.NARROW_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # support that for stripping and unbundling to work.
        versions.discard('01')
        versions.discard('02')
    if LFS_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # mark LFS entries with REVIDX_EXTSTORED.
        versions.discard('01')
        versions.discard('02')

    return versions
1266
1258
def localversion(repo):
    """Return the best changegroup version for local-only bundles."""
    # Finds the best version to use for bundles that are meant to be used
    # locally, such as those from strip and shelve, and temporary bundles.
    return max(supportedoutgoingversions(repo))
1271
1263
def safeversion(repo):
    """Return the smallest changegroup version safe for all clients."""
    # Finds the smallest version that it's safe to assume clients of the repo
    # will support. For example, all hg versions that support generaldelta also
    # support changegroup 02.
    versions = supportedoutgoingversions(repo)
    if 'generaldelta' in repo.requirements:
        versions.discard('01')
    assert versions
    return min(versions)
1281
1273
def getbundler(version, repo, bundlecaps=None, filematcher=None,
               ellipses=False, shallow=False, ellipsisroots=None,
               fullnodes=None):
    """Obtain a changegroup packer for the requested ``version``.

    Raises ``error.ProgrammingError`` for version '01' with a sparse
    matcher and ``error.Abort`` when ellipsis nodes are requested with a
    pre-cg3 version.
    """
    assert version in supportedoutgoingversions(repo)

    if filematcher is None:
        filematcher = matchmod.alwaysmatcher(repo.root, '')

    if version == '01' and not filematcher.always():
        raise error.ProgrammingError('version 01 changegroups do not support '
                                     'sparse file matchers')

    if ellipses and version in (b'01', b'02'):
        raise error.Abort(
            _('ellipsis nodes require at least cg3 on client and server, '
              'but negotiated version %s') % version)

    # Requested files could include files not in the local store. So
    # filter those out.
    filematcher = matchmod.intersectmatchers(repo.narrowmatch(),
                                             filematcher)

    fn = _packermap[version][0]
    return fn(repo, filematcher, bundlecaps, ellipses=ellipses,
              shallow=shallow, ellipsisroots=ellipsisroots,
              fullnodes=fullnodes)
1308
1300
def getunbundler(version, fh, alg, extras=None):
    """Obtain a changegroup unpacker for ``version`` reading from ``fh``."""
    return _packermap[version][1](fh, alg, extras=extras)
1311
1303
def _changegroupinfo(repo, nodes, source):
    """Report on the outgoing changesets (status in verbose/bundle mode,
    the full node list when debugging)."""
    if repo.ui.verbose or source == 'bundle':
        repo.ui.status(_("%d changesets found\n") % len(nodes))
    if repo.ui.debugflag:
        repo.ui.debug("list of changesets:\n")
        for node in nodes:
            repo.ui.debug("%s\n" % hex(node))
1319
1311
def makechangegroup(repo, outgoing, version, source, fastpath=False,
                    bundlecaps=None):
    """Build a changegroup and wrap it in the matching unbundler.

    Convenience wrapper around makestream() for callers that want to
    apply the generated changegroup locally.
    """
    cgstream = makestream(repo, outgoing, version, source,
                          fastpath=fastpath, bundlecaps=bundlecaps)
    return getunbundler(version, util.chunkbuffer(cgstream), None,
                        {'clcount': len(outgoing.missing) })
1326
1318
def makestream(repo, outgoing, version, source, fastpath=False,
               bundlecaps=None, filematcher=None):
    """Return an iterator of changegroup chunks for ``outgoing`` changesets."""
    bundler = getbundler(version, repo, bundlecaps=bundlecaps,
                         filematcher=filematcher)

    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know all linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
1346
1338
def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    """Apply incoming filelog groups from ``source`` to the repo.

    ``needfiles`` maps filename -> set of nodes that must arrive; entries
    are checked off as revisions are added and any node still missing at
    the end triggers an abort. Returns ``(revisions, files)`` counts.
    """
    revisions = 0
    files = 0
    progress = repo.ui.makeprogress(_('files'), unit=_('files'),
                                    total=expectedfiles)
    # source.filelogheader() returns {} when there are no more file groups.
    for chunkdata in iter(source.filelogheader, {}):
        files += 1
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        progress.increment()
        fl = repo.file(f)
        o = len(fl)
        try:
            deltas = source.deltaiter()
            if not fl.addgroup(deltas, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        if f in needfiles:
            needs = needfiles[f]
            for new in pycompat.xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    progress.complete()

    # Anything still listed in needfiles never arrived: the data is
    # incomplete and the repo would be corrupt without it.
    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files
General Comments 0
You need to be logged in to leave comments. Login now