changegroup: use compression engines API...
Gregory Szorc
r30354:a37a96d8 default
@@ -1,1046 +1,1048 @@
# changegroup.py - Mercurial changegroup manipulation functions
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import os
import struct
import tempfile
import weakref

from .i18n import _
from .node import (
    hex,
    nullrev,
    short,
)

from . import (
    branchmap,
    dagutil,
    discovery,
    error,
    mdiff,
    phases,
    util,
)

_CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
_CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
_CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
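
# Editor's sketch (illustrative, not part of changegroup.py): how the delta
# header formats above pack and unpack.  Each "20s" field is a 20-byte binary
# node; cg3's ">...H" adds a big-endian unsigned short for revlog flags.  The
# node values and the _example_* name are hypothetical.
def _example_deltaheader():
    node, p1, p2, cs = ('\x11' * 20, '\x22' * 20, '\x33' * 20, '\x44' * 20)
    header = struct.pack(_CHANGEGROUPV1_DELTA_HEADER, node, p1, p2, cs)
    # four 20-byte nodes -> an 80-byte header
    assert struct.calcsize(_CHANGEGROUPV1_DELTA_HEADER) == 80 == len(header)
    # unpacking recovers the same four values
    assert struct.unpack(_CHANGEGROUPV1_DELTA_HEADER, header) == (node, p1,
                                                                  p2, cs)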

def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    s = stream.read(n)
    if len(s) < n:
        raise error.Abort(_("stream ended unexpectedly"
                            " (got %d bytes, expected %d)")
                          % (len(s), n))
    return s

def getchunk(stream):
    """return the next chunk from stream as a string"""
    d = readexactly(stream, 4)
    l = struct.unpack(">l", d)[0]
    if l <= 4:
        if l:
            raise error.Abort(_("invalid chunk length %d") % l)
        return ""
    return readexactly(stream, l - 4)

def chunkheader(length):
    """return a changegroup chunk header (string)"""
    return struct.pack(">l", length + 4)

def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    return struct.pack(">l", 0)
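
# Editor's sketch (illustrative only): the framing round-trip implied by
# chunkheader()/getchunk()/closechunk().  A chunk is a 4-byte big-endian
# length (which counts itself) followed by the payload; a zero length ends a
# group.  io.BytesIO stands in for a real stream; _example_framing is a
# hypothetical name.
def _example_framing():
    import io
    payload = 'hello'
    framed = chunkheader(len(payload)) + payload + closechunk()
    stream = io.BytesIO(framed)
    assert getchunk(stream) == 'hello'  # length 9 = 4 (header) + 5 (payload)
    assert getchunk(stream) == ''       # zero-length chunk: end of group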

def combineresults(results):
    """logic to combine 0 or more addchangegroup results into one"""
    changedheads = 0
    result = 1
    for ret in results:
        # If any changegroup result is 0, return 0
        if ret == 0:
            result = 0
            break
        if ret < -1:
            changedheads += ret + 1
        elif ret > 1:
            changedheads += ret - 1
    if changedheads > 0:
        result = 1 + changedheads
    elif changedheads < 0:
        result = -1 + changedheads
    return result
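
# Editor's sketch (illustrative only): combineresults() folds the return
# convention documented later in cg1unpacker.apply() -- 1+n for n added
# heads, -1-n for n removed heads, 1 for no head change, 0 for no change at
# all.  _example_combineresults is a hypothetical name.
def _example_combineresults():
    # one group added two heads (3 == 1+2), another removed one (-2 == -1-1):
    # the net head change is +1, so the combined result is 1 + 1 == 2
    assert combineresults([3, -2]) == 2
    # any group reporting "nothing changed" (0) zeroes the whole result
    assert combineresults([3, 0, -2]) == 0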

def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                # Increase default buffer size because default is usually
                # small (4k is common on Linux).
                fh = open(filename, "wb", 131072)
        else:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, "wb")
        cleanup = filename
        for c in chunks:
            fh.write(c)
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)

class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
-        if alg == 'UN':
-            alg = None # get more modern without breaking too much
-        if not alg in util.decompressors:
+        if alg is None:
+            alg = 'UN'
+        if alg not in util.compengines.supportedbundletypes:
            raise error.Abort(_('unknown stream compression type: %s')
                              % alg)
        if alg == 'BZ':
            alg = '_truncatedBZ'
-        self._stream = util.decompressors[alg](fh)
+
+        compengine = util.compengines.forbundletype(alg)
+        self._stream = compengine.decompressorreader(fh)
        self._type = alg
        self.extras = extras or {}
        self.callback = None
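
    # Editor's sketch (illustrative only): the compression engines API used
    # by __init__ above.  A bundle type name maps to an engine, and
    # decompressorreader() wraps a file object with streaming decompression.
    # io.BytesIO stands in for a network stream, and the sketch assumes the
    # 'UN' (uncompressed) engine passes data through unchanged.
    @staticmethod
    def _example_compengine():
        import io
        engine = util.compengines.forbundletype('UN')
        reader = engine.decompressorreader(io.BytesIO('raw changegroup data'))
        assert reader.read(3) == 'raw'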

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                'deltabase': deltabase, 'delta': delta, 'flags': flags}

    def getchunks(self):
        """returns all the chunks contained in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parses the changegroup data; otherwise it
        would block on an sshrepo, since it doesn't know where the stream ends.
        """
        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, changegroup versions 1 and 2 have a series of groups
        # with one group per file. changegroup 3 has a series of directory
        # manifests before the files.
        count = 0
        emptycount = 0
        while emptycount < self._grouplistcount:
            empty = True
            count += 1
            while True:
                chunk = getchunk(self)
                if not chunk:
                    if empty and count > 2:
                        emptycount += 1
                    break
                empty = False
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        # We know that we'll never have more manifests than we had
        # changesets.
        self.callback = prog(_('manifests'), numchanges)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        repo.manifestlog._revlog.addgroup(self, revmap, trp)
        repo.ui.progress(_('manifests'), None)
        self.callback = None

    def apply(self, repo, srctype, url, emptyok=False,
              targetphase=phases.draft, expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        try:
            with repo.transaction("\n".join([srctype,
                                             util.hidepassword(url)])) as tr:
                # The transaction could have been created before and already
                # carries source information. In this case we use the top
                # level data. We overwrite the argument because we need to use
                # the top level values (if they exist) in this function.
                srctype = tr.hookargs.setdefault('source', srctype)
                url = tr.hookargs.setdefault('url', url)
                repo.hook('prechangegroup', throw=True, **tr.hookargs)

                # write changelog data to temp files so concurrent readers
                # will not see an inconsistent view
                cl = repo.changelog
                cl.delayupdate(tr)
                oldheads = cl.heads()

                trp = weakref.proxy(tr)
                # pull off the changeset group
                repo.ui.status(_("adding changesets\n"))
                clstart = len(cl)
                class prog(object):
                    def __init__(self, step, total):
                        self._step = step
                        self._total = total
                        self._count = 1
                    def __call__(self):
                        repo.ui.progress(self._step, self._count,
                                         unit=_('chunks'), total=self._total)
                        self._count += 1
                self.callback = prog(_('changesets'), expectedtotal)

                efiles = set()
                def onchangelog(cl, node):
                    efiles.update(cl.readfiles(node))

                self.changelogheader()
                srccontent = cl.addgroup(self, csmap, trp,
                                         addrevisioncb=onchangelog)
                efiles = len(efiles)

                if not (srccontent or emptyok):
                    raise error.Abort(_("received changelog group is empty"))
                clend = len(cl)
                changesets = clend - clstart
                repo.ui.progress(_('changesets'), None)
                self.callback = None

                # pull off the manifest group
                repo.ui.status(_("adding manifests\n"))
                self._unpackmanifests(repo, revmap, trp, prog, changesets)

                needfiles = {}
                if repo.ui.configbool('server', 'validate', default=False):
                    cl = repo.changelog
                    ml = repo.manifestlog
                    # validate incoming csets have their manifests
                    for cset in xrange(clstart, clend):
                        mfnode = cl.changelogrevision(cset).manifest
                        mfest = ml[mfnode].readdelta()
                        # store file nodes we must see
                        for f, n in mfest.iteritems():
                            needfiles.setdefault(f, set()).add(n)

                # process the files
                repo.ui.status(_("adding file changes\n"))
                newrevs, newfiles = _addchangegroupfiles(
                    repo, self, revmap, trp, efiles, needfiles)
                revisions += newrevs
                files += newfiles

                dh = 0
                if oldheads:
                    heads = cl.heads()
                    dh = len(heads) - len(oldheads)
                    for h in heads:
                        if h not in oldheads and repo[h].closesbranch():
                            dh -= 1
                htext = ""
                if dh:
                    htext = _(" (%+d heads)") % dh

                repo.ui.status(_("added %d changesets"
                                 " with %d changes to %d files%s\n")
                               % (changesets, revisions, files, htext))
                repo.invalidatevolatilesets()

                if changesets > 0:
                    if 'node' not in tr.hookargs:
                        tr.hookargs['node'] = hex(cl.node(clstart))
                        tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                        hookargs = dict(tr.hookargs)
                    else:
                        hookargs = dict(tr.hookargs)
                        hookargs['node'] = hex(cl.node(clstart))
                        hookargs['node_last'] = hex(cl.node(clend - 1))
                    repo.hook('pretxnchangegroup', throw=True, **hookargs)

                added = [cl.node(r) for r in xrange(clstart, clend)]
                publishing = repo.publishing()
                if srctype in ('push', 'serve'):
                    # Old servers cannot push the boundary themselves.
                    # New servers won't push the boundary if changeset already
                    # exists locally as secret
                    #
                    # We should not use added here but the list of all changes
                    # in the bundle
                    if publishing:
                        phases.advanceboundary(repo, tr, phases.public,
                                               srccontent)
                    else:
                        # Those changesets have been pushed from the
                        # outside, their phases are going to be pushed
                        # alongside. Therefore `targetphase` is
                        # ignored.
                        phases.advanceboundary(repo, tr, phases.draft,
                                               srccontent)
                        phases.retractboundary(repo, tr, phases.draft, added)
                elif srctype != 'strip':
                    # publishing only alters behavior during push
                    #
                    # strip should not touch boundary at all
                    phases.retractboundary(repo, tr, targetphase, added)

                if changesets > 0:
                    if srctype != 'strip':
                        # During strip, the branchcache is invalid but the
                        # coming call to `destroyed` will repair it.
                        # In other cases we can safely update the cache on
                        # disk.
                        repo.ui.debug('updating the branch cache\n')
                        branchmap.updatecache(repo.filtered('served'))

                    def runhooks():
                        # These hooks run when the lock releases, not when the
                        # transaction closes. So it's possible for the changelog
                        # to have changed since we last saw it.
                        if clstart >= len(repo):
                            return

                        repo.hook("changegroup", **hookargs)

                        for n in added:
                            args = hookargs.copy()
                            args['node'] = hex(n)
                            del args['node_last']
                            repo.hook("incoming", **args)

                        newheads = [h for h in repo.heads()
                                    if h not in oldheads]
                        repo.ui.log("incoming",
                                    "%s incoming changes - new heads: %s\n",
                                    len(added),
                                    ', '.join([hex(c[:6]) for c in newheads]))

                    tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                    lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
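
# Editor's sketch (illustrative only): typical consumption of an unpacker,
# per the cg1unpacker docstring.  repo, fh, and the URL are hypothetical
# stand-ins; real callers live elsewhere (e.g. the exchange/bundle code).
def _example_apply(repo, fh):
    cg = cg1unpacker(fh, 'UN')
    ret = cg.apply(repo, 'pull', 'http://example.com/repo')
    return ret > 1  # per apply()'s contract, >1 means heads were added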

class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs = headertuple
        flags = 0
        return node, p1, p2, deltabase, cs, flags

class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
                                                  numchanges)
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % d)
            dirlog = repo.manifestlog._revlog.dirlog(d)
            if not dirlog.addgroup(self, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))

class headerlessfixup(object):
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        if self._h:
            d, self._h = self._h[:n], self._h[n:]
            if len(d) < n:
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)
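
# Editor's sketch (illustrative only): headerlessfixup re-prepends header
# bytes that were already consumed while sniffing a stream's type, so a
# downstream reader still sees the complete data.  io.BytesIO is a stand-in
# and the 'HG10UN' magic here is just sample data.
def _example_headerlessfixup():
    import io
    fh = io.BytesIO('HG10UNrest-of-stream')
    sniffed = fh.read(6)                  # header consumed while sniffing
    fixed = headerlessfixup(fh, sniffed)  # glue it back on
    assert fixed.read(6) == 'HG10UN'      # served from the buffer first
    assert fixed.read(4) == 'rest'        # ...then from the real stream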

class cg1packer(object):
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    version = '01'
    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        # experimental config: bundle.reorder
        reorder = repo.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None

    def close(self):
        return closechunk()

    def fileheader(self, fname):
        return chunkheader(len(fname)) + fname

    # Extracted both for clarity and for overriding in extensions.
    def _sortgroup(self, revlog, nodelist, lookup):
        """Sort nodes for change group and turn them into revnums."""
        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and self._reorder is None) or self._reorder:
            dag = dagutil.revlogdag(revlog)
            return dag.linearize(set(revlog.rev(n) for n in nodelist))
        else:
            return sorted([revlog.rev(n) for n in nodelist])

    def group(self, nodelist, revlog, lookup, units=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0]; the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated; units
        specifies the type of revlog that is touched (changelog, manifest,
        etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self.close()
            return

        revs = self._sortgroup(revlog, nodelist, lookup)

        # add the parent of the first rev
        p = revlog.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        total = len(revs) - 1
        msgbundling = _('bundling')
        for r in xrange(len(revs) - 1):
            if units is not None:
                self._progress(msgbundling, r + 1, unit=units, total=total)
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(revlog.node(curr))
            for c in self.revchunk(revlog, curr, prev, linknode):
                yield c

        if units is not None:
            self._progress(msgbundling, None)
        yield self.close()

    # filter any nodes that claim to be part of the known set
    def prune(self, revlog, missing, commonrevs):
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        """Pack flat manifests into a changegroup stream."""
        assert not dir
        for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
                                lookuplinknode, units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        return ''

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        repo = self._repo
        cl = repo.changelog

        clrevorder = {}
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()

        # Callback for the changelog, used to collect changed files and manifest
        # nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            clrevorder[x] = len(clrevorder)
            n = c[0]
            # record the first changeset introducing this manifest version
            mfs.setdefault(n, x)
            # Record a complete list of potentially-changed files in
            # this manifest.
            changedfiles.update(c[3])
            return x

        self._verbosenote(_('uncompressed size of bundle content:\n'))
        size = 0
        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (changelog)\n') % size)

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the filelogs
        # are walked in revlog order.
        #
        # When taking the slowpath with reorder=None and the manifest revlog
        # uses generaldelta, the manifest may be walked in the "wrong" order.
        # Without 'clrevorder', we would get an incorrect linkrev (see fix in
        # cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta, so
        # it is only reordered when reorder=True. To handle this case, we
        # simply take the slowpath, which already has the 'clrevorder' logic.
        # This was also fixed in cc0ff93d0c0c.
        fastpathlinkrev = fastpathlinkrev and not self._reorder
        # Treemanifests don't work correctly with fastpathlinkrev
        # either, because we don't discover which directory nodes to
        # send along with files. This could probably be fixed.
        fastpathlinkrev = fastpathlinkrev and (
            'treemanifest' not in repo.requirements)

        for chunk in self.generatemanifests(commonrevs, clrevorder,
                                            fastpathlinkrev, mfs, fnodes):
            yield chunk
        mfs.clear()
        clrevs = set(cl.rev(x) for x in clnodes)

        if not fastpathlinkrev:
            def linknodes(unused, fname):
                return fnodes.get(fname, {})
        else:
            cln = cl.node
            def linknodes(filerevlog, fname):
                llr = filerevlog.linkrev
                fln = filerevlog.node
                revs = ((r, llr(r)) for r in filerevlog)
                return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)

        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

    def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
                          fnodes):
        repo = self._repo
        mfl = repo.manifestlog
        dirlog = mfl._revlog.dirlog
        tmfnodes = {'': mfs}

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def makelookupmflinknode(dir):
            if fastpathlinkrev:
                assert not dir
                return mfs.__getitem__

            def lookupmflinknode(x):
                """Callback for looking up the linknode for manifests.

                Returns the linkrev node for the specified manifest.

                SIDE EFFECT:

                1) fclnodes gets populated with the list of relevant
                   file nodes if we're not using fastpathlinkrev
                2) When treemanifests are in use, collects treemanifest nodes
                   to send

                Note that this means manifests must be completely sent to
                the client before you can trust the list of files and
                treemanifests to send.
                """
                clnode = tmfnodes[dir][x]
                mdata = mfl.get(dir, x).readfast(shallow=True)
                for p, n, fl in mdata.iterentries():
                    if fl == 't': # subdirectory manifest
                        subdir = dir + p + '/'
                        tmfclnodes = tmfnodes.setdefault(subdir, {})
                        tmfclnode = tmfclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[tmfclnode]:
                            tmfclnodes[n] = clnode
                    else:
                        f = dir + p
                        fclnodes = fnodes.setdefault(f, {})
                        fclnode = fclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            fclnodes[n] = clnode
                return clnode
            return lookupmflinknode

        size = 0
        while tmfnodes:
            dir = min(tmfnodes)
            nodes = tmfnodes[dir]
            prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
            if not dir or prunednodes:
                for x in self._packmanifests(dir, prunednodes,
                                             makelookupmflinknode(dir)):
                    size += len(x)
                    yield x
            del tmfnodes[dir]
        self._verbosenote(_('%8.i (manifests)\n') % size)
        yield self._manifestsdone()

    # The 'source' parameter is useful for extensions
    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        repo = self._repo
        progress = self._progress
        msgbundling = _('bundling')

        total = len(changedfiles)
        # for progress output
        msgfiles = _('files')
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise error.Abort(_("empty or missing revlog for %s") % fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
            if filenodes:
                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                         total=total)
                h = self.fileheader(fname)
                size = len(h)
                yield h
                for chunk in self.group(filenodes, filerevlog, lookupfilelog):
                    size += len(chunk)
                    yield chunk
                self._verbosenote(_('%8.i %s\n') % (size, fname))
        progress(msgbundling, None)

    def deltaparent(self, revlog, rev, p1, p2, prev):
        return prev

    def revchunk(self, revlog, rev, prev, linknode):
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = self.deltaparent(revlog, rev, p1, p2, prev)

        prefix = ''
        if revlog.iscensored(base) or revlog.iscensored(rev):
            try:
                delta = revlog.revision(node)
            except error.CensoredNodeError as e:
                delta = e.tombstone
            if base == nullrev:
                prefix = mdiff.trivialdiffheader(len(delta))
            else:
                baselen = revlog.rawsize(base)
                prefix = mdiff.replacediffheader(baselen, len(delta))
        elif base == nullrev:
            delta = revlog.revision(node)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        flags = revlog.flags(rev)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta
    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # do nothing with basenode, it is implicitly the previous one in HG10
        # do nothing with flags, it is implicitly 0 for cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)

class cg2packer(cg1packer):
    version = '02'
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER

    def __init__(self, repo, bundlecaps=None):
        super(cg2packer, self).__init__(repo, bundlecaps)
        if self._reorder is None:
            # Since generaldelta is directly supported by cg2, reordering
            # generally doesn't help, so we disable it by default (treating
            # bundle.reorder=auto just like bundle.reorder=False).
            self._reorder = False

    def deltaparent(self, revlog, rev, p1, p2, prev):
        dp = revlog.deltaparent(rev)
        if dp == nullrev and revlog.storedeltachains:
            # Avoid sending full revisions when delta parent is null. Pick prev
            # in that case. It's tempting to pick p1 in this case, as p1 will
            # be smaller in the common case. However, computing a delta against
            # p1 may require resolving the raw text of p1, which could be
            # expensive. The revlog caches should have prev cached, meaning
            # less CPU for changegroup generation. There is likely room to add
            # a flag and/or config option to control this behavior.
            return prev
        elif dp == nullrev:
            # revlog is configured to use full snapshot for a reason,
            # stick to full snapshot.
            return nullrev
        elif dp not in (p1, p2, prev):
            # Pick prev when we can't be sure remote has the base revision.
            return prev
        else:
            return dp

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # Do nothing with flags, it is implicitly 0 in cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
848 class cg3packer(cg2packer):
850 class cg3packer(cg2packer):
849 version = '03'
851 version = '03'
850 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
852 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
851
853
852 def _packmanifests(self, dir, mfnodes, lookuplinknode):
854 def _packmanifests(self, dir, mfnodes, lookuplinknode):
853 if dir:
855 if dir:
854 yield self.fileheader(dir)
856 yield self.fileheader(dir)
855
857
856 dirlog = self._repo.manifestlog._revlog.dirlog(dir)
858 dirlog = self._repo.manifestlog._revlog.dirlog(dir)
857 for chunk in self.group(mfnodes, dirlog, lookuplinknode,
859 for chunk in self.group(mfnodes, dirlog, lookuplinknode,
858 units=_('manifests')):
860 units=_('manifests')):
859 yield chunk
861 yield chunk
860
862
861 def _manifestsdone(self):
863 def _manifestsdone(self):
862 return self.close()
864 return self.close()
863
865
864 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
866 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
865 return struct.pack(
867 return struct.pack(
866 self.deltaheader, node, p1n, p2n, basenode, linknode, flags)
868 self.deltaheader, node, p1n, p2n, basenode, linknode, flags)
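
    # Unlike cg1/cg2, the cg3 header carries the revlog flags in a trailing
    # 16-bit field (see _CHANGEGROUPV3_DELTA_HEADER), which is what lets cg3
    # exchange flagged revisions and treemanifest revlogs.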

_packermap = {'01': (cg1packer, cg1unpacker),
             # cg2 adds support for exchanging generaldelta
             '02': (cg2packer, cg2unpacker),
             # cg3 adds support for exchanging revlog flags and treemanifests
             '03': (cg3packer, cg3unpacker),
}
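
# Illustrative lookup, not part of the original module: callers select a
# (packer, unpacker) pair by wire version, e.g.:
#
#   packercls, unpackercls = _packermap['02']
#   bundler = packercls(repo, bundlecaps=None)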

def allsupportedversions(ui):
    versions = set(_packermap.keys())
    versions.discard('03')
    if (ui.configbool('experimental', 'changegroup3') or
        ui.configbool('experimental', 'treemanifest')):
        versions.add('03')
    return versions
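
# For example, with neither experimental.changegroup3 nor
# experimental.treemanifest enabled, this returns {'01', '02'};
# setting either option adds '03'.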

# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    versions = allsupportedversions(repo.ui)
    if 'treemanifest' in repo.requirements:
        versions.add('03')
    return versions

# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    versions = allsupportedversions(repo.ui)
    if 'treemanifest' in repo.requirements:
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        versions.discard('01')
        versions.discard('02')
        versions.add('03')
    return versions
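
# Note the asymmetry: a treemanifest repo still *accepts* versions 01 and 02
# (supportedincomingversions keeps them), but it will only *produce* 03.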

def safeversion(repo):
    # Finds the smallest version that it's safe to assume clients of the repo
    # will support. For example, all hg versions that support generaldelta also
    # support changegroup 02.
    versions = supportedoutgoingversions(repo)
    if 'generaldelta' in repo.requirements:
        versions.discard('01')
    assert versions
    return min(versions)
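
# Illustrative: on a generaldelta repo without treemanifests this leaves
# {'02'} (or {'02', '03'} with changegroup3 enabled), so min() picks '02';
# plain string comparison suffices because the version names sort numerically.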

def getbundler(version, repo, bundlecaps=None):
    assert version in supportedoutgoingversions(repo)
    return _packermap[version][0](repo, bundlecaps)

def getunbundler(version, fh, alg, extras=None):
    return _packermap[version][1](fh, alg, extras=extras)
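
# A sketch of the pairing (names from this module; 'fh' is any readable
# stream, and alg identifies the stream compression, passed as None by
# getsubset below):
#
#   bundler = getbundler('02', repo)
#   unbundler = getunbundler('02', fh, None)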

def _changegroupinfo(repo, nodes, source):
    if repo.ui.verbose or source == 'bundle':
        repo.ui.status(_("%d changesets found\n") % len(nodes))
    if repo.ui.debugflag:
        repo.ui.debug("list of changesets:\n")
        for node in nodes:
            repo.ui.debug("%s\n" % hex(node))

def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know that all linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
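
# fastpathlinkrev lets the packer trust each revlog's stored linkrev instead
# of searching for a linked changeset that is actually being sent, which is
# only safe when the client is known to pull all linked changesets (hence
# the unfiltered-heads check above).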

def getsubset(repo, outgoing, bundler, source, fastpath=False):
    gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
    return getunbundler(bundler.version, util.chunkbuffer(gengroup), None,
                        {'clcount': len(outgoing.missing)})

def changegroupsubset(repo, roots, heads, source, version='01'):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the roots and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex, as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    outgoing = discovery.outgoing(repo, missingroots=roots, missingheads=heads)
    bundler = getbundler(version, repo)
    return getsubset(repo, outgoing, bundler, source)
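
# Example with a hypothetical root node: bundle everything between it and
# the current heads as a version-01 changegroup:
#
#   cg = changegroupsubset(repo, [rootnode], repo.heads(), 'pull')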

def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
                           version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing. Returns a raw changegroup generator."""
    if not outgoing.missing:
        return None
    bundler = getbundler(version, repo, bundlecaps)
    return getsubsetraw(repo, outgoing, bundler, source)

def getlocalchangegroup(repo, source, outgoing, bundlecaps=None,
                        version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing."""
    if not outgoing.missing:
        return None
    bundler = getbundler(version, repo, bundlecaps)
    return getsubset(repo, outgoing, bundler, source)
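
# The two helpers above differ only in what they return: the *raw* variant
# yields the generator of encoded chunks, while getlocalchangegroup wraps
# the same stream in an unbundler via getsubset().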

def getchangegroup(repo, source, outgoing, bundlecaps=None,
                   version='01'):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors of common.

    If heads is None, use the local heads. If common is None, use [nullid].

    The nodes in common might not all be known locally due to the way the
    current discovery protocol works.
    """
    return getlocalchangegroup(repo, source, outgoing, bundlecaps=bundlecaps,
                               version=version)
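
# Sketch, assuming 'outgoing' is a precomputed discovery.outgoing instance:
#
#   cg = getchangegroup(repo, 'serve', outgoing, version=safeversion(repo))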

def changegroup(repo, basenodes, source):
    # to avoid a race we use changegroupsubset() (issue1320)
    return changegroupsubset(repo, basenodes, repo.heads(), source)

def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    revisions = 0
    files = 0
    for chunkdata in iter(source.filelogheader, {}):
        files += 1
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        repo.ui.progress(_('files'), files, unit=_('files'),
                         total=expectedfiles)
        fl = repo.file(f)
        o = len(fl)
        try:
            if not fl.addgroup(source, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        if f in needfiles:
            needs = needfiles[f]
            for new in xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    repo.ui.progress(_('files'), None)

    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files
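
# needfiles maps filename -> set of expected filenodes. Any entry left after
# the stream is drained must already exist locally; if fl.rev() cannot find
# it, the incoming data was incomplete, hence the 'run hg verify' abort.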