cg1packer: fix `compressed` method...
Stanislau Hlebik
r30589:182cacaa default
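
This revision changes `cg1unpacker.compressed()` (the one-line change in the hunk below). `__init__` normalizes `alg=None` to `'UN'`, so `self._type` is never None and the old check `self._type is not None` reported every stream, including uncompressed ones, as compressed. The new check also excludes `'UN'` (no compression), so `compressed()` now returns False for uncompressed streams.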
@@ -1,1048 +1,1048 @@
# changegroup.py - Mercurial changegroup manipulation functions
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import os
import struct
import tempfile
import weakref

from .i18n import _
from .node import (
    hex,
    nullrev,
    short,
)

from . import (
    branchmap,
    dagutil,
    discovery,
    error,
    mdiff,
    phases,
    util,
)

_CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
_CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
_CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"

def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    s = stream.read(n)
    if len(s) < n:
        raise error.Abort(_("stream ended unexpectedly"
                            " (got %d bytes, expected %d)")
                          % (len(s), n))
    return s

def getchunk(stream):
    """return the next chunk from stream as a string"""
    d = readexactly(stream, 4)
    l = struct.unpack(">l", d)[0]
    if l <= 4:
        if l:
            raise error.Abort(_("invalid chunk length %d") % l)
        return ""
    return readexactly(stream, l - 4)

def chunkheader(length):
    """return a changegroup chunk header (string)"""
    return struct.pack(">l", length + 4)

def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    return struct.pack(">l", 0)
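
# A minimal sketch of the framing above: the 4-byte big-endian length prefix
# counts itself, so a non-empty payload `data` (an illustrative byte string)
# round-trips like this:
#
#   import io
#   buf = io.BytesIO(chunkheader(len(data)) + data + closechunk())
#   assert getchunk(buf) == data   # length prefix includes its own 4 bytes
#   assert getchunk(buf) == ""     # the zero-length chunk marks the end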

def combineresults(results):
    """logic to combine 0 or more addchangegroup results into one"""
    changedheads = 0
    result = 1
    for ret in results:
        # If any changegroup result is 0, return 0
        if ret == 0:
            result = 0
            break
        if ret < -1:
            changedheads += ret + 1
        elif ret > 1:
            changedheads += ret - 1
    if changedheads > 0:
        result = 1 + changedheads
    elif changedheads < 0:
        result = -1 + changedheads
    return result
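
# Worked examples of the combination rules above (inputs follow the
# head-count convention documented in cg1unpacker.apply below):
#
#   combineresults([2, -3]) == -2   # +1 head and -2 heads combine to -1 head
#   combineresults([1, 0]) == 0     # any zero result short-circuits to 0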

def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                # Increase default buffer size because default is usually
                # small (4k is common on Linux).
                fh = open(filename, "wb", 131072)
        else:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, "wb")
            cleanup = filename
        for c in chunks:
            fh.write(c)
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
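
# A minimal usage sketch, assuming `ui` is a ui object and `chunks` any
# iterable of byte strings (names illustrative):
#
#   path = writechunks(ui, chunks, None)        # hg-bundle-*.hg temp file
#   path = writechunks(ui, chunks, 'out.hg')    # writes to the given path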

class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        if alg is None:
            alg = 'UN'
        if alg not in util.compengines.supportedbundletypes:
            raise error.Abort(_('unknown stream compression type: %s')
                              % alg)
        if alg == 'BZ':
            alg = '_truncatedBZ'

        compengine = util.compengines.forbundletype(alg)
        self._stream = compengine.decompressorreader(fh)
        self._type = alg
        self.extras = extras or {}
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
-        return self._type is not None
+        return self._type is not None and self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                'deltabase': deltabase, 'delta': delta, 'flags': flags}

    def getchunks(self):
        """Return all the chunks contained in the bundle.

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parses the changegroup data; otherwise it
        would block in the sshrepo case because it doesn't know where the
        stream ends.
        """
        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, changegroup versions 1 and 2 have a series of groups
        # with one group per file. changegroup 3 has a series of directory
        # manifests before the files.
        count = 0
        emptycount = 0
        while emptycount < self._grouplistcount:
            empty = True
            count += 1
            while True:
                chunk = getchunk(self)
                if not chunk:
                    if empty and count > 2:
                        emptycount += 1
                    break
                empty = False
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()
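
# A minimal forwarding sketch, assuming `fh` carries an uncompressed
# changegroup stream and `ui` is a ui object (names illustrative): re-frame
# the chunks and spool them to disk without holding the bundle in memory.
#
#   unpacker = cg1unpacker(fh, 'UN')
#   path = writechunks(ui, unpacker.getchunks(), None)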

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        # We know that we'll never have more manifests than we had
        # changesets.
        self.callback = prog(_('manifests'), numchanges)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        repo.manifestlog._revlog.addgroup(self, revmap, trp)
        repo.ui.progress(_('manifests'), None)
        self.callback = None

    def apply(self, repo, srctype, url, emptyok=False,
              targetphase=phases.draft, expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        try:
            with repo.transaction("\n".join([srctype,
                                             util.hidepassword(url)])) as tr:
                # The transaction could have been created before and already
                # carries source information. In this case we use the top
                # level data. We overwrite the argument because we need to use
                # the top level value (if they exist) in this function.
                srctype = tr.hookargs.setdefault('source', srctype)
                url = tr.hookargs.setdefault('url', url)
                repo.hook('prechangegroup', throw=True, **tr.hookargs)

                # write changelog data to temp files so concurrent readers
                # will not see an inconsistent view
                cl = repo.changelog
                cl.delayupdate(tr)
                oldheads = cl.heads()

                trp = weakref.proxy(tr)
                # pull off the changeset group
                repo.ui.status(_("adding changesets\n"))
                clstart = len(cl)
                class prog(object):
                    def __init__(self, step, total):
                        self._step = step
                        self._total = total
                        self._count = 1
                    def __call__(self):
                        repo.ui.progress(self._step, self._count,
                                         unit=_('chunks'), total=self._total)
                        self._count += 1
                self.callback = prog(_('changesets'), expectedtotal)

                efiles = set()
                def onchangelog(cl, node):
                    efiles.update(cl.readfiles(node))

                self.changelogheader()
                srccontent = cl.addgroup(self, csmap, trp,
                                         addrevisioncb=onchangelog)
                efiles = len(efiles)

                if not (srccontent or emptyok):
                    raise error.Abort(_("received changelog group is empty"))
                clend = len(cl)
                changesets = clend - clstart
                repo.ui.progress(_('changesets'), None)
                self.callback = None

                # pull off the manifest group
                repo.ui.status(_("adding manifests\n"))
                self._unpackmanifests(repo, revmap, trp, prog, changesets)

                needfiles = {}
                if repo.ui.configbool('server', 'validate', default=False):
                    cl = repo.changelog
                    ml = repo.manifestlog
                    # validate incoming csets have their manifests
                    for cset in xrange(clstart, clend):
                        mfnode = cl.changelogrevision(cset).manifest
                        mfest = ml[mfnode].readdelta()
                        # store file nodes we must see
                        for f, n in mfest.iteritems():
                            needfiles.setdefault(f, set()).add(n)

                # process the files
                repo.ui.status(_("adding file changes\n"))
                newrevs, newfiles = _addchangegroupfiles(
                    repo, self, revmap, trp, efiles, needfiles)
                revisions += newrevs
                files += newfiles

                dh = 0
                if oldheads:
                    heads = cl.heads()
                    dh = len(heads) - len(oldheads)
                    for h in heads:
                        if h not in oldheads and repo[h].closesbranch():
                            dh -= 1
                htext = ""
                if dh:
                    htext = _(" (%+d heads)") % dh

                repo.ui.status(_("added %d changesets"
                                 " with %d changes to %d files%s\n")
                               % (changesets, revisions, files, htext))
                repo.invalidatevolatilesets()

                if changesets > 0:
                    if 'node' not in tr.hookargs:
                        tr.hookargs['node'] = hex(cl.node(clstart))
                        tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                        hookargs = dict(tr.hookargs)
                    else:
                        hookargs = dict(tr.hookargs)
                        hookargs['node'] = hex(cl.node(clstart))
                        hookargs['node_last'] = hex(cl.node(clend - 1))
                    repo.hook('pretxnchangegroup', throw=True, **hookargs)

                added = [cl.node(r) for r in xrange(clstart, clend)]
                publishing = repo.publishing()
                if srctype in ('push', 'serve'):
                    # Old servers can not push the boundary themselves.
                    # New servers won't push the boundary if changeset already
                    # exists locally as secret
                    #
                    # We should not use added here but the list of all changes
                    # in the bundle
                    if publishing:
                        phases.advanceboundary(repo, tr, phases.public,
                                               srccontent)
                    else:
                        # Those changesets have been pushed from the
                        # outside, their phases are going to be pushed
                        # alongside. Therefore `targetphase` is
                        # ignored.
                        phases.advanceboundary(repo, tr, phases.draft,
                                               srccontent)
                        phases.retractboundary(repo, tr, phases.draft, added)
                elif srctype != 'strip':
                    # publishing only alters behavior during push
                    #
                    # strip should not touch boundary at all
                    phases.retractboundary(repo, tr, targetphase, added)

                if changesets > 0:
                    if srctype != 'strip':
                        # During strip, branchcache is invalid but
                        # the coming call to `destroyed` will repair it.
                        # In other cases we can safely update the cache on
                        # disk.
                        repo.ui.debug('updating the branch cache\n')
                        branchmap.updatecache(repo.filtered('served'))

                    def runhooks():
                        # These hooks run when the lock releases, not when the
                        # transaction closes. So it's possible for the changelog
                        # to have changed since we last saw it.
                        if clstart >= len(repo):
                            return

                        repo.hook("changegroup", **hookargs)

                        for n in added:
                            args = hookargs.copy()
                            args['node'] = hex(n)
                            del args['node_last']
                            repo.hook("incoming", **args)

                        newheads = [h for h in repo.heads()
                                    if h not in oldheads]
                        repo.ui.log("incoming",
                                    "%s incoming changes - new heads: %s\n",
                                    len(added),
                                    ', '.join([hex(c[:6]) for c in newheads]))

                    tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                    lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
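
# A minimal usage sketch, assuming `fh` is an open bundle stream and `repo`
# a local repository (names illustrative):
#
#   unpacker = cg1unpacker(fh, 'UN')
#   ret = unpacker.apply(repo, 'pull', 'https://example.com/repo')
#   # ret follows the convention documented in apply(): 0 nothing changed,
#   # 1 head count unchanged, 2..n heads added, -2..-n heads removed.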

class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs = headertuple
        flags = 0
        return node, p1, p2, deltabase, cs, flags

class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
                                                  numchanges)
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % d)
            dirlog = repo.manifestlog._revlog.dirlog(d)
            if not dirlog.addgroup(self, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))

class headerlessfixup(object):
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        if self._h:
            d, self._h = self._h[:n], self._h[n:]
            if len(d) < n:
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)
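
# A minimal sketch of what headerlessfixup is for, assuming a caller has
# already consumed magic bytes while sniffing the stream type (values
# illustrative):
#
#   magic = fh.read(6)                 # e.g. peek at a 'HG10UN' header
#   fh = headerlessfixup(fh, magic)    # push the bytes back for the unpacker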
500
500
501 class cg1packer(object):
501 class cg1packer(object):
502 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
502 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
503 version = '01'
503 version = '01'
504 def __init__(self, repo, bundlecaps=None):
504 def __init__(self, repo, bundlecaps=None):
505 """Given a source repo, construct a bundler.
505 """Given a source repo, construct a bundler.
506
506
507 bundlecaps is optional and can be used to specify the set of
507 bundlecaps is optional and can be used to specify the set of
508 capabilities which can be used to build the bundle.
508 capabilities which can be used to build the bundle.
509 """
509 """
510 # Set of capabilities we can use to build the bundle.
510 # Set of capabilities we can use to build the bundle.
511 if bundlecaps is None:
511 if bundlecaps is None:
512 bundlecaps = set()
512 bundlecaps = set()
513 self._bundlecaps = bundlecaps
513 self._bundlecaps = bundlecaps
514 # experimental config: bundle.reorder
514 # experimental config: bundle.reorder
515 reorder = repo.ui.config('bundle', 'reorder', 'auto')
515 reorder = repo.ui.config('bundle', 'reorder', 'auto')
516 if reorder == 'auto':
516 if reorder == 'auto':
517 reorder = None
517 reorder = None
518 else:
518 else:
519 reorder = util.parsebool(reorder)
519 reorder = util.parsebool(reorder)
520 self._repo = repo
520 self._repo = repo
521 self._reorder = reorder
521 self._reorder = reorder
522 self._progress = repo.ui.progress
522 self._progress = repo.ui.progress
523 if self._repo.ui.verbose and not self._repo.ui.debugflag:
523 if self._repo.ui.verbose and not self._repo.ui.debugflag:
524 self._verbosenote = self._repo.ui.note
524 self._verbosenote = self._repo.ui.note
525 else:
525 else:
526 self._verbosenote = lambda s: None
526 self._verbosenote = lambda s: None
527
527
528 def close(self):
528 def close(self):
529 return closechunk()
529 return closechunk()
530
530
531 def fileheader(self, fname):
531 def fileheader(self, fname):
532 return chunkheader(len(fname)) + fname
532 return chunkheader(len(fname)) + fname
533
533
534 # Extracted both for clarity and for overriding in extensions.
534 # Extracted both for clarity and for overriding in extensions.
535 def _sortgroup(self, revlog, nodelist, lookup):
535 def _sortgroup(self, revlog, nodelist, lookup):
536 """Sort nodes for change group and turn them into revnums."""
536 """Sort nodes for change group and turn them into revnums."""
537 # for generaldelta revlogs, we linearize the revs; this will both be
537 # for generaldelta revlogs, we linearize the revs; this will both be
538 # much quicker and generate a much smaller bundle
538 # much quicker and generate a much smaller bundle
539 if (revlog._generaldelta and self._reorder is None) or self._reorder:
539 if (revlog._generaldelta and self._reorder is None) or self._reorder:
540 dag = dagutil.revlogdag(revlog)
540 dag = dagutil.revlogdag(revlog)
541 return dag.linearize(set(revlog.rev(n) for n in nodelist))
541 return dag.linearize(set(revlog.rev(n) for n in nodelist))
542 else:
542 else:
543 return sorted([revlog.rev(n) for n in nodelist])
543 return sorted([revlog.rev(n) for n in nodelist])
544
544
545 def group(self, nodelist, revlog, lookup, units=None):
545 def group(self, nodelist, revlog, lookup, units=None):
546 """Calculate a delta group, yielding a sequence of changegroup chunks
546 """Calculate a delta group, yielding a sequence of changegroup chunks
547 (strings).
547 (strings).
548
548
549 Given a list of changeset revs, return a set of deltas and
549 Given a list of changeset revs, return a set of deltas and
550 metadata corresponding to nodes. The first delta is
550 metadata corresponding to nodes. The first delta is
551 first parent(nodelist[0]) -> nodelist[0], the receiver is
551 first parent(nodelist[0]) -> nodelist[0], the receiver is
552 guaranteed to have this parent as it has all history before
552 guaranteed to have this parent as it has all history before
553 these changesets. In the case firstparent is nullrev the
553 these changesets. In the case firstparent is nullrev the
554 changegroup starts with a full revision.
554 changegroup starts with a full revision.
555
555
556 If units is not None, progress detail will be generated, units specifies
556 If units is not None, progress detail will be generated, units specifies
557 the type of revlog that is touched (changelog, manifest, etc.).
557 the type of revlog that is touched (changelog, manifest, etc.).
558 """
558 """
559 # if we don't have any revisions touched by these changesets, bail
559 # if we don't have any revisions touched by these changesets, bail
560 if len(nodelist) == 0:
560 if len(nodelist) == 0:
561 yield self.close()
561 yield self.close()
562 return
562 return
563
563
564 revs = self._sortgroup(revlog, nodelist, lookup)
564 revs = self._sortgroup(revlog, nodelist, lookup)
565
565
566 # add the parent of the first rev
566 # add the parent of the first rev
567 p = revlog.parentrevs(revs[0])[0]
567 p = revlog.parentrevs(revs[0])[0]
568 revs.insert(0, p)
568 revs.insert(0, p)
569
569
570 # build deltas
570 # build deltas
571 total = len(revs) - 1
571 total = len(revs) - 1
572 msgbundling = _('bundling')
572 msgbundling = _('bundling')
573 for r in xrange(len(revs) - 1):
573 for r in xrange(len(revs) - 1):
574 if units is not None:
574 if units is not None:
575 self._progress(msgbundling, r + 1, unit=units, total=total)
575 self._progress(msgbundling, r + 1, unit=units, total=total)
576 prev, curr = revs[r], revs[r + 1]
576 prev, curr = revs[r], revs[r + 1]
577 linknode = lookup(revlog.node(curr))
577 linknode = lookup(revlog.node(curr))
578 for c in self.revchunk(revlog, curr, prev, linknode):
578 for c in self.revchunk(revlog, curr, prev, linknode):
579 yield c
579 yield c
580
580
581 if units is not None:
581 if units is not None:
582 self._progress(msgbundling, None)
582 self._progress(msgbundling, None)
583 yield self.close()
583 yield self.close()
584
584
585 # filter any nodes that claim to be part of the known set
585 # filter any nodes that claim to be part of the known set
586 def prune(self, revlog, missing, commonrevs):
586 def prune(self, revlog, missing, commonrevs):
587 rr, rl = revlog.rev, revlog.linkrev
587 rr, rl = revlog.rev, revlog.linkrev
588 return [n for n in missing if rl(rr(n)) not in commonrevs]
588 return [n for n in missing if rl(rr(n)) not in commonrevs]
589
589
590 def _packmanifests(self, dir, mfnodes, lookuplinknode):
590 def _packmanifests(self, dir, mfnodes, lookuplinknode):
591 """Pack flat manifests into a changegroup stream."""
591 """Pack flat manifests into a changegroup stream."""
592 assert not dir
592 assert not dir
593 for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
593 for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
594 lookuplinknode, units=_('manifests')):
594 lookuplinknode, units=_('manifests')):
595 yield chunk
595 yield chunk
596
596
597 def _manifestsdone(self):
597 def _manifestsdone(self):
598 return ''
598 return ''
599
599
600 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
600 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
601 '''yield a sequence of changegroup chunks (strings)'''
601 '''yield a sequence of changegroup chunks (strings)'''
602 repo = self._repo
602 repo = self._repo
603 cl = repo.changelog
603 cl = repo.changelog
604
604
605 clrevorder = {}
605 clrevorder = {}
606 mfs = {} # needed manifests
606 mfs = {} # needed manifests
607 fnodes = {} # needed file nodes
607 fnodes = {} # needed file nodes
608 changedfiles = set()
608 changedfiles = set()
609
609
610 # Callback for the changelog, used to collect changed files and manifest
610 # Callback for the changelog, used to collect changed files and manifest
611 # nodes.
611 # nodes.
612 # Returns the linkrev node (identity in the changelog case).
612 # Returns the linkrev node (identity in the changelog case).
613 def lookupcl(x):
613 def lookupcl(x):
614 c = cl.read(x)
614 c = cl.read(x)
615 clrevorder[x] = len(clrevorder)
615 clrevorder[x] = len(clrevorder)
616 n = c[0]
616 n = c[0]
617 # record the first changeset introducing this manifest version
617 # record the first changeset introducing this manifest version
618 mfs.setdefault(n, x)
618 mfs.setdefault(n, x)
619 # Record a complete list of potentially-changed files in
619 # Record a complete list of potentially-changed files in
620 # this manifest.
620 # this manifest.
621 changedfiles.update(c[3])
621 changedfiles.update(c[3])
622 return x
622 return x
623
623
624 self._verbosenote(_('uncompressed size of bundle content:\n'))
624 self._verbosenote(_('uncompressed size of bundle content:\n'))
625 size = 0
625 size = 0
626 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
626 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
627 size += len(chunk)
627 size += len(chunk)
628 yield chunk
628 yield chunk
629 self._verbosenote(_('%8.i (changelog)\n') % size)
629 self._verbosenote(_('%8.i (changelog)\n') % size)
630
630
631 # We need to make sure that the linkrev in the changegroup refers to
631 # We need to make sure that the linkrev in the changegroup refers to
632 # the first changeset that introduced the manifest or file revision.
632 # the first changeset that introduced the manifest or file revision.
633 # The fastpath is usually safer than the slowpath, because the filelogs
633 # The fastpath is usually safer than the slowpath, because the filelogs
634 # are walked in revlog order.
634 # are walked in revlog order.
635 #
635 #
636 # When taking the slowpath with reorder=None and the manifest revlog
636 # When taking the slowpath with reorder=None and the manifest revlog
637 # uses generaldelta, the manifest may be walked in the "wrong" order.
637 # uses generaldelta, the manifest may be walked in the "wrong" order.
638 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
638 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
639 # cc0ff93d0c0c).
639 # cc0ff93d0c0c).
640 #
640 #
641 # When taking the fastpath, we are only vulnerable to reordering
641 # When taking the fastpath, we are only vulnerable to reordering
642 # of the changelog itself. The changelog never uses generaldelta, so
642 # of the changelog itself. The changelog never uses generaldelta, so
643 # it is only reordered when reorder=True. To handle this case, we
643 # it is only reordered when reorder=True. To handle this case, we
644 # simply take the slowpath, which already has the 'clrevorder' logic.
644 # simply take the slowpath, which already has the 'clrevorder' logic.
645 # This was also fixed in cc0ff93d0c0c.
645 # This was also fixed in cc0ff93d0c0c.
646 fastpathlinkrev = fastpathlinkrev and not self._reorder
646 fastpathlinkrev = fastpathlinkrev and not self._reorder
647 # Treemanifests don't work correctly with fastpathlinkrev
647 # Treemanifests don't work correctly with fastpathlinkrev
648 # either, because we don't discover which directory nodes to
648 # either, because we don't discover which directory nodes to
649 # send along with files. This could probably be fixed.
649 # send along with files. This could probably be fixed.
650 fastpathlinkrev = fastpathlinkrev and (
650 fastpathlinkrev = fastpathlinkrev and (
651 'treemanifest' not in repo.requirements)
651 'treemanifest' not in repo.requirements)
652
652
653 for chunk in self.generatemanifests(commonrevs, clrevorder,
653 for chunk in self.generatemanifests(commonrevs, clrevorder,
654 fastpathlinkrev, mfs, fnodes):
654 fastpathlinkrev, mfs, fnodes):
655 yield chunk
655 yield chunk
656 mfs.clear()
656 mfs.clear()
657 clrevs = set(cl.rev(x) for x in clnodes)
657 clrevs = set(cl.rev(x) for x in clnodes)
658
658
659 if not fastpathlinkrev:
659 if not fastpathlinkrev:
660 def linknodes(unused, fname):
660 def linknodes(unused, fname):
661 return fnodes.get(fname, {})
661 return fnodes.get(fname, {})
662 else:
662 else:
663 cln = cl.node
663 cln = cl.node
664 def linknodes(filerevlog, fname):
664 def linknodes(filerevlog, fname):
665 llr = filerevlog.linkrev
665 llr = filerevlog.linkrev
666 fln = filerevlog.node
666 fln = filerevlog.node
667 revs = ((r, llr(r)) for r in filerevlog)
667 revs = ((r, llr(r)) for r in filerevlog)
668 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
668 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
669
669
670 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
670 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
671 source):
671 source):
672 yield chunk
672 yield chunk
673
673
674 yield self.close()
674 yield self.close()
675
675
676 if clnodes:
676 if clnodes:
677 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
677 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
678
678
679 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
679 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
680 fnodes):
680 fnodes):
681 repo = self._repo
681 repo = self._repo
682 mfl = repo.manifestlog
682 mfl = repo.manifestlog
683 dirlog = mfl._revlog.dirlog
683 dirlog = mfl._revlog.dirlog
684 tmfnodes = {'': mfs}
684 tmfnodes = {'': mfs}
685
685
686 # Callback for the manifest, used to collect linkrevs for filelog
686 # Callback for the manifest, used to collect linkrevs for filelog
687 # revisions.
687 # revisions.
688 # Returns the linkrev node (collected in lookupcl).
688 # Returns the linkrev node (collected in lookupcl).
689 def makelookupmflinknode(dir):
689 def makelookupmflinknode(dir):
690 if fastpathlinkrev:
690 if fastpathlinkrev:
691 assert not dir
691 assert not dir
692 return mfs.__getitem__
692 return mfs.__getitem__
693
693
694 def lookupmflinknode(x):
694 def lookupmflinknode(x):
695 """Callback for looking up the linknode for manifests.
695 """Callback for looking up the linknode for manifests.
696
696
697 Returns the linkrev node for the specified manifest.
697 Returns the linkrev node for the specified manifest.
698
698
699 SIDE EFFECT:
699 SIDE EFFECT:
700
700
701 1) fclnodes gets populated with the list of relevant
701 1) fclnodes gets populated with the list of relevant
702 file nodes if we're not using fastpathlinkrev
702 file nodes if we're not using fastpathlinkrev
703 2) When treemanifests are in use, collects treemanifest nodes
703 2) When treemanifests are in use, collects treemanifest nodes
704 to send
704 to send
705
705
706 Note that this means manifests must be completely sent to
706 Note that this means manifests must be completely sent to
707 the client before you can trust the list of files and
707 the client before you can trust the list of files and
708 treemanifests to send.
708 treemanifests to send.
709 """
709 """
710 clnode = tmfnodes[dir][x]
710 clnode = tmfnodes[dir][x]
711 mdata = mfl.get(dir, x).readfast(shallow=True)
711 mdata = mfl.get(dir, x).readfast(shallow=True)
712 for p, n, fl in mdata.iterentries():
712 for p, n, fl in mdata.iterentries():
713 if fl == 't': # subdirectory manifest
713 if fl == 't': # subdirectory manifest
714 subdir = dir + p + '/'
714 subdir = dir + p + '/'
715 tmfclnodes = tmfnodes.setdefault(subdir, {})
715 tmfclnodes = tmfnodes.setdefault(subdir, {})
716 tmfclnode = tmfclnodes.setdefault(n, clnode)
716 tmfclnode = tmfclnodes.setdefault(n, clnode)
717 if clrevorder[clnode] < clrevorder[tmfclnode]:
717 if clrevorder[clnode] < clrevorder[tmfclnode]:
718 tmfclnodes[n] = clnode
718 tmfclnodes[n] = clnode
719 else:
719 else:
720 f = dir + p
720 f = dir + p
721 fclnodes = fnodes.setdefault(f, {})
721 fclnodes = fnodes.setdefault(f, {})
722 fclnode = fclnodes.setdefault(n, clnode)
722 fclnode = fclnodes.setdefault(n, clnode)
723 if clrevorder[clnode] < clrevorder[fclnode]:
723 if clrevorder[clnode] < clrevorder[fclnode]:
724 fclnodes[n] = clnode
724 fclnodes[n] = clnode
725 return clnode
725 return clnode
726 return lookupmflinknode
726 return lookupmflinknode
727
727
728 size = 0
728 size = 0
729 while tmfnodes:
729 while tmfnodes:
730 dir = min(tmfnodes)
730 dir = min(tmfnodes)
731 nodes = tmfnodes[dir]
731 nodes = tmfnodes[dir]
732 prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
732 prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
733 if not dir or prunednodes:
733 if not dir or prunednodes:
734 for x in self._packmanifests(dir, prunednodes,
734 for x in self._packmanifests(dir, prunednodes,
735 makelookupmflinknode(dir)):
735 makelookupmflinknode(dir)):
736 size += len(x)
736 size += len(x)
737 yield x
737 yield x
738 del tmfnodes[dir]
738 del tmfnodes[dir]
739 self._verbosenote(_('%8.i (manifests)\n') % size)
739 self._verbosenote(_('%8.i (manifests)\n') % size)
740 yield self._manifestsdone()
740 yield self._manifestsdone()
741
741
742 # The 'source' parameter is useful for extensions
742 # The 'source' parameter is useful for extensions
743 def generatefiles(self, changedfiles, linknodes, commonrevs, source):
743 def generatefiles(self, changedfiles, linknodes, commonrevs, source):
744 repo = self._repo
744 repo = self._repo
745 progress = self._progress
745 progress = self._progress
746 msgbundling = _('bundling')
746 msgbundling = _('bundling')
747
747
748 total = len(changedfiles)
748 total = len(changedfiles)
749 # for progress output
749 # for progress output
750 msgfiles = _('files')
750 msgfiles = _('files')
751 for i, fname in enumerate(sorted(changedfiles)):
751 for i, fname in enumerate(sorted(changedfiles)):
752 filerevlog = repo.file(fname)
752 filerevlog = repo.file(fname)
753 if not filerevlog:
753 if not filerevlog:
754 raise error.Abort(_("empty or missing revlog for %s") % fname)
754 raise error.Abort(_("empty or missing revlog for %s") % fname)
755
755
756 linkrevnodes = linknodes(filerevlog, fname)
756 linkrevnodes = linknodes(filerevlog, fname)
757 # Lookup for filenodes, we collected the linkrev nodes above in the
757 # Lookup for filenodes, we collected the linkrev nodes above in the
758 # fastpath case and with lookupmf in the slowpath case.
758 # fastpath case and with lookupmf in the slowpath case.
759 def lookupfilelog(x):
759 def lookupfilelog(x):
760 return linkrevnodes[x]
760 return linkrevnodes[x]
761
761
762 filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
762 filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
763 if filenodes:
763 if filenodes:
764 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
764 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
765 total=total)
765 total=total)
766 h = self.fileheader(fname)
766 h = self.fileheader(fname)
767 size = len(h)
767 size = len(h)
768 yield h
768 yield h
769 for chunk in self.group(filenodes, filerevlog, lookupfilelog):
769 for chunk in self.group(filenodes, filerevlog, lookupfilelog):
770 size += len(chunk)
770 size += len(chunk)
771 yield chunk
771 yield chunk
772 self._verbosenote(_('%8.i %s\n') % (size, fname))
772 self._verbosenote(_('%8.i %s\n') % (size, fname))
773 progress(msgbundling, None)
773 progress(msgbundling, None)
774
774
775 def deltaparent(self, revlog, rev, p1, p2, prev):
775 def deltaparent(self, revlog, rev, p1, p2, prev):
776 return prev
776 return prev
777
777
778 def revchunk(self, revlog, rev, prev, linknode):
778 def revchunk(self, revlog, rev, prev, linknode):
779 node = revlog.node(rev)
779 node = revlog.node(rev)
780 p1, p2 = revlog.parentrevs(rev)
780 p1, p2 = revlog.parentrevs(rev)
781 base = self.deltaparent(revlog, rev, p1, p2, prev)
781 base = self.deltaparent(revlog, rev, p1, p2, prev)
782
782
783 prefix = ''
783 prefix = ''
784 if revlog.iscensored(base) or revlog.iscensored(rev):
784 if revlog.iscensored(base) or revlog.iscensored(rev):
785 try:
785 try:
786 delta = revlog.revision(node)
786 delta = revlog.revision(node)
787 except error.CensoredNodeError as e:
787 except error.CensoredNodeError as e:
788 delta = e.tombstone
788 delta = e.tombstone
789 if base == nullrev:
789 if base == nullrev:
790 prefix = mdiff.trivialdiffheader(len(delta))
790 prefix = mdiff.trivialdiffheader(len(delta))
791 else:
791 else:
792 baselen = revlog.rawsize(base)
792 baselen = revlog.rawsize(base)
793 prefix = mdiff.replacediffheader(baselen, len(delta))
793 prefix = mdiff.replacediffheader(baselen, len(delta))
794 elif base == nullrev:
794 elif base == nullrev:
795 delta = revlog.revision(node)
795 delta = revlog.revision(node)
796 prefix = mdiff.trivialdiffheader(len(delta))
796 prefix = mdiff.trivialdiffheader(len(delta))
797 else:
797 else:
798 delta = revlog.revdiff(base, rev)
798 delta = revlog.revdiff(base, rev)
799 p1n, p2n = revlog.parents(node)
799 p1n, p2n = revlog.parents(node)
800 basenode = revlog.node(base)
800 basenode = revlog.node(base)
801 flags = revlog.flags(rev)
801 flags = revlog.flags(rev)
802 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
802 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
803 meta += prefix
803 meta += prefix
804 l = len(meta) + len(delta)
804 l = len(meta) + len(delta)
805 yield chunkheader(l)
805 yield chunkheader(l)
806 yield meta
806 yield meta
807 yield delta
807 yield delta
808 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
808 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
809 # do nothing with basenode, it is implicitly the previous one in HG10
809 # do nothing with basenode, it is implicitly the previous one in HG10
810 # do nothing with flags, it is implicitly 0 for cg1 and cg2
810 # do nothing with flags, it is implicitly 0 for cg1 and cg2
811 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
811 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
812
812
813 class cg2packer(cg1packer):
813 class cg2packer(cg1packer):
814 version = '02'
814 version = '02'
815 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
815 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
816
816
817 def __init__(self, repo, bundlecaps=None):
817 def __init__(self, repo, bundlecaps=None):
818 super(cg2packer, self).__init__(repo, bundlecaps)
818 super(cg2packer, self).__init__(repo, bundlecaps)
819 if self._reorder is None:
819 if self._reorder is None:
820 # Since generaldelta is directly supported by cg2, reordering
820 # Since generaldelta is directly supported by cg2, reordering
821 # generally doesn't help, so we disable it by default (treating
821 # generally doesn't help, so we disable it by default (treating
822 # bundle.reorder=auto just like bundle.reorder=False).
822 # bundle.reorder=auto just like bundle.reorder=False).
823 self._reorder = False
823 self._reorder = False
824
824
825 def deltaparent(self, revlog, rev, p1, p2, prev):
825 def deltaparent(self, revlog, rev, p1, p2, prev):
826 dp = revlog.deltaparent(rev)
826 dp = revlog.deltaparent(rev)
827 if dp == nullrev and revlog.storedeltachains:
827 if dp == nullrev and revlog.storedeltachains:
828 # Avoid sending full revisions when delta parent is null. Pick prev
828 # Avoid sending full revisions when delta parent is null. Pick prev
829 # in that case. It's tempting to pick p1 in this case, as p1 will
829 # in that case. It's tempting to pick p1 in this case, as p1 will
830 # be smaller in the common case. However, computing a delta against
830 # be smaller in the common case. However, computing a delta against
831 # p1 may require resolving the raw text of p1, which could be
831 # p1 may require resolving the raw text of p1, which could be
832 # expensive. The revlog caches should have prev cached, meaning
832 # expensive. The revlog caches should have prev cached, meaning
833 # less CPU for changegroup generation. There is likely room to add
833 # less CPU for changegroup generation. There is likely room to add
834 # a flag and/or config option to control this behavior.
834 # a flag and/or config option to control this behavior.
835 return prev
835 return prev
836 elif dp == nullrev:
836 elif dp == nullrev:
837 # revlog is configured to use full snapshot for a reason,
837 # revlog is configured to use full snapshot for a reason,
838 # stick to full snapshot.
838 # stick to full snapshot.
839 return nullrev
839 return nullrev
840 elif dp not in (p1, p2, prev):
840 elif dp not in (p1, p2, prev):
841 # Pick prev when we can't be sure remote has the base revision.
841 # Pick prev when we can't be sure remote has the base revision.
842 return prev
842 return prev
843 else:
843 else:
844 return dp
844 return dp
845
845
846 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
846 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
847 # Do nothing with flags, it is implicitly 0 in cg1 and cg2
847 # Do nothing with flags, it is implicitly 0 in cg1 and cg2
848 return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
848 return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
849
849
class cg3packer(cg2packer):
    version = '03'
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        if dir:
            yield self.fileheader(dir)

        dirlog = self._repo.manifestlog._revlog.dirlog(dir)
        for chunk in self.group(mfnodes, dirlog, lookuplinknode,
                                units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        return self.close()

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
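        # Descriptive note (not in the original source): unlike cg1/cg2, the
        # cg3 header carries the revlog flags as a trailing big-endian
        # unsigned short (the 'H' in _CHANGEGROUPV3_DELTA_HEADER).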
        return struct.pack(
            self.deltaheader, node, p1n, p2n, basenode, linknode, flags)

_packermap = {'01': (cg1packer, cg1unpacker),
              # cg2 adds support for exchanging generaldelta
              '02': (cg2packer, cg2unpacker),
              # cg3 adds support for exchanging revlog flags and treemanifests
              '03': (cg3packer, cg3unpacker),
             }

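# A hedged sketch, not part of the original module: given the changegroup
# versions a remote side advertises, pick the best mutually supported one.
# The zero-padded version strings ('01' < '02' < '03') compare correctly as
# text, so max() selects the most capable common version. The helper name and
# the shape of remoteversions are assumptions for illustration; the forward
# reference to supportedoutgoingversions is resolved at call time.
def _examplenegotiateversion(repo, remoteversions):
    common = supportedoutgoingversions(repo) & set(remoteversions)
    if not common:
        raise error.Abort(_('no common changegroup version'))
    return max(common)
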
def allsupportedversions(ui):
    versions = set(_packermap.keys())
    versions.discard('03')
    if (ui.configbool('experimental', 'changegroup3') or
        ui.configbool('experimental', 'treemanifest')):
        versions.add('03')
    return versions

# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    versions = allsupportedversions(repo.ui)
    if 'treemanifest' in repo.requirements:
        versions.add('03')
    return versions

# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    versions = allsupportedversions(repo.ui)
    if 'treemanifest' in repo.requirements:
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        versions.discard('01')
        versions.discard('02')
        versions.add('03')
    return versions

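# Illustrative note (not in the original source): for a repo with
# 'treemanifest' in its requirements and the experimental changegroup3
# config left off, the two helpers above diverge:
#   supportedincomingversions(repo) -> set(['01', '02', '03'])
#   supportedoutgoingversions(repo) -> set(['03'])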
def safeversion(repo):
    # Finds the smallest version that it's safe to assume clients of the repo
    # will support. For example, all hg versions that support generaldelta also
    # support changegroup 02.
    versions = supportedoutgoingversions(repo)
    if 'generaldelta' in repo.requirements:
        versions.discard('01')
    assert versions
    return min(versions)

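# A hedged usage sketch, not part of the original module: pair safeversion()
# with getbundler() below to build the most conservative bundler a repo's
# clients can be assumed to read. The helper name is hypothetical; the
# forward reference to getbundler is resolved at call time.
def _examplesafebundler(repo):
    return getbundler(safeversion(repo), repo)
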
def getbundler(version, repo, bundlecaps=None):
    assert version in supportedoutgoingversions(repo)
    return _packermap[version][0](repo, bundlecaps)

def getunbundler(version, fh, alg, extras=None):
    return _packermap[version][1](fh, alg, extras=extras)

def _changegroupinfo(repo, nodes, source):
    if repo.ui.verbose or source == 'bundle':
        repo.ui.status(_("%d changesets found\n") % len(nodes))
    if repo.ui.debugflag:
        repo.ui.debug("list of changesets:\n")
        for node in nodes:
            repo.ui.debug("%s\n" % hex(node))

def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know all linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)

def getsubset(repo, outgoing, bundler, source, fastpath=False):
    gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
    return getunbundler(bundler.version, util.chunkbuffer(gengroup), None,
                        {'clcount': len(outgoing.missing)})

def changegroupsubset(repo, roots, heads, source, version='01'):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the roots and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex, as determining which filenodes and which
    manifest nodes need to be included for the changesets to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    outgoing = discovery.outgoing(repo, missingroots=roots, missingheads=heads)
    bundler = getbundler(version, repo)
    return getsubset(repo, outgoing, bundler, source)

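# A hedged usage sketch, not part of the original module: bundle the segment
# of history between two known binary nodes. The helper name, the 'bundle'
# source tag, and the version choice are illustrative assumptions.
def _examplesubset(repo, rootnode, headnode):
    # everything descended from rootnode and needed by headnode
    return changegroupsubset(repo, [rootnode], [headnode], 'bundle',
                             version='02')
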
def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
                           version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing. Returns a raw changegroup generator."""
    if not outgoing.missing:
        return None
    bundler = getbundler(version, repo, bundlecaps)
    return getsubsetraw(repo, outgoing, bundler, source)

def getlocalchangegroup(repo, source, outgoing, bundlecaps=None,
                        version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing."""
    if not outgoing.missing:
        return None
    bundler = getbundler(version, repo, bundlecaps)
    return getsubset(repo, outgoing, bundler, source)

def getchangegroup(repo, source, outgoing, bundlecaps=None,
                   version='01'):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors of common.

    If heads is None, use the local heads. If common is None, use [nullid].

    The nodes in common might not all be known locally due to the way the
    current discovery protocol works.
    """
    return getlocalchangegroup(repo, source, outgoing, bundlecaps=bundlecaps,
                               version=version)

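# A hedged usage sketch, not part of the original module: compute the
# outgoing set explicitly (mirroring what changegroupsubset() does above) and
# hand it to getchangegroup(). All names except discovery.outgoing and
# getchangegroup are illustrative assumptions.
def _examplegetchangegroup(repo, roots, heads):
    outgoing = discovery.outgoing(repo, missingroots=roots,
                                  missingheads=heads)
    return getchangegroup(repo, 'example', outgoing, version='02')
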
def changegroup(repo, basenodes, source):
    # to avoid a race we use changegroupsubset() (issue1320)
    return changegroupsubset(repo, basenodes, repo.heads(), source)

def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    revisions = 0
    files = 0
    for chunkdata in iter(source.filelogheader, {}):
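        # Descriptive note (not in the original source): iter(callable,
        # sentinel) keeps calling source.filelogheader() until it returns
        # the empty dict, which marks the end of the per-file groups.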
        files += 1
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        repo.ui.progress(_('files'), files, unit=_('files'),
                         total=expectedfiles)
        fl = repo.file(f)
        o = len(fl)
        try:
            if not fl.addgroup(source, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        if f in needfiles:
            needs = needfiles[f]
            for new in xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    repo.ui.progress(_('files'), None)

    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files