changegroup: store old heads as a set...
Gregory Szorc
r31587:ed5b2587 4.1.2 stable
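This changeset makes cg1unpacker.apply() record the pre-pull changelog heads as a set instead of the list returned by cl.heads(). The old heads are only consulted afterwards through membership tests such as "h not in oldheads", which scan the whole list but are constant-time hash lookups against a set. A minimal sketch of the difference (the names below are hypothetical stand-ins, not part of the module):

    # 'heads' and 'incoming' stand in for cl.heads() and the post-pull heads.
    heads = ['%040x' % i for i in range(10000)]
    incoming = ['%040x' % i for i in range(9000, 11000)]

    oldheads_list = heads        # previous behavior: keep the list
    oldheads_set = set(heads)    # this changeset: build a set once

    # Each test scans up to len(heads) entries against the list...
    new_from_list = [h for h in incoming if h not in oldheads_list]
    # ...but is a single hash lookup against the set.
    new_from_set = [h for h in incoming if h not in oldheads_set]
    assert new_from_list == new_from_set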
@@ -1,1044 +1,1044 @@
# changegroup.py - Mercurial changegroup manipulation functions
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import os
import struct
import tempfile
import weakref

from .i18n import _
from .node import (
    hex,
    nullrev,
    short,
)

from . import (
    branchmap,
    dagutil,
    discovery,
    error,
    mdiff,
    phases,
    util,
)

_CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
_CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
_CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"

def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    s = stream.read(n)
    if len(s) < n:
        raise error.Abort(_("stream ended unexpectedly"
                            " (got %d bytes, expected %d)")
                          % (len(s), n))
    return s

def getchunk(stream):
    """return the next chunk from stream as a string"""
    d = readexactly(stream, 4)
    l = struct.unpack(">l", d)[0]
    if l <= 4:
        if l:
            raise error.Abort(_("invalid chunk length %d") % l)
        return ""
    return readexactly(stream, l - 4)

def chunkheader(length):
    """return a changegroup chunk header (string)"""
    return struct.pack(">l", length + 4)

def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    return struct.pack(">l", 0)
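
# A worked example of the framing above (a sketch, not used by the module):
# a chunk on the wire is a 4-byte big-endian length that counts itself,
# followed by the payload, and a length of 0 terminates a group. For a
# 5-byte payload:
#
#     chunkheader(5) == struct.pack(">l", 9)   # 4 header bytes + 5 payload
#     closechunk()   == struct.pack(">l", 0)   # group terminator
#
# getchunk() on a stream positioned at such a header hands back the 5-byte
# payload, and returns "" once it reads the terminator.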

def combineresults(results):
    """logic to combine 0 or more addchangegroup results into one"""
    changedheads = 0
    result = 1
    for ret in results:
        # If any changegroup result is 0, return 0
        if ret == 0:
            result = 0
            break
        if ret < -1:
            changedheads += ret + 1
        elif ret > 1:
            changedheads += ret - 1
    if changedheads > 0:
        result = 1 + changedheads
    elif changedheads < 0:
        result = -1 + changedheads
    return result
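
# Worked examples of the encoding above (a sketch): each input follows the
# convention documented in cg1unpacker.apply(), where 1 means "no head
# change", 1 + n means "n heads added", and -1 - n means "n heads removed":
#
#     combineresults([1, 1])  == 1    # two applies, no head changes
#     combineresults([2, 1])  == 2    # one apply added a head
#     combineresults([2, -3]) == -2   # +1 head and -2 heads nets -1 head
#     combineresults([0, 5])  == 0    # any failure (0) wins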

def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                # Increase default buffer size because default is usually
                # small (4k is common on Linux).
                fh = open(filename, "wb", 131072)
        else:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, "wb")
        cleanup = filename
        for c in chunks:
            fh.write(c)
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)

class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        if alg is None:
            alg = 'UN'
        if alg not in util.compengines.supportedbundletypes:
            raise error.Abort(_('unknown stream compression type: %s')
                              % alg)
        if alg == 'BZ':
            alg = '_truncatedBZ'

        compengine = util.compengines.forbundletype(alg)
        self._stream = compengine.decompressorreader(fh)
        self._type = alg
        self.extras = extras or {}
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None and self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                'deltabase': deltabase, 'delta': delta, 'flags': flags}

    def getchunks(self):
        """returns all the chunks contained in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parses the changegroup data; otherwise it
        would block in the sshrepo case because it doesn't know where the
        stream ends.
        """
        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, changegroup versions 1 and 2 have a series of groups
        # with one group per file. changegroup 3 has a series of directory
        # manifests before the files.
        count = 0
        emptycount = 0
        while emptycount < self._grouplistcount:
            empty = True
            count += 1
            while True:
                chunk = getchunk(self)
                if not chunk:
                    if empty and count > 2:
                        emptycount += 1
                    break
                empty = False
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()
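
    # Example use of getchunks() (a sketch; 'unpacker' and 'ui' here are
    # assumed names, not module state): because getchunks() re-emits the
    # framing headers and the closing zero-length chunks, its output can be
    # spooled to disk with the module-level writechunks() helper, e.g.
    #
    #     writechunks(ui, unpacker.getchunks(), 'incoming.hg')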

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        # We know that we'll never have more manifests than we had
        # changesets.
        self.callback = prog(_('manifests'), numchanges)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        repo.manifestlog._revlog.addgroup(self, revmap, trp)
        repo.ui.progress(_('manifests'), None)
        self.callback = None

    def apply(self, repo, srctype, url, emptyok=False,
              targetphase=phases.draft, expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        try:
            with repo.transaction("\n".join([srctype,
                                             util.hidepassword(url)])) as tr:
                # The transaction could have been created before and already
                # carries source information. In this case we use the top
                # level data. We overwrite the argument because we need to use
                # the top level value (if it exists) in this function.
                srctype = tr.hookargs.setdefault('source', srctype)
                url = tr.hookargs.setdefault('url', url)
                repo.hook('prechangegroup', throw=True, **tr.hookargs)

                # write changelog data to temp files so concurrent readers
                # will not see an inconsistent view
                cl = repo.changelog
                cl.delayupdate(tr)
-                oldheads = cl.heads()
+                oldheads = set(cl.heads())
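                # oldheads is only consulted below through membership tests
                # ("h not in oldheads" when counting heads and when logging
                # new heads), so a set turns each test into a hash lookup
                # instead of a scan of the list cl.heads() returns.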

                trp = weakref.proxy(tr)
                # pull off the changeset group
                repo.ui.status(_("adding changesets\n"))
                clstart = len(cl)
                class prog(object):
                    def __init__(self, step, total):
                        self._step = step
                        self._total = total
                        self._count = 1
                    def __call__(self):
                        repo.ui.progress(self._step, self._count,
                                         unit=_('chunks'), total=self._total)
                        self._count += 1
                self.callback = prog(_('changesets'), expectedtotal)

                efiles = set()
                def onchangelog(cl, node):
                    efiles.update(cl.readfiles(node))

                self.changelogheader()
                srccontent = cl.addgroup(self, csmap, trp,
                                         addrevisioncb=onchangelog)
                efiles = len(efiles)

                if not (srccontent or emptyok):
                    raise error.Abort(_("received changelog group is empty"))
                clend = len(cl)
                changesets = clend - clstart
                repo.ui.progress(_('changesets'), None)
                self.callback = None

                # pull off the manifest group
                repo.ui.status(_("adding manifests\n"))
                self._unpackmanifests(repo, revmap, trp, prog, changesets)

                needfiles = {}
                if repo.ui.configbool('server', 'validate', default=False):
                    cl = repo.changelog
                    ml = repo.manifestlog
                    # validate incoming csets have their manifests
                    for cset in xrange(clstart, clend):
                        mfnode = cl.changelogrevision(cset).manifest
                        mfest = ml[mfnode].readdelta()
                        # store file nodes we must see
                        for f, n in mfest.iteritems():
                            needfiles.setdefault(f, set()).add(n)

                # process the files
                repo.ui.status(_("adding file changes\n"))
                newrevs, newfiles = _addchangegroupfiles(
                    repo, self, revmap, trp, efiles, needfiles)
                revisions += newrevs
                files += newfiles

                dh = 0
                if oldheads:
                    heads = cl.heads()
                    dh = len(heads) - len(oldheads)
                    for h in heads:
                        if h not in oldheads and repo[h].closesbranch():
                            dh -= 1
                htext = ""
                if dh:
                    htext = _(" (%+d heads)") % dh

                repo.ui.status(_("added %d changesets"
                                 " with %d changes to %d files%s\n")
                               % (changesets, revisions, files, htext))
                repo.invalidatevolatilesets()

                if changesets > 0:
                    if 'node' not in tr.hookargs:
                        tr.hookargs['node'] = hex(cl.node(clstart))
                        tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                        hookargs = dict(tr.hookargs)
                    else:
                        hookargs = dict(tr.hookargs)
                        hookargs['node'] = hex(cl.node(clstart))
                        hookargs['node_last'] = hex(cl.node(clend - 1))
                    repo.hook('pretxnchangegroup', throw=True, **hookargs)

                added = [cl.node(r) for r in xrange(clstart, clend)]
                publishing = repo.publishing()
                if srctype in ('push', 'serve'):
                    # Old servers cannot push the boundary themselves.
                    # New servers won't push the boundary if changeset already
                    # exists locally as secret
                    #
                    # We should not use added here but the list of all changes
                    # in the bundle
                    if publishing:
                        phases.advanceboundary(repo, tr, phases.public,
                                               srccontent)
                    else:
                        # Those changesets have been pushed from the
                        # outside, their phases are going to be pushed
                        # alongside. Therefore `targetphase` is
                        # ignored.
                        phases.advanceboundary(repo, tr, phases.draft,
                                               srccontent)
                        phases.retractboundary(repo, tr, phases.draft, added)
                elif srctype != 'strip':
                    # publishing only alters behavior during push
                    #
                    # strip should not touch boundary at all
                    phases.retractboundary(repo, tr, targetphase, added)

                if changesets > 0:
                    if srctype != 'strip':
                        # During strip, branchcache is invalid but the
                        # coming call to `destroyed` will repair it.
                        # In other cases we can safely update the cache
                        # on disk.
                        repo.ui.debug('updating the branch cache\n')
                        branchmap.updatecache(repo.filtered('served'))

                    def runhooks():
                        # These hooks run when the lock releases, not when the
                        # transaction closes. So it's possible for the changelog
                        # to have changed since we last saw it.
                        if clstart >= len(repo):
                            return

                        repo.hook("changegroup", **hookargs)

                        for n in added:
                            args = hookargs.copy()
                            args['node'] = hex(n)
                            del args['node_last']
                            repo.hook("incoming", **args)

                        newheads = [h for h in repo.heads()
                                    if h not in oldheads]
                        repo.ui.log("incoming",
                                    "%s incoming changes - new heads: %s\n",
                                    len(added),
                                    ', '.join([hex(c[:6]) for c in newheads]))

                    tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                    lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs = headertuple
        flags = 0
        return node, p1, p2, deltabase, cs, flags

class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
                                                  numchanges)
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % d)
            dirlog = repo.manifestlog._revlog.dirlog(d)
            if not dirlog.addgroup(self, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))

class headerlessfixup(object):
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        if self._h:
            d, self._h = self._h[:n], self._h[n:]
            if len(d) < n:
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)

class cg1packer(object):
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    version = '01'
    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        # experimental config: bundle.reorder
        reorder = repo.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None

    def close(self):
        return closechunk()

    def fileheader(self, fname):
        return chunkheader(len(fname)) + fname

    # Extracted both for clarity and for overriding in extensions.
    def _sortgroup(self, revlog, nodelist, lookup):
        """Sort nodes for change group and turn them into revnums."""
        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and self._reorder is None) or self._reorder:
            dag = dagutil.revlogdag(revlog)
            return dag.linearize(set(revlog.rev(n) for n in nodelist))
        else:
            return sorted([revlog.rev(n) for n in nodelist])

    def group(self, nodelist, revlog, lookup, units=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated, and units
        specifies the type of revlog that is touched (changelog, manifest,
        etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self.close()
            return

        revs = self._sortgroup(revlog, nodelist, lookup)

        # add the parent of the first rev
        p = revlog.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        total = len(revs) - 1
        msgbundling = _('bundling')
        for r in xrange(len(revs) - 1):
            if units is not None:
                self._progress(msgbundling, r + 1, unit=units, total=total)
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(revlog.node(curr))
            for c in self.revchunk(revlog, curr, prev, linknode):
                yield c

        if units is not None:
            self._progress(msgbundling, None)
        yield self.close()

    # filter any nodes that claim to be part of the known set
    def prune(self, revlog, missing, commonrevs):
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        """Pack flat manifests into a changegroup stream."""
        assert not dir
        for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
                                lookuplinknode, units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        return ''

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        repo = self._repo
        cl = repo.changelog

        clrevorder = {}
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()

        # Callback for the changelog, used to collect changed files and
        # manifest nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            clrevorder[x] = len(clrevorder)
            n = c[0]
            # record the first changeset introducing this manifest version
            mfs.setdefault(n, x)
            # Record a complete list of potentially-changed files in
            # this manifest.
            changedfiles.update(c[3])
            return x

        self._verbosenote(_('uncompressed size of bundle content:\n'))
        size = 0
        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (changelog)\n') % size)

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the filelogs
        # are walked in revlog order.
        #
        # When taking the slowpath with reorder=None and the manifest revlog
        # uses generaldelta, the manifest may be walked in the "wrong" order.
        # Without 'clrevorder', we would get an incorrect linkrev (see fix in
        # cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta, so
        # it is only reordered when reorder=True. To handle this case, we
        # simply take the slowpath, which already has the 'clrevorder' logic.
        # This was also fixed in cc0ff93d0c0c.
        fastpathlinkrev = fastpathlinkrev and not self._reorder
        # Treemanifests don't work correctly with fastpathlinkrev
        # either, because we don't discover which directory nodes to
        # send along with files. This could probably be fixed.
        fastpathlinkrev = fastpathlinkrev and (
            'treemanifest' not in repo.requirements)

        for chunk in self.generatemanifests(commonrevs, clrevorder,
                                            fastpathlinkrev, mfs, fnodes):
            yield chunk
        mfs.clear()
        clrevs = set(cl.rev(x) for x in clnodes)

        if not fastpathlinkrev:
            def linknodes(unused, fname):
                return fnodes.get(fname, {})
        else:
            cln = cl.node
            def linknodes(filerevlog, fname):
                llr = filerevlog.linkrev
                fln = filerevlog.node
                revs = ((r, llr(r)) for r in filerevlog)
                return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)

        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

    def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
                          fnodes):
        repo = self._repo
        mfl = repo.manifestlog
        dirlog = mfl._revlog.dirlog
        tmfnodes = {'': mfs}

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def makelookupmflinknode(dir):
            if fastpathlinkrev:
                assert not dir
                return mfs.__getitem__

            def lookupmflinknode(x):
                """Callback for looking up the linknode for manifests.

                Returns the linkrev node for the specified manifest.

                SIDE EFFECT:

                1) fclnodes gets populated with the list of relevant
                   file nodes if we're not using fastpathlinkrev
                2) When treemanifests are in use, collects treemanifest nodes
                   to send

                Note that this means manifests must be completely sent to
                the client before you can trust the list of files and
                treemanifests to send.
                """
                clnode = tmfnodes[dir][x]
                mdata = mfl.get(dir, x).readfast(shallow=True)
                for p, n, fl in mdata.iterentries():
                    if fl == 't': # subdirectory manifest
                        subdir = dir + p + '/'
                        tmfclnodes = tmfnodes.setdefault(subdir, {})
                        tmfclnode = tmfclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[tmfclnode]:
                            tmfclnodes[n] = clnode
                    else:
                        f = dir + p
                        fclnodes = fnodes.setdefault(f, {})
                        fclnode = fclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            fclnodes[n] = clnode
                return clnode
            return lookupmflinknode

        size = 0
        while tmfnodes:
            dir = min(tmfnodes)
            nodes = tmfnodes[dir]
            prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
            if not dir or prunednodes:
                for x in self._packmanifests(dir, prunednodes,
                                             makelookupmflinknode(dir)):
                    size += len(x)
                    yield x
            del tmfnodes[dir]
        self._verbosenote(_('%8.i (manifests)\n') % size)
        yield self._manifestsdone()

    # The 'source' parameter is useful for extensions
    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        repo = self._repo
        progress = self._progress
        msgbundling = _('bundling')

        total = len(changedfiles)
        # for progress output
        msgfiles = _('files')
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise error.Abort(_("empty or missing revlog for %s") % fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
            if filenodes:
                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                         total=total)
                h = self.fileheader(fname)
                size = len(h)
                yield h
                for chunk in self.group(filenodes, filerevlog, lookupfilelog):
                    size += len(chunk)
                    yield chunk
                self._verbosenote(_('%8.i %s\n') % (size, fname))
        progress(msgbundling, None)

    def deltaparent(self, revlog, rev, p1, p2, prev):
        return prev

    def revchunk(self, revlog, rev, prev, linknode):
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = self.deltaparent(revlog, rev, p1, p2, prev)

        prefix = ''
        if revlog.iscensored(base) or revlog.iscensored(rev):
            try:
                delta = revlog.revision(node, raw=True)
            except error.CensoredNodeError as e:
                delta = e.tombstone
            if base == nullrev:
                prefix = mdiff.trivialdiffheader(len(delta))
            else:
                baselen = revlog.rawsize(base)
                prefix = mdiff.replacediffheader(baselen, len(delta))
        elif base == nullrev:
            delta = revlog.revision(node, raw=True)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        flags = revlog.flags(rev)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta
    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # do nothing with basenode, it is implicitly the previous one in HG10
        # do nothing with flags, it is implicitly 0 for cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)

class cg2packer(cg1packer):
    version = '02'
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER

    def __init__(self, repo, bundlecaps=None):
        super(cg2packer, self).__init__(repo, bundlecaps)
        if self._reorder is None:
            # Since generaldelta is directly supported by cg2, reordering
            # generally doesn't help, so we disable it by default (treating
            # bundle.reorder=auto just like bundle.reorder=False).
            self._reorder = False

    def deltaparent(self, revlog, rev, p1, p2, prev):
        dp = revlog.deltaparent(rev)
        if dp == nullrev and revlog.storedeltachains:
            # Avoid sending full revisions when delta parent is null. Pick prev
            # in that case. It's tempting to pick p1 in this case, as p1 will
            # be smaller in the common case. However, computing a delta against
            # p1 may require resolving the raw text of p1, which could be
            # expensive. The revlog caches should have prev cached, meaning
            # less CPU for changegroup generation. There is likely room to add
            # a flag and/or config option to control this behavior.
            return prev
        elif dp == nullrev:
            # revlog is configured to use full snapshot for a reason,
            # stick to full snapshot.
            return nullrev
        elif dp not in (p1, p2, prev):
            # Pick prev when we can't be sure remote has the base revision.
            return prev
        else:
            return dp

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # Do nothing with flags, it is implicitly 0 in cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)

class cg3packer(cg2packer):
    version = '03'
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        if dir:
            yield self.fileheader(dir)

        dirlog = self._repo.manifestlog._revlog.dirlog(dir)
        for chunk in self.group(mfnodes, dirlog, lookuplinknode,
                                units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        return self.close()

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        return struct.pack(
            self.deltaheader, node, p1n, p2n, basenode, linknode, flags)

_packermap = {'01': (cg1packer, cg1unpacker),
              # cg2 adds support for exchanging generaldelta
              '02': (cg2packer, cg2unpacker),
              # cg3 adds support for exchanging revlog flags and treemanifests
              '03': (cg3packer, cg3unpacker),
}
876
876
def allsupportedversions(repo):
    versions = set(_packermap.keys())
    if not (repo.ui.configbool('experimental', 'changegroup3') or
            repo.ui.configbool('experimental', 'treemanifest') or
            'treemanifest' in repo.requirements):
        versions.discard('03')
    return versions

# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    return allsupportedversions(repo)

# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    versions = allsupportedversions(repo)
    if 'treemanifest' in repo.requirements:
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        versions.discard('01')
        versions.discard('02')
    return versions

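# Hedged illustration of the effect above (hypothetical repo objects, not
# from this module): a plain repository can emit '01' and '02' (plus '03'
# only when experimentally enabled), while a treemanifest repository is cut
# down to '03' alone:
#
#   >>> sorted(supportedoutgoingversions(flatrepo))   # assumed flat repo
#   ['01', '02']
#   >>> sorted(supportedoutgoingversions(treerepo))   # assumed tree repo
#   ['03']
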
def safeversion(repo):
    # Finds the smallest version that it's safe to assume clients of the repo
    # will support. For example, all hg versions that support generaldelta also
    # support changegroup 02.
    versions = supportedoutgoingversions(repo)
    if 'generaldelta' in repo.requirements:
        versions.discard('01')
    assert versions
    return min(versions)

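# Worth noting (illustrative aside, not from the original source): the
# version identifiers are strings, so min() compares them lexicographically,
# which happens to order '01' < '02' < '03' as intended:
#
#   >>> min({'02', '03'})   # e.g. on a generaldelta repo
#   '02'
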
def getbundler(version, repo, bundlecaps=None):
    assert version in supportedoutgoingversions(repo)
    return _packermap[version][0](repo, bundlecaps)

def getunbundler(version, fh, alg, extras=None):
    return _packermap[version][1](fh, alg, extras=extras)

def _changegroupinfo(repo, nodes, source):
    if repo.ui.verbose or source == 'bundle':
        repo.ui.status(_("%d changesets found\n") % len(nodes))
    if repo.ui.debugflag:
        repo.ui.debug("list of changesets:\n")
        for node in nodes:
            repo.ui.debug("%s\n" % hex(node))

def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know that all linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)

def getsubset(repo, outgoing, bundler, source, fastpath=False):
    gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
    return getunbundler(bundler.version, util.chunkbuffer(gengroup), None,
                        {'clcount': len(outgoing.missing)})

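# Orientation sketch (assumed inputs, mirroring what getsubset just did): the
# packer side yields raw chunks from generate(), and util.chunkbuffer turns
# that iterator back into a file-like object the matching unpacker accepts:
#
#   bundler = getbundler('02', repo)
#   chunks = bundler.generate(commonrevs, csets, False, 'pull')
#   cg = getunbundler('02', util.chunkbuffer(chunks), None)
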
def changegroupsubset(repo, roots, heads, source, version='01'):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the roots and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    outgoing = discovery.outgoing(repo, missingroots=roots, missingheads=heads)
    bundler = getbundler(version, repo)
    return getsubset(repo, outgoing, bundler, source)

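# Hypothetical usage sketch (node values and source name assumed, not from
# this module): a strip-style backup bundles everything that descends from a
# given root up to the current heads, and the returned unpacker is then read
# back as a byte stream of changegroup chunks, per the docstring above:
#
#   cg = changegroupsubset(repo, [rootnode], repo.heads(), 'strip',
#                          version='02')
#   data = cg.read(4096)   # successive raw changegroup bytes
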
def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
                           version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing. Returns a raw changegroup generator."""
    if not outgoing.missing:
        return None
    bundler = getbundler(version, repo, bundlecaps)
    return getsubsetraw(repo, outgoing, bundler, source)

def getlocalchangegroup(repo, source, outgoing, bundlecaps=None,
                        version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing."""
    if not outgoing.missing:
        return None
    bundler = getbundler(version, repo, bundlecaps)
    return getsubset(repo, outgoing, bundler, source)

def getchangegroup(repo, source, outgoing, bundlecaps=None,
                   version='01'):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors of common.

    If heads is None, use the local heads. If common is None, use [nullid].

    The nodes in common might not all be known locally due to the way the
    current discovery protocol works.
    """
    return getlocalchangegroup(repo, source, outgoing, bundlecaps=bundlecaps,
                               version=version)

def changegroup(repo, basenodes, source):
    # to avoid a race we use changegroupsubset() (issue1320)
    return changegroupsubset(repo, basenodes, repo.heads(), source)

def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    revisions = 0
    files = 0
    for chunkdata in iter(source.filelogheader, {}):
        files += 1
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        repo.ui.progress(_('files'), files, unit=_('files'),
                         total=expectedfiles)
        fl = repo.file(f)
        o = len(fl)
        try:
            if not fl.addgroup(source, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        if f in needfiles:
            needs = needfiles[f]
            for new in xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    repo.ui.progress(_('files'), None)

    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files
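
# Small standalone illustration of the needfiles bookkeeping above (made-up
# values, sketch only): each filename maps to the set of filenode hashes the
# manifests promised; every delivered node is checked off, and anything left
# over at the end means the changegroup was incomplete.
#
#   needfiles = {'a.txt': {b'\x11' * 20, b'\x22' * 20}}
#   for delivered in (b'\x11' * 20, b'\x22' * 20):
#       needfiles['a.txt'].remove(delivered)
#   assert not needfiles['a.txt']   # nothing left over -> stream was complete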