##// END OF EJS Templates
changegroup: use `iter(callable, sentinel)` instead of while True...
Augie Fackler -
r29724:4e7be6e3 default
parent child Browse files
Show More
@@ -1,1055 +1,1049 b''
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import os
10 import os
11 import struct
11 import struct
12 import tempfile
12 import tempfile
13 import weakref
13 import weakref
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 hex,
17 hex,
18 nullid,
18 nullid,
19 nullrev,
19 nullrev,
20 short,
20 short,
21 )
21 )
22
22
23 from . import (
23 from . import (
24 branchmap,
24 branchmap,
25 dagutil,
25 dagutil,
26 discovery,
26 discovery,
27 error,
27 error,
28 mdiff,
28 mdiff,
29 phases,
29 phases,
30 util,
30 util,
31 )
31 )
32
32
33 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
33 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
34 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
34 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
35 _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
35 _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
36
36
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    data = stream.read(n)
    if len(data) < n:
        # a short read means the peer hung up mid-stream
        raise error.Abort(_("stream ended unexpectedly"
                            " (got %d bytes, expected %d)")
                          % (len(data), n))
    return data
45
45
def getchunk(stream):
    """return the next chunk from stream as a string"""
    # the 4-byte big-endian length prefix counts itself, so payload
    # lengths below start at 4 and a bare header means "empty chunk"
    header = readexactly(stream, 4)
    length = struct.unpack(">l", header)[0]
    if length <= 4:
        if length:
            raise error.Abort(_("invalid chunk length %d") % length)
        return ""
    return readexactly(stream, length - 4)
55
55
def chunkheader(length):
    """return a changegroup chunk header (string)"""
    # the on-wire length includes the 4-byte header itself
    return struct.pack(">l", length + 4)
59
59
def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    # a zero length field terminates a chunk group on the wire
    return struct.pack(">l", 0)
63
63
def combineresults(results):
    """logic to combine 0 or more addchangegroup results into one"""
    # accumulated head-count delta across all results
    headdelta = 0
    combined = 1
    for outcome in results:
        # If any changegroup result is 0, return 0
        if outcome == 0:
            combined = 0
            break
        if outcome < -1:
            headdelta += outcome + 1
        elif outcome > 1:
            headdelta += outcome - 1
    # a non-zero net head change overrides the default (and the 0 case,
    # matching the historical behavior of this helper)
    if headdelta > 0:
        combined = 1 + headdelta
    elif headdelta < 0:
        combined = -1 + headdelta
    return combined
82
82
def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        if filename:
            fh = vfs.open(filename, "wb") if vfs else open(filename, "wb")
        else:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, "wb")
            # only delete files we created ourselves on failure
            cleanup = filename
        for chunk in chunks:
            fh.write(chunk)
        # success: disarm the cleanup below
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
114
114
class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        if alg == 'UN':
            alg = None # get more modern without breaking too much
        # idiom fix: 'alg not in' instead of 'not alg in' (PEP 8)
        if alg not in util.decompressors:
            raise error.Abort(_('unknown stream compression type: %s')
                              % alg)
        if alg == 'BZ':
            alg = '_truncatedBZ'
        self._stream = util.decompressors[alg](fh)
        self._type = alg
        self.extras = extras or {}
        # progress callback invoked once per chunk read (set by apply())
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        """Return True if the underlying stream is compressed."""
        return self._type is not None
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        """Read a chunk length prefix and return the payload length.

        Returns 0 for an end-of-group marker; fires self.callback (if
        set) for every non-empty chunk seen.
        """
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        """Decode a cg1 delta header tuple.

        cg1 has no explicit delta base on the wire: deltas apply
        against p1 for the first revision of a chain, then against the
        previous node.
        """
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        """Read one delta chunk; return {} at the end of the group."""
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                'deltabase': deltabase, 'delta': delta, 'flags': flags}

    def getchunks(self):
        """returns all the chunks contains in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parse the changegroup data, otherwise it will
        block in case of sshrepo because it don't know the end of the stream.
        """
        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, changegroup versions 1 and 2 have a series of groups
        # with one group per file. changegroup 3 has a series of directory
        # manifests before the files.
        count = 0
        emptycount = 0
        while emptycount < self._grouplistcount:
            empty = True
            count += 1
            while True:
                chunk = getchunk(self)
                if not chunk:
                    if empty and count > 2:
                        emptycount += 1
                    break
                empty = False
                yield chunkheader(len(chunk))
                pos = 0
                # re-frame large chunks in 1MB slices
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        # We know that we'll never have more manifests than we had
        # changesets.
        self.callback = prog(_('manifests'), numchanges)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        repo.manifest.addgroup(self, revmap, trp)
        repo.ui.progress(_('manifests'), None)
        self.callback = None

    def apply(self, repo, srctype, url, emptyok=False,
              targetphase=phases.draft, expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        try:
            with repo.transaction("\n".join([srctype,
                                             util.hidepassword(url)])) as tr:
                # The transaction could have been created before and already
                # carries source information. In this case we use the top
                # level data. We overwrite the argument because we need to use
                # the top level value (if they exist) in this function.
                srctype = tr.hookargs.setdefault('source', srctype)
                url = tr.hookargs.setdefault('url', url)
                repo.hook('prechangegroup', throw=True, **tr.hookargs)

                # write changelog data to temp files so concurrent readers
                # will not see an inconsistent view
                cl = repo.changelog
                cl.delayupdate(tr)
                oldheads = cl.heads()

                trp = weakref.proxy(tr)
                # pull off the changeset group
                repo.ui.status(_("adding changesets\n"))
                clstart = len(cl)
                class prog(object):
                    def __init__(self, step, total):
                        self._step = step
                        self._total = total
                        self._count = 1
                    def __call__(self):
                        repo.ui.progress(self._step, self._count,
                                         unit=_('chunks'), total=self._total)
                        self._count += 1
                self.callback = prog(_('changesets'), expectedtotal)

                efiles = set()
                def onchangelog(cl, node):
                    efiles.update(cl.readfiles(node))

                self.changelogheader()
                srccontent = cl.addgroup(self, csmap, trp,
                                         addrevisioncb=onchangelog)
                efiles = len(efiles)

                if not (srccontent or emptyok):
                    raise error.Abort(_("received changelog group is empty"))
                clend = len(cl)
                changesets = clend - clstart
                repo.ui.progress(_('changesets'), None)
                self.callback = None

                # pull off the manifest group
                repo.ui.status(_("adding manifests\n"))
                self._unpackmanifests(repo, revmap, trp, prog, changesets)

                needfiles = {}
                if repo.ui.configbool('server', 'validate', default=False):
                    # validate incoming csets have their manifests
                    for cset in xrange(clstart, clend):
                        mfnode = repo.changelog.read(
                            repo.changelog.node(cset))[0]
                        mfest = repo.manifest.readdelta(mfnode)
                        # store file nodes we must see
                        for f, n in mfest.iteritems():
                            needfiles.setdefault(f, set()).add(n)

                # process the files
                repo.ui.status(_("adding file changes\n"))
                newrevs, newfiles = _addchangegroupfiles(
                    repo, self, revmap, trp, efiles, needfiles)
                revisions += newrevs
                files += newfiles

                dh = 0
                if oldheads:
                    heads = cl.heads()
                    dh = len(heads) - len(oldheads)
                    for h in heads:
                        if h not in oldheads and repo[h].closesbranch():
                            dh -= 1
                htext = ""
                if dh:
                    htext = _(" (%+d heads)") % dh

                repo.ui.status(_("added %d changesets"
                                 " with %d changes to %d files%s\n")
                               % (changesets, revisions, files, htext))
                repo.invalidatevolatilesets()

                if changesets > 0:
                    if 'node' not in tr.hookargs:
                        tr.hookargs['node'] = hex(cl.node(clstart))
                        tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                        hookargs = dict(tr.hookargs)
                    else:
                        hookargs = dict(tr.hookargs)
                        hookargs['node'] = hex(cl.node(clstart))
                        hookargs['node_last'] = hex(cl.node(clend - 1))
                    repo.hook('pretxnchangegroup', throw=True, **hookargs)

                added = [cl.node(r) for r in xrange(clstart, clend)]
                publishing = repo.publishing()
                if srctype in ('push', 'serve'):
                    # Old servers can not push the boundary themselves.
                    # New servers won't push the boundary if changeset already
                    # exists locally as secret
                    #
                    # We should not use added here but the list of all change in
                    # the bundle
                    if publishing:
                        phases.advanceboundary(repo, tr, phases.public,
                                               srccontent)
                    else:
                        # Those changesets have been pushed from the
                        # outside, their phases are going to be pushed
                        # alongside. Therefor `targetphase` is
                        # ignored.
                        phases.advanceboundary(repo, tr, phases.draft,
                                               srccontent)
                        phases.retractboundary(repo, tr, phases.draft, added)
                elif srctype != 'strip':
                    # publishing only alter behavior during push
                    #
                    # strip should not touch boundary at all
                    phases.retractboundary(repo, tr, targetphase, added)

                if changesets > 0:
                    if srctype != 'strip':
                        # During strip, branchcache is invalid but
                        # coming call to `destroyed` will repair it.
                        # In other case we can safely update cache on
                        # disk.
                        branchmap.updatecache(repo.filtered('served'))

                    def runhooks():
                        # These hooks run when the lock releases, not when the
                        # transaction closes. So it's possible for the changelog
                        # to have changed since we last saw it.
                        if clstart >= len(repo):
                            return

                        # forcefully update the on-disk branch cache
                        repo.ui.debug("updating the branch cache\n")
                        repo.hook("changegroup", **hookargs)

                        for n in added:
                            args = hookargs.copy()
                            args['node'] = hex(n)
                            del args['node_last']
                            repo.hook("incoming", **args)

                        newheads = [h for h in repo.heads()
                                    if h not in oldheads]
                        repo.ui.log("incoming",
                                    "%s incoming changes - new heads: %s\n",
                                    len(added),
                                    ', '.join([hex(c[:6]) for c in newheads]))

                    tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                    lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
442
442
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        # cg2 transmits the delta base explicitly, so prevnode is unused
        node, p1, p2, deltabase, cs = headertuple
        flags = 0
        return node, p1, p2, deltabase, cs, flags
458
458
class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        # cg3 carries the revlog flags on the wire already
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
                                                  numchanges)
        # an empty filelog header ({}) marks the end of the directory
        # manifest groups
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % d)
            dirlog = repo.manifest.dirlog(d)
            if not dirlog.addgroup(self, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))
488
485
class headerlessfixup(object):
    """Wrap a stream whose header bytes were already consumed.

    read() serves bytes from the saved header ``h`` first, then falls
    through to the underlying file handle ``fh``.
    """
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        buffered = self._h
        if not buffered:
            return readexactly(self._fh, n)
        out, self._h = buffered[:n], buffered[n:]
        if len(out) < n:
            # header exhausted mid-request; top up from the real stream
            out += readexactly(self._fh, n - len(out))
        return out
500
497
class cg1packer(object):
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    version = '01'
    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        # experimental config: bundle.reorder
        reorder = repo.ui.config('bundle', 'reorder', 'auto')
        reorder = None if reorder == 'auto' else util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None
527
524
528 def close(self):
525 def close(self):
529 return closechunk()
526 return closechunk()
530
527
531 def fileheader(self, fname):
528 def fileheader(self, fname):
532 return chunkheader(len(fname)) + fname
529 return chunkheader(len(fname)) + fname
533
530
534 # Extracted both for clarity and for overriding in extensions.
531 # Extracted both for clarity and for overriding in extensions.
535 def _sortgroup(self, revlog, nodelist, lookup):
532 def _sortgroup(self, revlog, nodelist, lookup):
536 """Sort nodes for change group and turn them into revnums."""
533 """Sort nodes for change group and turn them into revnums."""
537 # for generaldelta revlogs, we linearize the revs; this will both be
534 # for generaldelta revlogs, we linearize the revs; this will both be
538 # much quicker and generate a much smaller bundle
535 # much quicker and generate a much smaller bundle
539 if (revlog._generaldelta and self._reorder is None) or self._reorder:
536 if (revlog._generaldelta and self._reorder is None) or self._reorder:
540 dag = dagutil.revlogdag(revlog)
537 dag = dagutil.revlogdag(revlog)
541 return dag.linearize(set(revlog.rev(n) for n in nodelist))
538 return dag.linearize(set(revlog.rev(n) for n in nodelist))
542 else:
539 else:
543 return sorted([revlog.rev(n) for n in nodelist])
540 return sorted([revlog.rev(n) for n in nodelist])
544
541
545 def group(self, nodelist, revlog, lookup, units=None):
542 def group(self, nodelist, revlog, lookup, units=None):
546 """Calculate a delta group, yielding a sequence of changegroup chunks
543 """Calculate a delta group, yielding a sequence of changegroup chunks
547 (strings).
544 (strings).
548
545
549 Given a list of changeset revs, return a set of deltas and
546 Given a list of changeset revs, return a set of deltas and
550 metadata corresponding to nodes. The first delta is
547 metadata corresponding to nodes. The first delta is
551 first parent(nodelist[0]) -> nodelist[0], the receiver is
548 first parent(nodelist[0]) -> nodelist[0], the receiver is
552 guaranteed to have this parent as it has all history before
549 guaranteed to have this parent as it has all history before
553 these changesets. In the case firstparent is nullrev the
550 these changesets. In the case firstparent is nullrev the
554 changegroup starts with a full revision.
551 changegroup starts with a full revision.
555
552
556 If units is not None, progress detail will be generated, units specifies
553 If units is not None, progress detail will be generated, units specifies
557 the type of revlog that is touched (changelog, manifest, etc.).
554 the type of revlog that is touched (changelog, manifest, etc.).
558 """
555 """
559 # if we don't have any revisions touched by these changesets, bail
556 # if we don't have any revisions touched by these changesets, bail
560 if len(nodelist) == 0:
557 if len(nodelist) == 0:
561 yield self.close()
558 yield self.close()
562 return
559 return
563
560
564 revs = self._sortgroup(revlog, nodelist, lookup)
561 revs = self._sortgroup(revlog, nodelist, lookup)
565
562
566 # add the parent of the first rev
563 # add the parent of the first rev
567 p = revlog.parentrevs(revs[0])[0]
564 p = revlog.parentrevs(revs[0])[0]
568 revs.insert(0, p)
565 revs.insert(0, p)
569
566
570 # build deltas
567 # build deltas
571 total = len(revs) - 1
568 total = len(revs) - 1
572 msgbundling = _('bundling')
569 msgbundling = _('bundling')
573 for r in xrange(len(revs) - 1):
570 for r in xrange(len(revs) - 1):
574 if units is not None:
571 if units is not None:
575 self._progress(msgbundling, r + 1, unit=units, total=total)
572 self._progress(msgbundling, r + 1, unit=units, total=total)
576 prev, curr = revs[r], revs[r + 1]
573 prev, curr = revs[r], revs[r + 1]
577 linknode = lookup(revlog.node(curr))
574 linknode = lookup(revlog.node(curr))
578 for c in self.revchunk(revlog, curr, prev, linknode):
575 for c in self.revchunk(revlog, curr, prev, linknode):
579 yield c
576 yield c
580
577
581 if units is not None:
578 if units is not None:
582 self._progress(msgbundling, None)
579 self._progress(msgbundling, None)
583 yield self.close()
580 yield self.close()
584
581
585 # filter any nodes that claim to be part of the known set
582 # filter any nodes that claim to be part of the known set
586 def prune(self, revlog, missing, commonrevs):
583 def prune(self, revlog, missing, commonrevs):
587 rr, rl = revlog.rev, revlog.linkrev
584 rr, rl = revlog.rev, revlog.linkrev
588 return [n for n in missing if rl(rr(n)) not in commonrevs]
585 return [n for n in missing if rl(rr(n)) not in commonrevs]
589
586
590 def _packmanifests(self, dir, mfnodes, lookuplinknode):
587 def _packmanifests(self, dir, mfnodes, lookuplinknode):
591 """Pack flat manifests into a changegroup stream."""
588 """Pack flat manifests into a changegroup stream."""
592 assert not dir
589 assert not dir
593 for chunk in self.group(mfnodes, self._repo.manifest,
590 for chunk in self.group(mfnodes, self._repo.manifest,
594 lookuplinknode, units=_('manifests')):
591 lookuplinknode, units=_('manifests')):
595 yield chunk
592 yield chunk
596
593
597 def _manifestsdone(self):
594 def _manifestsdone(self):
598 return ''
595 return ''
599
596
600 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
597 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
601 '''yield a sequence of changegroup chunks (strings)'''
598 '''yield a sequence of changegroup chunks (strings)'''
602 repo = self._repo
599 repo = self._repo
603 cl = repo.changelog
600 cl = repo.changelog
604
601
605 clrevorder = {}
602 clrevorder = {}
606 mfs = {} # needed manifests
603 mfs = {} # needed manifests
607 fnodes = {} # needed file nodes
604 fnodes = {} # needed file nodes
608 changedfiles = set()
605 changedfiles = set()
609
606
610 # Callback for the changelog, used to collect changed files and manifest
607 # Callback for the changelog, used to collect changed files and manifest
611 # nodes.
608 # nodes.
612 # Returns the linkrev node (identity in the changelog case).
609 # Returns the linkrev node (identity in the changelog case).
613 def lookupcl(x):
610 def lookupcl(x):
614 c = cl.read(x)
611 c = cl.read(x)
615 clrevorder[x] = len(clrevorder)
612 clrevorder[x] = len(clrevorder)
616 n = c[0]
613 n = c[0]
617 # record the first changeset introducing this manifest version
614 # record the first changeset introducing this manifest version
618 mfs.setdefault(n, x)
615 mfs.setdefault(n, x)
619 # Record a complete list of potentially-changed files in
616 # Record a complete list of potentially-changed files in
620 # this manifest.
617 # this manifest.
621 changedfiles.update(c[3])
618 changedfiles.update(c[3])
622 return x
619 return x
623
620
624 self._verbosenote(_('uncompressed size of bundle content:\n'))
621 self._verbosenote(_('uncompressed size of bundle content:\n'))
625 size = 0
622 size = 0
626 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
623 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
627 size += len(chunk)
624 size += len(chunk)
628 yield chunk
625 yield chunk
629 self._verbosenote(_('%8.i (changelog)\n') % size)
626 self._verbosenote(_('%8.i (changelog)\n') % size)
630
627
631 # We need to make sure that the linkrev in the changegroup refers to
628 # We need to make sure that the linkrev in the changegroup refers to
632 # the first changeset that introduced the manifest or file revision.
629 # the first changeset that introduced the manifest or file revision.
633 # The fastpath is usually safer than the slowpath, because the filelogs
630 # The fastpath is usually safer than the slowpath, because the filelogs
634 # are walked in revlog order.
631 # are walked in revlog order.
635 #
632 #
636 # When taking the slowpath with reorder=None and the manifest revlog
633 # When taking the slowpath with reorder=None and the manifest revlog
637 # uses generaldelta, the manifest may be walked in the "wrong" order.
634 # uses generaldelta, the manifest may be walked in the "wrong" order.
638 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
635 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
639 # cc0ff93d0c0c).
636 # cc0ff93d0c0c).
640 #
637 #
641 # When taking the fastpath, we are only vulnerable to reordering
638 # When taking the fastpath, we are only vulnerable to reordering
642 # of the changelog itself. The changelog never uses generaldelta, so
639 # of the changelog itself. The changelog never uses generaldelta, so
643 # it is only reordered when reorder=True. To handle this case, we
640 # it is only reordered when reorder=True. To handle this case, we
644 # simply take the slowpath, which already has the 'clrevorder' logic.
641 # simply take the slowpath, which already has the 'clrevorder' logic.
645 # This was also fixed in cc0ff93d0c0c.
642 # This was also fixed in cc0ff93d0c0c.
646 fastpathlinkrev = fastpathlinkrev and not self._reorder
643 fastpathlinkrev = fastpathlinkrev and not self._reorder
647 # Treemanifests don't work correctly with fastpathlinkrev
644 # Treemanifests don't work correctly with fastpathlinkrev
648 # either, because we don't discover which directory nodes to
645 # either, because we don't discover which directory nodes to
649 # send along with files. This could probably be fixed.
646 # send along with files. This could probably be fixed.
650 fastpathlinkrev = fastpathlinkrev and (
647 fastpathlinkrev = fastpathlinkrev and (
651 'treemanifest' not in repo.requirements)
648 'treemanifest' not in repo.requirements)
652
649
653 for chunk in self.generatemanifests(commonrevs, clrevorder,
650 for chunk in self.generatemanifests(commonrevs, clrevorder,
654 fastpathlinkrev, mfs, fnodes):
651 fastpathlinkrev, mfs, fnodes):
655 yield chunk
652 yield chunk
656 mfs.clear()
653 mfs.clear()
657 clrevs = set(cl.rev(x) for x in clnodes)
654 clrevs = set(cl.rev(x) for x in clnodes)
658
655
659 if not fastpathlinkrev:
656 if not fastpathlinkrev:
660 def linknodes(unused, fname):
657 def linknodes(unused, fname):
661 return fnodes.get(fname, {})
658 return fnodes.get(fname, {})
662 else:
659 else:
663 cln = cl.node
660 cln = cl.node
664 def linknodes(filerevlog, fname):
661 def linknodes(filerevlog, fname):
665 llr = filerevlog.linkrev
662 llr = filerevlog.linkrev
666 fln = filerevlog.node
663 fln = filerevlog.node
667 revs = ((r, llr(r)) for r in filerevlog)
664 revs = ((r, llr(r)) for r in filerevlog)
668 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
665 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
669
666
670 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
667 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
671 source):
668 source):
672 yield chunk
669 yield chunk
673
670
674 yield self.close()
671 yield self.close()
675
672
676 if clnodes:
673 if clnodes:
677 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
674 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
678
675
679 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
676 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
680 fnodes):
677 fnodes):
681 repo = self._repo
678 repo = self._repo
682 dirlog = repo.manifest.dirlog
679 dirlog = repo.manifest.dirlog
683 tmfnodes = {'': mfs}
680 tmfnodes = {'': mfs}
684
681
685 # Callback for the manifest, used to collect linkrevs for filelog
682 # Callback for the manifest, used to collect linkrevs for filelog
686 # revisions.
683 # revisions.
687 # Returns the linkrev node (collected in lookupcl).
684 # Returns the linkrev node (collected in lookupcl).
688 def makelookupmflinknode(dir):
685 def makelookupmflinknode(dir):
689 if fastpathlinkrev:
686 if fastpathlinkrev:
690 assert not dir
687 assert not dir
691 return mfs.__getitem__
688 return mfs.__getitem__
692
689
693 def lookupmflinknode(x):
690 def lookupmflinknode(x):
694 """Callback for looking up the linknode for manifests.
691 """Callback for looking up the linknode for manifests.
695
692
696 Returns the linkrev node for the specified manifest.
693 Returns the linkrev node for the specified manifest.
697
694
698 SIDE EFFECT:
695 SIDE EFFECT:
699
696
700 1) fclnodes gets populated with the list of relevant
697 1) fclnodes gets populated with the list of relevant
701 file nodes if we're not using fastpathlinkrev
698 file nodes if we're not using fastpathlinkrev
702 2) When treemanifests are in use, collects treemanifest nodes
699 2) When treemanifests are in use, collects treemanifest nodes
703 to send
700 to send
704
701
705 Note that this means manifests must be completely sent to
702 Note that this means manifests must be completely sent to
706 the client before you can trust the list of files and
703 the client before you can trust the list of files and
707 treemanifests to send.
704 treemanifests to send.
708 """
705 """
709 clnode = tmfnodes[dir][x]
706 clnode = tmfnodes[dir][x]
710 mdata = dirlog(dir).readshallowfast(x)
707 mdata = dirlog(dir).readshallowfast(x)
711 for p, n, fl in mdata.iterentries():
708 for p, n, fl in mdata.iterentries():
712 if fl == 't': # subdirectory manifest
709 if fl == 't': # subdirectory manifest
713 subdir = dir + p + '/'
710 subdir = dir + p + '/'
714 tmfclnodes = tmfnodes.setdefault(subdir, {})
711 tmfclnodes = tmfnodes.setdefault(subdir, {})
715 tmfclnode = tmfclnodes.setdefault(n, clnode)
712 tmfclnode = tmfclnodes.setdefault(n, clnode)
716 if clrevorder[clnode] < clrevorder[tmfclnode]:
713 if clrevorder[clnode] < clrevorder[tmfclnode]:
717 tmfclnodes[n] = clnode
714 tmfclnodes[n] = clnode
718 else:
715 else:
719 f = dir + p
716 f = dir + p
720 fclnodes = fnodes.setdefault(f, {})
717 fclnodes = fnodes.setdefault(f, {})
721 fclnode = fclnodes.setdefault(n, clnode)
718 fclnode = fclnodes.setdefault(n, clnode)
722 if clrevorder[clnode] < clrevorder[fclnode]:
719 if clrevorder[clnode] < clrevorder[fclnode]:
723 fclnodes[n] = clnode
720 fclnodes[n] = clnode
724 return clnode
721 return clnode
725 return lookupmflinknode
722 return lookupmflinknode
726
723
727 size = 0
724 size = 0
728 while tmfnodes:
725 while tmfnodes:
729 dir = min(tmfnodes)
726 dir = min(tmfnodes)
730 nodes = tmfnodes[dir]
727 nodes = tmfnodes[dir]
731 prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
728 prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
732 if not dir or prunednodes:
729 if not dir or prunednodes:
733 for x in self._packmanifests(dir, prunednodes,
730 for x in self._packmanifests(dir, prunednodes,
734 makelookupmflinknode(dir)):
731 makelookupmflinknode(dir)):
735 size += len(x)
732 size += len(x)
736 yield x
733 yield x
737 del tmfnodes[dir]
734 del tmfnodes[dir]
738 self._verbosenote(_('%8.i (manifests)\n') % size)
735 self._verbosenote(_('%8.i (manifests)\n') % size)
739 yield self._manifestsdone()
736 yield self._manifestsdone()
740
737
741 # The 'source' parameter is useful for extensions
738 # The 'source' parameter is useful for extensions
742 def generatefiles(self, changedfiles, linknodes, commonrevs, source):
739 def generatefiles(self, changedfiles, linknodes, commonrevs, source):
743 repo = self._repo
740 repo = self._repo
744 progress = self._progress
741 progress = self._progress
745 msgbundling = _('bundling')
742 msgbundling = _('bundling')
746
743
747 total = len(changedfiles)
744 total = len(changedfiles)
748 # for progress output
745 # for progress output
749 msgfiles = _('files')
746 msgfiles = _('files')
750 for i, fname in enumerate(sorted(changedfiles)):
747 for i, fname in enumerate(sorted(changedfiles)):
751 filerevlog = repo.file(fname)
748 filerevlog = repo.file(fname)
752 if not filerevlog:
749 if not filerevlog:
753 raise error.Abort(_("empty or missing revlog for %s") % fname)
750 raise error.Abort(_("empty or missing revlog for %s") % fname)
754
751
755 linkrevnodes = linknodes(filerevlog, fname)
752 linkrevnodes = linknodes(filerevlog, fname)
756 # Lookup for filenodes, we collected the linkrev nodes above in the
753 # Lookup for filenodes, we collected the linkrev nodes above in the
757 # fastpath case and with lookupmf in the slowpath case.
754 # fastpath case and with lookupmf in the slowpath case.
758 def lookupfilelog(x):
755 def lookupfilelog(x):
759 return linkrevnodes[x]
756 return linkrevnodes[x]
760
757
761 filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
758 filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
762 if filenodes:
759 if filenodes:
763 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
760 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
764 total=total)
761 total=total)
765 h = self.fileheader(fname)
762 h = self.fileheader(fname)
766 size = len(h)
763 size = len(h)
767 yield h
764 yield h
768 for chunk in self.group(filenodes, filerevlog, lookupfilelog):
765 for chunk in self.group(filenodes, filerevlog, lookupfilelog):
769 size += len(chunk)
766 size += len(chunk)
770 yield chunk
767 yield chunk
771 self._verbosenote(_('%8.i %s\n') % (size, fname))
768 self._verbosenote(_('%8.i %s\n') % (size, fname))
772 progress(msgbundling, None)
769 progress(msgbundling, None)
773
770
774 def deltaparent(self, revlog, rev, p1, p2, prev):
771 def deltaparent(self, revlog, rev, p1, p2, prev):
775 return prev
772 return prev
776
773
777 def revchunk(self, revlog, rev, prev, linknode):
774 def revchunk(self, revlog, rev, prev, linknode):
778 node = revlog.node(rev)
775 node = revlog.node(rev)
779 p1, p2 = revlog.parentrevs(rev)
776 p1, p2 = revlog.parentrevs(rev)
780 base = self.deltaparent(revlog, rev, p1, p2, prev)
777 base = self.deltaparent(revlog, rev, p1, p2, prev)
781
778
782 prefix = ''
779 prefix = ''
783 if revlog.iscensored(base) or revlog.iscensored(rev):
780 if revlog.iscensored(base) or revlog.iscensored(rev):
784 try:
781 try:
785 delta = revlog.revision(node)
782 delta = revlog.revision(node)
786 except error.CensoredNodeError as e:
783 except error.CensoredNodeError as e:
787 delta = e.tombstone
784 delta = e.tombstone
788 if base == nullrev:
785 if base == nullrev:
789 prefix = mdiff.trivialdiffheader(len(delta))
786 prefix = mdiff.trivialdiffheader(len(delta))
790 else:
787 else:
791 baselen = revlog.rawsize(base)
788 baselen = revlog.rawsize(base)
792 prefix = mdiff.replacediffheader(baselen, len(delta))
789 prefix = mdiff.replacediffheader(baselen, len(delta))
793 elif base == nullrev:
790 elif base == nullrev:
794 delta = revlog.revision(node)
791 delta = revlog.revision(node)
795 prefix = mdiff.trivialdiffheader(len(delta))
792 prefix = mdiff.trivialdiffheader(len(delta))
796 else:
793 else:
797 delta = revlog.revdiff(base, rev)
794 delta = revlog.revdiff(base, rev)
798 p1n, p2n = revlog.parents(node)
795 p1n, p2n = revlog.parents(node)
799 basenode = revlog.node(base)
796 basenode = revlog.node(base)
800 flags = revlog.flags(rev)
797 flags = revlog.flags(rev)
801 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
798 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
802 meta += prefix
799 meta += prefix
803 l = len(meta) + len(delta)
800 l = len(meta) + len(delta)
804 yield chunkheader(l)
801 yield chunkheader(l)
805 yield meta
802 yield meta
806 yield delta
803 yield delta
807 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
804 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
808 # do nothing with basenode, it is implicitly the previous one in HG10
805 # do nothing with basenode, it is implicitly the previous one in HG10
809 # do nothing with flags, it is implicitly 0 for cg1 and cg2
806 # do nothing with flags, it is implicitly 0 for cg1 and cg2
810 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
807 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
811
808
812 class cg2packer(cg1packer):
809 class cg2packer(cg1packer):
813 version = '02'
810 version = '02'
814 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
811 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
815
812
816 def __init__(self, repo, bundlecaps=None):
813 def __init__(self, repo, bundlecaps=None):
817 super(cg2packer, self).__init__(repo, bundlecaps)
814 super(cg2packer, self).__init__(repo, bundlecaps)
818 if self._reorder is None:
815 if self._reorder is None:
819 # Since generaldelta is directly supported by cg2, reordering
816 # Since generaldelta is directly supported by cg2, reordering
820 # generally doesn't help, so we disable it by default (treating
817 # generally doesn't help, so we disable it by default (treating
821 # bundle.reorder=auto just like bundle.reorder=False).
818 # bundle.reorder=auto just like bundle.reorder=False).
822 self._reorder = False
819 self._reorder = False
823
820
824 def deltaparent(self, revlog, rev, p1, p2, prev):
821 def deltaparent(self, revlog, rev, p1, p2, prev):
825 dp = revlog.deltaparent(rev)
822 dp = revlog.deltaparent(rev)
826 # avoid storing full revisions; pick prev in those cases
823 # avoid storing full revisions; pick prev in those cases
827 # also pick prev when we can't be sure remote has dp
824 # also pick prev when we can't be sure remote has dp
828 if dp == nullrev or (dp != p1 and dp != p2 and dp != prev):
825 if dp == nullrev or (dp != p1 and dp != p2 and dp != prev):
829 return prev
826 return prev
830 return dp
827 return dp
831
828
832 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
829 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
833 # Do nothing with flags, it is implicitly 0 in cg1 and cg2
830 # Do nothing with flags, it is implicitly 0 in cg1 and cg2
834 return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
831 return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
835
832
836 class cg3packer(cg2packer):
833 class cg3packer(cg2packer):
837 version = '03'
834 version = '03'
838 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
835 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
839
836
840 def _packmanifests(self, dir, mfnodes, lookuplinknode):
837 def _packmanifests(self, dir, mfnodes, lookuplinknode):
841 if dir:
838 if dir:
842 yield self.fileheader(dir)
839 yield self.fileheader(dir)
843 for chunk in self.group(mfnodes, self._repo.manifest.dirlog(dir),
840 for chunk in self.group(mfnodes, self._repo.manifest.dirlog(dir),
844 lookuplinknode, units=_('manifests')):
841 lookuplinknode, units=_('manifests')):
845 yield chunk
842 yield chunk
846
843
847 def _manifestsdone(self):
844 def _manifestsdone(self):
848 return self.close()
845 return self.close()
849
846
850 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
847 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
851 return struct.pack(
848 return struct.pack(
852 self.deltaheader, node, p1n, p2n, basenode, linknode, flags)
849 self.deltaheader, node, p1n, p2n, basenode, linknode, flags)
853
850
854 _packermap = {'01': (cg1packer, cg1unpacker),
851 _packermap = {'01': (cg1packer, cg1unpacker),
855 # cg2 adds support for exchanging generaldelta
852 # cg2 adds support for exchanging generaldelta
856 '02': (cg2packer, cg2unpacker),
853 '02': (cg2packer, cg2unpacker),
857 # cg3 adds support for exchanging revlog flags and treemanifests
854 # cg3 adds support for exchanging revlog flags and treemanifests
858 '03': (cg3packer, cg3unpacker),
855 '03': (cg3packer, cg3unpacker),
859 }
856 }
860
857
861 def allsupportedversions(ui):
858 def allsupportedversions(ui):
862 versions = set(_packermap.keys())
859 versions = set(_packermap.keys())
863 versions.discard('03')
860 versions.discard('03')
864 if (ui.configbool('experimental', 'changegroup3') or
861 if (ui.configbool('experimental', 'changegroup3') or
865 ui.configbool('experimental', 'treemanifest')):
862 ui.configbool('experimental', 'treemanifest')):
866 versions.add('03')
863 versions.add('03')
867 return versions
864 return versions
868
865
869 # Changegroup versions that can be applied to the repo
866 # Changegroup versions that can be applied to the repo
870 def supportedincomingversions(repo):
867 def supportedincomingversions(repo):
871 versions = allsupportedversions(repo.ui)
868 versions = allsupportedversions(repo.ui)
872 if 'treemanifest' in repo.requirements:
869 if 'treemanifest' in repo.requirements:
873 versions.add('03')
870 versions.add('03')
874 return versions
871 return versions
875
872
876 # Changegroup versions that can be created from the repo
873 # Changegroup versions that can be created from the repo
877 def supportedoutgoingversions(repo):
874 def supportedoutgoingversions(repo):
878 versions = allsupportedversions(repo.ui)
875 versions = allsupportedversions(repo.ui)
879 if 'treemanifest' in repo.requirements:
876 if 'treemanifest' in repo.requirements:
880 # Versions 01 and 02 support only flat manifests and it's just too
877 # Versions 01 and 02 support only flat manifests and it's just too
881 # expensive to convert between the flat manifest and tree manifest on
878 # expensive to convert between the flat manifest and tree manifest on
882 # the fly. Since tree manifests are hashed differently, all of history
879 # the fly. Since tree manifests are hashed differently, all of history
883 # would have to be converted. Instead, we simply don't even pretend to
880 # would have to be converted. Instead, we simply don't even pretend to
884 # support versions 01 and 02.
881 # support versions 01 and 02.
885 versions.discard('01')
882 versions.discard('01')
886 versions.discard('02')
883 versions.discard('02')
887 versions.add('03')
884 versions.add('03')
888 return versions
885 return versions
889
886
890 def safeversion(repo):
887 def safeversion(repo):
891 # Finds the smallest version that it's safe to assume clients of the repo
888 # Finds the smallest version that it's safe to assume clients of the repo
892 # will support. For example, all hg versions that support generaldelta also
889 # will support. For example, all hg versions that support generaldelta also
893 # support changegroup 02.
890 # support changegroup 02.
894 versions = supportedoutgoingversions(repo)
891 versions = supportedoutgoingversions(repo)
895 if 'generaldelta' in repo.requirements:
892 if 'generaldelta' in repo.requirements:
896 versions.discard('01')
893 versions.discard('01')
897 assert versions
894 assert versions
898 return min(versions)
895 return min(versions)
899
896
900 def getbundler(version, repo, bundlecaps=None):
897 def getbundler(version, repo, bundlecaps=None):
901 assert version in supportedoutgoingversions(repo)
898 assert version in supportedoutgoingversions(repo)
902 return _packermap[version][0](repo, bundlecaps)
899 return _packermap[version][0](repo, bundlecaps)
903
900
904 def getunbundler(version, fh, alg, extras=None):
901 def getunbundler(version, fh, alg, extras=None):
905 return _packermap[version][1](fh, alg, extras=extras)
902 return _packermap[version][1](fh, alg, extras=extras)
906
903
907 def _changegroupinfo(repo, nodes, source):
904 def _changegroupinfo(repo, nodes, source):
908 if repo.ui.verbose or source == 'bundle':
905 if repo.ui.verbose or source == 'bundle':
909 repo.ui.status(_("%d changesets found\n") % len(nodes))
906 repo.ui.status(_("%d changesets found\n") % len(nodes))
910 if repo.ui.debugflag:
907 if repo.ui.debugflag:
911 repo.ui.debug("list of changesets:\n")
908 repo.ui.debug("list of changesets:\n")
912 for node in nodes:
909 for node in nodes:
913 repo.ui.debug("%s\n" % hex(node))
910 repo.ui.debug("%s\n" % hex(node))
914
911
915 def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
912 def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
916 repo = repo.unfiltered()
913 repo = repo.unfiltered()
917 commonrevs = outgoing.common
914 commonrevs = outgoing.common
918 csets = outgoing.missing
915 csets = outgoing.missing
919 heads = outgoing.missingheads
916 heads = outgoing.missingheads
920 # We go through the fast path if we get told to, or if all (unfiltered
917 # We go through the fast path if we get told to, or if all (unfiltered
921 # heads have been requested (since we then know there all linkrevs will
918 # heads have been requested (since we then know there all linkrevs will
922 # be pulled by the client).
919 # be pulled by the client).
923 heads.sort()
920 heads.sort()
924 fastpathlinkrev = fastpath or (
921 fastpathlinkrev = fastpath or (
925 repo.filtername is None and heads == sorted(repo.heads()))
922 repo.filtername is None and heads == sorted(repo.heads()))
926
923
927 repo.hook('preoutgoing', throw=True, source=source)
924 repo.hook('preoutgoing', throw=True, source=source)
928 _changegroupinfo(repo, csets, source)
925 _changegroupinfo(repo, csets, source)
929 return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
926 return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
930
927
931 def getsubset(repo, outgoing, bundler, source, fastpath=False):
928 def getsubset(repo, outgoing, bundler, source, fastpath=False):
932 gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
929 gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
933 return getunbundler(bundler.version, util.chunkbuffer(gengroup), None,
930 return getunbundler(bundler.version, util.chunkbuffer(gengroup), None,
934 {'clcount': len(outgoing.missing)})
931 {'clcount': len(outgoing.missing)})
935
932
def changegroupsubset(repo, roots, heads, source, version='01'):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the roots and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    outgoing = discovery.outgoingbetween(repo, roots, heads)
    return getsubset(repo, outgoing, getbundler(version, repo), source)
def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
                           version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing. Returns a raw changegroup generator,
    or None when there is nothing outgoing."""
    if outgoing.missing:
        bundler = getbundler(version, repo, bundlecaps)
        return getsubsetraw(repo, outgoing, bundler, source)
    return None
def getlocalchangegroup(repo, source, outgoing, bundlecaps=None,
                        version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing. Returns None when there is nothing
    outgoing."""
    if outgoing.missing:
        bundler = getbundler(version, repo, bundlecaps)
        return getsubset(repo, outgoing, bundler, source)
    return None
def computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if not common:
        # Nothing known in common: use the null revision as the base.
        known = [nullid]
    else:
        # Drop nodes the local changelog does not actually have.
        hasnode = cl.hasnode
        known = [node for node in common if hasnode(node)]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(cl, known, heads)
def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None,
                   version='01'):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors common.

    If heads is None, use the local heads. If common is None, use [nullid].

    The nodes in common might not all be known locally due to the way the
    current discovery protocol works.
    """
    return getlocalchangegroup(repo, source,
                               computeoutgoing(repo, heads, common),
                               bundlecaps=bundlecaps, version=version)
def changegroup(repo, basenodes, source):
    # Delegate to changegroupsubset() instead of computing the subset
    # here, which avoids a race (issue1320).
    return changegroupsubset(repo, basenodes, repo.heads(), source)
def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    """Apply the file revlog groups in ``source`` to the repository.

    ``needfiles`` maps filename -> collection of nodes that must arrive in
    this changegroup; entries are checked off as their revisions are added,
    a spurious node aborts, and any filename left over must already be
    resolvable locally or we abort.  Returns a (revisions, files) pair of
    counters.
    """
    revisions = 0
    files = 0
    # An empty header dict is the sentinel ending the file portion of
    # the stream.
    for chunkdata in iter(source.filelogheader, {}):
        files += 1
        fname = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % fname)
        repo.ui.progress(_('files'), files, unit=_('files'),
                         total=expectedfiles)
        flog = repo.file(fname)
        oldlen = len(flog)
        try:
            if not flog.addgroup(source, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(flog) - oldlen
        if fname in needfiles:
            wanted = needfiles[fname]
            for rev in xrange(oldlen, len(flog)):
                node = flog.node(rev)
                if node not in wanted:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
                wanted.remove(node)
            if not wanted:
                del needfiles[fname]
    repo.ui.progress(_('files'), None)

    # Everything still listed in needfiles must already exist locally.
    for fname, wanted in needfiles.iteritems():
        flog = repo.file(fname)
        for node in wanted:
            try:
                flog.rev(node)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (fname, hex(node)))

    return revisions, files
General Comments 0
You need to be logged in to leave comments. Login now