##// END OF EJS Templates
changegroup: increase write buffer size to 128k...
Gregory Szorc -
r30212:260af198 default
parent child Browse files
Show More
@@ -1,1040 +1,1042 b''
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import os
10 import os
11 import struct
11 import struct
12 import tempfile
12 import tempfile
13 import weakref
13 import weakref
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 hex,
17 hex,
18 nullrev,
18 nullrev,
19 short,
19 short,
20 )
20 )
21
21
22 from . import (
22 from . import (
23 branchmap,
23 branchmap,
24 dagutil,
24 dagutil,
25 discovery,
25 discovery,
26 error,
26 error,
27 mdiff,
27 mdiff,
28 phases,
28 phases,
29 util,
29 util,
30 )
30 )
31
31
32 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
32 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
33 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
33 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
34 _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
34 _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
35
35
36 def readexactly(stream, n):
36 def readexactly(stream, n):
37 '''read n bytes from stream.read and abort if less was available'''
37 '''read n bytes from stream.read and abort if less was available'''
38 s = stream.read(n)
38 s = stream.read(n)
39 if len(s) < n:
39 if len(s) < n:
40 raise error.Abort(_("stream ended unexpectedly"
40 raise error.Abort(_("stream ended unexpectedly"
41 " (got %d bytes, expected %d)")
41 " (got %d bytes, expected %d)")
42 % (len(s), n))
42 % (len(s), n))
43 return s
43 return s
44
44
45 def getchunk(stream):
45 def getchunk(stream):
46 """return the next chunk from stream as a string"""
46 """return the next chunk from stream as a string"""
47 d = readexactly(stream, 4)
47 d = readexactly(stream, 4)
48 l = struct.unpack(">l", d)[0]
48 l = struct.unpack(">l", d)[0]
49 if l <= 4:
49 if l <= 4:
50 if l:
50 if l:
51 raise error.Abort(_("invalid chunk length %d") % l)
51 raise error.Abort(_("invalid chunk length %d") % l)
52 return ""
52 return ""
53 return readexactly(stream, l - 4)
53 return readexactly(stream, l - 4)
54
54
55 def chunkheader(length):
55 def chunkheader(length):
56 """return a changegroup chunk header (string)"""
56 """return a changegroup chunk header (string)"""
57 return struct.pack(">l", length + 4)
57 return struct.pack(">l", length + 4)
58
58
59 def closechunk():
59 def closechunk():
60 """return a changegroup chunk header (string) for a zero-length chunk"""
60 """return a changegroup chunk header (string) for a zero-length chunk"""
61 return struct.pack(">l", 0)
61 return struct.pack(">l", 0)
62
62
63 def combineresults(results):
63 def combineresults(results):
64 """logic to combine 0 or more addchangegroup results into one"""
64 """logic to combine 0 or more addchangegroup results into one"""
65 changedheads = 0
65 changedheads = 0
66 result = 1
66 result = 1
67 for ret in results:
67 for ret in results:
68 # If any changegroup result is 0, return 0
68 # If any changegroup result is 0, return 0
69 if ret == 0:
69 if ret == 0:
70 result = 0
70 result = 0
71 break
71 break
72 if ret < -1:
72 if ret < -1:
73 changedheads += ret + 1
73 changedheads += ret + 1
74 elif ret > 1:
74 elif ret > 1:
75 changedheads += ret - 1
75 changedheads += ret - 1
76 if changedheads > 0:
76 if changedheads > 0:
77 result = 1 + changedheads
77 result = 1 + changedheads
78 elif changedheads < 0:
78 elif changedheads < 0:
79 result = -1 + changedheads
79 result = -1 + changedheads
80 return result
80 return result
81
81
82 def writechunks(ui, chunks, filename, vfs=None):
82 def writechunks(ui, chunks, filename, vfs=None):
83 """Write chunks to a file and return its filename.
83 """Write chunks to a file and return its filename.
84
84
85 The stream is assumed to be a bundle file.
85 The stream is assumed to be a bundle file.
86 Existing files will not be overwritten.
86 Existing files will not be overwritten.
87 If no filename is specified, a temporary file is created.
87 If no filename is specified, a temporary file is created.
88 """
88 """
89 fh = None
89 fh = None
90 cleanup = None
90 cleanup = None
91 try:
91 try:
92 if filename:
92 if filename:
93 if vfs:
93 if vfs:
94 fh = vfs.open(filename, "wb")
94 fh = vfs.open(filename, "wb")
95 else:
95 else:
96 fh = open(filename, "wb")
96 # Increase default buffer size because default is usually
97 # small (4k is common on Linux).
98 fh = open(filename, "wb", 131072)
97 else:
99 else:
98 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
100 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
99 fh = os.fdopen(fd, "wb")
101 fh = os.fdopen(fd, "wb")
100 cleanup = filename
102 cleanup = filename
101 for c in chunks:
103 for c in chunks:
102 fh.write(c)
104 fh.write(c)
103 cleanup = None
105 cleanup = None
104 return filename
106 return filename
105 finally:
107 finally:
106 if fh is not None:
108 if fh is not None:
107 fh.close()
109 fh.close()
108 if cleanup is not None:
110 if cleanup is not None:
109 if filename and vfs:
111 if filename and vfs:
110 vfs.unlink(cleanup)
112 vfs.unlink(cleanup)
111 else:
113 else:
112 os.unlink(cleanup)
114 os.unlink(cleanup)
113
115
114 class cg1unpacker(object):
116 class cg1unpacker(object):
115 """Unpacker for cg1 changegroup streams.
117 """Unpacker for cg1 changegroup streams.
116
118
117 A changegroup unpacker handles the framing of the revision data in
119 A changegroup unpacker handles the framing of the revision data in
118 the wire format. Most consumers will want to use the apply()
120 the wire format. Most consumers will want to use the apply()
119 method to add the changes from the changegroup to a repository.
121 method to add the changes from the changegroup to a repository.
120
122
121 If you're forwarding a changegroup unmodified to another consumer,
123 If you're forwarding a changegroup unmodified to another consumer,
122 use getchunks(), which returns an iterator of changegroup
124 use getchunks(), which returns an iterator of changegroup
123 chunks. This is mostly useful for cases where you need to know the
125 chunks. This is mostly useful for cases where you need to know the
124 data stream has ended by observing the end of the changegroup.
126 data stream has ended by observing the end of the changegroup.
125
127
126 deltachunk() is useful only if you're applying delta data. Most
128 deltachunk() is useful only if you're applying delta data. Most
127 consumers should prefer apply() instead.
129 consumers should prefer apply() instead.
128
130
129 A few other public methods exist. Those are used only for
131 A few other public methods exist. Those are used only for
130 bundlerepo and some debug commands - their use is discouraged.
132 bundlerepo and some debug commands - their use is discouraged.
131 """
133 """
132 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
134 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
133 deltaheadersize = struct.calcsize(deltaheader)
135 deltaheadersize = struct.calcsize(deltaheader)
134 version = '01'
136 version = '01'
135 _grouplistcount = 1 # One list of files after the manifests
137 _grouplistcount = 1 # One list of files after the manifests
136
138
137 def __init__(self, fh, alg, extras=None):
139 def __init__(self, fh, alg, extras=None):
138 if alg == 'UN':
140 if alg == 'UN':
139 alg = None # get more modern without breaking too much
141 alg = None # get more modern without breaking too much
140 if not alg in util.decompressors:
142 if not alg in util.decompressors:
141 raise error.Abort(_('unknown stream compression type: %s')
143 raise error.Abort(_('unknown stream compression type: %s')
142 % alg)
144 % alg)
143 if alg == 'BZ':
145 if alg == 'BZ':
144 alg = '_truncatedBZ'
146 alg = '_truncatedBZ'
145 self._stream = util.decompressors[alg](fh)
147 self._stream = util.decompressors[alg](fh)
146 self._type = alg
148 self._type = alg
147 self.extras = extras or {}
149 self.extras = extras or {}
148 self.callback = None
150 self.callback = None
149
151
150 # These methods (compressed, read, seek, tell) all appear to only
152 # These methods (compressed, read, seek, tell) all appear to only
151 # be used by bundlerepo, but it's a little hard to tell.
153 # be used by bundlerepo, but it's a little hard to tell.
152 def compressed(self):
154 def compressed(self):
153 return self._type is not None
155 return self._type is not None
154 def read(self, l):
156 def read(self, l):
155 return self._stream.read(l)
157 return self._stream.read(l)
156 def seek(self, pos):
158 def seek(self, pos):
157 return self._stream.seek(pos)
159 return self._stream.seek(pos)
158 def tell(self):
160 def tell(self):
159 return self._stream.tell()
161 return self._stream.tell()
160 def close(self):
162 def close(self):
161 return self._stream.close()
163 return self._stream.close()
162
164
163 def _chunklength(self):
165 def _chunklength(self):
164 d = readexactly(self._stream, 4)
166 d = readexactly(self._stream, 4)
165 l = struct.unpack(">l", d)[0]
167 l = struct.unpack(">l", d)[0]
166 if l <= 4:
168 if l <= 4:
167 if l:
169 if l:
168 raise error.Abort(_("invalid chunk length %d") % l)
170 raise error.Abort(_("invalid chunk length %d") % l)
169 return 0
171 return 0
170 if self.callback:
172 if self.callback:
171 self.callback()
173 self.callback()
172 return l - 4
174 return l - 4
173
175
174 def changelogheader(self):
176 def changelogheader(self):
175 """v10 does not have a changelog header chunk"""
177 """v10 does not have a changelog header chunk"""
176 return {}
178 return {}
177
179
178 def manifestheader(self):
180 def manifestheader(self):
179 """v10 does not have a manifest header chunk"""
181 """v10 does not have a manifest header chunk"""
180 return {}
182 return {}
181
183
182 def filelogheader(self):
184 def filelogheader(self):
183 """return the header of the filelogs chunk, v10 only has the filename"""
185 """return the header of the filelogs chunk, v10 only has the filename"""
184 l = self._chunklength()
186 l = self._chunklength()
185 if not l:
187 if not l:
186 return {}
188 return {}
187 fname = readexactly(self._stream, l)
189 fname = readexactly(self._stream, l)
188 return {'filename': fname}
190 return {'filename': fname}
189
191
190 def _deltaheader(self, headertuple, prevnode):
192 def _deltaheader(self, headertuple, prevnode):
191 node, p1, p2, cs = headertuple
193 node, p1, p2, cs = headertuple
192 if prevnode is None:
194 if prevnode is None:
193 deltabase = p1
195 deltabase = p1
194 else:
196 else:
195 deltabase = prevnode
197 deltabase = prevnode
196 flags = 0
198 flags = 0
197 return node, p1, p2, deltabase, cs, flags
199 return node, p1, p2, deltabase, cs, flags
198
200
199 def deltachunk(self, prevnode):
201 def deltachunk(self, prevnode):
200 l = self._chunklength()
202 l = self._chunklength()
201 if not l:
203 if not l:
202 return {}
204 return {}
203 headerdata = readexactly(self._stream, self.deltaheadersize)
205 headerdata = readexactly(self._stream, self.deltaheadersize)
204 header = struct.unpack(self.deltaheader, headerdata)
206 header = struct.unpack(self.deltaheader, headerdata)
205 delta = readexactly(self._stream, l - self.deltaheadersize)
207 delta = readexactly(self._stream, l - self.deltaheadersize)
206 node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
208 node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
207 return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
209 return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
208 'deltabase': deltabase, 'delta': delta, 'flags': flags}
210 'deltabase': deltabase, 'delta': delta, 'flags': flags}
209
211
210 def getchunks(self):
212 def getchunks(self):
211 """returns all the chunks contains in the bundle
213 """returns all the chunks contains in the bundle
212
214
213 Used when you need to forward the binary stream to a file or another
215 Used when you need to forward the binary stream to a file or another
214 network API. To do so, it parse the changegroup data, otherwise it will
216 network API. To do so, it parse the changegroup data, otherwise it will
215 block in case of sshrepo because it don't know the end of the stream.
217 block in case of sshrepo because it don't know the end of the stream.
216 """
218 """
217 # an empty chunkgroup is the end of the changegroup
219 # an empty chunkgroup is the end of the changegroup
218 # a changegroup has at least 2 chunkgroups (changelog and manifest).
220 # a changegroup has at least 2 chunkgroups (changelog and manifest).
219 # after that, changegroup versions 1 and 2 have a series of groups
221 # after that, changegroup versions 1 and 2 have a series of groups
220 # with one group per file. changegroup 3 has a series of directory
222 # with one group per file. changegroup 3 has a series of directory
221 # manifests before the files.
223 # manifests before the files.
222 count = 0
224 count = 0
223 emptycount = 0
225 emptycount = 0
224 while emptycount < self._grouplistcount:
226 while emptycount < self._grouplistcount:
225 empty = True
227 empty = True
226 count += 1
228 count += 1
227 while True:
229 while True:
228 chunk = getchunk(self)
230 chunk = getchunk(self)
229 if not chunk:
231 if not chunk:
230 if empty and count > 2:
232 if empty and count > 2:
231 emptycount += 1
233 emptycount += 1
232 break
234 break
233 empty = False
235 empty = False
234 yield chunkheader(len(chunk))
236 yield chunkheader(len(chunk))
235 pos = 0
237 pos = 0
236 while pos < len(chunk):
238 while pos < len(chunk):
237 next = pos + 2**20
239 next = pos + 2**20
238 yield chunk[pos:next]
240 yield chunk[pos:next]
239 pos = next
241 pos = next
240 yield closechunk()
242 yield closechunk()
241
243
242 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
244 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
243 # We know that we'll never have more manifests than we had
245 # We know that we'll never have more manifests than we had
244 # changesets.
246 # changesets.
245 self.callback = prog(_('manifests'), numchanges)
247 self.callback = prog(_('manifests'), numchanges)
246 # no need to check for empty manifest group here:
248 # no need to check for empty manifest group here:
247 # if the result of the merge of 1 and 2 is the same in 3 and 4,
249 # if the result of the merge of 1 and 2 is the same in 3 and 4,
248 # no new manifest will be created and the manifest group will
250 # no new manifest will be created and the manifest group will
249 # be empty during the pull
251 # be empty during the pull
250 self.manifestheader()
252 self.manifestheader()
251 repo.manifest.addgroup(self, revmap, trp)
253 repo.manifest.addgroup(self, revmap, trp)
252 repo.ui.progress(_('manifests'), None)
254 repo.ui.progress(_('manifests'), None)
253 self.callback = None
255 self.callback = None
254
256
255 def apply(self, repo, srctype, url, emptyok=False,
257 def apply(self, repo, srctype, url, emptyok=False,
256 targetphase=phases.draft, expectedtotal=None):
258 targetphase=phases.draft, expectedtotal=None):
257 """Add the changegroup returned by source.read() to this repo.
259 """Add the changegroup returned by source.read() to this repo.
258 srctype is a string like 'push', 'pull', or 'unbundle'. url is
260 srctype is a string like 'push', 'pull', or 'unbundle'. url is
259 the URL of the repo where this changegroup is coming from.
261 the URL of the repo where this changegroup is coming from.
260
262
261 Return an integer summarizing the change to this repo:
263 Return an integer summarizing the change to this repo:
262 - nothing changed or no source: 0
264 - nothing changed or no source: 0
263 - more heads than before: 1+added heads (2..n)
265 - more heads than before: 1+added heads (2..n)
264 - fewer heads than before: -1-removed heads (-2..-n)
266 - fewer heads than before: -1-removed heads (-2..-n)
265 - number of heads stays the same: 1
267 - number of heads stays the same: 1
266 """
268 """
267 repo = repo.unfiltered()
269 repo = repo.unfiltered()
268 def csmap(x):
270 def csmap(x):
269 repo.ui.debug("add changeset %s\n" % short(x))
271 repo.ui.debug("add changeset %s\n" % short(x))
270 return len(cl)
272 return len(cl)
271
273
272 def revmap(x):
274 def revmap(x):
273 return cl.rev(x)
275 return cl.rev(x)
274
276
275 changesets = files = revisions = 0
277 changesets = files = revisions = 0
276
278
277 try:
279 try:
278 with repo.transaction("\n".join([srctype,
280 with repo.transaction("\n".join([srctype,
279 util.hidepassword(url)])) as tr:
281 util.hidepassword(url)])) as tr:
280 # The transaction could have been created before and already
282 # The transaction could have been created before and already
281 # carries source information. In this case we use the top
283 # carries source information. In this case we use the top
282 # level data. We overwrite the argument because we need to use
284 # level data. We overwrite the argument because we need to use
283 # the top level value (if they exist) in this function.
285 # the top level value (if they exist) in this function.
284 srctype = tr.hookargs.setdefault('source', srctype)
286 srctype = tr.hookargs.setdefault('source', srctype)
285 url = tr.hookargs.setdefault('url', url)
287 url = tr.hookargs.setdefault('url', url)
286 repo.hook('prechangegroup', throw=True, **tr.hookargs)
288 repo.hook('prechangegroup', throw=True, **tr.hookargs)
287
289
288 # write changelog data to temp files so concurrent readers
290 # write changelog data to temp files so concurrent readers
289 # will not see an inconsistent view
291 # will not see an inconsistent view
290 cl = repo.changelog
292 cl = repo.changelog
291 cl.delayupdate(tr)
293 cl.delayupdate(tr)
292 oldheads = cl.heads()
294 oldheads = cl.heads()
293
295
294 trp = weakref.proxy(tr)
296 trp = weakref.proxy(tr)
295 # pull off the changeset group
297 # pull off the changeset group
296 repo.ui.status(_("adding changesets\n"))
298 repo.ui.status(_("adding changesets\n"))
297 clstart = len(cl)
299 clstart = len(cl)
298 class prog(object):
300 class prog(object):
299 def __init__(self, step, total):
301 def __init__(self, step, total):
300 self._step = step
302 self._step = step
301 self._total = total
303 self._total = total
302 self._count = 1
304 self._count = 1
303 def __call__(self):
305 def __call__(self):
304 repo.ui.progress(self._step, self._count,
306 repo.ui.progress(self._step, self._count,
305 unit=_('chunks'), total=self._total)
307 unit=_('chunks'), total=self._total)
306 self._count += 1
308 self._count += 1
307 self.callback = prog(_('changesets'), expectedtotal)
309 self.callback = prog(_('changesets'), expectedtotal)
308
310
309 efiles = set()
311 efiles = set()
310 def onchangelog(cl, node):
312 def onchangelog(cl, node):
311 efiles.update(cl.readfiles(node))
313 efiles.update(cl.readfiles(node))
312
314
313 self.changelogheader()
315 self.changelogheader()
314 srccontent = cl.addgroup(self, csmap, trp,
316 srccontent = cl.addgroup(self, csmap, trp,
315 addrevisioncb=onchangelog)
317 addrevisioncb=onchangelog)
316 efiles = len(efiles)
318 efiles = len(efiles)
317
319
318 if not (srccontent or emptyok):
320 if not (srccontent or emptyok):
319 raise error.Abort(_("received changelog group is empty"))
321 raise error.Abort(_("received changelog group is empty"))
320 clend = len(cl)
322 clend = len(cl)
321 changesets = clend - clstart
323 changesets = clend - clstart
322 repo.ui.progress(_('changesets'), None)
324 repo.ui.progress(_('changesets'), None)
323 self.callback = None
325 self.callback = None
324
326
325 # pull off the manifest group
327 # pull off the manifest group
326 repo.ui.status(_("adding manifests\n"))
328 repo.ui.status(_("adding manifests\n"))
327 self._unpackmanifests(repo, revmap, trp, prog, changesets)
329 self._unpackmanifests(repo, revmap, trp, prog, changesets)
328
330
329 needfiles = {}
331 needfiles = {}
330 if repo.ui.configbool('server', 'validate', default=False):
332 if repo.ui.configbool('server', 'validate', default=False):
331 # validate incoming csets have their manifests
333 # validate incoming csets have their manifests
332 for cset in xrange(clstart, clend):
334 for cset in xrange(clstart, clend):
333 mfnode = repo.changelog.read(
335 mfnode = repo.changelog.read(
334 repo.changelog.node(cset))[0]
336 repo.changelog.node(cset))[0]
335 mfest = repo.manifestlog[mfnode].readdelta()
337 mfest = repo.manifestlog[mfnode].readdelta()
336 # store file nodes we must see
338 # store file nodes we must see
337 for f, n in mfest.iteritems():
339 for f, n in mfest.iteritems():
338 needfiles.setdefault(f, set()).add(n)
340 needfiles.setdefault(f, set()).add(n)
339
341
340 # process the files
342 # process the files
341 repo.ui.status(_("adding file changes\n"))
343 repo.ui.status(_("adding file changes\n"))
342 newrevs, newfiles = _addchangegroupfiles(
344 newrevs, newfiles = _addchangegroupfiles(
343 repo, self, revmap, trp, efiles, needfiles)
345 repo, self, revmap, trp, efiles, needfiles)
344 revisions += newrevs
346 revisions += newrevs
345 files += newfiles
347 files += newfiles
346
348
347 dh = 0
349 dh = 0
348 if oldheads:
350 if oldheads:
349 heads = cl.heads()
351 heads = cl.heads()
350 dh = len(heads) - len(oldheads)
352 dh = len(heads) - len(oldheads)
351 for h in heads:
353 for h in heads:
352 if h not in oldheads and repo[h].closesbranch():
354 if h not in oldheads and repo[h].closesbranch():
353 dh -= 1
355 dh -= 1
354 htext = ""
356 htext = ""
355 if dh:
357 if dh:
356 htext = _(" (%+d heads)") % dh
358 htext = _(" (%+d heads)") % dh
357
359
358 repo.ui.status(_("added %d changesets"
360 repo.ui.status(_("added %d changesets"
359 " with %d changes to %d files%s\n")
361 " with %d changes to %d files%s\n")
360 % (changesets, revisions, files, htext))
362 % (changesets, revisions, files, htext))
361 repo.invalidatevolatilesets()
363 repo.invalidatevolatilesets()
362
364
363 if changesets > 0:
365 if changesets > 0:
364 if 'node' not in tr.hookargs:
366 if 'node' not in tr.hookargs:
365 tr.hookargs['node'] = hex(cl.node(clstart))
367 tr.hookargs['node'] = hex(cl.node(clstart))
366 tr.hookargs['node_last'] = hex(cl.node(clend - 1))
368 tr.hookargs['node_last'] = hex(cl.node(clend - 1))
367 hookargs = dict(tr.hookargs)
369 hookargs = dict(tr.hookargs)
368 else:
370 else:
369 hookargs = dict(tr.hookargs)
371 hookargs = dict(tr.hookargs)
370 hookargs['node'] = hex(cl.node(clstart))
372 hookargs['node'] = hex(cl.node(clstart))
371 hookargs['node_last'] = hex(cl.node(clend - 1))
373 hookargs['node_last'] = hex(cl.node(clend - 1))
372 repo.hook('pretxnchangegroup', throw=True, **hookargs)
374 repo.hook('pretxnchangegroup', throw=True, **hookargs)
373
375
374 added = [cl.node(r) for r in xrange(clstart, clend)]
376 added = [cl.node(r) for r in xrange(clstart, clend)]
375 publishing = repo.publishing()
377 publishing = repo.publishing()
376 if srctype in ('push', 'serve'):
378 if srctype in ('push', 'serve'):
377 # Old servers can not push the boundary themselves.
379 # Old servers can not push the boundary themselves.
378 # New servers won't push the boundary if changeset already
380 # New servers won't push the boundary if changeset already
379 # exists locally as secret
381 # exists locally as secret
380 #
382 #
381 # We should not use added here but the list of all change in
383 # We should not use added here but the list of all change in
382 # the bundle
384 # the bundle
383 if publishing:
385 if publishing:
384 phases.advanceboundary(repo, tr, phases.public,
386 phases.advanceboundary(repo, tr, phases.public,
385 srccontent)
387 srccontent)
386 else:
388 else:
387 # Those changesets have been pushed from the
389 # Those changesets have been pushed from the
388 # outside, their phases are going to be pushed
390 # outside, their phases are going to be pushed
389 # alongside. Therefor `targetphase` is
391 # alongside. Therefor `targetphase` is
390 # ignored.
392 # ignored.
391 phases.advanceboundary(repo, tr, phases.draft,
393 phases.advanceboundary(repo, tr, phases.draft,
392 srccontent)
394 srccontent)
393 phases.retractboundary(repo, tr, phases.draft, added)
395 phases.retractboundary(repo, tr, phases.draft, added)
394 elif srctype != 'strip':
396 elif srctype != 'strip':
395 # publishing only alter behavior during push
397 # publishing only alter behavior during push
396 #
398 #
397 # strip should not touch boundary at all
399 # strip should not touch boundary at all
398 phases.retractboundary(repo, tr, targetphase, added)
400 phases.retractboundary(repo, tr, targetphase, added)
399
401
400 if changesets > 0:
402 if changesets > 0:
401 if srctype != 'strip':
403 if srctype != 'strip':
402 # During strip, branchcache is invalid but
404 # During strip, branchcache is invalid but
403 # coming call to `destroyed` will repair it.
405 # coming call to `destroyed` will repair it.
404 # In other case we can safely update cache on
406 # In other case we can safely update cache on
405 # disk.
407 # disk.
406 repo.ui.debug('updating the branch cache\n')
408 repo.ui.debug('updating the branch cache\n')
407 branchmap.updatecache(repo.filtered('served'))
409 branchmap.updatecache(repo.filtered('served'))
408
410
409 def runhooks():
411 def runhooks():
410 # These hooks run when the lock releases, not when the
412 # These hooks run when the lock releases, not when the
411 # transaction closes. So it's possible for the changelog
413 # transaction closes. So it's possible for the changelog
412 # to have changed since we last saw it.
414 # to have changed since we last saw it.
413 if clstart >= len(repo):
415 if clstart >= len(repo):
414 return
416 return
415
417
416 repo.hook("changegroup", **hookargs)
418 repo.hook("changegroup", **hookargs)
417
419
418 for n in added:
420 for n in added:
419 args = hookargs.copy()
421 args = hookargs.copy()
420 args['node'] = hex(n)
422 args['node'] = hex(n)
421 del args['node_last']
423 del args['node_last']
422 repo.hook("incoming", **args)
424 repo.hook("incoming", **args)
423
425
424 newheads = [h for h in repo.heads()
426 newheads = [h for h in repo.heads()
425 if h not in oldheads]
427 if h not in oldheads]
426 repo.ui.log("incoming",
428 repo.ui.log("incoming",
427 "%s incoming changes - new heads: %s\n",
429 "%s incoming changes - new heads: %s\n",
428 len(added),
430 len(added),
429 ', '.join([hex(c[:6]) for c in newheads]))
431 ', '.join([hex(c[:6]) for c in newheads]))
430
432
431 tr.addpostclose('changegroup-runhooks-%020i' % clstart,
433 tr.addpostclose('changegroup-runhooks-%020i' % clstart,
432 lambda tr: repo._afterlock(runhooks))
434 lambda tr: repo._afterlock(runhooks))
433 finally:
435 finally:
434 repo.ui.flush()
436 repo.ui.flush()
435 # never return 0 here:
437 # never return 0 here:
436 if dh < 0:
438 if dh < 0:
437 return dh - 1
439 return dh - 1
438 else:
440 else:
439 return dh + 1
441 return dh + 1
440
442
441 class cg2unpacker(cg1unpacker):
443 class cg2unpacker(cg1unpacker):
442 """Unpacker for cg2 streams.
444 """Unpacker for cg2 streams.
443
445
444 cg2 streams add support for generaldelta, so the delta header
446 cg2 streams add support for generaldelta, so the delta header
445 format is slightly different. All other features about the data
447 format is slightly different. All other features about the data
446 remain the same.
448 remain the same.
447 """
449 """
448 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
450 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
449 deltaheadersize = struct.calcsize(deltaheader)
451 deltaheadersize = struct.calcsize(deltaheader)
450 version = '02'
452 version = '02'
451
453
452 def _deltaheader(self, headertuple, prevnode):
454 def _deltaheader(self, headertuple, prevnode):
453 node, p1, p2, deltabase, cs = headertuple
455 node, p1, p2, deltabase, cs = headertuple
454 flags = 0
456 flags = 0
455 return node, p1, p2, deltabase, cs, flags
457 return node, p1, p2, deltabase, cs, flags
456
458
457 class cg3unpacker(cg2unpacker):
459 class cg3unpacker(cg2unpacker):
458 """Unpacker for cg3 streams.
460 """Unpacker for cg3 streams.
459
461
460 cg3 streams add support for exchanging treemanifests and revlog
462 cg3 streams add support for exchanging treemanifests and revlog
461 flags. It adds the revlog flags to the delta header and an empty chunk
463 flags. It adds the revlog flags to the delta header and an empty chunk
462 separating manifests and files.
464 separating manifests and files.
463 """
465 """
464 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
466 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
465 deltaheadersize = struct.calcsize(deltaheader)
467 deltaheadersize = struct.calcsize(deltaheader)
466 version = '03'
468 version = '03'
467 _grouplistcount = 2 # One list of manifests and one list of files
469 _grouplistcount = 2 # One list of manifests and one list of files
468
470
469 def _deltaheader(self, headertuple, prevnode):
471 def _deltaheader(self, headertuple, prevnode):
470 node, p1, p2, deltabase, cs, flags = headertuple
472 node, p1, p2, deltabase, cs, flags = headertuple
471 return node, p1, p2, deltabase, cs, flags
473 return node, p1, p2, deltabase, cs, flags
472
474
473 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
475 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
474 super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
476 super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
475 numchanges)
477 numchanges)
476 for chunkdata in iter(self.filelogheader, {}):
478 for chunkdata in iter(self.filelogheader, {}):
477 # If we get here, there are directory manifests in the changegroup
479 # If we get here, there are directory manifests in the changegroup
478 d = chunkdata["filename"]
480 d = chunkdata["filename"]
479 repo.ui.debug("adding %s revisions\n" % d)
481 repo.ui.debug("adding %s revisions\n" % d)
480 dirlog = repo.manifest.dirlog(d)
482 dirlog = repo.manifest.dirlog(d)
481 if not dirlog.addgroup(self, revmap, trp):
483 if not dirlog.addgroup(self, revmap, trp):
482 raise error.Abort(_("received dir revlog group is empty"))
484 raise error.Abort(_("received dir revlog group is empty"))
483
485
484 class headerlessfixup(object):
486 class headerlessfixup(object):
485 def __init__(self, fh, h):
487 def __init__(self, fh, h):
486 self._h = h
488 self._h = h
487 self._fh = fh
489 self._fh = fh
488 def read(self, n):
490 def read(self, n):
489 if self._h:
491 if self._h:
490 d, self._h = self._h[:n], self._h[n:]
492 d, self._h = self._h[:n], self._h[n:]
491 if len(d) < n:
493 if len(d) < n:
492 d += readexactly(self._fh, n - len(d))
494 d += readexactly(self._fh, n - len(d))
493 return d
495 return d
494 return readexactly(self._fh, n)
496 return readexactly(self._fh, n)
495
497
class cg1packer(object):
    """Changegroup packer for wire format version '01'.

    Streams changelog, manifest and filelog deltas as a sequence of
    length-prefixed chunks (see group()/revchunk()).
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    version = '01'

    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        # experimental config: bundle.reorder
        reorder = repo.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None

    def close(self):
        """Return the chunk (from closechunk()) that terminates a group."""
        return closechunk()

    def fileheader(self, fname):
        """Return the chunk announcing the start of filelog ``fname``."""
        return chunkheader(len(fname)) + fname

    # Extracted both for clarity and for overriding in extensions.
    def _sortgroup(self, revlog, nodelist, lookup):
        """Sort nodes for change group and turn them into revnums."""
        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and self._reorder is None) or self._reorder:
            dag = dagutil.revlogdag(revlog)
            return dag.linearize(set(revlog.rev(n) for n in nodelist))
        else:
            return sorted([revlog.rev(n) for n in nodelist])

    def group(self, nodelist, revlog, lookup, units=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated, units specifies
        the type of revlog that is touched (changelog, manifest, etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self.close()
            return

        revs = self._sortgroup(revlog, nodelist, lookup)

        # add the parent of the first rev
        p = revlog.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        total = len(revs) - 1
        msgbundling = _('bundling')
        for r in xrange(len(revs) - 1):
            if units is not None:
                self._progress(msgbundling, r + 1, unit=units, total=total)
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(revlog.node(curr))
            for c in self.revchunk(revlog, curr, prev, linknode):
                yield c

        if units is not None:
            self._progress(msgbundling, None)
        yield self.close()

    # filter any nodes that claim to be part of the known set
    def prune(self, revlog, missing, commonrevs):
        """Return the subset of ``missing`` whose linkrev is not common."""
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        """Pack flat manifests into a changegroup stream."""
        # cg1 only knows flat manifests, so a directory name is a bug here.
        assert not dir
        for chunk in self.group(mfnodes, self._repo.manifest,
                                lookuplinknode, units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        # cg1 has no explicit terminator after the manifest section.
        return ''

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        repo = self._repo
        cl = repo.changelog

        clrevorder = {}
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()

        # Callback for the changelog, used to collect changed files and manifest
        # nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            clrevorder[x] = len(clrevorder)
            n = c[0]
            # record the first changeset introducing this manifest version
            mfs.setdefault(n, x)
            # Record a complete list of potentially-changed files in
            # this manifest.
            changedfiles.update(c[3])
            return x

        self._verbosenote(_('uncompressed size of bundle content:\n'))
        size = 0
        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (changelog)\n') % size)

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the filelogs
        # are walked in revlog order.
        #
        # When taking the slowpath with reorder=None and the manifest revlog
        # uses generaldelta, the manifest may be walked in the "wrong" order.
        # Without 'clrevorder', we would get an incorrect linkrev (see fix in
        # cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta, so
        # it is only reordered when reorder=True. To handle this case, we
        # simply take the slowpath, which already has the 'clrevorder' logic.
        # This was also fixed in cc0ff93d0c0c.
        fastpathlinkrev = fastpathlinkrev and not self._reorder
        # Treemanifests don't work correctly with fastpathlinkrev
        # either, because we don't discover which directory nodes to
        # send along with files. This could probably be fixed.
        fastpathlinkrev = fastpathlinkrev and (
            'treemanifest' not in repo.requirements)

        for chunk in self.generatemanifests(commonrevs, clrevorder,
                                            fastpathlinkrev, mfs, fnodes):
            yield chunk
        mfs.clear()
        clrevs = set(cl.rev(x) for x in clnodes)

        if not fastpathlinkrev:
            def linknodes(unused, fname):
                return fnodes.get(fname, {})
        else:
            cln = cl.node
            def linknodes(filerevlog, fname):
                llr = filerevlog.linkrev
                fln = filerevlog.node
                revs = ((r, llr(r)) for r in filerevlog)
                return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)

        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

    def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
                          fnodes):
        """Yield the manifest portion of the changegroup.

        Walks tmfnodes (root manifest plus any subdirectory manifests
        discovered while packing) and, as a side effect, fills ``fnodes``
        with the filelog linkrev nodes needed by generatefiles().
        """
        repo = self._repo
        dirlog = repo.manifest.dirlog
        tmfnodes = {'': mfs}

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def makelookupmflinknode(dir):
            if fastpathlinkrev:
                assert not dir
                return mfs.__getitem__

            def lookupmflinknode(x):
                """Callback for looking up the linknode for manifests.

                Returns the linkrev node for the specified manifest.

                SIDE EFFECT:

                1) fclnodes gets populated with the list of relevant
                   file nodes if we're not using fastpathlinkrev
                2) When treemanifests are in use, collects treemanifest nodes
                   to send

                Note that this means manifests must be completely sent to
                the client before you can trust the list of files and
                treemanifests to send.
                """
                clnode = tmfnodes[dir][x]
                mdata = dirlog(dir).readshallowfast(x)
                for p, n, fl in mdata.iterentries():
                    if fl == 't': # subdirectory manifest
                        subdir = dir + p + '/'
                        tmfclnodes = tmfnodes.setdefault(subdir, {})
                        tmfclnode = tmfclnodes.setdefault(n, clnode)
                        # keep the earliest introducing changeset
                        if clrevorder[clnode] < clrevorder[tmfclnode]:
                            tmfclnodes[n] = clnode
                    else:
                        f = dir + p
                        fclnodes = fnodes.setdefault(f, {})
                        fclnode = fclnodes.setdefault(n, clnode)
                        # keep the earliest introducing changeset
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            fclnodes[n] = clnode
                return clnode
            return lookupmflinknode

        size = 0
        while tmfnodes:
            dir = min(tmfnodes)
            nodes = tmfnodes[dir]
            prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
            if not dir or prunednodes:
                for x in self._packmanifests(dir, prunednodes,
                                             makelookupmflinknode(dir)):
                    size += len(x)
                    yield x
            del tmfnodes[dir]
        self._verbosenote(_('%8.i (manifests)\n') % size)
        yield self._manifestsdone()

    # The 'source' parameter is useful for extensions
    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        """Yield the filelog portion of the changegroup.

        ``linknodes`` maps (filerevlog, fname) to the {filenode: linknode}
        dict collected during the changelog/manifest passes.
        """
        repo = self._repo
        progress = self._progress
        msgbundling = _('bundling')

        total = len(changedfiles)
        # for progress output
        msgfiles = _('files')
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise error.Abort(_("empty or missing revlog for %s") % fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
            if filenodes:
                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                         total=total)
                h = self.fileheader(fname)
                size = len(h)
                yield h
                for chunk in self.group(filenodes, filerevlog, lookupfilelog):
                    size += len(chunk)
                    yield chunk
                self._verbosenote(_('%8.i %s\n') % (size, fname))
        progress(msgbundling, None)

    def deltaparent(self, revlog, rev, p1, p2, prev):
        # cg1 always deltas against the previous rev in the stream.
        return prev

    def revchunk(self, revlog, rev, prev, linknode):
        """Yield the chunks (length header, metadata, delta) for one rev."""
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = self.deltaparent(revlog, rev, p1, p2, prev)

        prefix = ''
        if revlog.iscensored(base) or revlog.iscensored(rev):
            # Censored revisions are transmitted as their tombstone text
            # wrapped in a synthetic full-replacement diff header.
            try:
                delta = revlog.revision(node)
            except error.CensoredNodeError as e:
                delta = e.tombstone
            if base == nullrev:
                prefix = mdiff.trivialdiffheader(len(delta))
            else:
                baselen = revlog.rawsize(base)
                prefix = mdiff.replacediffheader(baselen, len(delta))
        elif base == nullrev:
            # No base: send the full text with a trivial diff header.
            delta = revlog.revision(node)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        flags = revlog.flags(rev)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # do nothing with basenode, it is implicitly the previous one in HG10
        # do nothing with flags, it is implicitly 0 for cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
806
808
class cg2packer(cg1packer):
    """Changegroup packer for wire format version '02'.

    cg2 adds support for exchanging generaldelta: the delta base node is
    transmitted explicitly in each chunk header.
    """
    version = '02'
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER

    def __init__(self, repo, bundlecaps=None):
        super(cg2packer, self).__init__(repo, bundlecaps)
        if self._reorder is None:
            # Since generaldelta is directly supported by cg2, reordering
            # generally doesn't help, so we disable it by default (treating
            # bundle.reorder=auto just like bundle.reorder=False).
            self._reorder = False

    def deltaparent(self, revlog, rev, p1, p2, prev):
        # Pick the base revision to delta against; cg2 can reuse the
        # revlog's stored delta parent when the receiver is sure to have it.
        dp = revlog.deltaparent(rev)
        if dp == nullrev and revlog.storedeltachains:
            # Avoid sending full revisions when delta parent is null. Pick prev
            # in that case. It's tempting to pick p1 in this case, as p1 will
            # be smaller in the common case. However, computing a delta against
            # p1 may require resolving the raw text of p1, which could be
            # expensive. The revlog caches should have prev cached, meaning
            # less CPU for changegroup generation. There is likely room to add
            # a flag and/or config option to control this behavior.
            return prev
        elif dp == nullrev:
            # revlog is configured to use full snapshot for a reason,
            # stick to full snapshot.
            return nullrev
        elif dp not in (p1, p2, prev):
            # Pick prev when we can't be sure remote has the base revision.
            return prev
        else:
            return dp

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # Do nothing with flags, it is implicitly 0 in cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
843
845
class cg3packer(cg2packer):
    """Changegroup packer for wire format version '03'.

    cg3 adds support for exchanging revlog flags and treemanifests.
    """
    version = '03'
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        # A subdirectory manifest group is introduced by a chunk carrying
        # the directory name; the root manifest ('') has no such header.
        if dir:
            yield self.fileheader(dir)
        dlog = self._repo.manifest.dirlog(dir)
        for chunk in self.group(mfnodes, dlog, lookuplinknode,
                                units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        # Unlike cg1/cg2, cg3 terminates the manifest section with an
        # explicit close chunk.
        return self.close()

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # cg3 transmits the revlog flags in addition to the delta base.
        return struct.pack(self.deltaheader,
                           node, p1n, p2n, basenode, linknode, flags)
861
863
# Map of wire protocol version -> (packer class, unpacker class).
_packermap = {'01': (cg1packer, cg1unpacker),
              # cg2 adds support for exchanging generaldelta
              '02': (cg2packer, cg2unpacker),
              # cg3 adds support for exchanging revlog flags and treemanifests
              '03': (cg3packer, cg3unpacker),
             }
868
870
def allsupportedversions(ui):
    """Return the set of changegroup versions supported in principle.

    Version '03' is included only when explicitly enabled through the
    experimental.changegroup3 or experimental.treemanifest config knobs.
    """
    supported = set(_packermap)
    cg3enabled = (ui.configbool('experimental', 'changegroup3') or
                  ui.configbool('experimental', 'treemanifest'))
    if not cg3enabled:
        supported.discard('03')
    return supported
876
878
def supportedincomingversions(repo):
    """Return the changegroup versions that can be applied to repo.

    A treemanifest repo must always be able to receive '03', regardless
    of the experimental knobs consulted by allsupportedversions().
    """
    incoming = allsupportedversions(repo.ui)
    if 'treemanifest' in repo.requirements:
        incoming.add('03')
    return incoming
883
885
def supportedoutgoingversions(repo):
    """Return the changegroup versions that can be created from repo."""
    outgoing = allsupportedversions(repo.ui)
    if 'treemanifest' not in repo.requirements:
        return outgoing
    # Versions 01 and 02 support only flat manifests and it's just too
    # expensive to convert between the flat manifest and tree manifest on
    # the fly. Since tree manifests are hashed differently, all of history
    # would have to be converted. Instead, we simply don't even pretend to
    # support versions 01 and 02.
    outgoing.difference_update(['01', '02'])
    outgoing.add('03')
    return outgoing
897
899
def safeversion(repo):
    """Return the smallest changegroup version all clients of this repo
    can safely be assumed to support.

    For example, all hg versions that support generaldelta also support
    changegroup 02, so '01' can be ruled out for generaldelta repos.
    """
    candidates = supportedoutgoingversions(repo)
    if 'generaldelta' in repo.requirements:
        candidates.discard('01')
    assert candidates
    return min(candidates)
907
909
def getbundler(version, repo, bundlecaps=None):
    """Instantiate the packer class for the given changegroup version."""
    assert version in supportedoutgoingversions(repo)
    packercls = _packermap[version][0]
    return packercls(repo, bundlecaps)
911
913
def getunbundler(version, fh, alg, extras=None):
    """Instantiate the unpacker class for the given changegroup version."""
    unpackercls = _packermap[version][1]
    return unpackercls(fh, alg, extras=extras)
914
916
def _changegroupinfo(repo, nodes, source):
    """Report the changesets included in an outgoing changegroup.

    Prints a summary line when verbose (or when writing a bundle), and
    the full node list when debugging is enabled.
    """
    ui = repo.ui
    if ui.verbose or source == 'bundle':
        ui.status(_("%d changesets found\n") % len(nodes))
    if ui.debugflag:
        ui.debug("list of changesets:\n")
        for n in nodes:
            ui.debug("%s\n" % hex(n))
922
924
def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
    """Fire the pre-outgoing hooks and return a raw changegroup generator
    covering ``outgoing.missing``."""
    repo = repo.unfiltered()
    missing = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know all linkrevs will be
    # pulled by the client).
    heads.sort()
    if not fastpath:
        fastpath = (repo.filtername is None
                    and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, missing, source)
    return bundler.generate(outgoing.common, missing, fastpath, source)
938
940
def getsubset(repo, outgoing, bundler, source, fastpath=False):
    """Like getsubsetraw(), but wrap the stream in an unbundler object."""
    stream = getsubsetraw(repo, outgoing, bundler, source, fastpath)
    extras = {'clcount': len(outgoing.missing)}
    return getunbundler(bundler.version, util.chunkbuffer(stream), None,
                        extras)
943
945
def changegroupsubset(repo, roots, heads, source, version='01'):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the roots and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    outgoing = discovery.outgoing(repo, missingroots=roots,
                                  missingheads=heads)
    return getsubset(repo, outgoing, getbundler(version, repo), source)
960
962
def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
                           version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing. Returns a raw changegroup generator."""
    # Nothing to send: signal that with None rather than an empty stream.
    if not outgoing.missing:
        return None
    packer = getbundler(version, repo, bundlecaps)
    return getsubsetraw(repo, outgoing, packer, source)
971
973
def getlocalchangegroup(repo, source, outgoing, bundlecaps=None,
                        version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing."""
    # Nothing to send: signal that with None rather than an empty bundle.
    if not outgoing.missing:
        return None
    packer = getbundler(version, repo, bundlecaps)
    return getsubset(repo, outgoing, packer, source)
982
984
def getchangegroup(repo, source, outgoing, bundlecaps=None,
                   version='01'):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors common.

    If heads is None, use the local heads. If common is None, use [nullid].

    The nodes in common might not all be known locally due to the way the
    current discovery protocol works.
    """
    # Pure delegation; kept as a separate entry point for API symmetry.
    return getlocalchangegroup(repo, source, outgoing,
                               bundlecaps=bundlecaps, version=version)
995
997
def changegroup(repo, basenodes, source):
    """Return a changegroup of everything reachable from ``basenodes``.

    Delegates to changegroupsubset() with the repository heads; using
    the subset path avoids a race (issue1320).
    """
    return changegroupsubset(repo, basenodes, repo.heads(), source)
999
1001
def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    """Consume the filelog section of a changegroup from ``source``.

    Each filelog group is appended to the corresponding revlog within
    transaction ``trp``.  Nodes listed in ``needfiles`` are checked off
    as they arrive; once the stream ends, any node still listed must
    already exist locally or we abort.

    Returns a ``(revisions, files)`` pair of counters.
    """
    revisions = 0
    files = 0
    # source.filelogheader() yields {} once the stream is exhausted.
    for chunkdata in iter(source.filelogheader, {}):
        files += 1
        fname = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % fname)
        repo.ui.progress(_('files'), files, unit=_('files'),
                         total=expectedfiles)
        flog = repo.file(fname)
        oldlen = len(flog)
        try:
            if not flog.addgroup(source, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(flog) - oldlen
        if fname in needfiles:
            needs = needfiles[fname]
            # Tick off every newly-added node; anything unexpected aborts.
            for newrev in xrange(oldlen, len(flog)):
                node = flog.node(newrev)
                if node not in needs:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
                needs.remove(node)
            if not needs:
                del needfiles[fname]
    repo.ui.progress(_('files'), None)

    # Whatever remains in needfiles was not in the stream; it must
    # already be present in the local filelogs.
    for fname, needs in needfiles.iteritems():
        flog = repo.file(fname)
        for node in needs:
            try:
                flog.rev(node)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (fname, hex(node)))

    return revisions, files
General Comments 0
You need to be logged in to leave comments. Login now