changegroup: cache changelog and manifestlog outside of loop...

Gregory Szorc, r30267:d92777f9 (default branch)
# changegroup.py - Mercurial changegroup manipulation functions
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import os
import struct
import tempfile
import weakref

from .i18n import _
from .node import (
    hex,
    nullrev,
    short,
)

from . import (
    branchmap,
    dagutil,
    discovery,
    error,
    mdiff,
    phases,
    util,
)

_CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
_CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
_CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"

def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    s = stream.read(n)
    if len(s) < n:
        raise error.Abort(_("stream ended unexpectedly"
                            " (got %d bytes, expected %d)")
                          % (len(s), n))
    return s

def getchunk(stream):
    """return the next chunk from stream as a string"""
    d = readexactly(stream, 4)
    l = struct.unpack(">l", d)[0]
    if l <= 4:
        if l:
            raise error.Abort(_("invalid chunk length %d") % l)
        return ""
    return readexactly(stream, l - 4)

def chunkheader(length):
    """return a changegroup chunk header (string)"""
    return struct.pack(">l", length + 4)

def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    return struct.pack(">l", 0)
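# Editor-added sketch (not part of upstream changegroup.py): a minimal
# round-trip through the chunk framing above. The length header counts its
# own four bytes, so a payload of n bytes is framed as chunkheader(n) + data,
# and a zero-length header marks the end of a group.
def _framing_example():
    import io
    payload = 'hello'
    stream = io.BytesIO(chunkheader(len(payload)) + payload + closechunk())
    assert getchunk(stream) == 'hello'  # framed payload comes back intact
    assert getchunk(stream) == ''       # empty chunk terminates the group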
def combineresults(results):
    """logic to combine 0 or more addchangegroup results into one"""
    changedheads = 0
    result = 1
    for ret in results:
        # If any changegroup result is 0, return 0
        if ret == 0:
            result = 0
            break
        if ret < -1:
            changedheads += ret + 1
        elif ret > 1:
            changedheads += ret - 1
    if changedheads > 0:
        result = 1 + changedheads
    elif changedheads < 0:
        result = -1 + changedheads
    return result
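# Editor-added sketch: combineresults() aggregates the return-code convention
# documented in cg1unpacker.apply() below (1 means "no head change", 1+n means
# "n heads added", -1-n means "n heads removed", 0 means "nothing changed").
def _combineresults_example():
    assert combineresults([1, 1]) == 1   # no head changes on either apply
    assert combineresults([3, 2]) == 4   # 2 + 1 added heads -> 1 + 3
    assert combineresults([3, -2]) == 2  # 2 added, 1 removed -> 1 + 1
    assert combineresults([0, 3]) == 0   # any 0 short-circuits to 0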
def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                # Increase default buffer size because default is usually
                # small (4k is common on Linux).
                fh = open(filename, "wb", 131072)
        else:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, "wb")
        cleanup = filename
        for c in chunks:
            fh.write(c)
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
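# Editor-added sketch: with filename=None, writechunks() spills to a mkstemp
# file and returns its path; the ui argument is not consumed by the body
# above, so None suffices for this minimal demonstration.
def _writechunks_example():
    path = writechunks(None, [chunkheader(1) + 'x', closechunk()], None)
    os.unlink(path)  # discard the temporary bundle again
    return path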
class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        if alg == 'UN':
            alg = None # get more modern without breaking too much
        if alg not in util.decompressors:
            raise error.Abort(_('unknown stream compression type: %s')
                              % alg)
        if alg == 'BZ':
            alg = '_truncatedBZ'
        self._stream = util.decompressors[alg](fh)
        self._type = alg
        self.extras = extras or {}
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                'deltabase': deltabase, 'delta': delta, 'flags': flags}
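    # Editor-added note: for cg1, deltaheadersize is 80 bytes - four 20-byte
    # binary nodes laid out as (node, p1, p2, linknode). The delta base is not
    # transmitted; _deltaheader() above infers it as the previous chunk's node
    # (or p1 for the first chunk of a group), which is why deltachunk() must
    # be fed prevnode by its caller.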
    def getchunks(self):
        """returns all the chunks contained in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parses the changegroup data; otherwise it
        would block in the sshrepo case because it doesn't know where the
        stream ends.
        """
        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, changegroup versions 1 and 2 have a series of groups
        # with one group per file. changegroup 3 has a series of directory
        # manifests before the files.
        count = 0
        emptycount = 0
        while emptycount < self._grouplistcount:
            empty = True
            count += 1
            while True:
                chunk = getchunk(self)
                if not chunk:
                    if empty and count > 2:
                        emptycount += 1
                    break
                empty = False
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()
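    # Editor-added note on the wire layout consumed above: a changegroup is a
    # changelog group, then a manifest group, then (for cg3) directory
    # manifest groups, then one group per file. Every group is a run of
    # length-prefixed chunks terminated by an empty chunk, so getchunks()
    # counts terminators until _grouplistcount trailing group lists have
    # ended.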
    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        # We know that we'll never have more manifests than we had
        # changesets.
        self.callback = prog(_('manifests'), numchanges)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        repo.manifest.addgroup(self, revmap, trp)
        repo.ui.progress(_('manifests'), None)
        self.callback = None

    def apply(self, repo, srctype, url, emptyok=False,
              targetphase=phases.draft, expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        try:
            with repo.transaction("\n".join([srctype,
                                             util.hidepassword(url)])) as tr:
                # The transaction could have been created before and already
                # carries source information. In this case we use the top
                # level data. We overwrite the arguments because we need to
                # use the top level values (if they exist) in this function.
                srctype = tr.hookargs.setdefault('source', srctype)
                url = tr.hookargs.setdefault('url', url)
                repo.hook('prechangegroup', throw=True, **tr.hookargs)

                # write changelog data to temp files so concurrent readers
                # will not see an inconsistent view
                cl = repo.changelog
                cl.delayupdate(tr)
                oldheads = cl.heads()

                trp = weakref.proxy(tr)
                # pull off the changeset group
                repo.ui.status(_("adding changesets\n"))
                clstart = len(cl)
                class prog(object):
                    def __init__(self, step, total):
                        self._step = step
                        self._total = total
                        self._count = 1
                    def __call__(self):
                        repo.ui.progress(self._step, self._count,
                                         unit=_('chunks'), total=self._total)
                        self._count += 1
                self.callback = prog(_('changesets'), expectedtotal)

                efiles = set()
                def onchangelog(cl, node):
                    efiles.update(cl.readfiles(node))

                self.changelogheader()
                srccontent = cl.addgroup(self, csmap, trp,
                                         addrevisioncb=onchangelog)
                efiles = len(efiles)

                if not (srccontent or emptyok):
                    raise error.Abort(_("received changelog group is empty"))
                clend = len(cl)
                changesets = clend - clstart
                repo.ui.progress(_('changesets'), None)
                self.callback = None

                # pull off the manifest group
                repo.ui.status(_("adding manifests\n"))
                self._unpackmanifests(repo, revmap, trp, prog, changesets)

                needfiles = {}
                if repo.ui.configbool('server', 'validate', default=False):
                    cl = repo.changelog
                    ml = repo.manifestlog
                    # validate incoming csets have their manifests
                    for cset in xrange(clstart, clend):
                        mfnode = cl.read(cl.node(cset))[0]
                        mfest = ml[mfnode].readdelta()
                        # store file nodes we must see
                        for f, n in mfest.iteritems():
                            needfiles.setdefault(f, set()).add(n)

                # process the files
                repo.ui.status(_("adding file changes\n"))
                newrevs, newfiles = _addchangegroupfiles(
                    repo, self, revmap, trp, efiles, needfiles)
                revisions += newrevs
                files += newfiles

                dh = 0
                if oldheads:
                    heads = cl.heads()
                    dh = len(heads) - len(oldheads)
                    for h in heads:
                        if h not in oldheads and repo[h].closesbranch():
                            dh -= 1
                htext = ""
                if dh:
                    htext = _(" (%+d heads)") % dh

                repo.ui.status(_("added %d changesets"
                                 " with %d changes to %d files%s\n")
                               % (changesets, revisions, files, htext))
                repo.invalidatevolatilesets()

                if changesets > 0:
                    if 'node' not in tr.hookargs:
                        tr.hookargs['node'] = hex(cl.node(clstart))
                        tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                        hookargs = dict(tr.hookargs)
                    else:
                        hookargs = dict(tr.hookargs)
                        hookargs['node'] = hex(cl.node(clstart))
                        hookargs['node_last'] = hex(cl.node(clend - 1))
                    repo.hook('pretxnchangegroup', throw=True, **hookargs)

                added = [cl.node(r) for r in xrange(clstart, clend)]
                publishing = repo.publishing()
                if srctype in ('push', 'serve'):
                    # Old servers cannot push the boundary themselves.
                    # New servers won't push the boundary if changeset already
                    # exists locally as secret
                    #
                    # We should not use added here but the list of all changes
                    # in the bundle
                    if publishing:
                        phases.advanceboundary(repo, tr, phases.public,
                                               srccontent)
                    else:
                        # Those changesets have been pushed from the
                        # outside, their phases are going to be pushed
                        # alongside. Therefore `targetphase` is
                        # ignored.
                        phases.advanceboundary(repo, tr, phases.draft,
                                               srccontent)
                        phases.retractboundary(repo, tr, phases.draft, added)
                elif srctype != 'strip':
                    # publishing only alters behavior during push
                    #
                    # strip should not touch boundary at all
                    phases.retractboundary(repo, tr, targetphase, added)

                if changesets > 0:
                    if srctype != 'strip':
                        # During strip, branchcache is invalid but the
                        # upcoming call to `destroyed` will repair it.
                        # In other cases we can safely update the cache on
                        # disk.
                        repo.ui.debug('updating the branch cache\n')
                        branchmap.updatecache(repo.filtered('served'))

                    def runhooks():
                        # These hooks run when the lock releases, not when the
                        # transaction closes. So it's possible for the changelog
                        # to have changed since we last saw it.
                        if clstart >= len(repo):
                            return

                        repo.hook("changegroup", **hookargs)

                        for n in added:
                            args = hookargs.copy()
                            args['node'] = hex(n)
                            del args['node_last']
                            repo.hook("incoming", **args)

                        newheads = [h for h in repo.heads()
                                    if h not in oldheads]
                        repo.ui.log("incoming",
                                    "%s incoming changes - new heads: %s\n",
                                    len(added),
                                    ', '.join([hex(c[:6]) for c in newheads]))

                    tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                    lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
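# Editor-added sketch: the typical way the unpacker above is driven. Assumes
# fh is a file-like object positioned at the start of an uncompressed cg1
# payload and that the caller already holds the repo lock:
#
#   cg = cg1unpacker(fh, 'UN')
#   ret = cg.apply(repo, 'unbundle', 'file:///tmp/example.hg')
#   # ret follows the head-count convention documented in apply()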
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs = headertuple
        flags = 0
        return node, p1, p2, deltabase, cs, flags

class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
                                                  numchanges)
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % d)
            dirlog = repo.manifest.dirlog(d)
            if not dirlog.addgroup(self, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))
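# Editor-added note: the three delta header formats differ only by the
# explicit deltabase field (cg2) and the trailing 16-bit flags field (cg3):
#   struct.calcsize("20s20s20s20s")      == 80   (cg1: node, p1, p2, linknode)
#   struct.calcsize("20s20s20s20s20s")   == 100  (cg2: ... plus deltabase)
#   struct.calcsize(">20s20s20s20s20sH") == 102  (cg3: ... plus flags)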
class headerlessfixup(object):
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        if self._h:
            d, self._h = self._h[:n], self._h[n:]
            if len(d) < n:
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)
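# Editor-added sketch: headerlessfixup re-prepends header bytes that a caller
# already consumed while sniffing the stream type, so downstream readers see
# the full stream again.
def _headerlessfixup_example():
    import io
    fh = io.BytesIO('restofstream')
    fixedup = headerlessfixup(fh, 'HG10UN')
    assert fixedup.read(6) == 'HG10UN'        # sniffed bytes come back first
    assert fixedup.read(12) == 'restofstream' # then the underlying stream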
class cg1packer(object):
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    version = '01'
    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        # experimental config: bundle.reorder
        reorder = repo.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None

    def close(self):
        return closechunk()

    def fileheader(self, fname):
        return chunkheader(len(fname)) + fname

    # Extracted both for clarity and for overriding in extensions.
    def _sortgroup(self, revlog, nodelist, lookup):
        """Sort nodes for change group and turn them into revnums."""
        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and self._reorder is None) or self._reorder:
            dag = dagutil.revlogdag(revlog)
            return dag.linearize(set(revlog.rev(n) for n in nodelist))
        else:
            return sorted([revlog.rev(n) for n in nodelist])

    def group(self, nodelist, revlog, lookup, units=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case where firstparent is nullrev the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated; units
        specifies the type of revlog that is touched (changelog, manifest,
        etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self.close()
            return

        revs = self._sortgroup(revlog, nodelist, lookup)

        # add the parent of the first rev
        p = revlog.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        total = len(revs) - 1
        msgbundling = _('bundling')
        for r in xrange(len(revs) - 1):
            if units is not None:
                self._progress(msgbundling, r + 1, unit=units, total=total)
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(revlog.node(curr))
            for c in self.revchunk(revlog, curr, prev, linknode):
                yield c

        if units is not None:
            self._progress(msgbundling, None)
        yield self.close()

    # filter any nodes that claim to be part of the known set
    def prune(self, revlog, missing, commonrevs):
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        """Pack flat manifests into a changegroup stream."""
        assert not dir
        for chunk in self.group(mfnodes, self._repo.manifest,
                                lookuplinknode, units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        return ''

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        repo = self._repo
        cl = repo.changelog

        clrevorder = {}
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()

        # Callback for the changelog, used to collect changed files and
        # manifest nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            clrevorder[x] = len(clrevorder)
            n = c[0]
            # record the first changeset introducing this manifest version
            mfs.setdefault(n, x)
            # Record a complete list of potentially-changed files in
            # this manifest.
            changedfiles.update(c[3])
            return x

        self._verbosenote(_('uncompressed size of bundle content:\n'))
        size = 0
        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (changelog)\n') % size)

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the filelogs
        # are walked in revlog order.
        #
        # When taking the slowpath with reorder=None and the manifest revlog
        # uses generaldelta, the manifest may be walked in the "wrong" order.
        # Without 'clrevorder', we would get an incorrect linkrev (see fix in
        # cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta, so
        # it is only reordered when reorder=True. To handle this case, we
        # simply take the slowpath, which already has the 'clrevorder' logic.
        # This was also fixed in cc0ff93d0c0c.
        fastpathlinkrev = fastpathlinkrev and not self._reorder
        # Treemanifests don't work correctly with fastpathlinkrev
        # either, because we don't discover which directory nodes to
        # send along with files. This could probably be fixed.
        fastpathlinkrev = fastpathlinkrev and (
            'treemanifest' not in repo.requirements)

        for chunk in self.generatemanifests(commonrevs, clrevorder,
                                            fastpathlinkrev, mfs, fnodes):
            yield chunk
        mfs.clear()
        clrevs = set(cl.rev(x) for x in clnodes)

        if not fastpathlinkrev:
            def linknodes(unused, fname):
                return fnodes.get(fname, {})
        else:
            cln = cl.node
            def linknodes(filerevlog, fname):
                llr = filerevlog.linkrev
                fln = filerevlog.node
                revs = ((r, llr(r)) for r in filerevlog)
                return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)

        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

    def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
                          fnodes):
        repo = self._repo
        dirlog = repo.manifest.dirlog
        tmfnodes = {'': mfs}

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def makelookupmflinknode(dir):
            if fastpathlinkrev:
                assert not dir
                return mfs.__getitem__

            def lookupmflinknode(x):
                """Callback for looking up the linknode for manifests.

                Returns the linkrev node for the specified manifest.

                SIDE EFFECT:

                1) fclnodes gets populated with the list of relevant
                   file nodes if we're not using fastpathlinkrev
                2) When treemanifests are in use, collects treemanifest nodes
                   to send

                Note that this means manifests must be completely sent to
                the client before you can trust the list of files and
                treemanifests to send.
                """
                clnode = tmfnodes[dir][x]
                mdata = dirlog(dir).readshallowfast(x)
                for p, n, fl in mdata.iterentries():
                    if fl == 't': # subdirectory manifest
                        subdir = dir + p + '/'
                        tmfclnodes = tmfnodes.setdefault(subdir, {})
                        tmfclnode = tmfclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[tmfclnode]:
                            tmfclnodes[n] = clnode
                    else:
                        f = dir + p
                        fclnodes = fnodes.setdefault(f, {})
                        fclnode = fclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            fclnodes[n] = clnode
                return clnode
            return lookupmflinknode

        size = 0
        while tmfnodes:
            dir = min(tmfnodes)
            nodes = tmfnodes[dir]
            prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
            if not dir or prunednodes:
                for x in self._packmanifests(dir, prunednodes,
                                             makelookupmflinknode(dir)):
                    size += len(x)
                    yield x
            del tmfnodes[dir]
        self._verbosenote(_('%8.i (manifests)\n') % size)
        yield self._manifestsdone()

    # The 'source' parameter is useful for extensions
    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        repo = self._repo
        progress = self._progress
        msgbundling = _('bundling')

        total = len(changedfiles)
        # for progress output
        msgfiles = _('files')
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise error.Abort(_("empty or missing revlog for %s") % fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
            if filenodes:
                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                         total=total)
                h = self.fileheader(fname)
                size = len(h)
                yield h
                for chunk in self.group(filenodes, filerevlog, lookupfilelog):
                    size += len(chunk)
                    yield chunk
                self._verbosenote(_('%8.i %s\n') % (size, fname))
        progress(msgbundling, None)

    def deltaparent(self, revlog, rev, p1, p2, prev):
        return prev

    def revchunk(self, revlog, rev, prev, linknode):
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = self.deltaparent(revlog, rev, p1, p2, prev)

        prefix = ''
        if revlog.iscensored(base) or revlog.iscensored(rev):
            try:
                delta = revlog.revision(node)
            except error.CensoredNodeError as e:
                delta = e.tombstone
            if base == nullrev:
                prefix = mdiff.trivialdiffheader(len(delta))
            else:
                baselen = revlog.rawsize(base)
                prefix = mdiff.replacediffheader(baselen, len(delta))
        elif base == nullrev:
            delta = revlog.revision(node)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        flags = revlog.flags(rev)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # do nothing with basenode, it is implicitly the previous one in HG10
        # do nothing with flags, it is implicitly 0 for cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
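# Editor-added sketch (not upstream code): wiring a packer's generate() stream
# into writechunks(). The commonrevs/clnodes arguments are assumed to come
# from discovery; the real entry points live elsewhere in this module.
def _packtofile_example(ui, repo, commonrevs, clnodes):
    packer = cg1packer(repo)
    chunks = packer.generate(commonrevs, clnodes, False, 'bundle')
    return writechunks(ui, chunks, None)  # spill to a temporary .hg file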
809 class cg2packer(cg1packer):
810 class cg2packer(cg1packer):
810 version = '02'
811 version = '02'
811 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
812 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
812
813
813 def __init__(self, repo, bundlecaps=None):
814 def __init__(self, repo, bundlecaps=None):
814 super(cg2packer, self).__init__(repo, bundlecaps)
815 super(cg2packer, self).__init__(repo, bundlecaps)
815 if self._reorder is None:
816 if self._reorder is None:
816 # Since generaldelta is directly supported by cg2, reordering
817 # Since generaldelta is directly supported by cg2, reordering
817 # generally doesn't help, so we disable it by default (treating
818 # generally doesn't help, so we disable it by default (treating
818 # bundle.reorder=auto just like bundle.reorder=False).
819 # bundle.reorder=auto just like bundle.reorder=False).
819 self._reorder = False
820 self._reorder = False
820
821
821 def deltaparent(self, revlog, rev, p1, p2, prev):
822 def deltaparent(self, revlog, rev, p1, p2, prev):
822 dp = revlog.deltaparent(rev)
823 dp = revlog.deltaparent(rev)
823 if dp == nullrev and revlog.storedeltachains:
824 if dp == nullrev and revlog.storedeltachains:
824 # Avoid sending full revisions when delta parent is null. Pick prev
825 # Avoid sending full revisions when delta parent is null. Pick prev
825 # in that case. It's tempting to pick p1 in this case, as p1 will
826 # in that case. It's tempting to pick p1 in this case, as p1 will
826 # be smaller in the common case. However, computing a delta against
827 # be smaller in the common case. However, computing a delta against
827 # p1 may require resolving the raw text of p1, which could be
828 # p1 may require resolving the raw text of p1, which could be
828 # expensive. The revlog caches should have prev cached, meaning
829 # expensive. The revlog caches should have prev cached, meaning
829 # less CPU for changegroup generation. There is likely room to add
830 # less CPU for changegroup generation. There is likely room to add
830 # a flag and/or config option to control this behavior.
831 # a flag and/or config option to control this behavior.
831 return prev
832 return prev
832 elif dp == nullrev:
833 elif dp == nullrev:
833 # revlog is configured to use full snapshot for a reason,
834 # revlog is configured to use full snapshot for a reason,
834 # stick to full snapshot.
835 # stick to full snapshot.
835 return nullrev
836 return nullrev
836 elif dp not in (p1, p2, prev):
837 elif dp not in (p1, p2, prev):
837 # Pick prev when we can't be sure remote has the base revision.
838 # Pick prev when we can't be sure remote has the base revision.
838 return prev
839 return prev
839 else:
840 else:
840 return dp
841 return dp
841
842
    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # Do nothing with flags; it is implicitly 0 in cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)

class cg3packer(cg2packer):
    version = '03'
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        if dir:
            yield self.fileheader(dir)
        for chunk in self.group(mfnodes, self._repo.manifest.dirlog(dir),
                                lookuplinknode, units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        return self.close()

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        return struct.pack(
            self.deltaheader, node, p1n, p2n, basenode, linknode, flags)

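# Worked example (illustrative, not part of the module): the cg2 and cg3
# delta headers differ only by the trailing big-endian flags field, which is
# why overriding builddeltaheader() is all cg3packer needs for headers.
#
#   >>> import struct
#   >>> struct.calcsize(_CHANGEGROUPV2_DELTA_HEADER)  # five 20-byte nodes
#   100
#   >>> struct.calcsize(_CHANGEGROUPV3_DELTA_HEADER)  # plus an unsigned short
#   102
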
_packermap = {'01': (cg1packer, cg1unpacker),
             # cg2 adds support for exchanging generaldelta
             '02': (cg2packer, cg2unpacker),
             # cg3 adds support for exchanging revlog flags and treemanifests
             '03': (cg3packer, cg3unpacker),
}

def allsupportedversions(ui):
    versions = set(_packermap.keys())
    versions.discard('03')
    if (ui.configbool('experimental', 'changegroup3') or
        ui.configbool('experimental', 'treemanifest')):
        versions.add('03')
    return versions

# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    versions = allsupportedversions(repo.ui)
    if 'treemanifest' in repo.requirements:
        versions.add('03')
    return versions

# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    versions = allsupportedversions(repo.ui)
    if 'treemanifest' in repo.requirements:
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        versions.discard('01')
        versions.discard('02')
        versions.add('03')
    return versions

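# Illustrative sketch (not part of the original module): a hypothetical
# caller negotiating a version with a peer would typically intersect the
# local outgoing versions with whatever the peer advertises and pick the
# newest common one. ``remoteversions`` is an assumed iterable of version
# strings taken from the peer's capabilities.
def _negotiatechangegroupversion(repo, remoteversions):
    common = supportedoutgoingversions(repo) & set(remoteversions)
    if not common:
        raise error.Abort(_('no common changegroup version'))
    # version strings compare lexicographically: '01' < '02' < '03'
    return max(common)
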
def safeversion(repo):
    # Finds the smallest version that it's safe to assume clients of the repo
    # will support. For example, all hg versions that support generaldelta also
    # support changegroup 02.
    versions = supportedoutgoingversions(repo)
    if 'generaldelta' in repo.requirements:
        versions.discard('01')
    assert versions
    return min(versions)

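# Usage sketch (hypothetical): safeversion() suits callers that must emit a
# bundle without knowing the consumer's capabilities, e.g. when writing a
# standalone bundle file:
#
#   version = safeversion(repo)
#   bundler = getbundler(version, repo)
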
def getbundler(version, repo, bundlecaps=None):
    assert version in supportedoutgoingversions(repo)
    return _packermap[version][0](repo, bundlecaps)

def getunbundler(version, fh, alg, extras=None):
    return _packermap[version][1](fh, alg, extras=extras)

def _changegroupinfo(repo, nodes, source):
    if repo.ui.verbose or source == 'bundle':
        repo.ui.status(_("%d changesets found\n") % len(nodes))
    if repo.ui.debugflag:
        repo.ui.debug("list of changesets:\n")
        for node in nodes:
            repo.ui.debug("%s\n" % hex(node))

def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know all linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)

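# Example (illustrative): both routes to the fast path. A caller that has
# already validated linkrevs can pass fastpath=True explicitly; otherwise the
# fast path still triggers for clone-like requests, where every head of the
# unfiltered repo is being pulled and all linkrevs are therefore reachable.
#
#   getsubsetraw(repo, outgoing, bundler, 'serve', fastpath=True)
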
def getsubset(repo, outgoing, bundler, source, fastpath=False):
    gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
    return getunbundler(bundler.version, util.chunkbuffer(gengroup), None,
                        {'clcount': len(outgoing.missing)})

def changegroupsubset(repo, roots, heads, source, version='01'):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the roots and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    This is fairly complex, as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    outgoing = discovery.outgoing(repo, missingroots=roots, missingheads=heads)
    bundler = getbundler(version, repo)
    return getsubset(repo, outgoing, bundler, source)

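# Usage sketch (hypothetical nodes): bundling the history between two points,
# roughly what 'hg bundle --base' boils down to. getchunks() on the returned
# unbundler is assumed here to stream the raw changegroup back out.
#
#   cg = changegroupsubset(repo, [basenode], [tipnode], 'bundle')
#   with open('out.hg', 'wb') as fh:
#       for chunk in cg.getchunks():
#           fh.write(chunk)
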
def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
                           version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing. Returns a raw changegroup generator."""
    if not outgoing.missing:
        return None
    bundler = getbundler(version, repo, bundlecaps)
    return getsubsetraw(repo, outgoing, bundler, source)

def getlocalchangegroup(repo, source, outgoing, bundlecaps=None,
                        version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing."""
    if not outgoing.missing:
        return None
    bundler = getbundler(version, repo, bundlecaps)
    return getsubset(repo, outgoing, bundler, source)

def getchangegroup(repo, source, outgoing, bundlecaps=None,
                   version='01'):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors of common.

    If heads is None, use the local heads. If common is None, use [nullid].

    The nodes in common might not all be known locally due to the way the
    current discovery protocol works.
    """
    return getlocalchangegroup(repo, source, outgoing, bundlecaps=bundlecaps,
                               version=version)

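# Usage sketch (hypothetical): the outgoing set is normally computed from a
# common/heads pair produced by wire discovery; the keyword names below
# follow discovery.outgoing as it is used elsewhere in this module.
#
#   outgoing = discovery.outgoing(repo, commonheads=common, missingheads=heads)
#   cg = getchangegroup(repo, 'pull', outgoing, version='02')
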
def changegroup(repo, basenodes, source):
    # to avoid a race we use changegroupsubset() (issue1320)
    return changegroupsubset(repo, basenodes, repo.heads(), source)

def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    revisions = 0
    files = 0
    for chunkdata in iter(source.filelogheader, {}):
        files += 1
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        repo.ui.progress(_('files'), files, unit=_('files'),
                         total=expectedfiles)
        fl = repo.file(f)
        o = len(fl)
        try:
            if not fl.addgroup(source, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        if f in needfiles:
            needs = needfiles[f]
            for new in xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    repo.ui.progress(_('files'), None)

    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files