##// END OF EJS Templates
changegroup: rename "dh" to the clearer "deltaheads"...
Martin von Zweigbergk -
r32870:b441296f default
parent child Browse files
Show More
@@ -1,1029 +1,1029
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import os
10 import os
11 import struct
11 import struct
12 import tempfile
12 import tempfile
13 import weakref
13 import weakref
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 hex,
17 hex,
18 nullrev,
18 nullrev,
19 short,
19 short,
20 )
20 )
21
21
22 from . import (
22 from . import (
23 dagutil,
23 dagutil,
24 discovery,
24 discovery,
25 error,
25 error,
26 mdiff,
26 mdiff,
27 phases,
27 phases,
28 pycompat,
28 pycompat,
29 util,
29 util,
30 )
30 )
31
31
32 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
32 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
33 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
33 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
34 _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
34 _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
35
35
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    data = stream.read(n)
    if len(data) >= n:
        return data
    raise error.Abort(_("stream ended unexpectedly"
                        " (got %d bytes, expected %d)")
                      % (len(data), n))
44
44
def getchunk(stream):
    """return the next chunk from stream as a string"""
    # Each chunk is framed by a big-endian 4-byte length that includes
    # the 4 header bytes themselves.
    lengthdata = readexactly(stream, 4)
    length = struct.unpack(">l", lengthdata)[0]
    if length > 4:
        return readexactly(stream, length - 4)
    if length:
        # Anything in 1..4 (or negative) cannot frame a valid payload.
        raise error.Abort(_("invalid chunk length %d") % length)
    return ""
54
54
def chunkheader(length):
    """return a changegroup chunk header (string)

    The on-wire length field counts the 4 header bytes plus the payload.
    """
    return struct.pack(">l", 4 + length)
58
58
def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk

    A zero-length chunk terminates the current chunk group.
    """
    return struct.pack(">l", 0)
62
62
def combineresults(results):
    """logic to combine 0 or more addchangegroup results into one

    Each result encodes head movement: 0 means failure, 1 means no head
    change, 1+n / -1-n mean n heads added / removed.
    """
    headdelta = 0
    combined = 1
    for res in results:
        # A single failed changegroup makes the combined result a failure
        # (unless a later head-count adjustment overrides it, below —
        # this mirrors the historical behavior exactly).
        if res == 0:
            combined = 0
            break
        if res > 1:
            headdelta += res - 1
        elif res < -1:
            headdelta += res + 1
    if headdelta:
        combined = headdelta + (1 if headdelta > 0 else -1)
    return combined
81
81
def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        if not filename:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, pycompat.sysstr("wb"))
        elif vfs:
            fh = vfs.open(filename, "wb")
        else:
            # Increase default buffer size because default is usually
            # small (4k is common on Linux).
            fh = open(filename, "wb", 131072)
        # From here on an error must remove the partially written file.
        cleanup = filename
        for chunk in chunks:
            fh.write(chunk)
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
115
115
class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want apply(), which adds the
    changes from the changegroup to a repository.

    To forward a changegroup unmodified to another consumer, use
    getchunks(), which yields changegroup chunks; that is mostly useful
    when the end of the data stream must be detected by observing the
    end of the changegroup.

    deltachunk() is useful only when applying delta data directly; most
    consumers should prefer apply().

    A few other public methods exist. Those are used only for bundlerepo
    and some debug commands - their use is discouraged.
    """
    # Wire format of one delta header and its pre-computed size.
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests
138
138
139 def __init__(self, fh, alg, extras=None):
139 def __init__(self, fh, alg, extras=None):
140 if alg is None:
140 if alg is None:
141 alg = 'UN'
141 alg = 'UN'
142 if alg not in util.compengines.supportedbundletypes:
142 if alg not in util.compengines.supportedbundletypes:
143 raise error.Abort(_('unknown stream compression type: %s')
143 raise error.Abort(_('unknown stream compression type: %s')
144 % alg)
144 % alg)
145 if alg == 'BZ':
145 if alg == 'BZ':
146 alg = '_truncatedBZ'
146 alg = '_truncatedBZ'
147
147
148 compengine = util.compengines.forbundletype(alg)
148 compengine = util.compengines.forbundletype(alg)
149 self._stream = compengine.decompressorreader(fh)
149 self._stream = compengine.decompressorreader(fh)
150 self._type = alg
150 self._type = alg
151 self.extras = extras or {}
151 self.extras = extras or {}
152 self.callback = None
152 self.callback = None
153
153
154 # These methods (compressed, read, seek, tell) all appear to only
154 # These methods (compressed, read, seek, tell) all appear to only
155 # be used by bundlerepo, but it's a little hard to tell.
155 # be used by bundlerepo, but it's a little hard to tell.
156 def compressed(self):
156 def compressed(self):
157 return self._type is not None and self._type != 'UN'
157 return self._type is not None and self._type != 'UN'
158 def read(self, l):
158 def read(self, l):
159 return self._stream.read(l)
159 return self._stream.read(l)
160 def seek(self, pos):
160 def seek(self, pos):
161 return self._stream.seek(pos)
161 return self._stream.seek(pos)
162 def tell(self):
162 def tell(self):
163 return self._stream.tell()
163 return self._stream.tell()
164 def close(self):
164 def close(self):
165 return self._stream.close()
165 return self._stream.close()
166
166
167 def _chunklength(self):
167 def _chunklength(self):
168 d = readexactly(self._stream, 4)
168 d = readexactly(self._stream, 4)
169 l = struct.unpack(">l", d)[0]
169 l = struct.unpack(">l", d)[0]
170 if l <= 4:
170 if l <= 4:
171 if l:
171 if l:
172 raise error.Abort(_("invalid chunk length %d") % l)
172 raise error.Abort(_("invalid chunk length %d") % l)
173 return 0
173 return 0
174 if self.callback:
174 if self.callback:
175 self.callback()
175 self.callback()
176 return l - 4
176 return l - 4
177
177
178 def changelogheader(self):
178 def changelogheader(self):
179 """v10 does not have a changelog header chunk"""
179 """v10 does not have a changelog header chunk"""
180 return {}
180 return {}
181
181
182 def manifestheader(self):
182 def manifestheader(self):
183 """v10 does not have a manifest header chunk"""
183 """v10 does not have a manifest header chunk"""
184 return {}
184 return {}
185
185
186 def filelogheader(self):
186 def filelogheader(self):
187 """return the header of the filelogs chunk, v10 only has the filename"""
187 """return the header of the filelogs chunk, v10 only has the filename"""
188 l = self._chunklength()
188 l = self._chunklength()
189 if not l:
189 if not l:
190 return {}
190 return {}
191 fname = readexactly(self._stream, l)
191 fname = readexactly(self._stream, l)
192 return {'filename': fname}
192 return {'filename': fname}
193
193
194 def _deltaheader(self, headertuple, prevnode):
194 def _deltaheader(self, headertuple, prevnode):
195 node, p1, p2, cs = headertuple
195 node, p1, p2, cs = headertuple
196 if prevnode is None:
196 if prevnode is None:
197 deltabase = p1
197 deltabase = p1
198 else:
198 else:
199 deltabase = prevnode
199 deltabase = prevnode
200 flags = 0
200 flags = 0
201 return node, p1, p2, deltabase, cs, flags
201 return node, p1, p2, deltabase, cs, flags
202
202
203 def deltachunk(self, prevnode):
203 def deltachunk(self, prevnode):
204 l = self._chunklength()
204 l = self._chunklength()
205 if not l:
205 if not l:
206 return {}
206 return {}
207 headerdata = readexactly(self._stream, self.deltaheadersize)
207 headerdata = readexactly(self._stream, self.deltaheadersize)
208 header = struct.unpack(self.deltaheader, headerdata)
208 header = struct.unpack(self.deltaheader, headerdata)
209 delta = readexactly(self._stream, l - self.deltaheadersize)
209 delta = readexactly(self._stream, l - self.deltaheadersize)
210 node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
210 node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
211 return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
211 return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
212 'deltabase': deltabase, 'delta': delta, 'flags': flags}
212 'deltabase': deltabase, 'delta': delta, 'flags': flags}
213
213
214 def getchunks(self):
214 def getchunks(self):
215 """returns all the chunks contains in the bundle
215 """returns all the chunks contains in the bundle
216
216
217 Used when you need to forward the binary stream to a file or another
217 Used when you need to forward the binary stream to a file or another
218 network API. To do so, it parse the changegroup data, otherwise it will
218 network API. To do so, it parse the changegroup data, otherwise it will
219 block in case of sshrepo because it don't know the end of the stream.
219 block in case of sshrepo because it don't know the end of the stream.
220 """
220 """
221 # an empty chunkgroup is the end of the changegroup
221 # an empty chunkgroup is the end of the changegroup
222 # a changegroup has at least 2 chunkgroups (changelog and manifest).
222 # a changegroup has at least 2 chunkgroups (changelog and manifest).
223 # after that, changegroup versions 1 and 2 have a series of groups
223 # after that, changegroup versions 1 and 2 have a series of groups
224 # with one group per file. changegroup 3 has a series of directory
224 # with one group per file. changegroup 3 has a series of directory
225 # manifests before the files.
225 # manifests before the files.
226 count = 0
226 count = 0
227 emptycount = 0
227 emptycount = 0
228 while emptycount < self._grouplistcount:
228 while emptycount < self._grouplistcount:
229 empty = True
229 empty = True
230 count += 1
230 count += 1
231 while True:
231 while True:
232 chunk = getchunk(self)
232 chunk = getchunk(self)
233 if not chunk:
233 if not chunk:
234 if empty and count > 2:
234 if empty and count > 2:
235 emptycount += 1
235 emptycount += 1
236 break
236 break
237 empty = False
237 empty = False
238 yield chunkheader(len(chunk))
238 yield chunkheader(len(chunk))
239 pos = 0
239 pos = 0
240 while pos < len(chunk):
240 while pos < len(chunk):
241 next = pos + 2**20
241 next = pos + 2**20
242 yield chunk[pos:next]
242 yield chunk[pos:next]
243 pos = next
243 pos = next
244 yield closechunk()
244 yield closechunk()
245
245
246 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
246 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
247 # We know that we'll never have more manifests than we had
247 # We know that we'll never have more manifests than we had
248 # changesets.
248 # changesets.
249 self.callback = prog(_('manifests'), numchanges)
249 self.callback = prog(_('manifests'), numchanges)
250 # no need to check for empty manifest group here:
250 # no need to check for empty manifest group here:
251 # if the result of the merge of 1 and 2 is the same in 3 and 4,
251 # if the result of the merge of 1 and 2 is the same in 3 and 4,
252 # no new manifest will be created and the manifest group will
252 # no new manifest will be created and the manifest group will
253 # be empty during the pull
253 # be empty during the pull
254 self.manifestheader()
254 self.manifestheader()
255 repo.manifestlog._revlog.addgroup(self, revmap, trp)
255 repo.manifestlog._revlog.addgroup(self, revmap, trp)
256 repo.ui.progress(_('manifests'), None)
256 repo.ui.progress(_('manifests'), None)
257 self.callback = None
257 self.callback = None
258
258
259 def apply(self, repo, srctype, url, emptyok=False,
259 def apply(self, repo, srctype, url, emptyok=False,
260 targetphase=phases.draft, expectedtotal=None):
260 targetphase=phases.draft, expectedtotal=None):
261 """Add the changegroup returned by source.read() to this repo.
261 """Add the changegroup returned by source.read() to this repo.
262 srctype is a string like 'push', 'pull', or 'unbundle'. url is
262 srctype is a string like 'push', 'pull', or 'unbundle'. url is
263 the URL of the repo where this changegroup is coming from.
263 the URL of the repo where this changegroup is coming from.
264
264
265 Return an integer summarizing the change to this repo:
265 Return an integer summarizing the change to this repo:
266 - nothing changed or no source: 0
266 - nothing changed or no source: 0
267 - more heads than before: 1+added heads (2..n)
267 - more heads than before: 1+added heads (2..n)
268 - fewer heads than before: -1-removed heads (-2..-n)
268 - fewer heads than before: -1-removed heads (-2..-n)
269 - number of heads stays the same: 1
269 - number of heads stays the same: 1
270 """
270 """
271 repo = repo.unfiltered()
271 repo = repo.unfiltered()
272 def csmap(x):
272 def csmap(x):
273 repo.ui.debug("add changeset %s\n" % short(x))
273 repo.ui.debug("add changeset %s\n" % short(x))
274 return len(cl)
274 return len(cl)
275
275
276 def revmap(x):
276 def revmap(x):
277 return cl.rev(x)
277 return cl.rev(x)
278
278
279 changesets = files = revisions = 0
279 changesets = files = revisions = 0
280
280
281 try:
281 try:
282 with repo.transaction("\n".join([srctype,
282 with repo.transaction("\n".join([srctype,
283 util.hidepassword(url)])) as tr:
283 util.hidepassword(url)])) as tr:
284 # The transaction could have been created before and already
284 # The transaction could have been created before and already
285 # carries source information. In this case we use the top
285 # carries source information. In this case we use the top
286 # level data. We overwrite the argument because we need to use
286 # level data. We overwrite the argument because we need to use
287 # the top level value (if they exist) in this function.
287 # the top level value (if they exist) in this function.
288 srctype = tr.hookargs.setdefault('source', srctype)
288 srctype = tr.hookargs.setdefault('source', srctype)
289 url = tr.hookargs.setdefault('url', url)
289 url = tr.hookargs.setdefault('url', url)
290 repo.hook('prechangegroup', throw=True, **tr.hookargs)
290 repo.hook('prechangegroup', throw=True, **tr.hookargs)
291
291
292 # write changelog data to temp files so concurrent readers
292 # write changelog data to temp files so concurrent readers
293 # will not see an inconsistent view
293 # will not see an inconsistent view
294 cl = repo.changelog
294 cl = repo.changelog
295 cl.delayupdate(tr)
295 cl.delayupdate(tr)
296 oldheads = set(cl.heads())
296 oldheads = set(cl.heads())
297
297
298 trp = weakref.proxy(tr)
298 trp = weakref.proxy(tr)
299 # pull off the changeset group
299 # pull off the changeset group
300 repo.ui.status(_("adding changesets\n"))
300 repo.ui.status(_("adding changesets\n"))
301 clstart = len(cl)
301 clstart = len(cl)
302 class prog(object):
302 class prog(object):
303 def __init__(self, step, total):
303 def __init__(self, step, total):
304 self._step = step
304 self._step = step
305 self._total = total
305 self._total = total
306 self._count = 1
306 self._count = 1
307 def __call__(self):
307 def __call__(self):
308 repo.ui.progress(self._step, self._count,
308 repo.ui.progress(self._step, self._count,
309 unit=_('chunks'), total=self._total)
309 unit=_('chunks'), total=self._total)
310 self._count += 1
310 self._count += 1
311 self.callback = prog(_('changesets'), expectedtotal)
311 self.callback = prog(_('changesets'), expectedtotal)
312
312
313 efiles = set()
313 efiles = set()
314 def onchangelog(cl, node):
314 def onchangelog(cl, node):
315 efiles.update(cl.readfiles(node))
315 efiles.update(cl.readfiles(node))
316
316
317 self.changelogheader()
317 self.changelogheader()
318 cgnodes = cl.addgroup(self, csmap, trp,
318 cgnodes = cl.addgroup(self, csmap, trp,
319 addrevisioncb=onchangelog)
319 addrevisioncb=onchangelog)
320 efiles = len(efiles)
320 efiles = len(efiles)
321
321
322 if not (cgnodes or emptyok):
322 if not (cgnodes or emptyok):
323 raise error.Abort(_("received changelog group is empty"))
323 raise error.Abort(_("received changelog group is empty"))
324 clend = len(cl)
324 clend = len(cl)
325 changesets = clend - clstart
325 changesets = clend - clstart
326 repo.ui.progress(_('changesets'), None)
326 repo.ui.progress(_('changesets'), None)
327 self.callback = None
327 self.callback = None
328
328
329 # pull off the manifest group
329 # pull off the manifest group
330 repo.ui.status(_("adding manifests\n"))
330 repo.ui.status(_("adding manifests\n"))
331 self._unpackmanifests(repo, revmap, trp, prog, changesets)
331 self._unpackmanifests(repo, revmap, trp, prog, changesets)
332
332
333 needfiles = {}
333 needfiles = {}
334 if repo.ui.configbool('server', 'validate', default=False):
334 if repo.ui.configbool('server', 'validate', default=False):
335 cl = repo.changelog
335 cl = repo.changelog
336 ml = repo.manifestlog
336 ml = repo.manifestlog
337 # validate incoming csets have their manifests
337 # validate incoming csets have their manifests
338 for cset in xrange(clstart, clend):
338 for cset in xrange(clstart, clend):
339 mfnode = cl.changelogrevision(cset).manifest
339 mfnode = cl.changelogrevision(cset).manifest
340 mfest = ml[mfnode].readdelta()
340 mfest = ml[mfnode].readdelta()
341 # store file cgnodes we must see
341 # store file cgnodes we must see
342 for f, n in mfest.iteritems():
342 for f, n in mfest.iteritems():
343 needfiles.setdefault(f, set()).add(n)
343 needfiles.setdefault(f, set()).add(n)
344
344
345 # process the files
345 # process the files
346 repo.ui.status(_("adding file changes\n"))
346 repo.ui.status(_("adding file changes\n"))
347 newrevs, newfiles = _addchangegroupfiles(
347 newrevs, newfiles = _addchangegroupfiles(
348 repo, self, revmap, trp, efiles, needfiles)
348 repo, self, revmap, trp, efiles, needfiles)
349 revisions += newrevs
349 revisions += newrevs
350 files += newfiles
350 files += newfiles
351
351
352 dh = 0
352 deltaheads = 0
353 if oldheads:
353 if oldheads:
354 heads = cl.heads()
354 heads = cl.heads()
355 dh = len(heads) - len(oldheads)
355 deltaheads = len(heads) - len(oldheads)
356 for h in heads:
356 for h in heads:
357 if h not in oldheads and repo[h].closesbranch():
357 if h not in oldheads and repo[h].closesbranch():
358 dh -= 1
358 deltaheads -= 1
359 htext = ""
359 htext = ""
360 if dh:
360 if deltaheads:
361 htext = _(" (%+d heads)") % dh
361 htext = _(" (%+d heads)") % deltaheads
362
362
363 repo.ui.status(_("added %d changesets"
363 repo.ui.status(_("added %d changesets"
364 " with %d changes to %d files%s\n")
364 " with %d changes to %d files%s\n")
365 % (changesets, revisions, files, htext))
365 % (changesets, revisions, files, htext))
366 repo.invalidatevolatilesets()
366 repo.invalidatevolatilesets()
367
367
368 if changesets > 0:
368 if changesets > 0:
369 if 'node' not in tr.hookargs:
369 if 'node' not in tr.hookargs:
370 tr.hookargs['node'] = hex(cl.node(clstart))
370 tr.hookargs['node'] = hex(cl.node(clstart))
371 tr.hookargs['node_last'] = hex(cl.node(clend - 1))
371 tr.hookargs['node_last'] = hex(cl.node(clend - 1))
372 hookargs = dict(tr.hookargs)
372 hookargs = dict(tr.hookargs)
373 else:
373 else:
374 hookargs = dict(tr.hookargs)
374 hookargs = dict(tr.hookargs)
375 hookargs['node'] = hex(cl.node(clstart))
375 hookargs['node'] = hex(cl.node(clstart))
376 hookargs['node_last'] = hex(cl.node(clend - 1))
376 hookargs['node_last'] = hex(cl.node(clend - 1))
377 repo.hook('pretxnchangegroup', throw=True, **hookargs)
377 repo.hook('pretxnchangegroup', throw=True, **hookargs)
378
378
379 added = [cl.node(r) for r in xrange(clstart, clend)]
379 added = [cl.node(r) for r in xrange(clstart, clend)]
380 publishing = repo.publishing()
380 publishing = repo.publishing()
381 if srctype in ('push', 'serve'):
381 if srctype in ('push', 'serve'):
382 # Old servers can not push the boundary themselves.
382 # Old servers can not push the boundary themselves.
383 # New servers won't push the boundary if changeset already
383 # New servers won't push the boundary if changeset already
384 # exists locally as secret
384 # exists locally as secret
385 #
385 #
386 # We should not use added here but the list of all change in
386 # We should not use added here but the list of all change in
387 # the bundle
387 # the bundle
388 if publishing:
388 if publishing:
389 phases.advanceboundary(repo, tr, phases.public, cgnodes)
389 phases.advanceboundary(repo, tr, phases.public, cgnodes)
390 else:
390 else:
391 # Those changesets have been pushed from the
391 # Those changesets have been pushed from the
392 # outside, their phases are going to be pushed
392 # outside, their phases are going to be pushed
393 # alongside. Therefor `targetphase` is
393 # alongside. Therefor `targetphase` is
394 # ignored.
394 # ignored.
395 phases.advanceboundary(repo, tr, phases.draft, cgnodes)
395 phases.advanceboundary(repo, tr, phases.draft, cgnodes)
396 phases.retractboundary(repo, tr, phases.draft, added)
396 phases.retractboundary(repo, tr, phases.draft, added)
397 elif srctype != 'strip':
397 elif srctype != 'strip':
398 # publishing only alter behavior during push
398 # publishing only alter behavior during push
399 #
399 #
400 # strip should not touch boundary at all
400 # strip should not touch boundary at all
401 phases.retractboundary(repo, tr, targetphase, added)
401 phases.retractboundary(repo, tr, targetphase, added)
402
402
403 if changesets > 0:
403 if changesets > 0:
404
404
405 def runhooks():
405 def runhooks():
406 # These hooks run when the lock releases, not when the
406 # These hooks run when the lock releases, not when the
407 # transaction closes. So it's possible for the changelog
407 # transaction closes. So it's possible for the changelog
408 # to have changed since we last saw it.
408 # to have changed since we last saw it.
409 if clstart >= len(repo):
409 if clstart >= len(repo):
410 return
410 return
411
411
412 repo.hook("changegroup", **hookargs)
412 repo.hook("changegroup", **hookargs)
413
413
414 for n in added:
414 for n in added:
415 args = hookargs.copy()
415 args = hookargs.copy()
416 args['node'] = hex(n)
416 args['node'] = hex(n)
417 del args['node_last']
417 del args['node_last']
418 repo.hook("incoming", **args)
418 repo.hook("incoming", **args)
419
419
420 newheads = [h for h in repo.heads()
420 newheads = [h for h in repo.heads()
421 if h not in oldheads]
421 if h not in oldheads]
422 repo.ui.log("incoming",
422 repo.ui.log("incoming",
423 "%s incoming changes - new heads: %s\n",
423 "%s incoming changes - new heads: %s\n",
424 len(added),
424 len(added),
425 ', '.join([hex(c[:6]) for c in newheads]))
425 ', '.join([hex(c[:6]) for c in newheads]))
426
426
427 tr.addpostclose('changegroup-runhooks-%020i' % clstart,
427 tr.addpostclose('changegroup-runhooks-%020i' % clstart,
428 lambda tr: repo._afterlock(runhooks))
428 lambda tr: repo._afterlock(runhooks))
429 finally:
429 finally:
430 repo.ui.flush()
430 repo.ui.flush()
431 # never return 0 here:
431 # never return 0 here:
432 if dh < 0:
432 if deltaheads < 0:
433 return dh - 1
433 return deltaheads - 1
434 else:
434 else:
435 return dh + 1
435 return deltaheads + 1
436
436
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header format
    is slightly different. All other features about the data remain the
    same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        # The delta base is explicit in the header, so prevnode is unused.
        node, p1, p2, deltabase, cs = headertuple
        return node, p1, p2, deltabase, cs, 0
452
452
class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. The revlog flags travel in the delta header, and an empty
    chunk separates the manifests from the files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        # Everything, including the revlog flags, is explicit in the
        # header: (node, p1, p2, deltabase, cs, flags).
        return headertuple

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
                                                  numchanges)
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            dirname = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % dirname)
            dirlog = repo.manifestlog._revlog.dirlog(dirname)
            if not dirlog.addgroup(self, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))
479
479
class headerlessfixup(object):
    """File-like wrapper that re-prepends already-consumed header bytes.

    Reads are served from the saved header ``h`` first, then fall
    through to the underlying stream ``fh``.
    """
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh

    def read(self, n):
        if self._h:
            data, self._h = self._h[:n], self._h[n:]
            if len(data) < n:
                data += readexactly(self._fh, n - len(data))
            return data
        return readexactly(self._fh, n)
491
491
class cg1packer(object):
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    version = '01'

    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle. While
        bundlecaps is unused in core Mercurial, extensions rely on this
        feature to communicate capabilities to customize the changegroup
        packer.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        # experimental config: bundle.reorder
        reorder = repo.ui.config('bundle', 'reorder', 'auto')
        self._reorder = None if reorder == 'auto' else util.parsebool(reorder)
        self._repo = repo
        self._progress = repo.ui.progress
        if repo.ui.verbose and not repo.ui.debugflag:
            self._verbosenote = repo.ui.note
        else:
            self._verbosenote = lambda s: None
520
520
521 def close(self):
521 def close(self):
522 return closechunk()
522 return closechunk()
523
523
524 def fileheader(self, fname):
524 def fileheader(self, fname):
525 return chunkheader(len(fname)) + fname
525 return chunkheader(len(fname)) + fname
526
526
527 # Extracted both for clarity and for overriding in extensions.
527 # Extracted both for clarity and for overriding in extensions.
528 def _sortgroup(self, revlog, nodelist, lookup):
528 def _sortgroup(self, revlog, nodelist, lookup):
529 """Sort nodes for change group and turn them into revnums."""
529 """Sort nodes for change group and turn them into revnums."""
530 # for generaldelta revlogs, we linearize the revs; this will both be
530 # for generaldelta revlogs, we linearize the revs; this will both be
531 # much quicker and generate a much smaller bundle
531 # much quicker and generate a much smaller bundle
532 if (revlog._generaldelta and self._reorder is None) or self._reorder:
532 if (revlog._generaldelta and self._reorder is None) or self._reorder:
533 dag = dagutil.revlogdag(revlog)
533 dag = dagutil.revlogdag(revlog)
534 return dag.linearize(set(revlog.rev(n) for n in nodelist))
534 return dag.linearize(set(revlog.rev(n) for n in nodelist))
535 else:
535 else:
536 return sorted([revlog.rev(n) for n in nodelist])
536 return sorted([revlog.rev(n) for n in nodelist])
537
537
538 def group(self, nodelist, revlog, lookup, units=None):
538 def group(self, nodelist, revlog, lookup, units=None):
539 """Calculate a delta group, yielding a sequence of changegroup chunks
539 """Calculate a delta group, yielding a sequence of changegroup chunks
540 (strings).
540 (strings).
541
541
542 Given a list of changeset revs, return a set of deltas and
542 Given a list of changeset revs, return a set of deltas and
543 metadata corresponding to nodes. The first delta is
543 metadata corresponding to nodes. The first delta is
544 first parent(nodelist[0]) -> nodelist[0], the receiver is
544 first parent(nodelist[0]) -> nodelist[0], the receiver is
545 guaranteed to have this parent as it has all history before
545 guaranteed to have this parent as it has all history before
546 these changesets. In the case firstparent is nullrev the
546 these changesets. In the case firstparent is nullrev the
547 changegroup starts with a full revision.
547 changegroup starts with a full revision.
548
548
549 If units is not None, progress detail will be generated, units specifies
549 If units is not None, progress detail will be generated, units specifies
550 the type of revlog that is touched (changelog, manifest, etc.).
550 the type of revlog that is touched (changelog, manifest, etc.).
551 """
551 """
552 # if we don't have any revisions touched by these changesets, bail
552 # if we don't have any revisions touched by these changesets, bail
553 if len(nodelist) == 0:
553 if len(nodelist) == 0:
554 yield self.close()
554 yield self.close()
555 return
555 return
556
556
557 revs = self._sortgroup(revlog, nodelist, lookup)
557 revs = self._sortgroup(revlog, nodelist, lookup)
558
558
559 # add the parent of the first rev
559 # add the parent of the first rev
560 p = revlog.parentrevs(revs[0])[0]
560 p = revlog.parentrevs(revs[0])[0]
561 revs.insert(0, p)
561 revs.insert(0, p)
562
562
563 # build deltas
563 # build deltas
564 total = len(revs) - 1
564 total = len(revs) - 1
565 msgbundling = _('bundling')
565 msgbundling = _('bundling')
566 for r in xrange(len(revs) - 1):
566 for r in xrange(len(revs) - 1):
567 if units is not None:
567 if units is not None:
568 self._progress(msgbundling, r + 1, unit=units, total=total)
568 self._progress(msgbundling, r + 1, unit=units, total=total)
569 prev, curr = revs[r], revs[r + 1]
569 prev, curr = revs[r], revs[r + 1]
570 linknode = lookup(revlog.node(curr))
570 linknode = lookup(revlog.node(curr))
571 for c in self.revchunk(revlog, curr, prev, linknode):
571 for c in self.revchunk(revlog, curr, prev, linknode):
572 yield c
572 yield c
573
573
574 if units is not None:
574 if units is not None:
575 self._progress(msgbundling, None)
575 self._progress(msgbundling, None)
576 yield self.close()
576 yield self.close()
577
577
578 # filter any nodes that claim to be part of the known set
578 # filter any nodes that claim to be part of the known set
579 def prune(self, revlog, missing, commonrevs):
579 def prune(self, revlog, missing, commonrevs):
580 rr, rl = revlog.rev, revlog.linkrev
580 rr, rl = revlog.rev, revlog.linkrev
581 return [n for n in missing if rl(rr(n)) not in commonrevs]
581 return [n for n in missing if rl(rr(n)) not in commonrevs]
582
582
583 def _packmanifests(self, dir, mfnodes, lookuplinknode):
583 def _packmanifests(self, dir, mfnodes, lookuplinknode):
584 """Pack flat manifests into a changegroup stream."""
584 """Pack flat manifests into a changegroup stream."""
585 assert not dir
585 assert not dir
586 for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
586 for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
587 lookuplinknode, units=_('manifests')):
587 lookuplinknode, units=_('manifests')):
588 yield chunk
588 yield chunk
589
589
590 def _manifestsdone(self):
590 def _manifestsdone(self):
591 return ''
591 return ''
592
592
593 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
593 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
594 '''yield a sequence of changegroup chunks (strings)'''
594 '''yield a sequence of changegroup chunks (strings)'''
595 repo = self._repo
595 repo = self._repo
596 cl = repo.changelog
596 cl = repo.changelog
597
597
598 clrevorder = {}
598 clrevorder = {}
599 mfs = {} # needed manifests
599 mfs = {} # needed manifests
600 fnodes = {} # needed file nodes
600 fnodes = {} # needed file nodes
601 changedfiles = set()
601 changedfiles = set()
602
602
603 # Callback for the changelog, used to collect changed files and manifest
603 # Callback for the changelog, used to collect changed files and manifest
604 # nodes.
604 # nodes.
605 # Returns the linkrev node (identity in the changelog case).
605 # Returns the linkrev node (identity in the changelog case).
606 def lookupcl(x):
606 def lookupcl(x):
607 c = cl.read(x)
607 c = cl.read(x)
608 clrevorder[x] = len(clrevorder)
608 clrevorder[x] = len(clrevorder)
609 n = c[0]
609 n = c[0]
610 # record the first changeset introducing this manifest version
610 # record the first changeset introducing this manifest version
611 mfs.setdefault(n, x)
611 mfs.setdefault(n, x)
612 # Record a complete list of potentially-changed files in
612 # Record a complete list of potentially-changed files in
613 # this manifest.
613 # this manifest.
614 changedfiles.update(c[3])
614 changedfiles.update(c[3])
615 return x
615 return x
616
616
617 self._verbosenote(_('uncompressed size of bundle content:\n'))
617 self._verbosenote(_('uncompressed size of bundle content:\n'))
618 size = 0
618 size = 0
619 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
619 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
620 size += len(chunk)
620 size += len(chunk)
621 yield chunk
621 yield chunk
622 self._verbosenote(_('%8.i (changelog)\n') % size)
622 self._verbosenote(_('%8.i (changelog)\n') % size)
623
623
624 # We need to make sure that the linkrev in the changegroup refers to
624 # We need to make sure that the linkrev in the changegroup refers to
625 # the first changeset that introduced the manifest or file revision.
625 # the first changeset that introduced the manifest or file revision.
626 # The fastpath is usually safer than the slowpath, because the filelogs
626 # The fastpath is usually safer than the slowpath, because the filelogs
627 # are walked in revlog order.
627 # are walked in revlog order.
628 #
628 #
629 # When taking the slowpath with reorder=None and the manifest revlog
629 # When taking the slowpath with reorder=None and the manifest revlog
630 # uses generaldelta, the manifest may be walked in the "wrong" order.
630 # uses generaldelta, the manifest may be walked in the "wrong" order.
631 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
631 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
632 # cc0ff93d0c0c).
632 # cc0ff93d0c0c).
633 #
633 #
634 # When taking the fastpath, we are only vulnerable to reordering
634 # When taking the fastpath, we are only vulnerable to reordering
635 # of the changelog itself. The changelog never uses generaldelta, so
635 # of the changelog itself. The changelog never uses generaldelta, so
636 # it is only reordered when reorder=True. To handle this case, we
636 # it is only reordered when reorder=True. To handle this case, we
637 # simply take the slowpath, which already has the 'clrevorder' logic.
637 # simply take the slowpath, which already has the 'clrevorder' logic.
638 # This was also fixed in cc0ff93d0c0c.
638 # This was also fixed in cc0ff93d0c0c.
639 fastpathlinkrev = fastpathlinkrev and not self._reorder
639 fastpathlinkrev = fastpathlinkrev and not self._reorder
640 # Treemanifests don't work correctly with fastpathlinkrev
640 # Treemanifests don't work correctly with fastpathlinkrev
641 # either, because we don't discover which directory nodes to
641 # either, because we don't discover which directory nodes to
642 # send along with files. This could probably be fixed.
642 # send along with files. This could probably be fixed.
643 fastpathlinkrev = fastpathlinkrev and (
643 fastpathlinkrev = fastpathlinkrev and (
644 'treemanifest' not in repo.requirements)
644 'treemanifest' not in repo.requirements)
645
645
646 for chunk in self.generatemanifests(commonrevs, clrevorder,
646 for chunk in self.generatemanifests(commonrevs, clrevorder,
647 fastpathlinkrev, mfs, fnodes):
647 fastpathlinkrev, mfs, fnodes):
648 yield chunk
648 yield chunk
649 mfs.clear()
649 mfs.clear()
650 clrevs = set(cl.rev(x) for x in clnodes)
650 clrevs = set(cl.rev(x) for x in clnodes)
651
651
652 if not fastpathlinkrev:
652 if not fastpathlinkrev:
653 def linknodes(unused, fname):
653 def linknodes(unused, fname):
654 return fnodes.get(fname, {})
654 return fnodes.get(fname, {})
655 else:
655 else:
656 cln = cl.node
656 cln = cl.node
657 def linknodes(filerevlog, fname):
657 def linknodes(filerevlog, fname):
658 llr = filerevlog.linkrev
658 llr = filerevlog.linkrev
659 fln = filerevlog.node
659 fln = filerevlog.node
660 revs = ((r, llr(r)) for r in filerevlog)
660 revs = ((r, llr(r)) for r in filerevlog)
661 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
661 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
662
662
663 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
663 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
664 source):
664 source):
665 yield chunk
665 yield chunk
666
666
667 yield self.close()
667 yield self.close()
668
668
669 if clnodes:
669 if clnodes:
670 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
670 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
671
671
672 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
672 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
673 fnodes):
673 fnodes):
674 repo = self._repo
674 repo = self._repo
675 mfl = repo.manifestlog
675 mfl = repo.manifestlog
676 dirlog = mfl._revlog.dirlog
676 dirlog = mfl._revlog.dirlog
677 tmfnodes = {'': mfs}
677 tmfnodes = {'': mfs}
678
678
679 # Callback for the manifest, used to collect linkrevs for filelog
679 # Callback for the manifest, used to collect linkrevs for filelog
680 # revisions.
680 # revisions.
681 # Returns the linkrev node (collected in lookupcl).
681 # Returns the linkrev node (collected in lookupcl).
682 def makelookupmflinknode(dir):
682 def makelookupmflinknode(dir):
683 if fastpathlinkrev:
683 if fastpathlinkrev:
684 assert not dir
684 assert not dir
685 return mfs.__getitem__
685 return mfs.__getitem__
686
686
687 def lookupmflinknode(x):
687 def lookupmflinknode(x):
688 """Callback for looking up the linknode for manifests.
688 """Callback for looking up the linknode for manifests.
689
689
690 Returns the linkrev node for the specified manifest.
690 Returns the linkrev node for the specified manifest.
691
691
692 SIDE EFFECT:
692 SIDE EFFECT:
693
693
694 1) fclnodes gets populated with the list of relevant
694 1) fclnodes gets populated with the list of relevant
695 file nodes if we're not using fastpathlinkrev
695 file nodes if we're not using fastpathlinkrev
696 2) When treemanifests are in use, collects treemanifest nodes
696 2) When treemanifests are in use, collects treemanifest nodes
697 to send
697 to send
698
698
699 Note that this means manifests must be completely sent to
699 Note that this means manifests must be completely sent to
700 the client before you can trust the list of files and
700 the client before you can trust the list of files and
701 treemanifests to send.
701 treemanifests to send.
702 """
702 """
703 clnode = tmfnodes[dir][x]
703 clnode = tmfnodes[dir][x]
704 mdata = mfl.get(dir, x).readfast(shallow=True)
704 mdata = mfl.get(dir, x).readfast(shallow=True)
705 for p, n, fl in mdata.iterentries():
705 for p, n, fl in mdata.iterentries():
706 if fl == 't': # subdirectory manifest
706 if fl == 't': # subdirectory manifest
707 subdir = dir + p + '/'
707 subdir = dir + p + '/'
708 tmfclnodes = tmfnodes.setdefault(subdir, {})
708 tmfclnodes = tmfnodes.setdefault(subdir, {})
709 tmfclnode = tmfclnodes.setdefault(n, clnode)
709 tmfclnode = tmfclnodes.setdefault(n, clnode)
710 if clrevorder[clnode] < clrevorder[tmfclnode]:
710 if clrevorder[clnode] < clrevorder[tmfclnode]:
711 tmfclnodes[n] = clnode
711 tmfclnodes[n] = clnode
712 else:
712 else:
713 f = dir + p
713 f = dir + p
714 fclnodes = fnodes.setdefault(f, {})
714 fclnodes = fnodes.setdefault(f, {})
715 fclnode = fclnodes.setdefault(n, clnode)
715 fclnode = fclnodes.setdefault(n, clnode)
716 if clrevorder[clnode] < clrevorder[fclnode]:
716 if clrevorder[clnode] < clrevorder[fclnode]:
717 fclnodes[n] = clnode
717 fclnodes[n] = clnode
718 return clnode
718 return clnode
719 return lookupmflinknode
719 return lookupmflinknode
720
720
721 size = 0
721 size = 0
722 while tmfnodes:
722 while tmfnodes:
723 dir = min(tmfnodes)
723 dir = min(tmfnodes)
724 nodes = tmfnodes[dir]
724 nodes = tmfnodes[dir]
725 prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
725 prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
726 if not dir or prunednodes:
726 if not dir or prunednodes:
727 for x in self._packmanifests(dir, prunednodes,
727 for x in self._packmanifests(dir, prunednodes,
728 makelookupmflinknode(dir)):
728 makelookupmflinknode(dir)):
729 size += len(x)
729 size += len(x)
730 yield x
730 yield x
731 del tmfnodes[dir]
731 del tmfnodes[dir]
732 self._verbosenote(_('%8.i (manifests)\n') % size)
732 self._verbosenote(_('%8.i (manifests)\n') % size)
733 yield self._manifestsdone()
733 yield self._manifestsdone()
734
734
735 # The 'source' parameter is useful for extensions
735 # The 'source' parameter is useful for extensions
736 def generatefiles(self, changedfiles, linknodes, commonrevs, source):
736 def generatefiles(self, changedfiles, linknodes, commonrevs, source):
737 repo = self._repo
737 repo = self._repo
738 progress = self._progress
738 progress = self._progress
739 msgbundling = _('bundling')
739 msgbundling = _('bundling')
740
740
741 total = len(changedfiles)
741 total = len(changedfiles)
742 # for progress output
742 # for progress output
743 msgfiles = _('files')
743 msgfiles = _('files')
744 for i, fname in enumerate(sorted(changedfiles)):
744 for i, fname in enumerate(sorted(changedfiles)):
745 filerevlog = repo.file(fname)
745 filerevlog = repo.file(fname)
746 if not filerevlog:
746 if not filerevlog:
747 raise error.Abort(_("empty or missing revlog for %s") % fname)
747 raise error.Abort(_("empty or missing revlog for %s") % fname)
748
748
749 linkrevnodes = linknodes(filerevlog, fname)
749 linkrevnodes = linknodes(filerevlog, fname)
750 # Lookup for filenodes, we collected the linkrev nodes above in the
750 # Lookup for filenodes, we collected the linkrev nodes above in the
751 # fastpath case and with lookupmf in the slowpath case.
751 # fastpath case and with lookupmf in the slowpath case.
752 def lookupfilelog(x):
752 def lookupfilelog(x):
753 return linkrevnodes[x]
753 return linkrevnodes[x]
754
754
755 filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
755 filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
756 if filenodes:
756 if filenodes:
757 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
757 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
758 total=total)
758 total=total)
759 h = self.fileheader(fname)
759 h = self.fileheader(fname)
760 size = len(h)
760 size = len(h)
761 yield h
761 yield h
762 for chunk in self.group(filenodes, filerevlog, lookupfilelog):
762 for chunk in self.group(filenodes, filerevlog, lookupfilelog):
763 size += len(chunk)
763 size += len(chunk)
764 yield chunk
764 yield chunk
765 self._verbosenote(_('%8.i %s\n') % (size, fname))
765 self._verbosenote(_('%8.i %s\n') % (size, fname))
766 progress(msgbundling, None)
766 progress(msgbundling, None)
767
767
768 def deltaparent(self, revlog, rev, p1, p2, prev):
768 def deltaparent(self, revlog, rev, p1, p2, prev):
769 return prev
769 return prev
770
770
771 def revchunk(self, revlog, rev, prev, linknode):
771 def revchunk(self, revlog, rev, prev, linknode):
772 node = revlog.node(rev)
772 node = revlog.node(rev)
773 p1, p2 = revlog.parentrevs(rev)
773 p1, p2 = revlog.parentrevs(rev)
774 base = self.deltaparent(revlog, rev, p1, p2, prev)
774 base = self.deltaparent(revlog, rev, p1, p2, prev)
775
775
776 prefix = ''
776 prefix = ''
777 if revlog.iscensored(base) or revlog.iscensored(rev):
777 if revlog.iscensored(base) or revlog.iscensored(rev):
778 try:
778 try:
779 delta = revlog.revision(node, raw=True)
779 delta = revlog.revision(node, raw=True)
780 except error.CensoredNodeError as e:
780 except error.CensoredNodeError as e:
781 delta = e.tombstone
781 delta = e.tombstone
782 if base == nullrev:
782 if base == nullrev:
783 prefix = mdiff.trivialdiffheader(len(delta))
783 prefix = mdiff.trivialdiffheader(len(delta))
784 else:
784 else:
785 baselen = revlog.rawsize(base)
785 baselen = revlog.rawsize(base)
786 prefix = mdiff.replacediffheader(baselen, len(delta))
786 prefix = mdiff.replacediffheader(baselen, len(delta))
787 elif base == nullrev:
787 elif base == nullrev:
788 delta = revlog.revision(node, raw=True)
788 delta = revlog.revision(node, raw=True)
789 prefix = mdiff.trivialdiffheader(len(delta))
789 prefix = mdiff.trivialdiffheader(len(delta))
790 else:
790 else:
791 delta = revlog.revdiff(base, rev)
791 delta = revlog.revdiff(base, rev)
792 p1n, p2n = revlog.parents(node)
792 p1n, p2n = revlog.parents(node)
793 basenode = revlog.node(base)
793 basenode = revlog.node(base)
794 flags = revlog.flags(rev)
794 flags = revlog.flags(rev)
795 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
795 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
796 meta += prefix
796 meta += prefix
797 l = len(meta) + len(delta)
797 l = len(meta) + len(delta)
798 yield chunkheader(l)
798 yield chunkheader(l)
799 yield meta
799 yield meta
800 yield delta
800 yield delta
801 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
801 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
802 # do nothing with basenode, it is implicitly the previous one in HG10
802 # do nothing with basenode, it is implicitly the previous one in HG10
803 # do nothing with flags, it is implicitly 0 for cg1 and cg2
803 # do nothing with flags, it is implicitly 0 for cg1 and cg2
804 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
804 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
805
805
class cg2packer(cg1packer):
    """Packer for version '02' changegroups (adds generaldelta exchange)."""
    version = '02'
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER

    def __init__(self, repo, bundlecaps=None):
        super(cg2packer, self).__init__(repo, bundlecaps)
        # Since generaldelta is directly supported by cg2, reordering
        # generally doesn't help, so we disable it by default (treating
        # bundle.reorder=auto just like bundle.reorder=False).
        if self._reorder is None:
            self._reorder = False

    def deltaparent(self, revlog, rev, p1, p2, prev):
        """Choose the base revision to delta ``rev`` against on the wire."""
        storagebase = revlog.deltaparent(rev)
        if storagebase == nullrev:
            if revlog.storedeltachains:
                # Avoid sending full revisions when delta parent is null.
                # Pick prev in that case. It's tempting to pick p1 here, as
                # p1 will be smaller in the common case. However, computing
                # a delta against p1 may require resolving the raw text of
                # p1, which could be expensive. The revlog caches should
                # have prev cached, meaning less CPU for changegroup
                # generation. There is likely room to add a flag and/or
                # config option to control this behavior.
                return prev
            # revlog is configured to use full snapshots for a reason;
            # stick to a full snapshot.
            return nullrev
        if storagebase in (p1, p2, prev):
            return storagebase
        # Pick prev when we can't be sure the remote has the base revision.
        return prev

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # Do nothing with flags, it is implicitly 0 in cg1 and cg2.
        fields = (node, p1n, p2n, basenode, linknode)
        return struct.pack(self.deltaheader, *fields)
842
842
class cg3packer(cg2packer):
    """Packer for version '03' changegroups (revlog flags + treemanifests)."""
    version = '03'
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        """Pack a (possibly nested) manifest group into the stream."""
        # A sub-manifest is introduced with a file-style header carrying the
        # directory name; the root manifest gets no header.
        if dir:
            yield self.fileheader(dir)

        dirlog = self._repo.manifestlog._revlog.dirlog(dir)
        chunks = self.group(mfnodes, dirlog, lookuplinknode,
                            units=_('manifests'))
        for chunk in chunks:
            yield chunk

    def _manifestsdone(self):
        # Unlike cg1/cg2, cg3 terminates the manifest section with an
        # explicit close chunk so the receiver knows no more (sub)manifest
        # groups follow.
        return self.close()

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        fields = (node, p1n, p2n, basenode, linknode, flags)
        return struct.pack(self.deltaheader, *fields)
862
862
# Map of wire-format version -> (packer class, unpacker class).
_packermap = {'01': (cg1packer, cg1unpacker),
             # cg2 adds support for exchanging generaldelta
             '02': (cg2packer, cg2unpacker),
             # cg3 adds support for exchanging revlog flags and treemanifests
             '03': (cg3packer, cg3unpacker),
}
869
869
def allsupportedversions(repo):
    """Return the set of changegroup versions this repo supports at all.

    '03' is only offered when treemanifest/changegroup3 support is enabled
    by configuration or required by the repository.
    """
    versions = set(_packermap)
    wantscg3 = (repo.ui.configbool('experimental', 'changegroup3') or
                repo.ui.configbool('experimental', 'treemanifest') or
                'treemanifest' in repo.requirements)
    if not wantscg3:
        versions.discard('03')
    return versions
877
877
# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    """Return the changegroup versions this repo can receive.

    Currently identical to allsupportedversions(repo).
    """
    return allsupportedversions(repo)
881
881
# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    """Return the changegroup versions this repo is able to produce."""
    versions = allsupportedversions(repo)
    if 'treemanifest' in repo.requirements:
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        versions.difference_update(['01', '02'])
    return versions
894
894
def safeversion(repo):
    """Find the smallest version that it's safe to assume clients of the
    repo will support.

    For example, all hg versions that support generaldelta also support
    changegroup 02.
    """
    versions = supportedoutgoingversions(repo)
    if 'generaldelta' in repo.requirements:
        # generaldelta clients necessarily understand cg '02'.
        versions.discard('01')
    assert versions
    return min(versions)
904
904
def getbundler(version, repo, bundlecaps=None):
    """Instantiate the packer class registered for ``version``."""
    assert version in supportedoutgoingversions(repo)
    return _packermap[version][0](repo, bundlecaps)
908
908
def getunbundler(version, fh, alg, extras=None):
    """Instantiate the unpacker for ``version`` reading from ``fh``."""
    return _packermap[version][1](fh, alg, extras=extras)
911
911
def _changegroupinfo(repo, nodes, source):
    """Report how many changesets were found and, when debugging, list them."""
    ui = repo.ui
    if source == 'bundle' or ui.verbose:
        ui.status(_("%d changesets found\n") % len(nodes))
    if ui.debugflag:
        ui.debug("list of changesets:\n")
        for node in nodes:
            ui.debug("%s\n" % hex(node))
919
919
def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
    """Produce the raw changegroup chunks for the revisions in ``outgoing``.

    Fires the 'preoutgoing' hook, reports what will be sent, then delegates
    chunk generation to ``bundler.generate()``.
    """
    repo = repo.unfiltered()
    common = outgoing.common
    missingcsets = outgoing.missing
    missingheads = outgoing.missingheads
    missingheads.sort()
    # We take the fast path if we get told to, or if all unfiltered heads
    # have been requested (since we then know that all linkrevs will be
    # pulled by the client).
    fastpathlinkrev = fastpath or (
        repo.filtername is None and missingheads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, missingcsets, source)
    return bundler.generate(common, missingcsets, fastpathlinkrev, source)
935
935
def getsubset(repo, outgoing, bundler, source, fastpath=False):
    """Generate the changegroup for ``outgoing`` and wrap it in an unbundler."""
    stream = getsubsetraw(repo, outgoing, bundler, source, fastpath)
    return getunbundler(bundler.version, util.chunkbuffer(stream), None,
                        {'clcount': len(outgoing.missing)})
940
940
def changegroupsubset(repo, roots, heads, source, version='01'):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the roots and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    outgoing = discovery.outgoing(repo, missingroots=roots, missingheads=heads)
    return getsubset(repo, outgoing, getbundler(version, repo), source)
957
957
def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
                           version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    Only implemented for local repos; reuses any sets already computed
    on ``outgoing``. Returns a raw changegroup generator, or None when
    there is nothing to send."""
    if outgoing.missing:
        bundler = getbundler(version, repo, bundlecaps)
        return getsubsetraw(repo, outgoing, bundler, source)
    return None
968
968
def getchangegroup(repo, source, outgoing, bundlecaps=None,
                   version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    Only implemented for local repos; reuses any sets already computed
    on ``outgoing``. Returns None when there is nothing to send."""
    if outgoing.missing:
        bundler = getbundler(version, repo, bundlecaps)
        return getsubset(repo, outgoing, bundler, source)
    return None
979
979
def getlocalchangegroup(repo, *args, **kwargs):
    # Deprecated alias kept for backward compatibility; warn and forward.
    msg = 'getlocalchangegroup is deprecated, use getchangegroup'
    repo.ui.deprecwarn(msg, '4.3')
    return getchangegroup(repo, *args, **kwargs)
984
984
def changegroup(repo, basenodes, source):
    """Return a changegroup reaching all repo heads from ``basenodes``.

    Delegates to changegroupsubset() to avoid a race (issue1320).
    """
    return changegroupsubset(repo, basenodes, repo.heads(), source)
988
988
def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    """Apply the per-file revlog groups read from ``source``.

    ``needfiles`` maps filename -> collection of nodes that the incoming
    changesets require; every received node is checked off as it
    arrives, and any node still missing at the end aborts the pull.
    Returns a (revision count, file count) pair.
    """
    newrevs = 0
    filecount = 0
    # iter(callable, sentinel): filelogheader() returns {} after the
    # last file group.
    for chunkdata in iter(source.filelogheader, {}):
        filecount += 1
        fname = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % fname)
        repo.ui.progress(_('files'), filecount, unit=_('files'),
                         total=expectedfiles)
        fl = repo.file(fname)
        oldlen = len(fl)
        try:
            if not fl.addgroup(source, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        newrevs += len(fl) - oldlen
        if fname in needfiles:
            needs = needfiles[fname]
            for rev in xrange(oldlen, len(fl)):
                node = fl.node(rev)
                if node not in needs:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
                needs.remove(node)
            if not needs:
                del needfiles[fname]
    repo.ui.progress(_('files'), None)

    # Anything left in needfiles must already exist locally, or the
    # repository would be incomplete.
    for fname, needs in needfiles.iteritems():
        fl = repo.file(fname)
        for node in needs:
            try:
                fl.rev(node)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (fname, hex(node)))

    return newrevs, filecount
General Comments 0
You need to be logged in to leave comments. Login now