##// END OF EJS Templates
changegroup: extract method that sorts nodes to send...
Augie Fackler -
r29236:1b7d907e default
parent child Browse files
Show More
@@ -1,1058 +1,1062 b''
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import os
10 import os
11 import struct
11 import struct
12 import tempfile
12 import tempfile
13 import weakref
13 import weakref
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 hex,
17 hex,
18 nullid,
18 nullid,
19 nullrev,
19 nullrev,
20 short,
20 short,
21 )
21 )
22
22
23 from . import (
23 from . import (
24 branchmap,
24 branchmap,
25 dagutil,
25 dagutil,
26 discovery,
26 discovery,
27 error,
27 error,
28 mdiff,
28 mdiff,
29 phases,
29 phases,
30 util,
30 util,
31 )
31 )
32
32
# struct format strings for the per-revision delta headers of the three
# changegroup wire versions (all node fields are 20-byte binary hashes).
# cg1: node, p1, p2, linked changeset (delta base is implicit).
_CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
# cg2: adds an explicit delta base node (see cg2unpacker._deltaheader).
_CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
# cg3: additionally carries 16-bit revlog flags; explicitly big-endian.
_CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
36
36
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    data = stream.read(n)
    if len(data) >= n:
        return data
    # a short read means the peer hung up or the stream is truncated
    raise error.Abort(_("stream ended unexpectedly"
                        " (got %d bytes, expected %d)")
                      % (len(data), n))
45
45
def getchunk(stream):
    """return the next chunk from stream as a string"""
    header = readexactly(stream, 4)
    length = struct.unpack(">l", header)[0]
    # the stored length counts its own 4-byte prefix, so anything
    # above 4 carries a payload
    if length > 4:
        return readexactly(stream, length - 4)
    if length:
        raise error.Abort(_("invalid chunk length %d") % length)
    # a zero length marks the end of a chunk group
    return ""
55
55
def chunkheader(length):
    """return a changegroup chunk header (string)"""
    # the on-wire length includes the 4-byte header itself
    framedlen = length + 4
    return struct.pack(">l", framedlen)
59
59
def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    # equivalent to struct.pack(">l", 0): four zero bytes terminate a group
    return b"\x00\x00\x00\x00"
63
63
def combineresults(results):
    """logic to combine 0 or more addchangegroup results into one

    Each input follows the addchangegroup return convention:
    - 0: nothing changed or no source
    - 1: number of heads stayed the same
    - 2..n: 1 + number of added heads
    - -2..-n: -1 - number of removed heads
    """
    changedheads = 0
    for ret in results:
        # If any changegroup result is 0, return 0 immediately.
        # (Previously this only broke out of the loop, and the post-loop
        # head adjustment could overwrite the 0 when an *earlier* result
        # had changed heads, making the outcome depend on result order.)
        if ret == 0:
            return 0
        if ret < -1:
            changedheads += ret + 1
        elif ret > 1:
            changedheads += ret - 1
    if changedheads > 0:
        return 1 + changedheads
    if changedheads < 0:
        return -1 + changedheads
    return 1
82
82
def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fileobj = None
    # while set, names a partially-written file to delete on failure
    cleanup = None
    try:
        if not filename:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fileobj = os.fdopen(fd, "wb")
        elif vfs:
            fileobj = vfs.open(filename, "wb")
        else:
            fileobj = open(filename, "wb")
        cleanup = filename
        for chunk in chunks:
            fileobj.write(chunk)
        # every chunk made it to disk; keep the file
        cleanup = None
        return filename
    finally:
        if fileobj is not None:
            fileobj.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
114
114
class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests

    def __init__(self, fh, alg):
        # fh: file-like object holding the (possibly compressed) stream
        # alg: two-letter compression id; 'UN' means uncompressed
        if alg == 'UN':
            alg = None # get more modern without breaking too much
        if not alg in util.decompressors:
            raise error.Abort(_('unknown stream compression type: %s')
                              % alg)
        if alg == 'BZ':
            alg = '_truncatedBZ'
        self._stream = util.decompressors[alg](fh)
        self._type = alg
        # optional per-chunk progress callback, set by apply()
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        """Read the next chunk's length prefix; 0 means end of group.

        The on-wire length includes its own 4-byte prefix, hence the
        l - 4 payload size. Also fires the progress callback, if any.
        """
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        # cg1 has no explicit delta base: deltas chain against the
        # previous node in the stream, or p1 for the first one.
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        """Read one revision delta; returns {} at end of the group."""
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                'deltabase': deltabase, 'delta': delta, 'flags': flags}

    def getchunks(self):
        """returns all the chunks contains in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parse the changegroup data, otherwise it will
        block in case of sshrepo because it don't know the end of the stream.
        """
        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, changegroup versions 1 and 2 have a series of groups
        # with one group per file. changegroup 3 has a series of directory
        # manifests before the files.
        count = 0
        emptycount = 0
        while emptycount < self._grouplistcount:
            empty = True
            count += 1
            while True:
                chunk = getchunk(self)
                if not chunk:
                    # only empty groups *after* changelog+manifest count
                    # toward termination (count > 2)
                    if empty and count > 2:
                        emptycount += 1
                    break
                empty = False
                yield chunkheader(len(chunk))
                pos = 0
                # re-emit large chunks in 1MB slices
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        # We know that we'll never have more manifests than we had
        # changesets.
        self.callback = prog(_('manifests'), numchanges)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        repo.manifest.addgroup(self, revmap, trp)
        repo.ui.progress(_('manifests'), None)
        self.callback = None

    def apply(self, repo, srctype, url, emptyok=False,
              targetphase=phases.draft, expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        try:
            with repo.transaction("\n".join([srctype,
                                             util.hidepassword(url)])) as tr:
                # The transaction could have been created before and already
                # carries source information. In this case we use the top
                # level data. We overwrite the argument because we need to use
                # the top level value (if they exist) in this function.
                srctype = tr.hookargs.setdefault('source', srctype)
                url = tr.hookargs.setdefault('url', url)
                repo.hook('prechangegroup', throw=True, **tr.hookargs)

                # write changelog data to temp files so concurrent readers
                # will not see an inconsistent view
                cl = repo.changelog
                cl.delayupdate(tr)
                oldheads = cl.heads()

                trp = weakref.proxy(tr)
                # pull off the changeset group
                repo.ui.status(_("adding changesets\n"))
                clstart = len(cl)
                # local progress reporter; an instance is installed as
                # self.callback and fired once per chunk by _chunklength()
                class prog(object):
                    def __init__(self, step, total):
                        self._step = step
                        self._total = total
                        self._count = 1
                    def __call__(self):
                        repo.ui.progress(self._step, self._count,
                                         unit=_('chunks'), total=self._total)
                        self._count += 1
                self.callback = prog(_('changesets'), expectedtotal)

                # collect the set of files touched, to size the file progress
                efiles = set()
                def onchangelog(cl, node):
                    efiles.update(cl.readfiles(node))

                self.changelogheader()
                srccontent = cl.addgroup(self, csmap, trp,
                                         addrevisioncb=onchangelog)
                efiles = len(efiles)

                if not (srccontent or emptyok):
                    raise error.Abort(_("received changelog group is empty"))
                clend = len(cl)
                changesets = clend - clstart
                repo.ui.progress(_('changesets'), None)
                self.callback = None

                # pull off the manifest group
                repo.ui.status(_("adding manifests\n"))
                self._unpackmanifests(repo, revmap, trp, prog, changesets)

                needfiles = {}
                if repo.ui.configbool('server', 'validate', default=False):
                    # validate incoming csets have their manifests
                    for cset in xrange(clstart, clend):
                        mfnode = repo.changelog.read(
                            repo.changelog.node(cset))[0]
                        mfest = repo.manifest.readdelta(mfnode)
                        # store file nodes we must see
                        for f, n in mfest.iteritems():
                            needfiles.setdefault(f, set()).add(n)

                # process the files
                repo.ui.status(_("adding file changes\n"))
                newrevs, newfiles = _addchangegroupfiles(
                    repo, self, revmap, trp, efiles, needfiles)
                revisions += newrevs
                files += newfiles

                # dh = net change in (open) head count; heads that close
                # a branch are not counted as added
                dh = 0
                if oldheads:
                    heads = cl.heads()
                    dh = len(heads) - len(oldheads)
                    for h in heads:
                        if h not in oldheads and repo[h].closesbranch():
                            dh -= 1
                htext = ""
                if dh:
                    htext = _(" (%+d heads)") % dh

                repo.ui.status(_("added %d changesets"
                                 " with %d changes to %d files%s\n")
                               % (changesets, revisions, files, htext))
                repo.invalidatevolatilesets()

                if changesets > 0:
                    # preserve any pre-existing 'node' hook argument on the
                    # transaction, but always pass the first/last added
                    # nodes to the pretxnchangegroup hook itself
                    if 'node' not in tr.hookargs:
                        tr.hookargs['node'] = hex(cl.node(clstart))
                        tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                        hookargs = dict(tr.hookargs)
                    else:
                        hookargs = dict(tr.hookargs)
                        hookargs['node'] = hex(cl.node(clstart))
                        hookargs['node_last'] = hex(cl.node(clend - 1))
                    repo.hook('pretxnchangegroup', throw=True, **hookargs)

                added = [cl.node(r) for r in xrange(clstart, clend)]
                publishing = repo.publishing()
                if srctype in ('push', 'serve'):
                    # Old servers can not push the boundary themselves.
                    # New servers won't push the boundary if changeset already
                    # exists locally as secret
                    #
                    # We should not use added here but the list of all change in
                    # the bundle
                    if publishing:
                        phases.advanceboundary(repo, tr, phases.public,
                                               srccontent)
                    else:
                        # Those changesets have been pushed from the
                        # outside, their phases are going to be pushed
                        # alongside. Therefor `targetphase` is
                        # ignored.
                        phases.advanceboundary(repo, tr, phases.draft,
                                               srccontent)
                        phases.retractboundary(repo, tr, phases.draft, added)
                elif srctype != 'strip':
                    # publishing only alter behavior during push
                    #
                    # strip should not touch boundary at all
                    phases.retractboundary(repo, tr, targetphase, added)

                if changesets > 0:
                    if srctype != 'strip':
                        # During strip, branchcache is invalid but
                        # coming call to `destroyed` will repair it.
                        # In other case we can safely update cache on
                        # disk.
                        branchmap.updatecache(repo.filtered('served'))

                    def runhooks():
                        # These hooks run when the lock releases, not when the
                        # transaction closes. So it's possible for the changelog
                        # to have changed since we last saw it.
                        if clstart >= len(repo):
                            return

                        # forcefully update the on-disk branch cache
                        repo.ui.debug("updating the branch cache\n")
                        repo.hook("changegroup", **hookargs)

                        for n in added:
                            args = hookargs.copy()
                            args['node'] = hex(n)
                            del args['node_last']
                            repo.hook("incoming", **args)

                        newheads = [h for h in repo.heads()
                                    if h not in oldheads]
                        repo.ui.log("incoming",
                                    "%s incoming changes - new heads: %s\n",
                                    len(added),
                                    ', '.join([hex(c[:6]) for c in newheads]))

                    tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                    lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
441
441
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        # cg2 transmits the delta base explicitly, so the previous node
        # is irrelevant here; no revlog flags are carried (always 0).
        node, p1, p2, deltabase, cs = headertuple
        return node, p1, p2, deltabase, cs, 0
457
457
class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        # cg3 carries both the explicit delta base and revlog flags
        # directly in the header.
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        # First consume the root-manifest group exactly like cg1/cg2 ...
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
                                                  numchanges)
        # ... then any directory-manifest groups that follow, each
        # introduced by a filelog-style header naming the directory.
        while True:
            chunkdata = self.filelogheader()
            if not chunkdata:
                break
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % d)
            dirlog = repo.manifest.dirlog(d)
            if not dirlog.addgroup(self, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))
487
487
class headerlessfixup(object):
    """File-like wrapper that re-prepends already-consumed header bytes.

    Reads are served from the buffered header first, then fall through
    to the underlying file object.
    """
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        if not self._h:
            return readexactly(self._fh, n)
        buffered = self._h[:n]
        self._h = self._h[n:]
        if len(buffered) < n:
            # header exhausted mid-read: top up from the real stream
            buffered += readexactly(self._fh, n - len(buffered))
        return buffered
499
499
500 class cg1packer(object):
500 class cg1packer(object):
501 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
501 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
502 version = '01'
502 version = '01'
    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        # experimental config: bundle.reorder
        # 'auto' (the default) becomes None, leaving the decision to
        # _sortgroup (which keys off the revlog's generaldelta flag);
        # any other value is parsed as a boolean forcing reordering
        # on or off.
        reorder = repo.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress
        # emit notes only in verbose (non-debug) mode; otherwise drop them
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None
526
526
527 def close(self):
527 def close(self):
528 return closechunk()
528 return closechunk()
529
529
530 def fileheader(self, fname):
530 def fileheader(self, fname):
531 return chunkheader(len(fname)) + fname
531 return chunkheader(len(fname)) + fname
532
532
533 # Extracted both for clarity and for overriding in extensions.
534 def _sortgroup(self, revlog, nodelist, lookup):
535 """Sort nodes for change group and turn them into revnums."""
536 # for generaldelta revlogs, we linearize the revs; this will both be
537 # much quicker and generate a much smaller bundle
538 if (revlog._generaldelta and self._reorder is None) or self._reorder:
539 dag = dagutil.revlogdag(revlog)
540 return dag.linearize(set(revlog.rev(n) for n in nodelist))
541 else:
542 return sorted([revlog.rev(n) for n in nodelist])
543
533 def group(self, nodelist, revlog, lookup, units=None):
544 def group(self, nodelist, revlog, lookup, units=None):
534 """Calculate a delta group, yielding a sequence of changegroup chunks
545 """Calculate a delta group, yielding a sequence of changegroup chunks
535 (strings).
546 (strings).
536
547
537 Given a list of changeset revs, return a set of deltas and
548 Given a list of changeset revs, return a set of deltas and
538 metadata corresponding to nodes. The first delta is
549 metadata corresponding to nodes. The first delta is
539 first parent(nodelist[0]) -> nodelist[0], the receiver is
550 first parent(nodelist[0]) -> nodelist[0], the receiver is
540 guaranteed to have this parent as it has all history before
551 guaranteed to have this parent as it has all history before
541 these changesets. In the case firstparent is nullrev the
552 these changesets. In the case firstparent is nullrev the
542 changegroup starts with a full revision.
553 changegroup starts with a full revision.
543
554
544 If units is not None, progress detail will be generated, units specifies
555 If units is not None, progress detail will be generated, units specifies
545 the type of revlog that is touched (changelog, manifest, etc.).
556 the type of revlog that is touched (changelog, manifest, etc.).
546 """
557 """
547 # if we don't have any revisions touched by these changesets, bail
558 # if we don't have any revisions touched by these changesets, bail
548 if len(nodelist) == 0:
559 if len(nodelist) == 0:
549 yield self.close()
560 yield self.close()
550 return
561 return
551
562
552 # for generaldelta revlogs, we linearize the revs; this will both be
563 revs = self._sortgroup(revlog, nodelist, lookup)
553 # much quicker and generate a much smaller bundle
554 if (revlog._generaldelta and self._reorder is None) or self._reorder:
555 dag = dagutil.revlogdag(revlog)
556 revs = set(revlog.rev(n) for n in nodelist)
557 revs = dag.linearize(revs)
558 else:
559 revs = sorted([revlog.rev(n) for n in nodelist])
560
564
561 # add the parent of the first rev
565 # add the parent of the first rev
562 p = revlog.parentrevs(revs[0])[0]
566 p = revlog.parentrevs(revs[0])[0]
563 revs.insert(0, p)
567 revs.insert(0, p)
564
568
565 # build deltas
569 # build deltas
566 total = len(revs) - 1
570 total = len(revs) - 1
567 msgbundling = _('bundling')
571 msgbundling = _('bundling')
568 for r in xrange(len(revs) - 1):
572 for r in xrange(len(revs) - 1):
569 if units is not None:
573 if units is not None:
570 self._progress(msgbundling, r + 1, unit=units, total=total)
574 self._progress(msgbundling, r + 1, unit=units, total=total)
571 prev, curr = revs[r], revs[r + 1]
575 prev, curr = revs[r], revs[r + 1]
572 linknode = lookup(revlog.node(curr))
576 linknode = lookup(revlog.node(curr))
573 for c in self.revchunk(revlog, curr, prev, linknode):
577 for c in self.revchunk(revlog, curr, prev, linknode):
574 yield c
578 yield c
575
579
576 if units is not None:
580 if units is not None:
577 self._progress(msgbundling, None)
581 self._progress(msgbundling, None)
578 yield self.close()
582 yield self.close()
579
583
580 # filter any nodes that claim to be part of the known set
584 # filter any nodes that claim to be part of the known set
581 def prune(self, revlog, missing, commonrevs):
585 def prune(self, revlog, missing, commonrevs):
582 rr, rl = revlog.rev, revlog.linkrev
586 rr, rl = revlog.rev, revlog.linkrev
583 return [n for n in missing if rl(rr(n)) not in commonrevs]
587 return [n for n in missing if rl(rr(n)) not in commonrevs]
584
588
585 def _packmanifests(self, dir, mfnodes, lookuplinknode):
589 def _packmanifests(self, dir, mfnodes, lookuplinknode):
586 """Pack flat manifests into a changegroup stream."""
590 """Pack flat manifests into a changegroup stream."""
587 assert not dir
591 assert not dir
588 for chunk in self.group(mfnodes, self._repo.manifest,
592 for chunk in self.group(mfnodes, self._repo.manifest,
589 lookuplinknode, units=_('manifests')):
593 lookuplinknode, units=_('manifests')):
590 yield chunk
594 yield chunk
591
595
592 def _manifestsdone(self):
596 def _manifestsdone(self):
593 return ''
597 return ''
594
598
595 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
599 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
596 '''yield a sequence of changegroup chunks (strings)'''
600 '''yield a sequence of changegroup chunks (strings)'''
597 repo = self._repo
601 repo = self._repo
598 cl = repo.changelog
602 cl = repo.changelog
599
603
600 clrevorder = {}
604 clrevorder = {}
601 mfs = {} # needed manifests
605 mfs = {} # needed manifests
602 fnodes = {} # needed file nodes
606 fnodes = {} # needed file nodes
603 changedfiles = set()
607 changedfiles = set()
604
608
605 # Callback for the changelog, used to collect changed files and manifest
609 # Callback for the changelog, used to collect changed files and manifest
606 # nodes.
610 # nodes.
607 # Returns the linkrev node (identity in the changelog case).
611 # Returns the linkrev node (identity in the changelog case).
608 def lookupcl(x):
612 def lookupcl(x):
609 c = cl.read(x)
613 c = cl.read(x)
610 clrevorder[x] = len(clrevorder)
614 clrevorder[x] = len(clrevorder)
611 n = c[0]
615 n = c[0]
612 # record the first changeset introducing this manifest version
616 # record the first changeset introducing this manifest version
613 mfs.setdefault(n, x)
617 mfs.setdefault(n, x)
614 # Record a complete list of potentially-changed files in
618 # Record a complete list of potentially-changed files in
615 # this manifest.
619 # this manifest.
616 changedfiles.update(c[3])
620 changedfiles.update(c[3])
617 return x
621 return x
618
622
619 self._verbosenote(_('uncompressed size of bundle content:\n'))
623 self._verbosenote(_('uncompressed size of bundle content:\n'))
620 size = 0
624 size = 0
621 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
625 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
622 size += len(chunk)
626 size += len(chunk)
623 yield chunk
627 yield chunk
624 self._verbosenote(_('%8.i (changelog)\n') % size)
628 self._verbosenote(_('%8.i (changelog)\n') % size)
625
629
626 # We need to make sure that the linkrev in the changegroup refers to
630 # We need to make sure that the linkrev in the changegroup refers to
627 # the first changeset that introduced the manifest or file revision.
631 # the first changeset that introduced the manifest or file revision.
628 # The fastpath is usually safer than the slowpath, because the filelogs
632 # The fastpath is usually safer than the slowpath, because the filelogs
629 # are walked in revlog order.
633 # are walked in revlog order.
630 #
634 #
631 # When taking the slowpath with reorder=None and the manifest revlog
635 # When taking the slowpath with reorder=None and the manifest revlog
632 # uses generaldelta, the manifest may be walked in the "wrong" order.
636 # uses generaldelta, the manifest may be walked in the "wrong" order.
633 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
637 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
634 # cc0ff93d0c0c).
638 # cc0ff93d0c0c).
635 #
639 #
636 # When taking the fastpath, we are only vulnerable to reordering
640 # When taking the fastpath, we are only vulnerable to reordering
637 # of the changelog itself. The changelog never uses generaldelta, so
641 # of the changelog itself. The changelog never uses generaldelta, so
638 # it is only reordered when reorder=True. To handle this case, we
642 # it is only reordered when reorder=True. To handle this case, we
639 # simply take the slowpath, which already has the 'clrevorder' logic.
643 # simply take the slowpath, which already has the 'clrevorder' logic.
640 # This was also fixed in cc0ff93d0c0c.
644 # This was also fixed in cc0ff93d0c0c.
641 fastpathlinkrev = fastpathlinkrev and not self._reorder
645 fastpathlinkrev = fastpathlinkrev and not self._reorder
642 # Treemanifests don't work correctly with fastpathlinkrev
646 # Treemanifests don't work correctly with fastpathlinkrev
643 # either, because we don't discover which directory nodes to
647 # either, because we don't discover which directory nodes to
644 # send along with files. This could probably be fixed.
648 # send along with files. This could probably be fixed.
645 fastpathlinkrev = fastpathlinkrev and (
649 fastpathlinkrev = fastpathlinkrev and (
646 'treemanifest' not in repo.requirements)
650 'treemanifest' not in repo.requirements)
647
651
648 for chunk in self.generatemanifests(commonrevs, clrevorder,
652 for chunk in self.generatemanifests(commonrevs, clrevorder,
649 fastpathlinkrev, mfs, fnodes):
653 fastpathlinkrev, mfs, fnodes):
650 yield chunk
654 yield chunk
651 mfs.clear()
655 mfs.clear()
652 clrevs = set(cl.rev(x) for x in clnodes)
656 clrevs = set(cl.rev(x) for x in clnodes)
653
657
654 if not fastpathlinkrev:
658 if not fastpathlinkrev:
655 def linknodes(unused, fname):
659 def linknodes(unused, fname):
656 return fnodes.get(fname, {})
660 return fnodes.get(fname, {})
657 else:
661 else:
658 cln = cl.node
662 cln = cl.node
659 def linknodes(filerevlog, fname):
663 def linknodes(filerevlog, fname):
660 llr = filerevlog.linkrev
664 llr = filerevlog.linkrev
661 fln = filerevlog.node
665 fln = filerevlog.node
662 revs = ((r, llr(r)) for r in filerevlog)
666 revs = ((r, llr(r)) for r in filerevlog)
663 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
667 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
664
668
665 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
669 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
666 source):
670 source):
667 yield chunk
671 yield chunk
668
672
669 yield self.close()
673 yield self.close()
670
674
671 if clnodes:
675 if clnodes:
672 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
676 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
673
677
674 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
678 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
675 fnodes):
679 fnodes):
676 repo = self._repo
680 repo = self._repo
677 dirlog = repo.manifest.dirlog
681 dirlog = repo.manifest.dirlog
678 tmfnodes = {'': mfs}
682 tmfnodes = {'': mfs}
679
683
680 # Callback for the manifest, used to collect linkrevs for filelog
684 # Callback for the manifest, used to collect linkrevs for filelog
681 # revisions.
685 # revisions.
682 # Returns the linkrev node (collected in lookupcl).
686 # Returns the linkrev node (collected in lookupcl).
683 def makelookupmflinknode(dir):
687 def makelookupmflinknode(dir):
684 if fastpathlinkrev:
688 if fastpathlinkrev:
685 assert not dir
689 assert not dir
686 return mfs.__getitem__
690 return mfs.__getitem__
687
691
688 def lookupmflinknode(x):
692 def lookupmflinknode(x):
689 """Callback for looking up the linknode for manifests.
693 """Callback for looking up the linknode for manifests.
690
694
691 Returns the linkrev node for the specified manifest.
695 Returns the linkrev node for the specified manifest.
692
696
693 SIDE EFFECT:
697 SIDE EFFECT:
694
698
695 1) fclnodes gets populated with the list of relevant
699 1) fclnodes gets populated with the list of relevant
696 file nodes if we're not using fastpathlinkrev
700 file nodes if we're not using fastpathlinkrev
697 2) When treemanifests are in use, collects treemanifest nodes
701 2) When treemanifests are in use, collects treemanifest nodes
698 to send
702 to send
699
703
700 Note that this means manifests must be completely sent to
704 Note that this means manifests must be completely sent to
701 the client before you can trust the list of files and
705 the client before you can trust the list of files and
702 treemanifests to send.
706 treemanifests to send.
703 """
707 """
704 clnode = tmfnodes[dir][x]
708 clnode = tmfnodes[dir][x]
705 mdata = dirlog(dir).readshallowfast(x)
709 mdata = dirlog(dir).readshallowfast(x)
706 for p, n, fl in mdata.iterentries():
710 for p, n, fl in mdata.iterentries():
707 if fl == 't': # subdirectory manifest
711 if fl == 't': # subdirectory manifest
708 subdir = dir + p + '/'
712 subdir = dir + p + '/'
709 tmfclnodes = tmfnodes.setdefault(subdir, {})
713 tmfclnodes = tmfnodes.setdefault(subdir, {})
710 tmfclnode = tmfclnodes.setdefault(n, clnode)
714 tmfclnode = tmfclnodes.setdefault(n, clnode)
711 if clrevorder[clnode] < clrevorder[tmfclnode]:
715 if clrevorder[clnode] < clrevorder[tmfclnode]:
712 tmfclnodes[n] = clnode
716 tmfclnodes[n] = clnode
713 else:
717 else:
714 f = dir + p
718 f = dir + p
715 fclnodes = fnodes.setdefault(f, {})
719 fclnodes = fnodes.setdefault(f, {})
716 fclnode = fclnodes.setdefault(n, clnode)
720 fclnode = fclnodes.setdefault(n, clnode)
717 if clrevorder[clnode] < clrevorder[fclnode]:
721 if clrevorder[clnode] < clrevorder[fclnode]:
718 fclnodes[n] = clnode
722 fclnodes[n] = clnode
719 return clnode
723 return clnode
720 return lookupmflinknode
724 return lookupmflinknode
721
725
722 size = 0
726 size = 0
723 while tmfnodes:
727 while tmfnodes:
724 dir = min(tmfnodes)
728 dir = min(tmfnodes)
725 nodes = tmfnodes[dir]
729 nodes = tmfnodes[dir]
726 prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
730 prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
727 for x in self._packmanifests(dir, prunednodes,
731 for x in self._packmanifests(dir, prunednodes,
728 makelookupmflinknode(dir)):
732 makelookupmflinknode(dir)):
729 size += len(x)
733 size += len(x)
730 yield x
734 yield x
731 del tmfnodes[dir]
735 del tmfnodes[dir]
732 self._verbosenote(_('%8.i (manifests)\n') % size)
736 self._verbosenote(_('%8.i (manifests)\n') % size)
733 yield self._manifestsdone()
737 yield self._manifestsdone()
734
738
735 # The 'source' parameter is useful for extensions
739 # The 'source' parameter is useful for extensions
736 def generatefiles(self, changedfiles, linknodes, commonrevs, source):
740 def generatefiles(self, changedfiles, linknodes, commonrevs, source):
737 repo = self._repo
741 repo = self._repo
738 progress = self._progress
742 progress = self._progress
739 msgbundling = _('bundling')
743 msgbundling = _('bundling')
740
744
741 total = len(changedfiles)
745 total = len(changedfiles)
742 # for progress output
746 # for progress output
743 msgfiles = _('files')
747 msgfiles = _('files')
744 for i, fname in enumerate(sorted(changedfiles)):
748 for i, fname in enumerate(sorted(changedfiles)):
745 filerevlog = repo.file(fname)
749 filerevlog = repo.file(fname)
746 if not filerevlog:
750 if not filerevlog:
747 raise error.Abort(_("empty or missing revlog for %s") % fname)
751 raise error.Abort(_("empty or missing revlog for %s") % fname)
748
752
749 linkrevnodes = linknodes(filerevlog, fname)
753 linkrevnodes = linknodes(filerevlog, fname)
750 # Lookup for filenodes, we collected the linkrev nodes above in the
754 # Lookup for filenodes, we collected the linkrev nodes above in the
751 # fastpath case and with lookupmf in the slowpath case.
755 # fastpath case and with lookupmf in the slowpath case.
752 def lookupfilelog(x):
756 def lookupfilelog(x):
753 return linkrevnodes[x]
757 return linkrevnodes[x]
754
758
755 filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
759 filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
756 if filenodes:
760 if filenodes:
757 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
761 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
758 total=total)
762 total=total)
759 h = self.fileheader(fname)
763 h = self.fileheader(fname)
760 size = len(h)
764 size = len(h)
761 yield h
765 yield h
762 for chunk in self.group(filenodes, filerevlog, lookupfilelog):
766 for chunk in self.group(filenodes, filerevlog, lookupfilelog):
763 size += len(chunk)
767 size += len(chunk)
764 yield chunk
768 yield chunk
765 self._verbosenote(_('%8.i %s\n') % (size, fname))
769 self._verbosenote(_('%8.i %s\n') % (size, fname))
766 progress(msgbundling, None)
770 progress(msgbundling, None)
767
771
768 def deltaparent(self, revlog, rev, p1, p2, prev):
772 def deltaparent(self, revlog, rev, p1, p2, prev):
769 return prev
773 return prev
770
774
771 def revchunk(self, revlog, rev, prev, linknode):
775 def revchunk(self, revlog, rev, prev, linknode):
772 node = revlog.node(rev)
776 node = revlog.node(rev)
773 p1, p2 = revlog.parentrevs(rev)
777 p1, p2 = revlog.parentrevs(rev)
774 base = self.deltaparent(revlog, rev, p1, p2, prev)
778 base = self.deltaparent(revlog, rev, p1, p2, prev)
775
779
776 prefix = ''
780 prefix = ''
777 if revlog.iscensored(base) or revlog.iscensored(rev):
781 if revlog.iscensored(base) or revlog.iscensored(rev):
778 try:
782 try:
779 delta = revlog.revision(node)
783 delta = revlog.revision(node)
780 except error.CensoredNodeError as e:
784 except error.CensoredNodeError as e:
781 delta = e.tombstone
785 delta = e.tombstone
782 if base == nullrev:
786 if base == nullrev:
783 prefix = mdiff.trivialdiffheader(len(delta))
787 prefix = mdiff.trivialdiffheader(len(delta))
784 else:
788 else:
785 baselen = revlog.rawsize(base)
789 baselen = revlog.rawsize(base)
786 prefix = mdiff.replacediffheader(baselen, len(delta))
790 prefix = mdiff.replacediffheader(baselen, len(delta))
787 elif base == nullrev:
791 elif base == nullrev:
788 delta = revlog.revision(node)
792 delta = revlog.revision(node)
789 prefix = mdiff.trivialdiffheader(len(delta))
793 prefix = mdiff.trivialdiffheader(len(delta))
790 else:
794 else:
791 delta = revlog.revdiff(base, rev)
795 delta = revlog.revdiff(base, rev)
792 p1n, p2n = revlog.parents(node)
796 p1n, p2n = revlog.parents(node)
793 basenode = revlog.node(base)
797 basenode = revlog.node(base)
794 flags = revlog.flags(rev)
798 flags = revlog.flags(rev)
795 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
799 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
796 meta += prefix
800 meta += prefix
797 l = len(meta) + len(delta)
801 l = len(meta) + len(delta)
798 yield chunkheader(l)
802 yield chunkheader(l)
799 yield meta
803 yield meta
800 yield delta
804 yield delta
801 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
805 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
802 # do nothing with basenode, it is implicitly the previous one in HG10
806 # do nothing with basenode, it is implicitly the previous one in HG10
803 # do nothing with flags, it is implicitly 0 for cg1 and cg2
807 # do nothing with flags, it is implicitly 0 for cg1 and cg2
804 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
808 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
805
809
class cg2packer(cg1packer):
    """Changegroup version '02' packer: adds generaldelta exchange."""
    version = '02'
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER

    def __init__(self, repo, bundlecaps=None):
        super(cg2packer, self).__init__(repo, bundlecaps)
        if self._reorder is None:
            # Since generaldelta is directly supported by cg2, reordering
            # generally doesn't help, so we disable it by default (treating
            # bundle.reorder=auto just like bundle.reorder=False).
            self._reorder = False

    def deltaparent(self, revlog, rev, p1, p2, prev):
        # Reuse the storage delta base when the receiver is known to
        # have it; otherwise fall back to the previous rev in the stream.
        dp = revlog.deltaparent(rev)
        # avoid storing full revisions; pick prev in those cases
        # also pick prev when we can't be sure remote has dp
        if dp == nullrev or (dp != p1 and dp != p2 and dp != prev):
            return prev
        return dp

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # Do nothing with flags, it is implicitly 0 in cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
829
833
class cg3packer(cg2packer):
    """Changegroup version '03' packer: adds revlog flags and treemanifests."""
    version = '03'
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        # Unlike cg1, a non-root directory gets its own file-style header.
        if dir:
            yield self.fileheader(dir)
        for chunk in self.group(mfnodes, self._repo.manifest.dirlog(dir),
                                lookuplinknode, units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        # cg3 terminates the manifest section with an explicit close chunk.
        return self.close()

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # cg3 transmits the revlog flags on the wire.
        return struct.pack(
            self.deltaheader, node, p1n, p2n, basenode, linknode, flags)
847
851
# Maps changegroup wire version -> (packer class, unpacker class).
_packermap = {'01': (cg1packer, cg1unpacker),
             # cg2 adds support for exchanging generaldelta
             '02': (cg2packer, cg2unpacker),
             # cg3 adds support for exchanging revlog flags and treemanifests
             '03': (cg3packer, cg3unpacker),
}
854
858
def allsupportedversions(ui):
    """Return the set of changegroup versions this build can handle.

    '03' is still experimental, so it is included only when one of the
    experimental.changegroup3 / experimental.treemanifest knobs is set.
    """
    supported = set(_packermap)
    supported.discard('03')
    wantcg3 = (ui.configbool('experimental', 'changegroup3')
               or ui.configbool('experimental', 'treemanifest'))
    if wantcg3:
        supported.add('03')
    return supported
862
866
# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    """Return the changegroup versions this repo can receive."""
    incoming = allsupportedversions(repo.ui)
    # A treemanifest repo always accepts '03', independent of the
    # experimental config knobs.
    if 'treemanifest' in repo.requirements:
        incoming.add('03')
    return incoming
869
873
# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    """Return the changegroup versions this repo can produce."""
    outgoing = allsupportedversions(repo.ui)
    if 'treemanifest' not in repo.requirements:
        return outgoing
    # Versions 01 and 02 support only flat manifests and it's just too
    # expensive to convert between the flat manifest and tree manifest on
    # the fly. Since tree manifests are hashed differently, all of history
    # would have to be converted. Instead, we simply don't even pretend to
    # support versions 01 and 02.
    outgoing -= {'01', '02'}
    outgoing.add('03')
    return outgoing
883
887
def safeversion(repo):
    """Return the smallest changegroup version that clients of this repo
    can safely be assumed to support.

    For example, all hg versions that support generaldelta also support
    changegroup 02, so '01' is dropped for generaldelta repos.
    """
    candidates = supportedoutgoingversions(repo)
    if 'generaldelta' in repo.requirements:
        candidates.discard('01')
    assert candidates
    return min(candidates)
893
897
def getbundler(version, repo, bundlecaps=None):
    """Instantiate the packer class registered for *version*."""
    assert version in supportedoutgoingversions(repo)
    return _packermap[version][0](repo, bundlecaps)
897
901
def getunbundler(version, fh, alg):
    """Instantiate the unpacker for *version* reading from *fh* with
    compression *alg*."""
    return _packermap[version][1](fh, alg)
900
904
901 def _changegroupinfo(repo, nodes, source):
905 def _changegroupinfo(repo, nodes, source):
902 if repo.ui.verbose or source == 'bundle':
906 if repo.ui.verbose or source == 'bundle':
903 repo.ui.status(_("%d changesets found\n") % len(nodes))
907 repo.ui.status(_("%d changesets found\n") % len(nodes))
904 if repo.ui.debugflag:
908 if repo.ui.debugflag:
905 repo.ui.debug("list of changesets:\n")
909 repo.ui.debug("list of changesets:\n")
906 for node in nodes:
910 for node in nodes:
907 repo.ui.debug("%s\n" % hex(node))
911 repo.ui.debug("%s\n" % hex(node))
908
912
def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
    """Return a raw changegroup chunk generator for *outgoing*.

    *outgoing* is a discovery.outgoing carrying the common and missing
    sets; *bundler* is a cgNpacker instance. Fires the 'preoutgoing'
    hook (which may abort) before generation starts.
    """
    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know all linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
924
928
def getsubset(repo, outgoing, bundler, source, fastpath=False):
    """Like getsubsetraw, but wraps the chunk stream in an unbundler."""
    gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
    return getunbundler(bundler.version, util.chunkbuffer(gengroup), None)
928
932
def changegroupsubset(repo, roots, heads, source, version='01'):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the roots and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    cl = repo.changelog
    if not roots:
        roots = [nullid]
    # Discovery bases are the parents of the roots, minus any that are
    # themselves part of the outgoing set (filtered below).
    discbases = []
    for n in roots:
        discbases.extend([p for p in cl.parents(n) if p != nullid])
    # TODO: remove call to nodesbetween.
    csets, roots, heads = cl.nodesbetween(roots, heads)
    included = set(csets)
    discbases = [n for n in discbases if n not in included]
    outgoing = discovery.outgoing(cl, discbases, heads)
    bundler = getbundler(version, repo)
    return getsubset(repo, outgoing, bundler, source)
955
959
def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
                           version='01'):
    """Like getbundle, but takes a precomputed discovery.outgoing.

    Only implemented for local repos; reuses any sets already computed
    on *outgoing*. Returns a raw changegroup generator, or None when
    there is nothing to send.
    """
    if not outgoing.missing:
        # Nothing is missing on the other side: no changegroup needed.
        return None
    return getsubsetraw(repo, outgoing,
                        getbundler(version, repo, bundlecaps), source)
966
970
def getlocalchangegroup(repo, source, outgoing, bundlecaps=None,
                        version='01'):
    """Like getbundle, but takes a precomputed discovery.outgoing.

    Only implemented for local repos; reuses any sets already computed
    on *outgoing*. Returns None when there is nothing to send.
    """
    if not outgoing.missing:
        # Nothing is missing on the other side: no changegroup needed.
        return None
    return getsubset(repo, outgoing,
                     getbundler(version, repo, bundlecaps), source)
977
981
def computeoutgoing(repo, heads, common):
    """Work out which revs are outgoing for the given common and heads
    sets.

    Kept as a standalone function so extensions can reach this logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        # Drop common nodes the local changelog has never heard of.
        common = [n for n in common if cl.hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(cl, common, heads)
996
1000
def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None,
                   version='01'):
    """Like changegroupsubset, but bundles the set difference between
    the ancestors of heads and the ancestors of common.

    heads=None means the local heads; common=None means [nullid].

    Because of how the current discovery protocol works, the nodes in
    common might not all be known locally.
    """
    out = computeoutgoing(repo, heads, common)
    return getlocalchangegroup(repo, source, out, bundlecaps=bundlecaps,
                               version=version)
1010
1014
def changegroup(repo, basenodes, source):
    """Return a changegroup of all nodes descended from basenodes up to
    the current repository heads.

    Thin wrapper around changegroupsubset(); see below for why.
    """
    # to avoid a race we use changegroupsubset() (issue1320)
    return changegroupsubset(repo, basenodes, repo.heads(), source)
1014
1018
def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    """Apply the filelog portion of a changegroup stream.

    Reads per-file chunks from *source* and adds each group to the
    matching filelog, reporting per-file progress against
    *expectedfiles*.  *needfiles* maps filename -> set of filenodes that
    the already-applied changesets require; every received node is
    checked off against it, and any node still missing at the end aborts
    with a 'run hg verify' hint.

    Returns (revisions, files): the number of file revisions and the
    number of distinct files added.
    """
    revisions = 0
    files = 0
    while True:
        chunkdata = source.filelogheader()
        if not chunkdata:
            # An empty header terminates the sequence of filelogs.
            break
        files += 1
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        repo.ui.progress(_('files'), files, unit=_('files'),
                         total=expectedfiles)
        fl = repo.file(f)
        # Length before the group is applied, so added revs can be
        # counted and iterated below.
        o = len(fl)
        try:
            if not fl.addgroup(source, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            # Translate into a user-facing abort rather than a crash.
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        if f in needfiles:
            needs = needfiles[f]
            # Check off every newly added node against the expected set.
            for new in xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    # A node no applied changeset asked for: bad stream.
                    raise error.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    repo.ui.progress(_('files'), None)

    # Anything still listed in needfiles was referenced by a changeset
    # but not delivered in the stream; make sure it already exists
    # locally, otherwise the repository would be left incomplete.
    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files
General Comments 0
You need to be logged in to leave comments. Login now