changegroup3: move treemanifest support into _unpackmanifests()...
Martin von Zweigbergk
r27754:a09f143d default
@@ -1,1107 +1,1111 @@
# changegroup.py - Mercurial changegroup manipulation functions
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import os
import struct
import tempfile
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    nullrev,
    short,
)

from . import (
    branchmap,
    dagutil,
    discovery,
    error,
    mdiff,
    phases,
    util,
)

_CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
_CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
_CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"

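# A minimal sketch of the three header formats above, assuming an
# interactive session: cg1 packs four 20-byte nodes (node, p1, p2,
# linknode), cg2 adds the explicit deltabase node, and cg3 appends a
# big-endian 16-bit flags field.
#
#     >>> import struct
#     >>> struct.calcsize("20s20s20s20s")        # cg1
#     80
#     >>> struct.calcsize("20s20s20s20s20s")     # cg2
#     100
#     >>> struct.calcsize(">20s20s20s20s20sH")   # cg3
#     102
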
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    s = stream.read(n)
    if len(s) < n:
        raise error.Abort(_("stream ended unexpectedly"
                            " (got %d bytes, expected %d)")
                          % (len(s), n))
    return s

def getchunk(stream):
    """return the next chunk from stream as a string"""
    d = readexactly(stream, 4)
    l = struct.unpack(">l", d)[0]
    if l <= 4:
        if l:
            raise error.Abort(_("invalid chunk length %d") % l)
        return ""
    return readexactly(stream, l - 4)

def chunkheader(length):
    """return a changegroup chunk header (string)"""
    return struct.pack(">l", length + 4)

def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    return struct.pack(">l", 0)

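# A minimal sketch of the chunk framing, assuming an interactive session:
# the 4-byte big-endian length counts itself, so a 6-byte payload is
# framed with a header of 10, and the zero-length header from
# closechunk() terminates a chunkgroup.
#
#     >>> import io
#     >>> framed = chunkheader(6) + "abcdef" + closechunk()
#     >>> getchunk(io.BytesIO(framed))
#     'abcdef'
#     >>> getchunk(io.BytesIO(closechunk()))
#     ''
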
def combineresults(results):
    """logic to combine 0 or more addchangegroup results into one"""
    changedheads = 0
    result = 1
    for ret in results:
        # If any changegroup result is 0, return 0
        if ret == 0:
            result = 0
            break
        if ret < -1:
            changedheads += ret + 1
        elif ret > 1:
            changedheads += ret - 1
    if changedheads > 0:
        result = 1 + changedheads
    elif changedheads < 0:
        result = -1 + changedheads
    return result

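# A minimal sketch of the head arithmetic above, assuming an interactive
# session; inputs use the addchangegroup convention documented in
# cg1unpacker.apply() (0 = error, 1 = no head change, 1 + n = n heads
# added, -1 - n = n heads removed):
#
#     >>> combineresults([])         # nothing applied
#     1
#     >>> combineresults([2, 2])     # +1 head twice -> +2 heads
#     3
#     >>> combineresults([-3, 2])    # -2 heads, then +1 head -> -1 head
#     -2
#     >>> combineresults([1, 0, 2])  # any error short-circuits to 0
#     0
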
bundletypes = {
    "": ("", None),       # only when using unbundle on ssh and old http servers
                          # since the unification ssh accepts a header but there
                          # is no capability signaling it.
    "HG20": (), # special-cased below
    "HG10UN": ("HG10UN", None),
    "HG10BZ": ("HG10", 'BZ'),
    "HG10GZ": ("HG10GZ", 'GZ'),
}

# hgweb uses this list to communicate its preferred type
bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']

def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                fh = open(filename, "wb")
        else:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, "wb")
        cleanup = filename
        for c in chunks:
            fh.write(c)
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)

def writebundle(ui, cg, filename, bundletype, vfs=None, compression=None):
    """Write a bundle file and return its filename.

    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    bz2 compression can be turned off.
    The bundle file will be deleted in case of errors.
    """

    if bundletype == "HG20":
        from . import bundle2
        bundle = bundle2.bundle20(ui)
        bundle.setcompression(compression)
        part = bundle.newpart('changegroup', data=cg.getchunks())
        part.addparam('version', cg.version)
        chunkiter = bundle.getchunks()
    else:
        # compression argument is only for the bundle2 case
        assert compression is None
        if cg.version != '01':
            raise error.Abort(_('old bundle types only supports v1 '
                                'changegroups'))
        header, comp = bundletypes[bundletype]
        if comp not in util.compressors:
            raise error.Abort(_('unknown stream compression type: %s')
                              % comp)
        z = util.compressors[comp]()
        subchunkiter = cg.getchunks()
        def chunkiter():
            yield header
            for chunk in subchunkiter:
                yield z.compress(chunk)
            yield z.flush()
        chunkiter = chunkiter()

    # parse the changegroup data, otherwise we will block
    # in case of sshrepo because we don't know the end of the stream

    # an empty chunkgroup is the end of the changegroup
    # a changegroup has at least 2 chunkgroups (changelog and manifest).
    # after that, an empty chunkgroup is the end of the changegroup
    return writechunks(ui, chunkiter, filename, vfs=vfs)

class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '01'
    def __init__(self, fh, alg):
        if alg == 'UN':
            alg = None # get more modern without breaking too much
        if not alg in util.decompressors:
            raise error.Abort(_('unknown stream compression type: %s')
                              % alg)
        if alg == 'BZ':
            alg = '_truncatedBZ'
        self._stream = util.decompressors[alg](fh)
        self._type = alg
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                'deltabase': deltabase, 'delta': delta, 'flags': flags}

    def getchunks(self):
        """returns all the chunks contained in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parses the changegroup data, otherwise it
        would block for sshrepo because it doesn't know the end of the stream.
        """
        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, an empty chunkgroup is the end of the changegroup
        empty = False
        count = 0
        while not empty or count <= 2:
            empty = True
            count += 1
            while True:
                chunk = getchunk(self)
                if not chunk:
                    break
                empty = False
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

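    # A rough sketch of the stream layout getchunks() forwards, assuming
    # a cg1 bundle (cg3 adds more groups between manifests and files):
    #
    #     changelog chunkgroup
    #     manifest chunkgroup
    #     per changed file: filename chunk + delta chunkgroup
    #     empty chunk ending the stream
    #
    # Each chunkgroup ends with an empty chunk, which is why the loop
    # above always reads at least two chunkgroups (changelog and
    # manifest) before an empty one can end the stream.
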
    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        # We know that we'll never have more manifests than we had
        # changesets.
        self.callback = prog(_('manifests'), numchanges)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        repo.manifest.addgroup(self, revmap, trp)
        repo.ui.progress(_('manifests'), None)

    def apply(self, repo, srctype, url, emptyok=False,
              targetphase=phases.draft, expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            # The transaction could have been created before and already
            # carries source information. In this case we use the top
            # level data. We overwrite the argument because we need to use
            # the top level values (if they exist) in this function.
            srctype = tr.hookargs.setdefault('source', srctype)
            url = tr.hookargs.setdefault('url', url)
            repo.hook('prechangegroup', throw=True, **tr.hookargs)

            # write changelog data to temp files so concurrent readers
            # will not see an inconsistent view
            cl = repo.changelog
            cl.delayupdate(tr)
            oldheads = cl.heads()

            trp = weakref.proxy(tr)
            # pull off the changeset group
            repo.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                def __init__(self, step, total):
                    self._step = step
                    self._total = total
                    self._count = 1
                def __call__(self):
                    repo.ui.progress(self._step, self._count, unit=_('chunks'),
                                     total=self._total)
                    self._count += 1
            self.callback = prog(_('changesets'), expectedtotal)

            efiles = set()
            def onchangelog(cl, node):
                efiles.update(cl.read(node)[3])

            self.changelogheader()
            srccontent = cl.addgroup(self, csmap, trp,
                                     addrevisioncb=onchangelog)
            efiles = len(efiles)

            if not (srccontent or emptyok):
                raise error.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            repo.ui.progress(_('changesets'), None)

            # pull off the manifest group
            repo.ui.status(_("adding manifests\n"))
            self._unpackmanifests(repo, revmap, trp, prog, changesets)

            needfiles = {}
            if repo.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfnode = repo.changelog.read(repo.changelog.node(cset))[0]
                    mfest = repo.manifest.readdelta(mfnode)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            repo.ui.status(_("adding file changes\n"))
            self.callback = None
            pr = prog(_('files'), efiles)
            newrevs, newfiles = _addchangegroupfiles(
                repo, self, revmap, trp, pr, needfiles)
            revisions += newrevs
            files += newfiles

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and repo[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            repo.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            repo.invalidatevolatilesets()

            if changesets > 0:
                if 'node' not in tr.hookargs:
                    tr.hookargs['node'] = hex(cl.node(clstart))
                    tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                    hookargs = dict(tr.hookargs)
                else:
                    hookargs = dict(tr.hookargs)
                    hookargs['node'] = hex(cl.node(clstart))
                    hookargs['node_last'] = hex(cl.node(clend - 1))
                repo.hook('pretxnchangegroup', throw=True, **hookargs)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = repo.publishing()
            if srctype in ('push', 'serve'):
                # Old servers cannot push the boundary themselves.
                # New servers won't push the boundary if the changeset already
                # exists locally as secret
                #
                # We should not use added here but the list of all changes in
                # the bundle
                if publishing:
                    phases.advanceboundary(repo, tr, phases.public, srccontent)
                else:
                    # Those changesets have been pushed from the outside, their
                    # phases are going to be pushed alongside. Therefore
                    # `targetphase` is ignored.
                    phases.advanceboundary(repo, tr, phases.draft, srccontent)
                    phases.retractboundary(repo, tr, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch boundary at all
                phases.retractboundary(repo, tr, targetphase, added)

            if changesets > 0:
                if srctype != 'strip':
                    # During strip, branchcache is invalid but the upcoming
                    # call to `destroyed` will repair it.
                    # In other cases we can safely update the cache on disk.
                    branchmap.updatecache(repo.filtered('served'))

                def runhooks():
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(repo):
                        return

                    # forcefully update the on-disk branch cache
                    repo.ui.debug("updating the branch cache\n")
                    repo.hook("changegroup", **hookargs)

                    for n in added:
                        args = hookargs.copy()
                        args['node'] = hex(n)
                        del args['node_last']
                        repo.hook("incoming", **args)

                    newheads = [h for h in repo.heads() if h not in oldheads]
                    repo.ui.log("incoming",
                                "%s incoming changes - new heads: %s\n",
                                len(added),
                                ', '.join([hex(c[:6]) for c in newheads]))

                tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                lambda tr: repo._afterlock(runhooks))

            tr.close()

        finally:
            tr.release()
            repo.ui.flush()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs = headertuple
        flags = 0
        return node, p1, p2, deltabase, cs, flags

class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '03'

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
                                                  numchanges)
        while True:
            chunkdata = self.filelogheader()
            if not chunkdata:
                break
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % d)
            dirlog = repo.manifest.dirlog(d)
            if not dirlog.addgroup(self, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))

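    # A rough sketch of the manifest section this method consumes in a
    # cg3 stream, assuming a repository with treemanifests:
    #
    #     root-manifest chunkgroup
    #     per directory: directory-name chunk + delta chunkgroup
    #     empty chunk ending the manifest section
    #
    # The directory groups reuse the filelogheader() framing, so an empty
    # header chunk is what separates manifests from the file section.
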
class headerlessfixup(object):
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        if self._h:
            d, self._h = self._h[:n], self._h[n:]
            if len(d) < n:
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)

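# A minimal usage sketch, assuming a caller has already peeked at the
# 4-byte magic of a stream (the names here are hypothetical):
#
#     magic = realfh.read(4)              # e.g. "HG10", consumed from realfh
#     fh = headerlessfixup(realfh, magic)
#     fh.read(4)                          # yields "HG10" again, then realfh data
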
def _moddirs(files):
    """Given a set of modified files, find the list of modified directories.

    This returns a list of (path to changed dir, changed dir) tuples,
    as that's what the one client needs anyway.

    >>> _moddirs(['a/b/c.py', 'a/b/c.txt', 'a/d/e/f/g.txt', 'i.txt', ])
    [('/', 'a/'), ('a/', 'b/'), ('a/', 'd/'), ('a/d/', 'e/'), ('a/d/e/', 'f/')]

    """
    alldirs = set()
    for f in files:
        path = f.split('/')[:-1]
        for i in xrange(len(path) - 1, -1, -1):
            dn = '/'.join(path[:i])
            current = dn + '/', path[i] + '/'
            if current in alldirs:
                break
            alldirs.add(current)
    return sorted(alldirs)

class cg1packer(object):
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    version = '01'
    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        # experimental config: bundle.reorder
        reorder = repo.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None

    def close(self):
        return closechunk()

    def fileheader(self, fname):
        return chunkheader(len(fname)) + fname

    def group(self, nodelist, revlog, lookup, units=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated, units specifies
        the type of revlog that is touched (changelog, manifest, etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self.close()
            return

        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and self._reorder is None) or self._reorder:
            dag = dagutil.revlogdag(revlog)
            revs = set(revlog.rev(n) for n in nodelist)
            revs = dag.linearize(revs)
        else:
            revs = sorted([revlog.rev(n) for n in nodelist])

        # add the parent of the first rev
        p = revlog.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        total = len(revs) - 1
        msgbundling = _('bundling')
        for r in xrange(len(revs) - 1):
            if units is not None:
                self._progress(msgbundling, r + 1, unit=units, total=total)
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(revlog.node(curr))
            for c in self.revchunk(revlog, curr, prev, linknode):
                yield c

        if units is not None:
            self._progress(msgbundling, None)
        yield self.close()

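    # A minimal sketch of the chain group() emits, assuming revs [5, 6, 7]
    # with parent(5) == 4 and the cg1 deltaparent() below (always the
    # previously sent rev): deltas 4->5, 5->6, 6->7, then a closing chunk.
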
    # filter any nodes that claim to be part of the known set
    def prune(self, revlog, missing, commonrevs):
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

    def _packmanifests(self, mfnodes, tmfnodes, lookuplinknode):
        """Pack flat manifests into a changegroup stream."""
        ml = self._repo.manifest
        size = 0
        for chunk in self.group(
                mfnodes, ml, lookuplinknode, units=_('manifests')):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (manifests)\n') % size)
        # It looks odd to assert this here, but tmfnodes doesn't get
        # filled in until after we've called lookuplinknode for
        # sending root manifests, so the only way to tell the streams
        # got crossed is to check after we've done all the work.
        assert not tmfnodes

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        repo = self._repo
        cl = repo.changelog
        ml = repo.manifest

        clrevorder = {}
        mfs = {} # needed manifests
        tmfnodes = {}
        fnodes = {} # needed file nodes
        # maps manifest node id -> set(changed files)
        mfchangedfiles = {}

        # Callback for the changelog, used to collect changed files and manifest
        # nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            clrevorder[x] = len(clrevorder)
            n = c[0]
            # record the first changeset introducing this manifest version
            mfs.setdefault(n, x)
            # Record a complete list of potentially-changed files in
            # this manifest.
            mfchangedfiles.setdefault(n, set()).update(c[3])
            return x

        self._verbosenote(_('uncompressed size of bundle content:\n'))
        size = 0
        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (changelog)\n') % size)

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the filelogs
        # are walked in revlog order.
        #
        # When taking the slowpath with reorder=None and the manifest revlog
        # uses generaldelta, the manifest may be walked in the "wrong" order.
        # Without 'clrevorder', we would get an incorrect linkrev (see fix in
        # cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta, so
        # it is only reordered when reorder=True. To handle this case, we
        # simply take the slowpath, which already has the 'clrevorder' logic.
        # This was also fixed in cc0ff93d0c0c.
        fastpathlinkrev = fastpathlinkrev and not self._reorder
        # Treemanifests don't work correctly with fastpathlinkrev
        # either, because we don't discover which directory nodes to
        # send along with files. This could probably be fixed.
        fastpathlinkrev = fastpathlinkrev and (
            'treemanifest' not in repo.requirements)
        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        if fastpathlinkrev:
            lookupmflinknode = mfs.__getitem__
        else:
            def lookupmflinknode(x):
                """Callback for looking up the linknode for manifests.

                Returns the linkrev node for the specified manifest.

                SIDE EFFECT:

                1) fclnodes gets populated with the list of relevant
                   file nodes if we're not using fastpathlinkrev
                2) When treemanifests are in use, collects treemanifest nodes
                   to send

                Note that this means manifests must be completely sent to
                the client before you can trust the list of files and
                treemanifests to send.
                """
                clnode = mfs[x]
                # We no longer actually care about reading deltas of
                # the manifest here, because we already know the list
                # of changed files, so for treemanifests (which
                # lazily-load anyway to *generate* a readdelta) we can
                # just load them with read() and then we'll actually
                # be able to correctly load node IDs from the
                # submanifest entries.
                if 'treemanifest' in repo.requirements:
                    mdata = ml.read(x)
                else:
                    mdata = ml.readfast(x)
                for f in mfchangedfiles[x]:
                    try:
                        n = mdata[f]
                    except KeyError:
                        continue
                    # record the first changeset introducing this filelog
                    # version
                    fclnodes = fnodes.setdefault(f, {})
                    fclnode = fclnodes.setdefault(n, clnode)
                    if clrevorder[clnode] < clrevorder[fclnode]:
                        fclnodes[n] = clnode
                # gather list of changed treemanifest nodes
                if 'treemanifest' in repo.requirements:
                    submfs = {'/': mdata}
                    for dn, bn in _moddirs(mfchangedfiles[x]):
                        submf = submfs[dn]
                        submf = submf._dirs[bn]
                        submfs[submf.dir()] = submf
                        tmfclnodes = tmfnodes.setdefault(submf.dir(), {})
                        tmfclnodes.setdefault(submf._node, clnode)
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            tmfclnodes[n] = clnode
                return clnode

        mfnodes = self.prune(ml, mfs, commonrevs)
        for x in self._packmanifests(
                mfnodes, tmfnodes, lookupmflinknode):
            yield x

        mfs.clear()
        clrevs = set(cl.rev(x) for x in clnodes)

        if not fastpathlinkrev:
            def linknodes(unused, fname):
                return fnodes.get(fname, {})
        else:
            cln = cl.node
            def linknodes(filerevlog, fname):
                llr = filerevlog.linkrev
                fln = filerevlog.node
                revs = ((r, llr(r)) for r in filerevlog)
                return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)

        changedfiles = set()
        for x in mfchangedfiles.itervalues():
            changedfiles.update(x)
        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

    # The 'source' parameter is useful for extensions
    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        repo = self._repo
        progress = self._progress
        msgbundling = _('bundling')

        total = len(changedfiles)
        # for progress output
        msgfiles = _('files')
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise error.Abort(_("empty or missing revlog for %s") % fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
            if filenodes:
                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                         total=total)
                h = self.fileheader(fname)
                size = len(h)
                yield h
                for chunk in self.group(filenodes, filerevlog, lookupfilelog):
                    size += len(chunk)
                    yield chunk
                self._verbosenote(_('%8.i %s\n') % (size, fname))
        progress(msgbundling, None)

    def deltaparent(self, revlog, rev, p1, p2, prev):
        return prev

    def revchunk(self, revlog, rev, prev, linknode):
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = self.deltaparent(revlog, rev, p1, p2, prev)

        prefix = ''
        if revlog.iscensored(base) or revlog.iscensored(rev):
            try:
                delta = revlog.revision(node)
            except error.CensoredNodeError as e:
                delta = e.tombstone
            if base == nullrev:
                prefix = mdiff.trivialdiffheader(len(delta))
            else:
                baselen = revlog.rawsize(base)
                prefix = mdiff.replacediffheader(baselen, len(delta))
        elif base == nullrev:
            delta = revlog.revision(node)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        flags = revlog.flags(rev)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta
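
    # Illustrative sketch (not part of the original module): revchunk frames
    # each revision as a 4-byte big-endian length prefix that counts itself,
    # followed by the delta header and the delta payload; getchunk() above
    # reverses this framing. Assuming a toy payload:
    #
    #   >>> import struct
    #   >>> payload = 'header-plus-delta'
    #   >>> frame = struct.pack(">l", len(payload) + 4) + payload
    #   >>> struct.unpack(">l", frame[:4])[0] - 4 == len(payload)
    #   True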
    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # do nothing with basenode, it is implicitly the previous one in HG10
        # do nothing with flags, it is implicitly 0 for cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)

class cg2packer(cg1packer):
    version = '02'
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER

    def __init__(self, repo, bundlecaps=None):
        super(cg2packer, self).__init__(repo, bundlecaps)
        if self._reorder is None:
            # Since generaldelta is directly supported by cg2, reordering
            # generally doesn't help, so we disable it by default (treating
            # bundle.reorder=auto just like bundle.reorder=False).
            self._reorder = False

    def deltaparent(self, revlog, rev, p1, p2, prev):
        dp = revlog.deltaparent(rev)
        # avoid sending full revisions; pick prev in those cases
        # also pick prev when we can't be sure the remote has dp
        if dp == nullrev or (dp != p1 and dp != p2 and dp != prev):
            return prev
        return dp
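
    # Illustrative note (not part of the original module): with generaldelta,
    # rev 5 might be stored as a delta against rev 2. If rev 2 is neither a
    # parent of rev 5 nor the previously sent rev, the receiver may lack it,
    # so deltaparent() falls back to prev. With toy values (all hypothetical):
    #
    #   >>> nullrev = -1
    #   >>> dp, p1, p2, prev = 2, 4, nullrev, 4
    #   >>> prev if dp == nullrev or (dp != p1 and dp != p2 and dp != prev) else dp
    #   4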

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # Do nothing with flags, it is implicitly 0 in cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)

class cg3packer(cg2packer):
    version = '03'
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER

    def _packmanifests(self, mfnodes, tmfnodes, lookuplinknode):
        # Note that debug prints are super confusing in this code, as
        # tmfnodes gets populated by the calls to lookuplinknode in
        # the superclass's manifest packer. In the future we should
        # probably see if we can refactor this somehow to be less
        # confusing.
        for x in super(cg3packer, self)._packmanifests(
            mfnodes, {}, lookuplinknode):
            yield x
        dirlog = self._repo.manifest.dirlog
        for name, nodes in tmfnodes.iteritems():
            # For now, directory headers are simply file headers with
            # a trailing '/' on the path (already in the name).
            yield self.fileheader(name)
            for chunk in self.group(nodes, dirlog(name), nodes.get):
                yield chunk
        yield self.close()

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        return struct.pack(
            self.deltaheader, node, p1n, p2n, basenode, linknode, flags)

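# Illustrative sketch (not part of the original module): the three delta
# header formats differ only in their trailing fields, which the struct
# format strings defined at the top of this module make easy to verify:
#
#   >>> import struct
#   >>> struct.calcsize("20s20s20s20s")        # cg1: node, p1, p2, linknode
#   80
#   >>> struct.calcsize("20s20s20s20s20s")     # cg2 adds an explicit basenode
#   100
#   >>> struct.calcsize(">20s20s20s20s20sH")   # cg3 adds a 16-bit flags field
#   102
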
_packermap = {'01': (cg1packer, cg1unpacker),
              # cg2 adds support for exchanging generaldelta
              '02': (cg2packer, cg2unpacker),
              # cg3 adds support for exchanging revlog flags and treemanifests
              '03': (cg3packer, cg3unpacker),
             }

def supportedversions(repo):
    versions = _packermap.keys()
    cg3 = ('treemanifest' in repo.requirements or
           repo.ui.configbool('experimental', 'changegroup3') or
           repo.ui.configbool('experimental', 'treemanifest'))
    if not cg3:
        versions.remove('03')
    return versions
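
# Illustrative usage (hypothetical 'repo' object, not part of the original
# module): without the 'treemanifest' requirement and with both experimental
# options left off, only the cg1 and cg2 formats are offered:
#
#   >>> sorted(supportedversions(repo))
#   ['01', '02']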

def getbundler(version, repo, bundlecaps=None):
    assert version in supportedversions(repo)
    return _packermap[version][0](repo, bundlecaps)

def getunbundler(version, fh, alg):
    return _packermap[version][1](fh, alg)

def _changegroupinfo(repo, nodes, source):
    if repo.ui.verbose or source == 'bundle':
        repo.ui.status(_("%d changesets found\n") % len(nodes))
    if repo.ui.debugflag:
        repo.ui.debug("list of changesets:\n")
        for node in nodes:
            repo.ui.debug("%s\n" % hex(node))

def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know all their linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)

def getsubset(repo, outgoing, bundler, source, fastpath=False):
    gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
    return getunbundler(bundler.version, util.chunkbuffer(gengroup), None)

def changegroupsubset(repo, roots, heads, source, version='01'):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the roots and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    This is fairly complex, as determining which filenodes and which
    manifest nodes need to be included for each changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    cl = repo.changelog
    if not roots:
        roots = [nullid]
    discbases = []
    for n in roots:
        discbases.extend([p for p in cl.parents(n) if p != nullid])
    # TODO: remove call to nodesbetween.
    csets, roots, heads = cl.nodesbetween(roots, heads)
    included = set(csets)
    discbases = [n for n in discbases if n not in included]
    outgoing = discovery.outgoing(cl, discbases, heads)
    bundler = getbundler(version, repo)
    return getsubset(repo, outgoing, bundler, source)
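
# Illustrative usage (hypothetical 'repo' and 'rootnode' objects, not part of
# the original module): bundle everything between a known root and the
# current heads, then consume the resulting stream:
#
#   >>> cg = changegroupsubset(repo, [rootnode], repo.heads(), 'bundle')
#   >>> first = cg.read(4096)   # read the changegroup stream in chunks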

def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
                           version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing. Returns a raw changegroup generator."""
    if not outgoing.missing:
        return None
    bundler = getbundler(version, repo, bundlecaps)
    return getsubsetraw(repo, outgoing, bundler, source)

def getlocalchangegroup(repo, source, outgoing, bundlecaps=None,
                        version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing."""
    if not outgoing.missing:
        return None
    bundler = getbundler(version, repo, bundlecaps)
    return getsubset(repo, outgoing, bundler, source)

def computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(cl, common, heads)
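
# Illustrative usage (hypothetical node values, not part of the original
# module): given the heads a client wants and the nodes both sides already
# share, the returned object carries the precomputed 'missing' set:
#
#   >>> out = computeoutgoing(repo, heads=[h1, h2], common=[c1])
#   >>> len(out.missing)   # changesets the client still needs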

def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None,
                   version='01'):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors of common.

    If heads is None, use the local heads. If common is None, use [nullid].

    The nodes in common might not all be known locally due to the way the
    current discovery protocol works.
    """
    outgoing = computeoutgoing(repo, heads, common)
    return getlocalchangegroup(repo, source, outgoing, bundlecaps=bundlecaps,
                               version=version)
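
# Illustrative usage (hypothetical 'remoteheads'/'commonnodes' lists, not
# part of the original module): the common entry point for pull/push wiring:
#
#   >>> cg = getchangegroup(repo, 'pull', heads=remoteheads,
#   ...                     common=commonnodes, version='02')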

def changegroup(repo, basenodes, source):
    # to avoid a race we use changegroupsubset() (issue1320)
    return changegroupsubset(repo, basenodes, repo.heads(), source)

def _addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
    revisions = 0
    files = 0
-    submfsdone = False
    while True:
        chunkdata = source.filelogheader()
        if not chunkdata:
-            if source.version == "03" and not submfsdone:
-                submfsdone = True
-                continue
            break
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        pr()
-        directory = (f[-1] == '/')
-        if directory:
-            # a directory using treemanifests
-            fl = repo.manifest.dirlog(f)
-        else:
-            fl = repo.file(f)
+        fl = repo.file(f)
        o = len(fl)
        try:
            if not fl.addgroup(source, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
-        if not directory:
-            revisions += len(fl) - o
-            files += 1
+        revisions += len(fl) - o
+        files += 1
        if f in needfiles:
            needs = needfiles[f]
            for new in xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    repo.ui.progress(_('files'), None)

    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files
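
# Illustrative sketch (not part of the original module): needfiles maps each
# filename to the set of filenodes the incoming changesets reference; every
# node received above is crossed off, and any leftovers mean file data is
# missing. A toy run of the bookkeeping, with a hypothetical filenode:
#
#   >>> needs = {'foo': set(['abc123'])}
#   >>> needs['foo'].discard('abc123')   # filenode arrived
#   >>> not needs['foo']                 # all satisfied; entry can be dropped
#   True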