changegroup: remove one special case from lookupmflinknode...

Augie Fackler
r27239:65c47779 default
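The change itself is small but easy to misread in side-by-side form: `lookupmflinknode` is invoked once per manifest node, and the `fastpathlinkrev` flag it used to test on every call is invariant for the whole walk. On the fast path the callback reduced to a bare dict lookup (`clnode = mfs[x]`, with all the file-node bookkeeping skipped), so the commit binds `lookupmflinknode = mfs.__getitem__` once up front and keeps the side-effectful function only for the slow path. A minimal, self-contained sketch of the pattern follows; the `make_lookup` wrapper and the sample data are illustrative only and not part of the diff:

    # Hypothetical sketch: choose the callback implementation once,
    # instead of re-testing an invariant flag inside every call.
    mfs = {'mfnode1': 'clnode1'}  # manifest node -> introducing changeset

    def make_lookup(fastpathlinkrev):
        if fastpathlinkrev:
            # Fast path: the lookup is a plain dict access, so the bound
            # method replaces the whole function.
            return mfs.__getitem__
        def lookupmflinknode(x):
            clnode = mfs[x]
            # Slow path only: populate fnodes here (elided in this sketch).
            return clnode
        return lookupmflinknode

    lookup = make_lookup(fastpathlinkrev=True)
    assert lookup('mfnode1') == 'clnode1'

Besides removing a branch from a hot loop, the rewrite lets the docstring drop its "if we're not using fastpathlinkrev" qualifier, which is exactly the wording change visible in the hunk below.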
@@ -1,971 +1,973 @@
# changegroup.py - Mercurial changegroup manipulation functions
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import os
import struct
import tempfile
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    nullrev,
    short,
)

from . import (
    branchmap,
    dagutil,
    discovery,
    error,
    mdiff,
    phases,
    util,
)

_CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
_CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"

def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    s = stream.read(n)
    if len(s) < n:
        raise error.Abort(_("stream ended unexpectedly"
                            " (got %d bytes, expected %d)")
                          % (len(s), n))
    return s

def getchunk(stream):
    """return the next chunk from stream as a string"""
    d = readexactly(stream, 4)
    l = struct.unpack(">l", d)[0]
    if l <= 4:
        if l:
            raise error.Abort(_("invalid chunk length %d") % l)
        return ""
    return readexactly(stream, l - 4)

def chunkheader(length):
    """return a changegroup chunk header (string)"""
    return struct.pack(">l", length + 4)

def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    return struct.pack(">l", 0)

def combineresults(results):
    """logic to combine 0 or more addchangegroup results into one"""
    changedheads = 0
    result = 1
    for ret in results:
        # If any changegroup result is 0, return 0
        if ret == 0:
            result = 0
            break
        if ret < -1:
            changedheads += ret + 1
        elif ret > 1:
            changedheads += ret - 1
    if changedheads > 0:
        result = 1 + changedheads
    elif changedheads < 0:
        result = -1 + changedheads
    return result

bundletypes = {
    "": ("", None),       # only when using unbundle on ssh and old http servers
                          # since the unification ssh accepts a header but there
                          # is no capability signaling it.
    "HG20": (), # special-cased below
    "HG10UN": ("HG10UN", None),
    "HG10BZ": ("HG10", 'BZ'),
    "HG10GZ": ("HG10GZ", 'GZ'),
}

# hgweb uses this list to communicate its preferred type
bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']

def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                fh = open(filename, "wb")
        else:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, "wb")
        cleanup = filename
        for c in chunks:
            fh.write(c)
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)

def writebundle(ui, cg, filename, bundletype, vfs=None, compression=None):
    """Write a bundle file and return its filename.

    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    bz2 compression can be turned off.
    The bundle file will be deleted in case of errors.
    """

    if bundletype == "HG20":
        from . import bundle2
        bundle = bundle2.bundle20(ui)
        bundle.setcompression(compression)
        part = bundle.newpart('changegroup', data=cg.getchunks())
        part.addparam('version', cg.version)
        chunkiter = bundle.getchunks()
    else:
        # compression argument is only for the bundle2 case
        assert compression is None
        if cg.version != '01':
            raise error.Abort(_('old bundle types only supports v1 '
                                'changegroups'))
        header, comp = bundletypes[bundletype]
        if comp not in util.compressors:
            raise error.Abort(_('unknown stream compression type: %s')
                              % comp)
        z = util.compressors[comp]()
        subchunkiter = cg.getchunks()
        def chunkiter():
            yield header
            for chunk in subchunkiter:
                yield z.compress(chunk)
            yield z.flush()
        chunkiter = chunkiter()

    # parse the changegroup data, otherwise we will block
    # in case of sshrepo because we don't know the end of the stream

    # an empty chunkgroup is the end of the changegroup
    # a changegroup has at least 2 chunkgroups (changelog and manifest).
    # after that, an empty chunkgroup is the end of the changegroup
    return writechunks(ui, chunkiter, filename, vfs=vfs)

class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '01'
    def __init__(self, fh, alg):
        if alg == 'UN':
            alg = None # get more modern without breaking too much
        if not alg in util.decompressors:
            raise error.Abort(_('unknown stream compression type: %s')
                              % alg)
        if alg == 'BZ':
            alg = '_truncatedBZ'
        self._stream = util.decompressors[alg](fh)
        self._type = alg
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        return node, p1, p2, deltabase, cs

    def deltachunk(self, prevnode):
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                'deltabase': deltabase, 'delta': delta}

    def getchunks(self):
        """returns all the chunks contains in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parse the changegroup data, otherwise it will
        block in case of sshrepo because it don't know the end of the stream.
        """
        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, an empty chunkgroup is the end of the changegroup
        empty = False
        count = 0
        while not empty or count <= 2:
            empty = True
            count += 1
            while True:
                chunk = getchunk(self)
                if not chunk:
                    break
                empty = False
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        # We know that we'll never have more manifests than we had
        # changesets.
        self.callback = prog(_('manifests'), numchanges)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        repo.manifest.addgroup(self, revmap, trp)
        repo.ui.progress(_('manifests'), None)

    def apply(self, repo, srctype, url, emptyok=False,
              targetphase=phases.draft, expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        wasempty = (len(repo.changelog) == 0)
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            # The transaction could have been created before and already
            # carries source information. In this case we use the top
            # level data. We overwrite the argument because we need to use
            # the top level value (if they exist) in this function.
            srctype = tr.hookargs.setdefault('source', srctype)
            url = tr.hookargs.setdefault('url', url)
            repo.hook('prechangegroup', throw=True, **tr.hookargs)

            # write changelog data to temp files so concurrent readers
            # will not see an inconsistent view
            cl = repo.changelog
            cl.delayupdate(tr)
            oldheads = cl.heads()

            trp = weakref.proxy(tr)
            # pull off the changeset group
            repo.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                def __init__(self, step, total):
                    self._step = step
                    self._total = total
                    self._count = 1
                def __call__(self):
                    repo.ui.progress(self._step, self._count, unit=_('chunks'),
                                     total=self._total)
                    self._count += 1
            self.callback = prog(_('changesets'), expectedtotal)

            efiles = set()
            def onchangelog(cl, node):
                efiles.update(cl.read(node)[3])

            self.changelogheader()
            srccontent = cl.addgroup(self, csmap, trp,
                                     addrevisioncb=onchangelog)
            efiles = len(efiles)

            if not (srccontent or emptyok):
                raise error.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            repo.ui.progress(_('changesets'), None)

            # pull off the manifest group
            repo.ui.status(_("adding manifests\n"))
            self._unpackmanifests(repo, revmap, trp, prog, changesets)

            needfiles = {}
            if repo.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfnode = repo.changelog.read(repo.changelog.node(cset))[0]
                    mfest = repo.manifest.readdelta(mfnode)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            repo.ui.status(_("adding file changes\n"))
            self.callback = None
            pr = prog(_('files'), efiles)
            newrevs, newfiles = _addchangegroupfiles(
                repo, self, revmap, trp, pr, needfiles, wasempty)
            revisions += newrevs
            files += newfiles

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and repo[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            repo.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            repo.invalidatevolatilesets()

            if changesets > 0:
                if 'node' not in tr.hookargs:
                    tr.hookargs['node'] = hex(cl.node(clstart))
                    hookargs = dict(tr.hookargs)
                else:
                    hookargs = dict(tr.hookargs)
                    hookargs['node'] = hex(cl.node(clstart))
                repo.hook('pretxnchangegroup', throw=True, **hookargs)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = repo.publishing()
            if srctype in ('push', 'serve'):
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if changeset already
                # exists locally as secret
                #
                # We should not use added here but the list of all change in
                # the bundle
                if publishing:
                    phases.advanceboundary(repo, tr, phases.public, srccontent)
                else:
                    # Those changesets have been pushed from the outside, their
                    # phases are going to be pushed alongside. Therefor
                    # `targetphase` is ignored.
                    phases.advanceboundary(repo, tr, phases.draft, srccontent)
                    phases.retractboundary(repo, tr, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alter behavior during push
                #
                # strip should not touch boundary at all
                phases.retractboundary(repo, tr, targetphase, added)

            if changesets > 0:
                if srctype != 'strip':
                    # During strip, branchcache is invalid but coming call to
                    # `destroyed` will repair it.
                    # In other case we can safely update cache on disk.
                    branchmap.updatecache(repo.filtered('served'))

                def runhooks():
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(repo):
                        return

                    # forcefully update the on-disk branch cache
                    repo.ui.debug("updating the branch cache\n")
                    repo.hook("changegroup", **hookargs)

                    for n in added:
                        args = hookargs.copy()
                        args['node'] = hex(n)
                        repo.hook("incoming", **args)

                    newheads = [h for h in repo.heads() if h not in oldheads]
                    repo.ui.log("incoming",
                                "%s incoming changes - new heads: %s\n",
                                len(added),
                                ', '.join([hex(c[:6]) for c in newheads]))

                tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                lambda tr: repo._afterlock(runhooks))

            tr.close()

        finally:
            tr.release()
        repo.ui.flush()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs = headertuple
        return node, p1, p2, deltabase, cs

class headerlessfixup(object):
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        if self._h:
            d, self._h = self._h[:n], self._h[n:]
            if len(d) < n:
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)

class cg1packer(object):
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    version = '01'
    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        # experimental config: bundle.reorder
        reorder = repo.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None

    def close(self):
        return closechunk()

    def fileheader(self, fname):
        return chunkheader(len(fname)) + fname

    def group(self, nodelist, revlog, lookup, units=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated, units specifies
        the type of revlog that is touched (changelog, manifest, etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self.close()
            return

        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and self._reorder is None) or self._reorder:
            dag = dagutil.revlogdag(revlog)
            revs = set(revlog.rev(n) for n in nodelist)
            revs = dag.linearize(revs)
        else:
            revs = sorted([revlog.rev(n) for n in nodelist])

        # add the parent of the first rev
        p = revlog.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        total = len(revs) - 1
        msgbundling = _('bundling')
        for r in xrange(len(revs) - 1):
            if units is not None:
                self._progress(msgbundling, r + 1, unit=units, total=total)
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(revlog.node(curr))
            for c in self.revchunk(revlog, curr, prev, linknode):
                yield c

        if units is not None:
            self._progress(msgbundling, None)
        yield self.close()

    # filter any nodes that claim to be part of the known set
    def prune(self, revlog, missing, commonrevs):
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

    def _packmanifests(self, mfnodes, lookuplinknode):
        """Pack flat manifests into a changegroup stream."""
        ml = self._repo.manifest
        size = 0
        for chunk in self.group(
                mfnodes, ml, lookuplinknode, units=_('manifests')):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (manifests)\n') % size)

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        repo = self._repo
        cl = repo.changelog
        ml = repo.manifest

        clrevorder = {}
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        # maps manifest node id -> set(changed files)
        mfchangedfiles = {}

        # Callback for the changelog, used to collect changed files and manifest
        # nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            clrevorder[x] = len(clrevorder)
            n = c[0]
            # record the first changeset introducing this manifest version
            mfs.setdefault(n, x)
            # Record a complete list of potentially-changed files in
            # this manifest.
            mfchangedfiles.setdefault(n, set()).update(c[3])
            return x

        self._verbosenote(_('uncompressed size of bundle content:\n'))
        size = 0
        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (changelog)\n') % size)

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the filelogs
        # are walked in revlog order.
        #
        # When taking the slowpath with reorder=None and the manifest revlog
        # uses generaldelta, the manifest may be walked in the "wrong" order.
        # Without 'clrevorder', we would get an incorrect linkrev (see fix in
        # cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta, so
        # it is only reordered when reorder=True. To handle this case, we
        # simply take the slowpath, which already has the 'clrevorder' logic.
        # This was also fixed in cc0ff93d0c0c.
        fastpathlinkrev = fastpathlinkrev and not self._reorder
        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
-        def lookupmflinknode(x):
-            """Callback for looking up the linknode for manifests.
+        if fastpathlinkrev:
+            lookupmflinknode = mfs.__getitem__
+        else:
+            def lookupmflinknode(x):
+                """Callback for looking up the linknode for manifests.

-            Returns the linkrev node for the specified manifest.
+                Returns the linkrev node for the specified manifest.

-            SIDE EFFECT:
+                SIDE EFFECT:

-              fclnodes gets populated with the list of relevant
-              file nodes if we're not using fastpathlinkrev.
+                  fclnodes gets populated with the list of relevant
+                  file nodes.

-            Note that this means you can't trust fclnodes until
-            after manifests have been sent to the client.
-            """
-            clnode = mfs[x]
-            if not fastpathlinkrev:
+                Note that this means you can't trust fclnodes until
+                after manifests have been sent to the client.
+                """
+                clnode = mfs[x]
                mdata = ml.readfast(x)
                for f in mfchangedfiles[x]:
                    try:
                        n = mdata[f]
                    except KeyError:
                        continue
                    # record the first changeset introducing this filelog
                    # version
                    fclnodes = fnodes.setdefault(f, {})
                    fclnode = fclnodes.setdefault(n, clnode)
                    if clrevorder[clnode] < clrevorder[fclnode]:
                        fclnodes[n] = clnode
-            return clnode
+                return clnode
687
689
688 mfnodes = self.prune(ml, mfs, commonrevs)
690 mfnodes = self.prune(ml, mfs, commonrevs)
689 for x in self._packmanifests(mfnodes, lookupmflinknode):
691 for x in self._packmanifests(mfnodes, lookupmflinknode):
690 yield x
692 yield x
691
693
692 mfs.clear()
694 mfs.clear()
693 clrevs = set(cl.rev(x) for x in clnodes)
695 clrevs = set(cl.rev(x) for x in clnodes)
694
696
695 def linknodes(filerevlog, fname):
697 def linknodes(filerevlog, fname):
696 if fastpathlinkrev:
698 if fastpathlinkrev:
697 llr = filerevlog.linkrev
699 llr = filerevlog.linkrev
698 def genfilenodes():
700 def genfilenodes():
699 for r in filerevlog:
701 for r in filerevlog:
700 linkrev = llr(r)
702 linkrev = llr(r)
701 if linkrev in clrevs:
703 if linkrev in clrevs:
702 yield filerevlog.node(r), cl.node(linkrev)
704 yield filerevlog.node(r), cl.node(linkrev)
703 return dict(genfilenodes())
705 return dict(genfilenodes())
704 return fnodes.get(fname, {})
706 return fnodes.get(fname, {})
705
707
706 changedfiles = set()
708 changedfiles = set()
707 for x in mfchangedfiles.itervalues():
709 for x in mfchangedfiles.itervalues():
708 changedfiles.update(x)
710 changedfiles.update(x)
709 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
711 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
710 source):
712 source):
711 yield chunk
713 yield chunk
712
714
713 yield self.close()
715 yield self.close()
714
716
715 if clnodes:
717 if clnodes:
716 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
718 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
717
719
718 # The 'source' parameter is useful for extensions
720 # The 'source' parameter is useful for extensions
719 def generatefiles(self, changedfiles, linknodes, commonrevs, source):
721 def generatefiles(self, changedfiles, linknodes, commonrevs, source):
720 repo = self._repo
722 repo = self._repo
721 progress = self._progress
723 progress = self._progress
722 msgbundling = _('bundling')
724 msgbundling = _('bundling')
723
725
724 total = len(changedfiles)
726 total = len(changedfiles)
725 # for progress output
727 # for progress output
726 msgfiles = _('files')
728 msgfiles = _('files')
727 for i, fname in enumerate(sorted(changedfiles)):
729 for i, fname in enumerate(sorted(changedfiles)):
728 filerevlog = repo.file(fname)
730 filerevlog = repo.file(fname)
729 if not filerevlog:
731 if not filerevlog:
730 raise error.Abort(_("empty or missing revlog for %s") % fname)
732 raise error.Abort(_("empty or missing revlog for %s") % fname)
731
733
732 linkrevnodes = linknodes(filerevlog, fname)
734 linkrevnodes = linknodes(filerevlog, fname)
733 # Lookup for filenodes, we collected the linkrev nodes above in the
735 # Lookup for filenodes, we collected the linkrev nodes above in the
734 # fastpath case and with lookupmf in the slowpath case.
736 # fastpath case and with lookupmf in the slowpath case.
735 def lookupfilelog(x):
737 def lookupfilelog(x):
736 return linkrevnodes[x]
738 return linkrevnodes[x]
737
739
738 filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
740 filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
739 if filenodes:
741 if filenodes:
740 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
742 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
741 total=total)
743 total=total)
742 h = self.fileheader(fname)
744 h = self.fileheader(fname)
743 size = len(h)
745 size = len(h)
744 yield h
746 yield h
745 for chunk in self.group(filenodes, filerevlog, lookupfilelog):
747 for chunk in self.group(filenodes, filerevlog, lookupfilelog):
746 size += len(chunk)
748 size += len(chunk)
747 yield chunk
749 yield chunk
748 self._verbosenote(_('%8.i %s\n') % (size, fname))
750 self._verbosenote(_('%8.i %s\n') % (size, fname))
749 progress(msgbundling, None)
751 progress(msgbundling, None)
750
752
751 def deltaparent(self, revlog, rev, p1, p2, prev):
753 def deltaparent(self, revlog, rev, p1, p2, prev):
752 return prev
754 return prev
753
755
754 def revchunk(self, revlog, rev, prev, linknode):
756 def revchunk(self, revlog, rev, prev, linknode):
755 node = revlog.node(rev)
757 node = revlog.node(rev)
756 p1, p2 = revlog.parentrevs(rev)
758 p1, p2 = revlog.parentrevs(rev)
757 base = self.deltaparent(revlog, rev, p1, p2, prev)
759 base = self.deltaparent(revlog, rev, p1, p2, prev)
758
760
759 prefix = ''
761 prefix = ''
760 if revlog.iscensored(base) or revlog.iscensored(rev):
762 if revlog.iscensored(base) or revlog.iscensored(rev):
761 try:
763 try:
762 delta = revlog.revision(node)
764 delta = revlog.revision(node)
763 except error.CensoredNodeError as e:
765 except error.CensoredNodeError as e:
764 delta = e.tombstone
766 delta = e.tombstone
765 if base == nullrev:
767 if base == nullrev:
766 prefix = mdiff.trivialdiffheader(len(delta))
768 prefix = mdiff.trivialdiffheader(len(delta))
767 else:
769 else:
768 baselen = revlog.rawsize(base)
770 baselen = revlog.rawsize(base)
769 prefix = mdiff.replacediffheader(baselen, len(delta))
771 prefix = mdiff.replacediffheader(baselen, len(delta))
770 elif base == nullrev:
772 elif base == nullrev:
771 delta = revlog.revision(node)
773 delta = revlog.revision(node)
772 prefix = mdiff.trivialdiffheader(len(delta))
774 prefix = mdiff.trivialdiffheader(len(delta))
773 else:
775 else:
774 delta = revlog.revdiff(base, rev)
776 delta = revlog.revdiff(base, rev)
775 p1n, p2n = revlog.parents(node)
777 p1n, p2n = revlog.parents(node)
776 basenode = revlog.node(base)
778 basenode = revlog.node(base)
777 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
779 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
778 meta += prefix
780 meta += prefix
779 l = len(meta) + len(delta)
781 l = len(meta) + len(delta)
780 yield chunkheader(l)
782 yield chunkheader(l)
781 yield meta
783 yield meta
782 yield delta
784 yield delta
783 def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
785 def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
784 # do nothing with basenode, it is implicitly the previous one in HG10
786 # do nothing with basenode, it is implicitly the previous one in HG10
785 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
787 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
786
788
787 class cg2packer(cg1packer):
789 class cg2packer(cg1packer):
788 version = '02'
790 version = '02'
789 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
791 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
790
792
791 def __init__(self, repo, bundlecaps=None):
793 def __init__(self, repo, bundlecaps=None):
792 super(cg2packer, self).__init__(repo, bundlecaps)
794 super(cg2packer, self).__init__(repo, bundlecaps)
793 if self._reorder is None:
795 if self._reorder is None:
794 # Since generaldelta is directly supported by cg2, reordering
796 # Since generaldelta is directly supported by cg2, reordering
795 # generally doesn't help, so we disable it by default (treating
797 # generally doesn't help, so we disable it by default (treating
796 # bundle.reorder=auto just like bundle.reorder=False).
798 # bundle.reorder=auto just like bundle.reorder=False).
797 self._reorder = False
799 self._reorder = False
798
800
799 def deltaparent(self, revlog, rev, p1, p2, prev):
801 def deltaparent(self, revlog, rev, p1, p2, prev):
800 dp = revlog.deltaparent(rev)
802 dp = revlog.deltaparent(rev)
801 # avoid storing full revisions; pick prev in those cases
803 # avoid storing full revisions; pick prev in those cases
802 # also pick prev when we can't be sure remote has dp
804 # also pick prev when we can't be sure remote has dp
803 if dp == nullrev or (dp != p1 and dp != p2 and dp != prev):
805 if dp == nullrev or (dp != p1 and dp != p2 and dp != prev):
804 return prev
806 return prev
805 return dp
807 return dp
806
808
807 def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
809 def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
808 return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
810 return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
809
811
810 packermap = {'01': (cg1packer, cg1unpacker),
812 packermap = {'01': (cg1packer, cg1unpacker),
811 # cg2 adds support for exchanging generaldelta
813 # cg2 adds support for exchanging generaldelta
812 '02': (cg2packer, cg2unpacker),
814 '02': (cg2packer, cg2unpacker),
813 }
815 }
814
816
815 def _changegroupinfo(repo, nodes, source):
817 def _changegroupinfo(repo, nodes, source):
816 if repo.ui.verbose or source == 'bundle':
818 if repo.ui.verbose or source == 'bundle':
817 repo.ui.status(_("%d changesets found\n") % len(nodes))
819 repo.ui.status(_("%d changesets found\n") % len(nodes))
818 if repo.ui.debugflag:
820 if repo.ui.debugflag:
819 repo.ui.debug("list of changesets:\n")
821 repo.ui.debug("list of changesets:\n")
820 for node in nodes:
822 for node in nodes:
821 repo.ui.debug("%s\n" % hex(node))
823 repo.ui.debug("%s\n" % hex(node))
822
824
823 def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
825 def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
824 repo = repo.unfiltered()
826 repo = repo.unfiltered()
825 commonrevs = outgoing.common
827 commonrevs = outgoing.common
826 csets = outgoing.missing
828 csets = outgoing.missing
827 heads = outgoing.missingheads
829 heads = outgoing.missingheads
828 # We go through the fast path if we get told to, or if all (unfiltered
830 # We go through the fast path if we get told to, or if all (unfiltered
829 # heads have been requested (since we then know there all linkrevs will
831 # heads have been requested (since we then know there all linkrevs will
830 # be pulled by the client).
832 # be pulled by the client).
831 heads.sort()
833 heads.sort()
832 fastpathlinkrev = fastpath or (
834 fastpathlinkrev = fastpath or (
833 repo.filtername is None and heads == sorted(repo.heads()))
835 repo.filtername is None and heads == sorted(repo.heads()))
834
836
835 repo.hook('preoutgoing', throw=True, source=source)
837 repo.hook('preoutgoing', throw=True, source=source)
836 _changegroupinfo(repo, csets, source)
838 _changegroupinfo(repo, csets, source)
837 return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
839 return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
838
840
839 def getsubset(repo, outgoing, bundler, source, fastpath=False):
841 def getsubset(repo, outgoing, bundler, source, fastpath=False):
840 gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
842 gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
841 return packermap[bundler.version][1](util.chunkbuffer(gengroup), None)
843 return packermap[bundler.version][1](util.chunkbuffer(gengroup), None)
842
844
def changegroupsubset(repo, roots, heads, source, version='01'):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the roots and ancestors of any of the heads.
    Return a changegroup unpacker whose read() method will return
    successive changegroup chunks.

    This is fairly complex, as determining which filenodes and which
    manifest nodes need to be included for each changeset to be
    complete is non-trivial.

    Another wrinkle is doing the reverse: figuring out which changeset
    in the changegroup a particular filenode or manifest node belongs to.
    """
    cl = repo.changelog
    if not roots:
        roots = [nullid]
    discbases = []
    for n in roots:
        discbases.extend([p for p in cl.parents(n) if p != nullid])
    # TODO: remove call to nodesbetween.
    csets, roots, heads = cl.nodesbetween(roots, heads)
    included = set(csets)
    discbases = [n for n in discbases if n not in included]
    outgoing = discovery.outgoing(cl, discbases, heads)
    bundler = packermap[version][0](repo)
    return getsubset(repo, outgoing, bundler, source)

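# A minimal usage sketch, assuming an empty roots list: roots=[] falls back
# to [nullid], so this bundles every changeset reachable from the current
# heads. The helper name and 'example' source tag are hypothetical.
def _example_fullsubset(repo):
    return changegroupsubset(repo, [], repo.heads(), 'example')
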
def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
                           version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing. Returns a raw changegroup generator."""
    if not outgoing.missing:
        return None
    bundler = packermap[version][0](repo, bundlecaps)
    return getsubsetraw(repo, outgoing, bundler, source)

def getlocalchangegroup(repo, source, outgoing, bundlecaps=None,
                        version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing."""
    if not outgoing.missing:
        return None
    bundler = packermap[version][0](repo, bundlecaps)
    return getsubset(repo, outgoing, bundler, source)

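# A sketch contrasting the two helpers above (hypothetical helper name):
# the raw variant returns a bare chunk generator, suitable for streaming,
# while getlocalchangegroup() wraps the same chunks in an unpacker for
# local consumption. Both return None when nothing is outgoing.
def _example_localcg(repo, outgoing):
    raw = getlocalchangegroupraw(repo, 'example', outgoing)
    cg = getlocalchangegroup(repo, 'example', outgoing)
    return raw, cg
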
def computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    nodes and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(cl, common, heads)

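# A hypothetical extension hook built on computeoutgoing(): count what a
# peer lacks before deciding whether a changegroup is worth building.
# commonnodes and headnodes stand in for nodes learned during discovery.
def _example_countmissing(repo, commonnodes, headnodes):
    outgoing = computeoutgoing(repo, headnodes, commonnodes)
    # outgoing.missing holds the changelog nodes absent from the peer
    return len(outgoing.missing)
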
def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None,
                   version='01'):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors of common.

    If heads is None, use the local heads. If common is None, use [nullid].

    The nodes in common might not all be known locally due to the way the
    current discovery protocol works.
    """
    outgoing = computeoutgoing(repo, heads, common)
    return getlocalchangegroup(repo, source, outgoing, bundlecaps=bundlecaps,
                               version=version)

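# A minimal sketch of the degenerate call (hypothetical helper name): with
# heads=None and common=None this computes all local heads against [nullid],
# i.e. a changegroup covering the whole repository, or None if it is empty.
def _example_fullchangegroup(repo):
    return getchangegroup(repo, 'example')
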
def changegroup(repo, basenodes, source):
    # to avoid a race we use changegroupsubset() (issue1320)
    return changegroupsubset(repo, basenodes, repo.heads(), source)

def _addchangegroupfiles(repo, source, revmap, trp, pr, needfiles, wasempty):
    revisions = 0
    files = 0
    while True:
        chunkdata = source.filelogheader()
        if not chunkdata:
            break
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        pr()
        fl = repo.file(f)
        o = len(fl)
        try:
            if not fl.addgroup(source, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        files += 1
        if f in needfiles:
            needs = needfiles[f]
            for new in xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    repo.ui.progress(_('files'), None)

    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files
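
# An illustrative model (invented placeholder values) of the needfiles
# bookkeeping above: a mapping of filename -> set of expected filenodes,
# drained as revisions arrive; a node outside the set would abort as
# spurious, and anything left over at the end is missing data.
def _example_needfiles():
    needfiles = {'a.txt': set(['node1', 'node2'])}
    for n in ['node1', 'node2']:
        needs = needfiles['a.txt']
        needs.remove(n)
        if not needs:
            del needfiles['a.txt']
    return needfiles  # empty: every expected filenode arrived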