##// END OF EJS Templates
changegroup.cg2packer: lookup 'group' via inheritance chain...
Siddharth Agarwal -
r23243:c5843268 default
parent child Browse files
Show More
@@ -1,831 +1,831
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import weakref
8 import weakref
9 from i18n import _
9 from i18n import _
10 from node import nullrev, nullid, hex, short
10 from node import nullrev, nullid, hex, short
11 import mdiff, util, dagutil
11 import mdiff, util, dagutil
12 import struct, os, bz2, zlib, tempfile
12 import struct, os, bz2, zlib, tempfile
13 import discovery, error, phases, branchmap
13 import discovery, error, phases, branchmap
14
14
15 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
15 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
16 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
16 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
17
17
def readexactly(stream, n):
    """Read exactly n bytes from stream.read, aborting on a short read."""
    data = stream.read(n)
    if len(data) < n:
        raise util.Abort(_("stream ended unexpectedly"
                           " (got %d bytes, expected %d)")
                         % (len(data), n))
    return data
26
26
def getchunk(stream):
    """Return the next length-prefixed chunk from stream as a string."""
    header = readexactly(stream, 4)
    length = struct.unpack(">l", header)[0]
    if length > 4:
        # the 4-byte prefix counts itself, so the payload is length - 4
        return readexactly(stream, length - 4)
    if length:
        raise util.Abort(_("invalid chunk length %d") % length)
    # a zero header marks the end of a chunk group
    return ""
36
36
def chunkheader(length):
    """Return a changegroup chunk header (string) for a payload of *length*.

    The encoded value includes the 4 bytes of the header itself.
    """
    return struct.pack(">l", 4 + length)
40
40
def closechunk():
    """Return the header (string) of a zero-length chunk, ending a group."""
    return struct.pack(">l", 0)
44
44
class nocompress(object):
    """Pass-through stand-in for a compressor object (no compression)."""
    def compress(self, x):
        # identity: data goes out exactly as it came in
        return x
    def flush(self):
        # nothing is buffered, so there is nothing left to emit
        return ""
50
50
# Map of bundle type name -> (on-disk header, compressor factory).
bundletypes = {
    # The empty type is only used when using unbundle on ssh and old http
    # servers: since the unification, ssh accepts a header but there
    # is no capability signaling it.
    "": ("", nocompress),
    "HG10UN": ("HG10UN", nocompress),
    "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
    "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
}

# hgweb uses this list to communicate its preferred type
bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
62
62
def writebundle(cg, filename, bundletype, vfs=None):
    """Write a bundle file and return its filename.

    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    bz2 compression can be turned off.
    The bundle file will be deleted in case of errors.
    """
    fh = None
    cleanup = None
    try:
        if filename:
            fh = vfs.open(filename, "wb") if vfs else open(filename, "wb")
        else:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, "wb")
        # from here on, remove the file again if anything goes wrong
        cleanup = filename

        header, compressor = bundletypes[bundletype]
        fh.write(header)
        z = compressor()

        # Parse the changegroup data, otherwise we will block in case of
        # sshrepo because we don't know the end of the stream.

        # An empty chunkgroup is the end of the changegroup; a changegroup
        # has at least 2 chunkgroups (changelog and manifest). After that,
        # an empty chunkgroup is the end of the changegroup.
        for chunk in cg.getchunks():
            fh.write(z.compress(chunk))
        fh.write(z.flush())
        cleanup = None  # success: keep the file
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
108
108
def decompressor(fh, alg):
    """Wrap the file-like *fh* so that reads yield decompressed data.

    alg is the two-letter compression tag from the bundle header:
    'UN' (none), 'GZ' (zlib) or 'BZ' (bz2). Aborts on unknown tags.
    """
    if alg == 'UN':
        return fh
    elif alg == 'GZ':
        def generator(f):
            zd = zlib.decompressobj()
            for chunk in util.filechunkiter(f):
                yield zd.decompress(chunk)
    elif alg == 'BZ':
        def generator(f):
            zd = bz2.BZ2Decompressor()
            # the caller already consumed the "BZ" magic from the stream;
            # re-feed it so the decompressor sees a well-formed header
            zd.decompress("BZ")
            for chunk in util.filechunkiter(f, 4096):
                yield zd.decompress(chunk)
    else:
        # wrap in _() for i18n, consistent with every other Abort message
        # in this module
        raise util.Abort(_("unknown bundle compression '%s'") % alg)
    return util.chunkbuffer(generator(fh))
126
126
class cg1unpacker(object):
    """Unpacker for version '01' (HG10) changegroup streams."""
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)

    def __init__(self, fh, alg):
        self._stream = decompressor(fh, alg)
        self._type = alg
        self.callback = None

    def compressed(self):
        return self._type != 'UN'

    def read(self, l):
        return self._stream.read(l)

    def seek(self, pos):
        return self._stream.seek(pos)

    def tell(self):
        return self._stream.tell()

    def close(self):
        return self._stream.close()

    def chunklength(self):
        """Read the next chunk header and return its payload length."""
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise util.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self.chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        # v10 has no explicit delta base: it is the previous node, or p1
        # for the first delta of a group
        node, p1, p2, cs = headertuple
        deltabase = p1 if prevnode is None else prevnode
        return node, p1, p2, deltabase, cs

    def deltachunk(self, prevnode):
        """Read one delta chunk; return {} at the end of the group."""
        l = self.chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                'deltabase': deltabase, 'delta': delta}

    def getchunks(self):
        """returns all the chunks contains in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parse the changegroup data, otherwise it will
        block in case of sshrepo because it don't know the end of the stream.
        """
        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, an empty chunkgroup is the end of the changegroup
        empty = False
        count = 0
        while not empty or count <= 2:
            empty = True
            count += 1
            while True:
                chunk = getchunk(self)
                if not chunk:
                    break
                empty = False
                yield chunkheader(len(chunk))
                # re-emit the payload in at most 1 MiB slices
                pos = 0
                while pos < len(chunk):
                    end = pos + 2 ** 20
                    yield chunk[pos:end]
                    pos = end
        yield closechunk()
218
218
class cg2unpacker(cg1unpacker):
    """Unpacker for version '02' changegroup streams.

    Differs from v1 only in the delta header, which carries an explicit
    delta base node instead of implying the previous node.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)

    def _deltaheader(self, headertuple, prevnode):
        # the base is explicit in the header, so prevnode is ignored
        node, p1, p2, deltabase, cs = headertuple
        return node, p1, p2, deltabase, cs
226
226
class headerlessfixup(object):
    """File-like wrapper that re-prepends an already-consumed header.

    Reads are served from the buffered header *h* first, then from the
    underlying file-like *fh*.
    """
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh

    def read(self, n):
        if not self._h:
            return readexactly(self._fh, n)
        data, self._h = self._h[:n], self._h[n:]
        if len(data) < n:
            # header exhausted mid-read: top up from the stream
            data += readexactly(self._fh, n - len(data))
        return data
238
238
class cg1packer(object):
    """Packer producing version '01' (HG10) changegroup streams."""
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER

    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle.
        """
        # Set of capabilities we can use to build the bundle.
        self._bundlecaps = set() if bundlecaps is None else bundlecaps
        self._changelog = repo.changelog
        self._manifest = repo.manifest
        # 'bundle.reorder' config: 'auto' -> None (let group() decide),
        # anything else is parsed as a boolean
        reorder = repo.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress

    def close(self):
        """Return the chunk terminating a group."""
        return closechunk()

    def fileheader(self, fname):
        """Return the header chunk announcing filelog *fname*."""
        return chunkheader(len(fname)) + fname

    def group(self, nodelist, revlog, lookup, units=None, reorder=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated, units
        specifies the type of revlog that is touched (changelog, manifest,
        etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if not nodelist:
            yield self.close()
            return

        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and reorder is not False) or reorder:
            dag = dagutil.revlogdag(revlog)
            revs = dag.linearize(set(revlog.rev(n) for n in nodelist))
        else:
            revs = sorted([revlog.rev(n) for n in nodelist])

        # add the parent of the first rev
        revs.insert(0, revlog.parentrevs(revs[0])[0])

        # build deltas: each entry is diffed against its predecessor in revs
        total = len(revs) - 1
        msgbundling = _('bundling')
        for i in xrange(total):
            if units is not None:
                self._progress(msgbundling, i + 1, unit=units, total=total)
            prev, curr = revs[i], revs[i + 1]
            linknode = lookup(revlog.node(curr))
            for c in self.revchunk(revlog, curr, prev, linknode):
                yield c

        yield self.close()

    def prune(self, revlog, missing, commonrevs, source):
        """Filter out any nodes whose linkrev the receiver already has."""
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        repo = self._repo
        cl = self._changelog
        mf = self._manifest
        reorder = self._reorder
        progress = self._progress

        # for progress output
        msgbundling = _('bundling')

        mfs = {}             # needed manifests
        fnodes = {}          # needed file nodes
        changedfiles = set()

        # Callback for the changelog, used to collect changed files and
        # manifest nodes. Returns the linkrev node (identity in the
        # changelog case).
        def lookupcl(x):
            c = cl.read(x)
            changedfiles.update(c[3])
            # record the first changeset introducing this manifest version
            mfs.setdefault(c[0], x)
            return x

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions. Returns the linkrev node (collected in lookupcl).
        def lookupmf(x):
            clnode = mfs[x]
            if not fastpathlinkrev:
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        # record the first changeset introducing this
                        # filelog version
                        fnodes[f].setdefault(n, clnode)
            return clnode

        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets'),
                                reorder=reorder):
            yield chunk
        progress(msgbundling, None)

        for f in changedfiles:
            fnodes[f] = {}
        mfnodes = self.prune(mf, mfs, commonrevs, source)
        for chunk in self.group(mfnodes, mf, lookupmf, units=_('manifests'),
                                reorder=reorder):
            yield chunk
        progress(msgbundling, None)

        mfs.clear()
        needed = set(cl.rev(x) for x in clnodes)

        def linknodes(filerevlog, fname):
            if fastpathlinkrev:
                # walk the filelog directly and map nodes through linkrevs
                llr = filerevlog.linkrev
                def genfilenodes():
                    for r in filerevlog:
                        linkrev = llr(r)
                        if linkrev in needed:
                            yield filerevlog.node(r), cl.node(linkrev)
                fnodes[fname] = dict(genfilenodes())
            return fnodes.get(fname, {})

        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()
        progress(msgbundling, None)

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        """Yield the filelog portion of the changegroup."""
        repo = self._repo
        progress = self._progress
        reorder = self._reorder
        msgbundling = _('bundling')

        total = len(changedfiles)
        # for progress output
        msgfiles = _('files')
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise util.Abort(_("empty or missing revlog for %s") % fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in
            # the fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs,
                                   source)
            if filenodes:
                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                         total=total)
                yield self.fileheader(fname)
                for chunk in self.group(filenodes, filerevlog, lookupfilelog,
                                        reorder=reorder):
                    yield chunk

    def deltaparent(self, revlog, rev, p1, p2, prev):
        # v1 always deltas against the previous rev in the stream
        return prev

    def revchunk(self, revlog, rev, prev, linknode):
        """Yield the chunks encoding one revision as a delta."""
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = self.deltaparent(revlog, rev, p1, p2, prev)

        prefix = ''
        if base == nullrev:
            # no usable base: send a full revision disguised as a delta
            delta = revlog.revision(node)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
        # do nothing with basenode, it is implicitly the previous one in HG10
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
447
447
class cg2packer(cg1packer):
    """Packer producing version '02' changegroups (explicit delta bases)."""

    deltaheader = _CHANGEGROUPV2_DELTA_HEADER

    def group(self, nodelist, revlog, lookup, units=None, reorder=None):
        if revlog._generaldelta and reorder is not True:
            # with explicit delta bases, reordering buys nothing for
            # generaldelta revlogs unless explicitly requested
            reorder = False
        # look up 'group' via the inheritance chain rather than hardcoding
        # cg1packer, so intermediate subclasses/mixins are respected
        return super(cg2packer, self).group(nodelist, revlog, lookup,
                                            units=units, reorder=reorder)

    def deltaparent(self, revlog, rev, p1, p2, prev):
        dp = revlog.deltaparent(rev)
        # avoid storing full revisions; pick prev in those cases
        # also pick prev when we can't be sure remote has dp
        if dp == nullrev or (dp != p1 and dp != p2 and dp != prev):
            return prev
        return dp

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
        # unlike v1, the delta base node is part of the header
        return struct.pack(self.deltaheader, node, p1n, p2n, basenode,
                           linknode)
468
468
# changegroup version -> (packer class, unpacker class)
packermap = {
    '01': (cg1packer, cg1unpacker),
    '02': (cg2packer, cg2unpacker),
}
471
471
def _changegroupinfo(repo, nodes, source):
    """Report how many changesets were found, and list them when debugging."""
    if repo.ui.verbose or source == 'bundle':
        repo.ui.status(_("%d changesets found\n") % len(nodes))
    if repo.ui.debugflag:
        repo.ui.debug("list of changesets:\n")
        for n in nodes:
            repo.ui.debug("%s\n" % hex(n))
479
479
def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
    """Run *bundler* over the outgoing set and return its raw chunk stream."""
    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know all linkrevs will be
    # pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
495
495
def getsubset(repo, outgoing, bundler, source, fastpath=False):
    """Bundle the outgoing subset and wrap it in an uncompressed unpacker."""
    chunks = getsubsetraw(repo, outgoing, bundler, source, fastpath)
    return cg1unpacker(util.chunkbuffer(chunks), 'UN')
499
499
def changegroupsubset(repo, roots, heads, source):
    """Build a changegroup of every node that descends from any root and
    is an ancestor of any head.

    Returns a chunkbuffer-backed unpacker whose read() method yields
    successive changegroup chunks.

    The hard part is working out which filenodes and manifest nodes are
    needed to make each changeset complete, and — in reverse — which
    changeset a given filenode or manifest node belongs to.
    """
    cl = repo.changelog
    if not roots:
        roots = [nullid]
    # TODO: remove call to nodesbetween.
    csets, roots, heads = cl.nodesbetween(roots, heads)
    discbases = []
    for root in roots:
        for parent in cl.parents(root):
            if parent != nullid:
                discbases.append(parent)
    outgoing = discovery.outgoing(cl, discbases, heads)
    return getsubset(repo, outgoing, cg1packer(repo), source)
524
524
def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
                           version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    Only implemented for local repos; reuses any precomputed sets carried
    by *outgoing*.  Returns a raw changegroup generator, or None when
    there is nothing to send.
    """
    if not outgoing.missing:
        return None
    # Pick the packer class for the requested changegroup version.
    packer = packermap[version][0]
    return getsubsetraw(repo, outgoing, packer(repo, bundlecaps), source)
535
535
def getlocalchangegroup(repo, source, outgoing, bundlecaps=None):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing.  Returns None when there is nothing
    to send.
    """
    if not outgoing.missing:
        return None
    # Consistency fix: look the packer up through packermap like
    # getlocalchangegroupraw does, rather than hard-coding cg1packer.
    # packermap['01'][0] is cg1packer, so behavior is unchanged.
    bundler = packermap['01'][0](repo, bundlecaps)
    return getsubset(repo, outgoing, bundler, source)
545
545
def _computeoutgoing(repo, heads, common):
    """Compute which revisions are outgoing for the given common/heads sets.

    Kept as a separate function so extensions can reuse the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        # Drop nodes the local changelog does not know about.
        common = filter(cl.hasnode, common)
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(cl, common, heads)
564
564
def getchangegroupraw(repo, source, heads=None, common=None, bundlecaps=None,
                      version='01'):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors of common.

    If heads is None, use the local heads.  If common is None, use [nullid].
    The default version, '01', produces a version 1 changegroup.

    The nodes in common might not all be known locally due to the way the
    current discovery protocol works.  Returns a raw changegroup generator.
    """
    outgoing = _computeoutgoing(repo, heads, common)
    return getlocalchangegroupraw(repo, source, outgoing,
                                  bundlecaps=bundlecaps, version=version)
580
580
def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors of common.

    If heads is None, use the local heads.  If common is None, use [nullid].

    The nodes in common might not all be known locally due to the way the
    current discovery protocol works.
    """
    return getlocalchangegroup(repo, source,
                               _computeoutgoing(repo, heads, common),
                               bundlecaps=bundlecaps)
592
592
def changegroup(repo, basenodes, source):
    """Bundle everything from basenodes up to the current heads.

    Delegates to changegroupsubset() to avoid a race (issue1320).
    """
    heads = repo.heads()
    return changegroupsubset(repo, basenodes, heads, source)
596
596
def addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
    """Consume per-file delta groups from *source* and add them to the repo.

    *pr* is a progress callback invoked once per file; *needfiles* maps
    filename -> set of file nodes that the incoming changesets require
    (may be empty when server-side validation is off).

    Returns (revisions, files): the number of file revisions and the
    number of files added.  Raises util.Abort on an empty group, a
    spurious entry, or missing required file data.
    """
    revisions = 0
    files = 0
    while True:
        chunkdata = source.filelogheader()
        if not chunkdata:
            break
        fname = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % fname)
        pr()
        fl = repo.file(fname)
        oldlen = len(fl)
        if not fl.addgroup(source, revmap, trp):
            raise util.Abort(_("received file revlog group is empty"))
        revisions += len(fl) - oldlen
        files += 1
        if fname in needfiles:
            needs = needfiles[fname]
            for newrev in xrange(oldlen, len(fl)):
                node = fl.node(newrev)
                if node not in needs:
                    raise util.Abort(
                        _("received spurious file revlog entry"))
                needs.remove(node)
            if not needs:
                del needfiles[fname]
    repo.ui.progress(_('files'), None)

    # Anything still listed in needfiles must already exist locally.
    for fname, needs in needfiles.iteritems():
        fl = repo.file(fname)
        for node in needs:
            try:
                fl.rev(node)
            except error.LookupError:
                raise util.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (fname, hex(node)))

    return revisions, files
637
637
def addchangegroup(repo, source, srctype, url, emptyok=False,
                   targetphase=phases.draft):
    """Add the changegroup returned by source.read() to this repo.
    srctype is a string like 'push', 'pull', or 'unbundle'. url is
    the URL of the repo where this changegroup is coming from.

    Return an integer summarizing the change to this repo:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - fewer heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    repo = repo.unfiltered()
    # Changelog linkrev callback: the incoming node is assigned rev len(cl).
    def csmap(x):
        repo.ui.debug("add changeset %s\n" % short(x))
        return len(cl)

    def revmap(x):
        return cl.rev(x)

    if not source:
        return 0

    changesets = files = revisions = 0
    efiles = set()  # files touched by the incoming changesets

    tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
    # The transaction could have been created before and already carries source
    # information. In this case we use the top level data. We overwrite the
    # argument because we need to use the top level value (if they exist) in
    # this function.
    srctype = tr.hookargs.setdefault('source', srctype)
    url = tr.hookargs.setdefault('url', url)

    # write changelog data to temp files so concurrent readers will not see
    # inconsistent view
    cl = repo.changelog
    cl.delayupdate(tr)
    oldheads = cl.heads()
    try:
        repo.hook('prechangegroup', throw=True, **tr.hookargs)

        # Weak proxy so the revlogs do not keep the transaction alive.
        trp = weakref.proxy(tr)
        # pull off the changeset group
        repo.ui.status(_("adding changesets\n"))
        clstart = len(cl)
        # Minimal progress callback. NOTE(review): the __call__ receiver is
        # named 'repo' but it is really 'self' (the prog instance) — the
        # class attributes (step, count, ui, total) are what get read here.
        class prog(object):
            step = _('changesets')
            count = 1
            ui = repo.ui
            total = None
            def __call__(repo):
                repo.ui.progress(repo.step, repo.count, unit=_('chunks'),
                                 total=repo.total)
                repo.count += 1
        pr = prog()
        source.callback = pr

        source.changelogheader()
        srccontent = cl.addgroup(source, csmap, trp)
        if not (srccontent or emptyok):
            raise util.Abort(_("received changelog group is empty"))
        clend = len(cl)
        changesets = clend - clstart
        for c in xrange(clstart, clend):
            efiles.update(repo[c].files())
        # Rebind efiles from the set to its size: only the count is needed
        # below (as the file-progress total).
        efiles = len(efiles)
        repo.ui.progress(_('changesets'), None)

        # pull off the manifest group
        repo.ui.status(_("adding manifests\n"))
        pr.step = _('manifests')
        pr.count = 1
        pr.total = changesets # manifests <= changesets
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        source.manifestheader()
        repo.manifest.addgroup(source, revmap, trp)
        repo.ui.progress(_('manifests'), None)

        needfiles = {}
        if repo.ui.configbool('server', 'validate', default=False):
            # validate incoming csets have their manifests
            for cset in xrange(clstart, clend):
                mfest = repo.changelog.read(repo.changelog.node(cset))[0]
                mfest = repo.manifest.readdelta(mfest)
                # store file nodes we must see
                for f, n in mfest.iteritems():
                    needfiles.setdefault(f, set()).add(n)

        # process the files
        repo.ui.status(_("adding file changes\n"))
        pr.step = _('files')
        pr.count = 1
        pr.total = efiles
        source.callback = None

        newrevs, newfiles = addchangegroupfiles(repo, source, revmap, trp, pr,
                                                needfiles)
        revisions += newrevs
        files += newfiles

        # Head delta, ignoring new heads that close a branch.
        dh = 0
        if oldheads:
            heads = cl.heads()
            dh = len(heads) - len(oldheads)
            for h in heads:
                if h not in oldheads and repo[h].closesbranch():
                    dh -= 1
        htext = ""
        if dh:
            htext = _(" (%+d heads)") % dh

        repo.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, htext))
        repo.invalidatevolatilesets()

        if changesets > 0:
            # Called lazily by the hook machinery: flush pending changelog
            # data and return the repo root (or "" when nothing was written).
            p = lambda: tr.writepending() and repo.root or ""
            if 'node' not in tr.hookargs:
                # Record the first new node on the transaction itself so
                # later hooks see it too.
                tr.hookargs['node'] = hex(cl.node(clstart))
                hookargs = dict(tr.hookargs)
            else:
                # The transaction already carries a 'node'; override only
                # our local copy for this hook invocation.
                hookargs = dict(tr.hookargs)
                hookargs['node'] = hex(cl.node(clstart))
            repo.hook('pretxnchangegroup', throw=True, pending=p, **hookargs)

        added = [cl.node(r) for r in xrange(clstart, clend)]
        publishing = repo.ui.configbool('phases', 'publish', True)
        if srctype in ('push', 'serve'):
            # Old servers can not push the boundary themselves.
            # New servers won't push the boundary if changeset already
            # exists locally as secret
            #
            # We should not use added here but the list of all change in
            # the bundle
            if publishing:
                phases.advanceboundary(repo, tr, phases.public, srccontent)
            else:
                # Those changesets have been pushed from the outside, their
                # phases are going to be pushed alongside. Therefor
                # `targetphase` is ignored.
                phases.advanceboundary(repo, tr, phases.draft, srccontent)
                phases.retractboundary(repo, tr, phases.draft, added)
        elif srctype != 'strip':
            # publishing only alter behavior during push
            #
            # strip should not touch boundary at all
            phases.retractboundary(repo, tr, targetphase, added)

        if changesets > 0:
            if srctype != 'strip':
                # During strip, branchcache is invalid but coming call to
                # `destroyed` will repair it.
                # In other case we can safely update cache on disk.
                branchmap.updatecache(repo.filtered('served'))

            def runhooks():
                # These hooks run when the lock releases, not when the
                # transaction closes. So it's possible for the changelog
                # to have changed since we last saw it.
                if clstart >= len(repo):
                    return

                # forcefully update the on-disk branch cache
                repo.ui.debug("updating the branch cache\n")
                repo.hook("changegroup", **hookargs)

                for n in added:
                    args = hookargs.copy()
                    args['node'] = hex(n)
                    repo.hook("incoming", **args)

                newheads = [h for h in repo.heads() if h not in oldheads]
                # NOTE(review): hex(c[:6]) hexlifies the first 6 raw bytes
                # (12 hex digits); hex(c)[:6] or short(c) may have been the
                # intent — confirm before relying on this log format.
                repo.ui.log("incoming",
                            "%s incoming changes - new heads: %s\n",
                            len(added),
                            ', '.join([hex(c[:6]) for c in newheads]))

            tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                            lambda: repo._afterlock(runhooks))

        tr.close()

    finally:
        tr.release()
    # never return 0 here:
    if dh < 0:
        return dh - 1
    else:
        return dh + 1
General Comments 0
You need to be logged in to leave comments. Login now