changegroup: fix file linkrevs during reorders (issue4462)...
Durham Goode
r23381:cc0ff93d stable
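
The core of the change is visible in lookupcl and lookupmf below: the packer now records the order in which changesets are bundled (clrevorder) and, when several changesets introduce the same file revision, points the filelog linkrev at whichever of them is sent first, so the receiver never gets a linkrev to a changeset it does not yet have. A minimal standalone sketch of that rule follows; the helper names are illustrative only, not Mercurial's API:

    # Sketch of the "earliest-sent changeset wins" linkrev rule; the
    # function names here are hypothetical, not Mercurial's API.
    clrevorder = {}  # changelog node -> position in the send order
    fnodes = {}      # filename -> {filenode: changelog node used as linkrev}

    def record_changeset(clnode):
        # called once per changeset, in the order changesets are bundled
        clrevorder[clnode] = len(clrevorder)

    def record_filenode(fname, fnode, clnode):
        # keep, per file revision, the changeset that is sent earliest
        fclnodes = fnodes.setdefault(fname, {})
        fclnode = fclnodes.setdefault(fnode, clnode)
        if clrevorder[clnode] < clrevorder[fclnode]:
            fclnodes[fnode] = clnode

    # a reordered bundle sends 'c2' before 'c1'; both carry the same filenode
    for clnode in ['c2', 'c1']:
        record_changeset(clnode)
    record_filenode('x', 'f1', 'c2')
    record_filenode('x', 'f1', 'c1')
    assert fnodes['x']['f1'] == 'c2'  # earliest-sent changeset wins

A plain setdefault would also keep the first caller, but the manifest walk may visit changesets in an order unrelated to the send order; the explicit clrevorder comparison makes the choice depend only on what the receiver actually sees first.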
@@ -1,764 +1,769 b''
 # changegroup.py - Mercurial changegroup manipulation functions
 #
 # Copyright 2006 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 import weakref
 from i18n import _
 from node import nullrev, nullid, hex, short
 import mdiff, util, dagutil
 import struct, os, bz2, zlib, tempfile
 import discovery, error, phases, branchmap

 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"

 def readexactly(stream, n):
     '''read n bytes from stream.read and abort if less was available'''
     s = stream.read(n)
     if len(s) < n:
         raise util.Abort(_("stream ended unexpectedly"
                            " (got %d bytes, expected %d)")
                          % (len(s), n))
     return s

 def getchunk(stream):
     """return the next chunk from stream as a string"""
     d = readexactly(stream, 4)
     l = struct.unpack(">l", d)[0]
     if l <= 4:
         if l:
             raise util.Abort(_("invalid chunk length %d") % l)
         return ""
     return readexactly(stream, l - 4)

 def chunkheader(length):
     """return a changegroup chunk header (string)"""
     return struct.pack(">l", length + 4)

 def closechunk():
     """return a changegroup chunk header (string) for a zero-length chunk"""
     return struct.pack(">l", 0)

 class nocompress(object):
     def compress(self, x):
         return x
     def flush(self):
         return ""

 bundletypes = {
     "": ("", nocompress), # only when using unbundle on ssh and old http servers
                           # since the unification ssh accepts a header but there
                           # is no capability signaling it.
     "HG10UN": ("HG10UN", nocompress),
     "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
     "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
 }

 # hgweb uses this list to communicate its preferred type
 bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']

 def writebundle(cg, filename, bundletype, vfs=None):
     """Write a bundle file and return its filename.

     Existing files will not be overwritten.
     If no filename is specified, a temporary file is created.
     bz2 compression can be turned off.
     The bundle file will be deleted in case of errors.
     """

     fh = None
     cleanup = None
     try:
         if filename:
             if vfs:
                 fh = vfs.open(filename, "wb")
             else:
                 fh = open(filename, "wb")
         else:
             fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
             fh = os.fdopen(fd, "wb")
         cleanup = filename

         header, compressor = bundletypes[bundletype]
         fh.write(header)
         z = compressor()

         # parse the changegroup data, otherwise we will block
         # in case of sshrepo because we don't know the end of the stream

         # an empty chunkgroup is the end of the changegroup
         # a changegroup has at least 2 chunkgroups (changelog and manifest).
         # after that, an empty chunkgroup is the end of the changegroup
         for chunk in cg.getchunks():
             fh.write(z.compress(chunk))
         fh.write(z.flush())
         cleanup = None
         return filename
     finally:
         if fh is not None:
             fh.close()
         if cleanup is not None:
             if filename and vfs:
                 vfs.unlink(cleanup)
             else:
                 os.unlink(cleanup)

 def decompressor(fh, alg):
     if alg == 'UN':
         return fh
     elif alg == 'GZ':
         def generator(f):
             zd = zlib.decompressobj()
             for chunk in util.filechunkiter(f):
                 yield zd.decompress(chunk)
     elif alg == 'BZ':
         def generator(f):
             zd = bz2.BZ2Decompressor()
             zd.decompress("BZ")
             for chunk in util.filechunkiter(f, 4096):
                 yield zd.decompress(chunk)
     else:
         raise util.Abort("unknown bundle compression '%s'" % alg)
     return util.chunkbuffer(generator(fh))

 class cg1unpacker(object):
     deltaheader = _CHANGEGROUPV1_DELTA_HEADER
     deltaheadersize = struct.calcsize(deltaheader)
     def __init__(self, fh, alg):
         self._stream = decompressor(fh, alg)
         self._type = alg
         self.callback = None
     def compressed(self):
         return self._type != 'UN'
     def read(self, l):
         return self._stream.read(l)
     def seek(self, pos):
         return self._stream.seek(pos)
     def tell(self):
         return self._stream.tell()
     def close(self):
         return self._stream.close()

     def chunklength(self):
         d = readexactly(self._stream, 4)
         l = struct.unpack(">l", d)[0]
         if l <= 4:
             if l:
                 raise util.Abort(_("invalid chunk length %d") % l)
             return 0
         if self.callback:
             self.callback()
         return l - 4

     def changelogheader(self):
         """v10 does not have a changelog header chunk"""
         return {}

     def manifestheader(self):
         """v10 does not have a manifest header chunk"""
         return {}

     def filelogheader(self):
         """return the header of the filelogs chunk, v10 only has the filename"""
         l = self.chunklength()
         if not l:
             return {}
         fname = readexactly(self._stream, l)
         return {'filename': fname}

     def _deltaheader(self, headertuple, prevnode):
         node, p1, p2, cs = headertuple
         if prevnode is None:
             deltabase = p1
         else:
             deltabase = prevnode
         return node, p1, p2, deltabase, cs

     def deltachunk(self, prevnode):
         l = self.chunklength()
         if not l:
             return {}
         headerdata = readexactly(self._stream, self.deltaheadersize)
         header = struct.unpack(self.deltaheader, headerdata)
         delta = readexactly(self._stream, l - self.deltaheadersize)
         node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
         return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                 'deltabase': deltabase, 'delta': delta}

     def getchunks(self):
         """returns all the chunks contained in the bundle

         Used when you need to forward the binary stream to a file or another
         network API. To do so, it parses the changegroup data; otherwise it
         would block in case of sshrepo because it doesn't know the end of the
         stream.
         """
         # an empty chunkgroup is the end of the changegroup
         # a changegroup has at least 2 chunkgroups (changelog and manifest).
         # after that, an empty chunkgroup is the end of the changegroup
         empty = False
         count = 0
         while not empty or count <= 2:
             empty = True
             count += 1
             while True:
                 chunk = getchunk(self)
                 if not chunk:
                     break
                 empty = False
                 yield chunkheader(len(chunk))
                 pos = 0
                 while pos < len(chunk):
                     next = pos + 2**20
                     yield chunk[pos:next]
                     pos = next
         yield closechunk()

 class headerlessfixup(object):
     def __init__(self, fh, h):
         self._h = h
         self._fh = fh
     def read(self, n):
         if self._h:
             d, self._h = self._h[:n], self._h[n:]
             if len(d) < n:
                 d += readexactly(self._fh, n - len(d))
             return d
         return readexactly(self._fh, n)

 class cg1packer(object):
     deltaheader = _CHANGEGROUPV1_DELTA_HEADER
     def __init__(self, repo, bundlecaps=None):
         """Given a source repo, construct a bundler.

         bundlecaps is optional and can be used to specify the set of
         capabilities which can be used to build the bundle.
         """
         # Set of capabilities we can use to build the bundle.
         if bundlecaps is None:
             bundlecaps = set()
         self._bundlecaps = bundlecaps
         self._changelog = repo.changelog
         self._manifest = repo.manifest
         reorder = repo.ui.config('bundle', 'reorder', 'auto')
         if reorder == 'auto':
             reorder = None
         else:
             reorder = util.parsebool(reorder)
         self._repo = repo
         self._reorder = reorder
         self._progress = repo.ui.progress
     def close(self):
         return closechunk()

     def fileheader(self, fname):
         return chunkheader(len(fname)) + fname

     def group(self, nodelist, revlog, lookup, units=None, reorder=None):
         """Calculate a delta group, yielding a sequence of changegroup chunks
         (strings).

         Given a list of changeset revs, return a set of deltas and
         metadata corresponding to nodes. The first delta is
         first parent(nodelist[0]) -> nodelist[0]; the receiver is
         guaranteed to have this parent as it has all history before
         these changesets. If the first parent is nullrev, the
         changegroup starts with a full revision.

         If units is not None, progress detail will be generated; units
         specifies the type of revlog that is touched (changelog, manifest,
         etc.).
         """
         # if we don't have any revisions touched by these changesets, bail
         if len(nodelist) == 0:
             yield self.close()
             return

         # for generaldelta revlogs, we linearize the revs; this will both be
         # much quicker and generate a much smaller bundle
         if (revlog._generaldelta and reorder is not False) or reorder:
             dag = dagutil.revlogdag(revlog)
             revs = set(revlog.rev(n) for n in nodelist)
             revs = dag.linearize(revs)
         else:
             revs = sorted([revlog.rev(n) for n in nodelist])

         # add the parent of the first rev
         p = revlog.parentrevs(revs[0])[0]
         revs.insert(0, p)

         # build deltas
         total = len(revs) - 1
         msgbundling = _('bundling')
         for r in xrange(len(revs) - 1):
             if units is not None:
                 self._progress(msgbundling, r + 1, unit=units, total=total)
             prev, curr = revs[r], revs[r + 1]
             linknode = lookup(revlog.node(curr))
             for c in self.revchunk(revlog, curr, prev, linknode):
                 yield c

         yield self.close()

     # filter any nodes that claim to be part of the known set
     def prune(self, revlog, missing, commonrevs, source):
         rr, rl = revlog.rev, revlog.linkrev
         return [n for n in missing if rl(rr(n)) not in commonrevs]

     def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
         '''yield a sequence of changegroup chunks (strings)'''
         repo = self._repo
         cl = self._changelog
         mf = self._manifest
         reorder = self._reorder
         progress = self._progress

         # for progress output
         msgbundling = _('bundling')

+        clrevorder = {}
         mfs = {} # needed manifests
         fnodes = {} # needed file nodes
         changedfiles = set()

         # Callback for the changelog, used to collect changed files and manifest
         # nodes.
         # Returns the linkrev node (identity in the changelog case).
         def lookupcl(x):
             c = cl.read(x)
+            clrevorder[x] = len(clrevorder)
             changedfiles.update(c[3])
             # record the first changeset introducing this manifest version
             mfs.setdefault(c[0], x)
             return x

         for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets'),
                                 reorder=reorder):
             yield chunk
         progress(msgbundling, None)

         # Callback for the manifest, used to collect linkrevs for filelog
         # revisions.
         # Returns the linkrev node (collected in lookupcl).
         def lookupmf(x):
             clnode = mfs[x]
-            if not fastpathlinkrev:
+            if not fastpathlinkrev or reorder:
                 mdata = mf.readfast(x)
                 for f, n in mdata.iteritems():
                     if f in changedfiles:
                         # record the first changeset introducing this filelog
                         # version
-                        fnodes.setdefault(f, {}).setdefault(n, clnode)
+                        fclnodes = fnodes.setdefault(f, {})
+                        fclnode = fclnodes.setdefault(n, clnode)
+                        if clrevorder[clnode] < clrevorder[fclnode]:
+                            fclnodes[n] = clnode
             return clnode

         mfnodes = self.prune(mf, mfs, commonrevs, source)
         for chunk in self.group(mfnodes, mf, lookupmf, units=_('manifests'),
                                 reorder=reorder):
             yield chunk
         progress(msgbundling, None)

         mfs.clear()
         needed = set(cl.rev(x) for x in clnodes)

         def linknodes(filerevlog, fname):
-            if fastpathlinkrev:
+            if fastpathlinkrev and not reorder:
                 llr = filerevlog.linkrev
                 def genfilenodes():
                     for r in filerevlog:
                         linkrev = llr(r)
                         if linkrev in needed:
                             yield filerevlog.node(r), cl.node(linkrev)
                 return dict(genfilenodes())
             return fnodes.get(fname, {})

         for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                         source):
             yield chunk

         yield self.close()
         progress(msgbundling, None)

         if clnodes:
             repo.hook('outgoing', node=hex(clnodes[0]), source=source)

     def generatefiles(self, changedfiles, linknodes, commonrevs, source):
         repo = self._repo
         progress = self._progress
         reorder = self._reorder
         msgbundling = _('bundling')

         total = len(changedfiles)
         # for progress output
         msgfiles = _('files')
         for i, fname in enumerate(sorted(changedfiles)):
             filerevlog = repo.file(fname)
             if not filerevlog:
                 raise util.Abort(_("empty or missing revlog for %s") % fname)

             linkrevnodes = linknodes(filerevlog, fname)
             # Lookup for filenodes, we collected the linkrev nodes above in the
             # fastpath case and with lookupmf in the slowpath case.
             def lookupfilelog(x):
                 return linkrevnodes[x]

             filenodes = self.prune(filerevlog, linkrevnodes, commonrevs, source)
             if filenodes:
                 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                          total=total)
                 yield self.fileheader(fname)
                 for chunk in self.group(filenodes, filerevlog, lookupfilelog,
                                         reorder=reorder):
                     yield chunk

     def revchunk(self, revlog, rev, prev, linknode):
         node = revlog.node(rev)
         p1, p2 = revlog.parentrevs(rev)
         base = prev

         prefix = ''
         if base == nullrev:
             delta = revlog.revision(node)
             prefix = mdiff.trivialdiffheader(len(delta))
         else:
             delta = revlog.revdiff(base, rev)
         p1n, p2n = revlog.parents(node)
         basenode = revlog.node(base)
         meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
         meta += prefix
         l = len(meta) + len(delta)
         yield chunkheader(l)
         yield meta
         yield delta
     def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
         # do nothing with basenode, it is implicitly the previous one in HG10
         return struct.pack(self.deltaheader, node, p1n, p2n, linknode)

 def _changegroupinfo(repo, nodes, source):
     if repo.ui.verbose or source == 'bundle':
         repo.ui.status(_("%d changesets found\n") % len(nodes))
     if repo.ui.debugflag:
         repo.ui.debug("list of changesets:\n")
         for node in nodes:
             repo.ui.debug("%s\n" % hex(node))

 def getsubset(repo, outgoing, bundler, source, fastpath=False):
     repo = repo.unfiltered()
     commonrevs = outgoing.common
     csets = outgoing.missing
     heads = outgoing.missingheads
     # We go through the fast path if we get told to, or if all (unfiltered)
     # heads have been requested (since we then know that all linkrevs will
     # be pulled by the client).
     heads.sort()
     fastpathlinkrev = fastpath or (
         repo.filtername is None and heads == sorted(repo.heads()))

     repo.hook('preoutgoing', throw=True, source=source)
     _changegroupinfo(repo, csets, source)
     gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
     return cg1unpacker(util.chunkbuffer(gengroup), 'UN')

 def changegroupsubset(repo, roots, heads, source):
     """Compute a changegroup consisting of all the nodes that are
     descendants of any of the roots and ancestors of any of the heads.
     Return a chunkbuffer object whose read() method will return
     successive changegroup chunks.

     It is fairly complex as determining which filenodes and which
     manifest nodes need to be included for the changeset to be complete
     is non-trivial.

     Another wrinkle is doing the reverse, figuring out which changeset in
     the changegroup a particular filenode or manifestnode belongs to.
     """
     cl = repo.changelog
     if not roots:
         roots = [nullid]
     # TODO: remove call to nodesbetween.
     csets, roots, heads = cl.nodesbetween(roots, heads)
     discbases = []
     for n in roots:
         discbases.extend([p for p in cl.parents(n) if p != nullid])
     outgoing = discovery.outgoing(cl, discbases, heads)
     bundler = cg1packer(repo)
     return getsubset(repo, outgoing, bundler, source)

 def getlocalchangegroup(repo, source, outgoing, bundlecaps=None):
     """Like getbundle, but taking a discovery.outgoing as an argument.

     This is only implemented for local repos and reuses potentially
     precomputed sets in outgoing."""
     if not outgoing.missing:
         return None
     bundler = cg1packer(repo, bundlecaps)
     return getsubset(repo, outgoing, bundler, source)

 def _computeoutgoing(repo, heads, common):
     """Computes which revs are outgoing given a set of common
     and a set of heads.

     This is a separate function so extensions can have access to
     the logic.

     Returns a discovery.outgoing object.
     """
     cl = repo.changelog
     if common:
         hasnode = cl.hasnode
         common = [n for n in common if hasnode(n)]
     else:
         common = [nullid]
     if not heads:
         heads = cl.heads()
     return discovery.outgoing(cl, common, heads)

 def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None):
     """Like changegroupsubset, but returns the set difference between the
     ancestors of heads and the ancestors of common.

     If heads is None, use the local heads. If common is None, use [nullid].

     The nodes in common might not all be known locally due to the way the
     current discovery protocol works.
     """
     outgoing = _computeoutgoing(repo, heads, common)
     return getlocalchangegroup(repo, source, outgoing, bundlecaps=bundlecaps)

 def changegroup(repo, basenodes, source):
     # to avoid a race we use changegroupsubset() (issue1320)
     return changegroupsubset(repo, basenodes, repo.heads(), source)

 def addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
     revisions = 0
     files = 0
     while True:
         chunkdata = source.filelogheader()
         if not chunkdata:
             break
         f = chunkdata["filename"]
         repo.ui.debug("adding %s revisions\n" % f)
         pr()
         fl = repo.file(f)
         o = len(fl)
         if not fl.addgroup(source, revmap, trp):
             raise util.Abort(_("received file revlog group is empty"))
         revisions += len(fl) - o
         files += 1
         if f in needfiles:
             needs = needfiles[f]
             for new in xrange(o, len(fl)):
                 n = fl.node(new)
                 if n in needs:
                     needs.remove(n)
                 else:
                     raise util.Abort(
                         _("received spurious file revlog entry"))
             if not needs:
                 del needfiles[f]
     repo.ui.progress(_('files'), None)

     for f, needs in needfiles.iteritems():
         fl = repo.file(f)
         for n in needs:
             try:
                 fl.rev(n)
             except error.LookupError:
                 raise util.Abort(
                     _('missing file data for %s:%s - run hg verify') %
                     (f, hex(n)))

     return revisions, files

 def addchangegroup(repo, source, srctype, url, emptyok=False,
                    targetphase=phases.draft):
     """Add the changegroup returned by source.read() to this repo.
     srctype is a string like 'push', 'pull', or 'unbundle'. url is
     the URL of the repo where this changegroup is coming from.

     Return an integer summarizing the change to this repo:
     - nothing changed or no source: 0
     - more heads than before: 1+added heads (2..n)
     - fewer heads than before: -1-removed heads (-2..-n)
     - number of heads stays the same: 1
     """
     repo = repo.unfiltered()
     def csmap(x):
         repo.ui.debug("add changeset %s\n" % short(x))
         return len(cl)

     def revmap(x):
         return cl.rev(x)

     if not source:
         return 0

     changesets = files = revisions = 0
     efiles = set()

     # write changelog data to temp files so concurrent readers will not see
     # an inconsistent view
     cl = repo.changelog
     cl.delayupdate()
     oldheads = cl.heads()

     tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
     # The transaction could have been created before and already carries source
     # information. In this case we use the top level data. We overwrite the
     # arguments because we need to use the top level values (if they exist) in
     # this function.
     srctype = tr.hookargs.setdefault('source', srctype)
     url = tr.hookargs.setdefault('url', url)
     try:
         repo.hook('prechangegroup', throw=True, **tr.hookargs)

         trp = weakref.proxy(tr)
         # pull off the changeset group
         repo.ui.status(_("adding changesets\n"))
         clstart = len(cl)
         class prog(object):
             step = _('changesets')
             count = 1
             ui = repo.ui
             total = None
             def __call__(repo):
                 repo.ui.progress(repo.step, repo.count, unit=_('chunks'),
                                  total=repo.total)
                 repo.count += 1
         pr = prog()
         source.callback = pr

         source.changelogheader()
         srccontent = cl.addgroup(source, csmap, trp)
         if not (srccontent or emptyok):
             raise util.Abort(_("received changelog group is empty"))
         clend = len(cl)
         changesets = clend - clstart
         for c in xrange(clstart, clend):
             efiles.update(repo[c].files())
         efiles = len(efiles)
         repo.ui.progress(_('changesets'), None)

         # pull off the manifest group
         repo.ui.status(_("adding manifests\n"))
         pr.step = _('manifests')
         pr.count = 1
         pr.total = changesets # manifests <= changesets
         # no need to check for empty manifest group here:
         # if the result of the merge of 1 and 2 is the same in 3 and 4,
         # no new manifest will be created and the manifest group will
         # be empty during the pull
         source.manifestheader()
         repo.manifest.addgroup(source, revmap, trp)
         repo.ui.progress(_('manifests'), None)

         needfiles = {}
         if repo.ui.configbool('server', 'validate', default=False):
             # validate incoming csets have their manifests
             for cset in xrange(clstart, clend):
                 mfest = repo.changelog.read(repo.changelog.node(cset))[0]
                 mfest = repo.manifest.readdelta(mfest)
                 # store file nodes we must see
                 for f, n in mfest.iteritems():
                     needfiles.setdefault(f, set()).add(n)

         # process the files
         repo.ui.status(_("adding file changes\n"))
         pr.step = _('files')
         pr.count = 1
         pr.total = efiles
         source.callback = None

         newrevs, newfiles = addchangegroupfiles(repo, source, revmap, trp, pr,
                                                 needfiles)
         revisions += newrevs
         files += newfiles

         dh = 0
         if oldheads:
             heads = cl.heads()
             dh = len(heads) - len(oldheads)
             for h in heads:
                 if h not in oldheads and repo[h].closesbranch():
                     dh -= 1
         htext = ""
         if dh:
             htext = _(" (%+d heads)") % dh

         repo.ui.status(_("added %d changesets"
                          " with %d changes to %d files%s\n")
                        % (changesets, revisions, files, htext))
         repo.invalidatevolatilesets()

         if changesets > 0:
             p = lambda: cl.writepending() and repo.root or ""
             if 'node' not in tr.hookargs:
                 tr.hookargs['node'] = hex(cl.node(clstart))
                 hookargs = dict(tr.hookargs)
             else:
                 hookargs = dict(tr.hookargs)
                 hookargs['node'] = hex(cl.node(clstart))
             repo.hook('pretxnchangegroup', throw=True, pending=p, **hookargs)

         added = [cl.node(r) for r in xrange(clstart, clend)]
         publishing = repo.ui.configbool('phases', 'publish', True)
         if srctype in ('push', 'serve'):
             # Old servers can not push the boundary themselves.
             # New servers won't push the boundary if changeset already
             # exists locally as secret
             #
             # We should not use added here but the list of all changes in
             # the bundle
             if publishing:
                 phases.advanceboundary(repo, tr, phases.public, srccontent)
             else:
                 # Those changesets have been pushed from the outside, their
                 # phases are going to be pushed alongside. Therefore
                 # `targetphase` is ignored.
                 phases.advanceboundary(repo, tr, phases.draft, srccontent)
                 phases.retractboundary(repo, tr, phases.draft, added)
         elif srctype != 'strip':
             # publishing only alters behavior during push
             #
             # strip should not touch boundary at all
             phases.retractboundary(repo, tr, targetphase, added)

         # make changelog see real files again
         cl.finalize(trp)

         tr.close()

         if changesets > 0:
             if srctype != 'strip':
                 # During strip, branchcache is invalid but the coming call to
                 # `destroyed` will repair it.
                 # In other cases we can safely update the cache on disk.
                 branchmap.updatecache(repo.filtered('served'))

             def runhooks():
                 # These hooks run when the lock releases, not when the
                 # transaction closes. So it's possible for the changelog
                 # to have changed since we last saw it.
                 if clstart >= len(repo):
                     return

                 # forcefully update the on-disk branch cache
                 repo.ui.debug("updating the branch cache\n")
                 repo.hook("changegroup", **hookargs)

                 for n in added:
                     args = hookargs.copy()
                     args['node'] = hex(n)
                     repo.hook("incoming", **args)

                 newheads = [h for h in repo.heads() if h not in oldheads]
                 repo.ui.log("incoming",
                             "%s incoming changes - new heads: %s\n",
                             len(added),
                             ', '.join([hex(c[:6]) for c in newheads]))
             repo._afterlock(runhooks)

     finally:
         tr.release()
     # never return 0 here:
     if dh < 0:
         return dh - 1
     else:
         return dh + 1
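
The companion change in linknodes above disables the linkrev fast path whenever reordering is active: reusing the sender's local filelog linkrevs is only sound when the receiver will end up numbering changelog revisions the same way, and reordering breaks that. A hedged sketch of the decision, with an illustrative helper that is not Mercurial's API (the regression test below exercises exactly this situation):

    # Illustrative only: when may a sender reuse its local filelog linkrevs?
    def use_linkrev_fastpath(all_heads_requested, reorder):
        # the fast path assumes the receiver's changelog numbering matches
        # the sender's; any reordering in transit invalidates that
        return all_heads_requested and not reorder

    assert use_linkrev_fastpath(True, False)       # full pull, no reorder
    assert not use_linkrev_fastpath(True, True)    # reorder: take slow path
    assert not use_linkrev_fastpath(False, False)  # partial pull: slow path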
@@ -1,24 +1,71 b''
 Check whether size of generaldelta revlog is not bigger than its
 regular equivalent. Test would fail if generaldelta were a naive
 implementation of parentdelta: the third manifest revision would be fully
 inserted due to the big distance from its parent revision (zero).

   $ hg init repo
   $ cd repo
   $ echo foo > foo
   $ echo bar > bar
   $ hg commit -q -Am boo
   $ hg clone --pull . ../gdrepo -q --config format.generaldelta=yes
   $ for r in 1 2 3; do
   > echo $r > foo
   > hg commit -q -m $r
   > hg up -q -r 0
   > hg pull . -q -r $r -R ../gdrepo
   > done

   $ cd ..
   >>> import os
   >>> regsize = os.stat("repo/.hg/store/00manifest.i").st_size
   >>> gdsize = os.stat("gdrepo/.hg/store/00manifest.i").st_size
   >>> if regsize < gdsize:
   ... print 'generaldata increased size of manifest'
+
+Verify rev reordering doesn't create invalid bundles (issue4462)
+This requires a commit tree that when pulled will reorder manifest revs such
+that the second manifest to create a file rev will be ordered before the first
+manifest to create that file rev. We also need to do a partial pull to ensure
+reordering happens. At the end we verify the linkrev points at the earliest
+commit.
+
+  $ hg init server --config format.generaldelta=True
+  $ cd server
+  $ touch a
+  $ hg commit -Aqm a
+  $ echo x > x
+  $ echo y > y
+  $ hg commit -Aqm xy
+  $ hg up -q '.^'
+  $ echo x > x
+  $ echo z > z
+  $ hg commit -Aqm xz
+  $ hg up -q 1
+  $ echo b > b
+  $ hg commit -Aqm b
+  $ hg merge -q 2
+  $ hg commit -Aqm merge
+  $ echo c > c
+  $ hg commit -Aqm c
+  $ hg log -G -T '{rev} {shortest(node)} {desc}'
+  @  5 ebb8 c
+  |
+  o    4 baf7 merge
+  |\
+  | o  3 a129 b
+  | |
+  o |  2 958c xz
+  | |
+  | o  1 f00c xy
+  |/
+  o  0 3903 a
+
+  $ cd ..
+  $ hg init client
+  $ cd client
+  $ hg pull -q ../server -r 4
+  $ hg debugindex x
+     rev    offset  length   base linkrev nodeid       p1           p2
+       0         0       3      0       1 1406e7411862 000000000000 000000000000
+