changegroup.group: drop 'reorder' parameter...
Martin von Zweigbergk
r24912:e285b98c default
@@ -1,898 +1,894 @@
 # changegroup.py - Mercurial changegroup manipulation functions
 #
 # Copyright 2006 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 import weakref
 from i18n import _
 from node import nullrev, nullid, hex, short
 import mdiff, util, dagutil
 import struct, os, bz2, zlib, tempfile
 import discovery, error, phases, branchmap

 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"

 def readexactly(stream, n):
     '''read n bytes from stream.read and abort if less was available'''
     s = stream.read(n)
     if len(s) < n:
         raise util.Abort(_("stream ended unexpectedly"
                            " (got %d bytes, expected %d)")
                          % (len(s), n))
     return s

 def getchunk(stream):
     """return the next chunk from stream as a string"""
     d = readexactly(stream, 4)
     l = struct.unpack(">l", d)[0]
     if l <= 4:
         if l:
             raise util.Abort(_("invalid chunk length %d") % l)
         return ""
     return readexactly(stream, l - 4)

 def chunkheader(length):
     """return a changegroup chunk header (string)"""
     return struct.pack(">l", length + 4)

 def closechunk():
     """return a changegroup chunk header (string) for a zero-length chunk"""
     return struct.pack(">l", 0)

 def combineresults(results):
     """logic to combine 0 or more addchangegroup results into one"""
     changedheads = 0
     result = 1
     for ret in results:
         # If any changegroup result is 0, return 0
         if ret == 0:
             result = 0
             break
         if ret < -1:
             changedheads += ret + 1
         elif ret > 1:
             changedheads += ret - 1
     if changedheads > 0:
         result = 1 + changedheads
     elif changedheads < 0:
         result = -1 + changedheads
     return result

 class nocompress(object):
     def compress(self, x):
         return x
     def flush(self):
         return ""

 bundletypes = {
     "": ("", nocompress), # only when using unbundle on ssh and old http servers
                           # since the unification ssh accepts a header but there
                           # is no capability signaling it.
     "HG20": (), # special-cased below
     "HG10UN": ("HG10UN", nocompress),
     "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
     "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
 }

 # hgweb uses this list to communicate its preferred type
 bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']

 def writebundle(ui, cg, filename, bundletype, vfs=None):
     """Write a bundle file and return its filename.

     Existing files will not be overwritten.
     If no filename is specified, a temporary file is created.
     bz2 compression can be turned off.
     The bundle file will be deleted in case of errors.
     """

     fh = None
     cleanup = None
     try:
         if filename:
             if vfs:
                 fh = vfs.open(filename, "wb")
             else:
                 fh = open(filename, "wb")
         else:
             fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
             fh = os.fdopen(fd, "wb")
         cleanup = filename

         if bundletype == "HG20":
             import bundle2
             bundle = bundle2.bundle20(ui)
             part = bundle.newpart('changegroup', data=cg.getchunks())
             part.addparam('version', cg.version)
             z = nocompress()
             chunkiter = bundle.getchunks()
         else:
             if cg.version != '01':
                 raise util.Abort(_('old bundle types only supports v1 '
                                    'changegroups'))
             header, compressor = bundletypes[bundletype]
             fh.write(header)
             z = compressor()
             chunkiter = cg.getchunks()

         # parse the changegroup data, otherwise we will block
         # in case of sshrepo because we don't know the end of the stream

         # an empty chunkgroup is the end of the changegroup
         # a changegroup has at least 2 chunkgroups (changelog and manifest).
         # after that, an empty chunkgroup is the end of the changegroup
         for chunk in chunkiter:
             fh.write(z.compress(chunk))
         fh.write(z.flush())
         cleanup = None
         return filename
     finally:
         if fh is not None:
             fh.close()
         if cleanup is not None:
             if filename and vfs:
                 vfs.unlink(cleanup)
             else:
                 os.unlink(cleanup)

 def decompressor(fh, alg):
     if alg == 'UN':
         return fh
     elif alg == 'GZ':
         def generator(f):
             zd = zlib.decompressobj()
             for chunk in util.filechunkiter(f):
                 yield zd.decompress(chunk)
     elif alg == 'BZ':
         def generator(f):
             zd = bz2.BZ2Decompressor()
             zd.decompress("BZ")
             for chunk in util.filechunkiter(f, 4096):
                 yield zd.decompress(chunk)
     else:
         raise util.Abort("unknown bundle compression '%s'" % alg)
     return util.chunkbuffer(generator(fh))

 class cg1unpacker(object):
     deltaheader = _CHANGEGROUPV1_DELTA_HEADER
     deltaheadersize = struct.calcsize(deltaheader)
     version = '01'
     def __init__(self, fh, alg):
         self._stream = decompressor(fh, alg)
         self._type = alg
         self.callback = None
     def compressed(self):
         return self._type != 'UN'
     def read(self, l):
         return self._stream.read(l)
     def seek(self, pos):
         return self._stream.seek(pos)
     def tell(self):
         return self._stream.tell()
     def close(self):
         return self._stream.close()

     def chunklength(self):
         d = readexactly(self._stream, 4)
         l = struct.unpack(">l", d)[0]
         if l <= 4:
             if l:
                 raise util.Abort(_("invalid chunk length %d") % l)
             return 0
         if self.callback:
             self.callback()
         return l - 4

     def changelogheader(self):
         """v10 does not have a changelog header chunk"""
         return {}

     def manifestheader(self):
         """v10 does not have a manifest header chunk"""
         return {}

     def filelogheader(self):
         """return the header of the filelogs chunk, v10 only has the filename"""
         l = self.chunklength()
         if not l:
             return {}
         fname = readexactly(self._stream, l)
         return {'filename': fname}

     def _deltaheader(self, headertuple, prevnode):
         node, p1, p2, cs = headertuple
         if prevnode is None:
             deltabase = p1
         else:
             deltabase = prevnode
         return node, p1, p2, deltabase, cs

     def deltachunk(self, prevnode):
         l = self.chunklength()
         if not l:
             return {}
         headerdata = readexactly(self._stream, self.deltaheadersize)
         header = struct.unpack(self.deltaheader, headerdata)
         delta = readexactly(self._stream, l - self.deltaheadersize)
         node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
         return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                 'deltabase': deltabase, 'delta': delta}

     def getchunks(self):
         """returns all the chunks contains in the bundle

         Used when you need to forward the binary stream to a file or another
         network API. To do so, it parse the changegroup data, otherwise it will
         block in case of sshrepo because it don't know the end of the stream.
         """
         # an empty chunkgroup is the end of the changegroup
         # a changegroup has at least 2 chunkgroups (changelog and manifest).
         # after that, an empty chunkgroup is the end of the changegroup
         empty = False
         count = 0
         while not empty or count <= 2:
             empty = True
             count += 1
             while True:
                 chunk = getchunk(self)
                 if not chunk:
                     break
                 empty = False
                 yield chunkheader(len(chunk))
                 pos = 0
                 while pos < len(chunk):
                     next = pos + 2**20
                     yield chunk[pos:next]
                     pos = next
             yield closechunk()

 class cg2unpacker(cg1unpacker):
     deltaheader = _CHANGEGROUPV2_DELTA_HEADER
     deltaheadersize = struct.calcsize(deltaheader)
     version = '02'

     def _deltaheader(self, headertuple, prevnode):
         node, p1, p2, deltabase, cs = headertuple
         return node, p1, p2, deltabase, cs

 class headerlessfixup(object):
     def __init__(self, fh, h):
         self._h = h
         self._fh = fh
     def read(self, n):
         if self._h:
             d, self._h = self._h[:n], self._h[n:]
             if len(d) < n:
                 d += readexactly(self._fh, n - len(d))
             return d
         return readexactly(self._fh, n)

 class cg1packer(object):
     deltaheader = _CHANGEGROUPV1_DELTA_HEADER
     version = '01'
     def __init__(self, repo, bundlecaps=None):
         """Given a source repo, construct a bundler.

         bundlecaps is optional and can be used to specify the set of
         capabilities which can be used to build the bundle.
         """
         # Set of capabilities we can use to build the bundle.
         if bundlecaps is None:
             bundlecaps = set()
         self._bundlecaps = bundlecaps
         self._changelog = repo.changelog
         self._manifest = repo.manifest
         reorder = repo.ui.config('bundle', 'reorder', 'auto')
         if reorder == 'auto':
             reorder = None
         else:
             reorder = util.parsebool(reorder)
         self._repo = repo
         self._reorder = reorder
         self._progress = repo.ui.progress
         if self._repo.ui.verbose and not self._repo.ui.debugflag:
             self._verbosenote = self._repo.ui.note
         else:
             self._verbosenote = lambda s: None

     def close(self):
         return closechunk()

     def fileheader(self, fname):
         return chunkheader(len(fname)) + fname

-    def group(self, nodelist, revlog, lookup, units=None, reorder=None):
+    def group(self, nodelist, revlog, lookup, units=None):
         """Calculate a delta group, yielding a sequence of changegroup chunks
         (strings).

         Given a list of changeset revs, return a set of deltas and
         metadata corresponding to nodes. The first delta is
         first parent(nodelist[0]) -> nodelist[0], the receiver is
         guaranteed to have this parent as it has all history before
         these changesets. In the case firstparent is nullrev the
         changegroup starts with a full revision.

         If units is not None, progress detail will be generated, units specifies
         the type of revlog that is touched (changelog, manifest, etc.).
         """
         # if we don't have any revisions touched by these changesets, bail
         if len(nodelist) == 0:
             yield self.close()
             return

         # for generaldelta revlogs, we linearize the revs; this will both be
         # much quicker and generate a much smaller bundle
-        if (revlog._generaldelta and reorder is None) or reorder:
+        if (revlog._generaldelta and self._reorder is None) or self._reorder:
             dag = dagutil.revlogdag(revlog)
             revs = set(revlog.rev(n) for n in nodelist)
             revs = dag.linearize(revs)
         else:
             revs = sorted([revlog.rev(n) for n in nodelist])

         # add the parent of the first rev
         p = revlog.parentrevs(revs[0])[0]
         revs.insert(0, p)

         # build deltas
         total = len(revs) - 1
         msgbundling = _('bundling')
         for r in xrange(len(revs) - 1):
             if units is not None:
                 self._progress(msgbundling, r + 1, unit=units, total=total)
             prev, curr = revs[r], revs[r + 1]
             linknode = lookup(revlog.node(curr))
             for c in self.revchunk(revlog, curr, prev, linknode):
                 yield c

         if units is not None:
             self._progress(msgbundling, None)
         yield self.close()

     # filter any nodes that claim to be part of the known set
     def prune(self, revlog, missing, commonrevs):
         rr, rl = revlog.rev, revlog.linkrev
         return [n for n in missing if rl(rr(n)) not in commonrevs]

     def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
         '''yield a sequence of changegroup chunks (strings)'''
         repo = self._repo
         cl = self._changelog
         ml = self._manifest
         reorder = self._reorder

         clrevorder = {}
         mfs = {} # needed manifests
         fnodes = {} # needed file nodes
         changedfiles = set()

         # Callback for the changelog, used to collect changed files and manifest
         # nodes.
         # Returns the linkrev node (identity in the changelog case).
         def lookupcl(x):
             c = cl.read(x)
             clrevorder[x] = len(clrevorder)
             changedfiles.update(c[3])
             # record the first changeset introducing this manifest version
             mfs.setdefault(c[0], x)
             return x

         self._verbosenote(_('uncompressed size of bundle content:\n'))
         size = 0
-        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets'),
-                                reorder=reorder):
+        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
             size += len(chunk)
             yield chunk
         self._verbosenote(_('%8.i (changelog)\n') % size)

         # Callback for the manifest, used to collect linkrevs for filelog
         # revisions.
         # Returns the linkrev node (collected in lookupcl).
         def lookupmf(x):
             clnode = mfs[x]
             if not fastpathlinkrev or reorder:
                 mdata = ml.readfast(x)
                 for f, n in mdata.iteritems():
                     if f in changedfiles:
                         # record the first changeset introducing this filelog
                         # version
                         fclnodes = fnodes.setdefault(f, {})
                         fclnode = fclnodes.setdefault(n, clnode)
                         if clrevorder[clnode] < clrevorder[fclnode]:
                             fclnodes[n] = clnode
             return clnode

         mfnodes = self.prune(ml, mfs, commonrevs)
         size = 0
-        for chunk in self.group(mfnodes, ml, lookupmf, units=_('manifests'),
-                                reorder=reorder):
+        for chunk in self.group(mfnodes, ml, lookupmf, units=_('manifests')):
             size += len(chunk)
             yield chunk
         self._verbosenote(_('%8.i (manifests)\n') % size)

         mfs.clear()
         clrevs = set(cl.rev(x) for x in clnodes)

         def linknodes(filerevlog, fname):
             if fastpathlinkrev and not reorder:
                 llr = filerevlog.linkrev
                 def genfilenodes():
                     for r in filerevlog:
                         linkrev = llr(r)
                         if linkrev in clrevs:
                             yield filerevlog.node(r), cl.node(linkrev)
                 return dict(genfilenodes())
             return fnodes.get(fname, {})

         for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                         source):
             yield chunk

         yield self.close()

         if clnodes:
             repo.hook('outgoing', node=hex(clnodes[0]), source=source)

     # The 'source' parameter is useful for extensions
     def generatefiles(self, changedfiles, linknodes, commonrevs, source):
         repo = self._repo
         progress = self._progress
-        reorder = self._reorder
         msgbundling = _('bundling')

         total = len(changedfiles)
         # for progress output
         msgfiles = _('files')
         for i, fname in enumerate(sorted(changedfiles)):
             filerevlog = repo.file(fname)
             if not filerevlog:
                 raise util.Abort(_("empty or missing revlog for %s") % fname)

             linkrevnodes = linknodes(filerevlog, fname)
             # Lookup for filenodes, we collected the linkrev nodes above in the
             # fastpath case and with lookupmf in the slowpath case.
             def lookupfilelog(x):
                 return linkrevnodes[x]

             filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
             if filenodes:
                 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                          total=total)
                 h = self.fileheader(fname)
                 size = len(h)
                 yield h
-                for chunk in self.group(filenodes, filerevlog, lookupfilelog,
-                                        reorder=reorder):
+                for chunk in self.group(filenodes, filerevlog, lookupfilelog):
                     size += len(chunk)
                     yield chunk
                 self._verbosenote(_('%8.i %s\n') % (size, fname))
         progress(msgbundling, None)

     def deltaparent(self, revlog, rev, p1, p2, prev):
         return prev

     def revchunk(self, revlog, rev, prev, linknode):
         node = revlog.node(rev)
         p1, p2 = revlog.parentrevs(rev)
         base = self.deltaparent(revlog, rev, p1, p2, prev)

         prefix = ''
         if revlog.iscensored(base) or revlog.iscensored(rev):
             try:
                 delta = revlog.revision(node)
             except error.CensoredNodeError, e:
                 delta = e.tombstone
             if base == nullrev:
                 prefix = mdiff.trivialdiffheader(len(delta))
             else:
                 baselen = revlog.rawsize(base)
                 prefix = mdiff.replacediffheader(baselen, len(delta))
         elif base == nullrev:
             delta = revlog.revision(node)
             prefix = mdiff.trivialdiffheader(len(delta))
         else:
             delta = revlog.revdiff(base, rev)
         p1n, p2n = revlog.parents(node)
         basenode = revlog.node(base)
         meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
         meta += prefix
         l = len(meta) + len(delta)
         yield chunkheader(l)
         yield meta
         yield delta
     def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
         # do nothing with basenode, it is implicitly the previous one in HG10
         return struct.pack(self.deltaheader, node, p1n, p2n, linknode)

 class cg2packer(cg1packer):
     version = '02'
     deltaheader = _CHANGEGROUPV2_DELTA_HEADER

     def __init__(self, repo, bundlecaps=None):
         super(cg2packer, self).__init__(repo, bundlecaps)
         if self._reorder is None:
             # Since generaldelta is directly supported by cg2, reordering
             # generally doesn't help, so we disable it by default (treating
             # bundle.reorder=auto just like bundle.reorder=False).
             self._reorder = False

     def deltaparent(self, revlog, rev, p1, p2, prev):
         dp = revlog.deltaparent(rev)
         # avoid storing full revisions; pick prev in those cases
         # also pick prev when we can't be sure remote has dp
         if dp == nullrev or (dp != p1 and dp != p2 and dp != prev):
             return prev
         return dp

     def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
         return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)

 packermap = {'01': (cg1packer, cg1unpacker),
              '02': (cg2packer, cg2unpacker)}

 def _changegroupinfo(repo, nodes, source):
     if repo.ui.verbose or source == 'bundle':
         repo.ui.status(_("%d changesets found\n") % len(nodes))
     if repo.ui.debugflag:
         repo.ui.debug("list of changesets:\n")
         for node in nodes:
             repo.ui.debug("%s\n" % hex(node))

 def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
     repo = repo.unfiltered()
     commonrevs = outgoing.common
     csets = outgoing.missing
     heads = outgoing.missingheads
     # We go through the fast path if we get told to, or if all (unfiltered
     # heads have been requested (since we then know there all linkrevs will
     # be pulled by the client).
     heads.sort()
     fastpathlinkrev = fastpath or (
         repo.filtername is None and heads == sorted(repo.heads()))

     repo.hook('preoutgoing', throw=True, source=source)
     _changegroupinfo(repo, csets, source)
     return bundler.generate(commonrevs, csets, fastpathlinkrev, source)

 def getsubset(repo, outgoing, bundler, source, fastpath=False, version='01'):
     gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
     return packermap[version][1](util.chunkbuffer(gengroup), 'UN')

 def changegroupsubset(repo, roots, heads, source, version='01'):
     """Compute a changegroup consisting of all the nodes that are
     descendants of any of the roots and ancestors of any of the heads.
     Return a chunkbuffer object whose read() method will return
     successive changegroup chunks.

     It is fairly complex as determining which filenodes and which
     manifest nodes need to be included for the changeset to be complete
     is non-trivial.

     Another wrinkle is doing the reverse, figuring out which changeset in
     the changegroup a particular filenode or manifestnode belongs to.
     """
     cl = repo.changelog
     if not roots:
         roots = [nullid]
     # TODO: remove call to nodesbetween.
     csets, roots, heads = cl.nodesbetween(roots, heads)
     discbases = []
     for n in roots:
         discbases.extend([p for p in cl.parents(n) if p != nullid])
     outgoing = discovery.outgoing(cl, discbases, heads)
     bundler = packermap[version][0](repo)
     return getsubset(repo, outgoing, bundler, source, version=version)

 def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
                            version='01'):
     """Like getbundle, but taking a discovery.outgoing as an argument.

     This is only implemented for local repos and reuses potentially
     precomputed sets in outgoing. Returns a raw changegroup generator."""
     if not outgoing.missing:
         return None
     bundler = packermap[version][0](repo, bundlecaps)
     return getsubsetraw(repo, outgoing, bundler, source)

 def getlocalchangegroup(repo, source, outgoing, bundlecaps=None):
     """Like getbundle, but taking a discovery.outgoing as an argument.

     This is only implemented for local repos and reuses potentially
     precomputed sets in outgoing."""
     if not outgoing.missing:
         return None
     bundler = cg1packer(repo, bundlecaps)
     return getsubset(repo, outgoing, bundler, source)

 def _computeoutgoing(repo, heads, common):
     """Computes which revs are outgoing given a set of common
     and a set of heads.

     This is a separate function so extensions can have access to
     the logic.

     Returns a discovery.outgoing object.
     """
     cl = repo.changelog
     if common:
         hasnode = cl.hasnode
         common = [n for n in common if hasnode(n)]
     else:
         common = [nullid]
     if not heads:
         heads = cl.heads()
     return discovery.outgoing(cl, common, heads)

 def getchangegroupraw(repo, source, heads=None, common=None, bundlecaps=None,
                       version='01'):
     """Like changegroupsubset, but returns the set difference between the
     ancestors of heads and the ancestors common.

     If heads is None, use the local heads. If common is None, use [nullid].

     If version is None, use a version '1' changegroup.

     The nodes in common might not all be known locally due to the way the
     current discovery protocol works. Returns a raw changegroup generator.
     """
     outgoing = _computeoutgoing(repo, heads, common)
     return getlocalchangegroupraw(repo, source, outgoing, bundlecaps=bundlecaps,
                                   version=version)

 def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None):
     """Like changegroupsubset, but returns the set difference between the
     ancestors of heads and the ancestors common.

     If heads is None, use the local heads. If common is None, use [nullid].

     The nodes in common might not all be known locally due to the way the
     current discovery protocol works.
     """
     outgoing = _computeoutgoing(repo, heads, common)
     return getlocalchangegroup(repo, source, outgoing, bundlecaps=bundlecaps)

 def changegroup(repo, basenodes, source):
     # to avoid a race we use changegroupsubset() (issue1320)
     return changegroupsubset(repo, basenodes, repo.heads(), source)

 def addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
     revisions = 0
     files = 0
     while True:
         chunkdata = source.filelogheader()
         if not chunkdata:
             break
         f = chunkdata["filename"]
         repo.ui.debug("adding %s revisions\n" % f)
         pr()
         fl = repo.file(f)
         o = len(fl)
         try:
             if not fl.addgroup(source, revmap, trp):
                 raise util.Abort(_("received file revlog group is empty"))
         except error.CensoredBaseError, e:
             raise util.Abort(_("received delta base is censored: %s") % e)
         revisions += len(fl) - o
         files += 1
         if f in needfiles:
             needs = needfiles[f]
             for new in xrange(o, len(fl)):
                 n = fl.node(new)
                 if n in needs:
                     needs.remove(n)
                 else:
                     raise util.Abort(
                         _("received spurious file revlog entry"))
             if not needs:
                 del needfiles[f]
     repo.ui.progress(_('files'), None)

     for f, needs in needfiles.iteritems():
         fl = repo.file(f)
         for n in needs:
             try:
                 fl.rev(n)
             except error.LookupError:
                 raise util.Abort(
                     _('missing file data for %s:%s - run hg verify') %
                     (f, hex(n)))

     return revisions, files

 def addchangegroup(repo, source, srctype, url, emptyok=False,
                    targetphase=phases.draft):
     """Add the changegroup returned by source.read() to this repo.
     srctype is a string like 'push', 'pull', or 'unbundle'. url is
     the URL of the repo where this changegroup is coming from.

     Return an integer summarizing the change to this repo:
     - nothing changed or no source: 0
     - more heads than before: 1+added heads (2..n)
     - fewer heads than before: -1-removed heads (-2..-n)
     - number of heads stays the same: 1
     """
     repo = repo.unfiltered()
     def csmap(x):
         repo.ui.debug("add changeset %s\n" % short(x))
         return len(cl)

     def revmap(x):
         return cl.rev(x)

     if not source:
         return 0

     changesets = files = revisions = 0
     efiles = set()

     tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
     # The transaction could have been created before and already carries source
     # information. In this case we use the top level data. We overwrite the
     # argument because we need to use the top level value (if they exist) in
     # this function.
     srctype = tr.hookargs.setdefault('source', srctype)
     url = tr.hookargs.setdefault('url', url)

     # write changelog data to temp files so concurrent readers will not see
     # inconsistent view
     cl = repo.changelog
     cl.delayupdate(tr)
     oldheads = cl.heads()
     try:
         repo.hook('prechangegroup', throw=True, **tr.hookargs)

         trp = weakref.proxy(tr)
         # pull off the changeset group
         repo.ui.status(_("adding changesets\n"))
         clstart = len(cl)
         class prog(object):
             step = _('changesets')
             count = 1
             ui = repo.ui
             total = None
             def __call__(repo):
                 repo.ui.progress(repo.step, repo.count, unit=_('chunks'),
                                  total=repo.total)
                 repo.count += 1
         pr = prog()
         source.callback = pr

         source.changelogheader()
         srccontent = cl.addgroup(source, csmap, trp)
         if not (srccontent or emptyok):
             raise util.Abort(_("received changelog group is empty"))
         clend = len(cl)
         changesets = clend - clstart
         for c in xrange(clstart, clend):
             efiles.update(repo[c].files())
         efiles = len(efiles)
         repo.ui.progress(_('changesets'), None)

         # pull off the manifest group
         repo.ui.status(_("adding manifests\n"))
         pr.step = _('manifests')
         pr.count = 1
         pr.total = changesets # manifests <= changesets
         # no need to check for empty manifest group here:
         # if the result of the merge of 1 and 2 is the same in 3 and 4,
         # no new manifest will be created and the manifest group will
         # be empty during the pull
         source.manifestheader()
         repo.manifest.addgroup(source, revmap, trp)
         repo.ui.progress(_('manifests'), None)

         needfiles = {}
         if repo.ui.configbool('server', 'validate', default=False):
             # validate incoming csets have their manifests
             for cset in xrange(clstart, clend):
                 mfnode = repo.changelog.read(repo.changelog.node(cset))[0]
                 mfest = repo.manifest.readdelta(mfnode)
                 # store file nodes we must see
                 for f, n in mfest.iteritems():
                     needfiles.setdefault(f, set()).add(n)

         # process the files
         repo.ui.status(_("adding file changes\n"))
         pr.step = _('files')
         pr.count = 1
         pr.total = efiles
         source.callback = None

         newrevs, newfiles = addchangegroupfiles(repo, source, revmap, trp, pr,
                                                 needfiles)
         revisions += newrevs
         files += newfiles

         dh = 0
         if oldheads:
             heads = cl.heads()
             dh = len(heads) - len(oldheads)
             for h in heads:
                 if h not in oldheads and repo[h].closesbranch():
                     dh -= 1
         htext = ""
         if dh:
             htext = _(" (%+d heads)") % dh

         repo.ui.status(_("added %d changesets"
                          " with %d changes to %d files%s\n")
                        % (changesets, revisions, files, htext))
         repo.invalidatevolatilesets()

         if changesets > 0:
             p = lambda: tr.writepending() and repo.root or ""
             if 'node' not in tr.hookargs:
                 tr.hookargs['node'] = hex(cl.node(clstart))
                 hookargs = dict(tr.hookargs)
             else:
                 hookargs = dict(tr.hookargs)
                 hookargs['node'] = hex(cl.node(clstart))
             repo.hook('pretxnchangegroup', throw=True, pending=p, **hookargs)

         added = [cl.node(r) for r in xrange(clstart, clend)]
         publishing = repo.ui.configbool('phases', 'publish', True)
         if srctype in ('push', 'serve'):
             # Old servers can not push the boundary themselves.
             # New servers won't push the boundary if changeset already
             # exists locally as secret
             #
             # We should not use added here but the list of all change in
             # the bundle
             if publishing:
                 phases.advanceboundary(repo, tr, phases.public, srccontent)
             else:
                 # Those changesets have been pushed from the outside, their
                 # phases are going to be pushed alongside. Therefor
                 # `targetphase` is ignored.
                 phases.advanceboundary(repo, tr, phases.draft, srccontent)
                 phases.retractboundary(repo, tr, phases.draft, added)
         elif srctype != 'strip':
             # publishing only alter behavior during push
             #
             # strip should not touch boundary at all
             phases.retractboundary(repo, tr, targetphase, added)

         if changesets > 0:
             if srctype != 'strip':
                 # During strip, branchcache is invalid but coming call to
                 # `destroyed` will repair it.
                 # In other case we can safely update cache on disk.
                 branchmap.updatecache(repo.filtered('served'))

             def runhooks():
                 # These hooks run when the lock releases, not when the
                 # transaction closes. So it's possible for the changelog
                 # to have changed since we last saw it.
                 if clstart >= len(repo):
                     return

                 # forcefully update the on-disk branch cache
                 repo.ui.debug("updating the branch cache\n")
                 repo.hook("changegroup", **hookargs)

                 for n in added:
                     args = hookargs.copy()
                     args['node'] = hex(n)
                     repo.hook("incoming", **args)

                 newheads = [h for h in repo.heads() if h not in oldheads]
                 repo.ui.log("incoming",
                             "%s incoming changes - new heads: %s\n",
                             len(added),
                             ', '.join([hex(c[:6]) for c in newheads]))

             tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                             lambda tr: repo._afterlock(runhooks))

         tr.close()

     finally:
         tr.release()
         repo.ui.flush()
     # never return 0 here:
     if dh < 0:
         return dh - 1
     else:
         return dh + 1
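The change itself is a small refactoring: group() previously took a reorder argument that every caller filled in from self._reorder, and it now reads the attribute directly, which also lets generatefiles() drop its local copy. The value is computed once in the constructors: cg1packer.__init__ maps the bundle.reorder config to None (for 'auto') or a boolean via util.parsebool, and cg2packer.__init__ downgrades None to False because cg2 supports generaldelta natively. Below is a minimal sketch of that resolution; parsebool is a simplified stand-in for util.parsebool, and the resolvereorder name is illustrative, not part of changegroup.py.

def parsebool(s):
    # simplified stand-in for util.parsebool; the real one accepts more
    # spellings and returns None for unrecognized input
    return {'1': True, 'yes': True, 'true': True, 'on': True,
            '0': False, 'no': False, 'false': False, 'off': False}.get(s.lower())

def resolvereorder(configvalue, cg2=False):
    # cg1packer.__init__: 'auto' becomes None, which group() interprets as
    # "linearize iff the revlog uses generaldelta"
    reorder = None if configvalue == 'auto' else parsebool(configvalue)
    # cg2packer.__init__: cg2 carries delta bases explicitly, so 'auto'
    # is treated just like False
    if cg2 and reorder is None:
        reorder = False
    return reorder

assert resolvereorder('auto') is None            # cg1: decided per revlog in group()
assert resolvereorder('auto', cg2=True) is False
assert resolvereorder('yes', cg2=True) is True
assert resolvereorder('no') is False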
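For background, the chunk framing that chunkheader(), getchunk(), and closechunk() implement is a plain length-prefixed format: every chunk starts with a 4-byte big-endian length that counts the length field itself, and a zero length marks the end of a chunk group. A standalone round-trip sketch of that framing follows; frame and unframe are illustrative names, not part of this module.

import struct
from io import BytesIO

def frame(payload):
    # mirrors chunkheader(): the length includes the 4 header bytes
    return struct.pack(">l", len(payload) + 4) + payload

def unframe(stream):
    # mirrors getchunk(): an empty string signals the end of a chunk group
    length = struct.unpack(">l", stream.read(4))[0]
    if length <= 4:
        if length:
            raise ValueError("invalid chunk length %d" % length)
        return b""
    return stream.read(length - 4)

buf = BytesIO(frame(b"hello") + frame(b"world") + struct.pack(">l", 0))
chunks = []
while True:
    chunk = unframe(buf)
    if not chunk:
        break
    chunks.append(chunk)
assert chunks == [b"hello", b"world"]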
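Finally, combineresults() folds the per-changegroup return codes documented in addchangegroup() (0 on failure; 1 when the head count is unchanged; 1+n when n heads were added; -1-n when n were removed) into one code of the same shape. The function body below is copied verbatim from the module so its arithmetic can be exercised; the assertions are an illustration of mine, not part of changegroup.py.

def combineresults(results):
    changedheads = 0
    result = 1
    for ret in results:
        # If any changegroup result is 0, return 0
        if ret == 0:
            result = 0
            break
        if ret < -1:
            changedheads += ret + 1
        elif ret > 1:
            changedheads += ret - 1
    if changedheads > 0:
        result = 1 + changedheads
    elif changedheads < 0:
        result = -1 + changedheads
    return result

assert combineresults([]) == 1          # nothing applied: success, heads unchanged
assert combineresults([3, 2]) == 4      # 2 + 1 = 3 heads added in total
assert combineresults([3, -2]) == 2     # 2 added, 1 removed: net +1 head
assert combineresults([-3]) == -3       # 2 heads removed
assert combineresults([1, 0, 5]) == 0   # a failing changegroup yields 0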