changegroup: document that 'source' parameter exists for extensions...
Martin von Zweigbergk
r24897:5c35a604 default
@@ -1,899 +1,900 @@
# changegroup.py - Mercurial changegroup manipulation functions
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import weakref
from i18n import _
from node import nullrev, nullid, hex, short
import mdiff, util, dagutil
import struct, os, bz2, zlib, tempfile
import discovery, error, phases, branchmap

_CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
_CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"

def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    s = stream.read(n)
    if len(s) < n:
        raise util.Abort(_("stream ended unexpectedly"
                           " (got %d bytes, expected %d)")
                         % (len(s), n))
    return s

def getchunk(stream):
    """return the next chunk from stream as a string"""
    d = readexactly(stream, 4)
    l = struct.unpack(">l", d)[0]
    if l <= 4:
        if l:
            raise util.Abort(_("invalid chunk length %d") % l)
        return ""
    return readexactly(stream, l - 4)

def chunkheader(length):
    """return a changegroup chunk header (string)"""
    return struct.pack(">l", length + 4)

def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    return struct.pack(">l", 0)

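# A minimal sketch (not part of the original module) of the framing the
# three helpers above implement: each chunk is a 4-byte big-endian length
# that counts its own header bytes, followed by the payload, and a
# zero-length chunk closes a group. Doctest-style, assuming Python 2's
# StringIO:
#
#     >>> import StringIO
#     >>> stream = StringIO.StringIO(chunkheader(5) + "hello" + closechunk())
#     >>> getchunk(stream)
#     'hello'
#     >>> getchunk(stream)   # the zero-length chunk reads back as ""
#     ''
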
def combineresults(results):
    """logic to combine 0 or more addchangegroup results into one"""
    changedheads = 0
    result = 1
    for ret in results:
        # If any changegroup result is 0, return 0
        if ret == 0:
            result = 0
            break
        if ret < -1:
            changedheads += ret + 1
        elif ret > 1:
            changedheads += ret - 1
    if changedheads > 0:
        result = 1 + changedheads
    elif changedheads < 0:
        result = -1 + changedheads
    return result

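# Illustrative values (not in the original source): addchangegroup encodes
# "n heads added" as 1+n and "n heads removed" as -1-n, so combining a
# result of 3 (two heads added) with -2 (one head removed) nets one added
# head, encoded as 2.
#
#     >>> combineresults([3, -2])
#     2
#     >>> combineresults([1, 1])    # head count unchanged
#     1
#     >>> combineresults([0, 5])    # any zero result wins
#     0
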
class nocompress(object):
    def compress(self, x):
        return x
    def flush(self):
        return ""

bundletypes = {
    "": ("", nocompress), # only when using unbundle on ssh and old http servers
                          # since the unification ssh accepts a header but there
                          # is no capability signaling it.
    "HG20": (), # special-cased below
    "HG10UN": ("HG10UN", nocompress),
    "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
    "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
}

# hgweb uses this list to communicate its preferred type
bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']

def writebundle(ui, cg, filename, bundletype, vfs=None):
    """Write a bundle file and return its filename.

    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    bz2 compression can be turned off.
    The bundle file will be deleted in case of errors.
    """

    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                fh = open(filename, "wb")
        else:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, "wb")
        cleanup = filename

        if bundletype == "HG20":
            import bundle2
            bundle = bundle2.bundle20(ui)
            part = bundle.newpart('changegroup', data=cg.getchunks())
            part.addparam('version', cg.version)
            z = nocompress()
            chunkiter = bundle.getchunks()
        else:
            if cg.version != '01':
                raise util.Abort(_('old bundle types only support v1 '
                                   'changegroups'))
            header, compressor = bundletypes[bundletype]
            fh.write(header)
            z = compressor()
            chunkiter = cg.getchunks()

        # parse the changegroup data, otherwise we will block
        # in case of sshrepo because we don't know the end of the stream

        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, an empty chunkgroup is the end of the changegroup
        for chunk in chunkiter:
            fh.write(z.compress(chunk))
        fh.write(z.flush())
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)

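# A hypothetical call, assuming `ui` is a ui object and `cg` is a packer
# stream such as the one returned by getchangegroup() below: write an
# uncompressed v1 bundle. Passing 'HG10BZ' or 'HG10GZ' selects bz2 or zlib
# compression instead, and filename=None writes to a mkstemp-backed
# temporary file.
#
#     >>> writebundle(ui, cg, 'changes.hg', 'HG10UN')
#     'changes.hg'
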
def decompressor(fh, alg):
    if alg == 'UN':
        return fh
    elif alg == 'GZ':
        def generator(f):
            zd = zlib.decompressobj()
            for chunk in util.filechunkiter(f):
                yield zd.decompress(chunk)
    elif alg == 'BZ':
        def generator(f):
            zd = bz2.BZ2Decompressor()
            zd.decompress("BZ")
            for chunk in util.filechunkiter(f, 4096):
                yield zd.decompress(chunk)
    else:
        raise util.Abort("unknown bundle compression '%s'" % alg)
    return util.chunkbuffer(generator(fh))

class cg1unpacker(object):
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '01'
    def __init__(self, fh, alg):
        self._stream = decompressor(fh, alg)
        self._type = alg
        self.callback = None
    def compressed(self):
        return self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def chunklength(self):
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise util.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self.chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        return node, p1, p2, deltabase, cs

    def deltachunk(self, prevnode):
        l = self.chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                'deltabase': deltabase, 'delta': delta}

    def getchunks(self):
        """returns all the chunks contained in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parses the changegroup data, otherwise it
        would block in case of sshrepo because it doesn't know the end of
        the stream.
        """
        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, an empty chunkgroup is the end of the changegroup
        empty = False
        count = 0
        while not empty or count <= 2:
            empty = True
            count += 1
            while True:
                chunk = getchunk(self)
                if not chunk:
                    break
                empty = False
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

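# Sketch of the forwarding use case getchunks() describes (the file name and
# handle here are assumptions, not part of this module): copy a parsed
# changegroup to disk without waiting on an unbounded stream.
#
#     unpacker = cg1unpacker(fh, 'UN')
#     out = open('forwarded.hg', 'wb')
#     for chunk in unpacker.getchunks():
#         out.write(chunk)
#     out.close()
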
class cg2unpacker(cg1unpacker):
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs = headertuple
        return node, p1, p2, deltabase, cs

class headerlessfixup(object):
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        if self._h:
            d, self._h = self._h[:n], self._h[n:]
            if len(d) < n:
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)

class cg1packer(object):
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    version = '01'
    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        self._changelog = repo.changelog
        self._manifest = repo.manifest
        reorder = repo.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None

    def close(self):
        return closechunk()

    def fileheader(self, fname):
        return chunkheader(len(fname)) + fname

    def group(self, nodelist, revlog, lookup, units=None, reorder=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated; units
        specifies the type of revlog that is touched (changelog, manifest,
        etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self.close()
            return

        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and reorder is not False) or reorder:
            dag = dagutil.revlogdag(revlog)
            revs = set(revlog.rev(n) for n in nodelist)
            revs = dag.linearize(revs)
        else:
            revs = sorted([revlog.rev(n) for n in nodelist])

        # add the parent of the first rev
        p = revlog.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        total = len(revs) - 1
        msgbundling = _('bundling')
        for r in xrange(len(revs) - 1):
            if units is not None:
                self._progress(msgbundling, r + 1, unit=units, total=total)
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(revlog.node(curr))
            for c in self.revchunk(revlog, curr, prev, linknode):
                yield c

        yield self.close()

    # filter any nodes that claim to be part of the known set
    def prune(self, revlog, missing, commonrevs):
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        repo = self._repo
        cl = self._changelog
        mf = self._manifest
        reorder = self._reorder
        progress = self._progress

        # for progress output
        msgbundling = _('bundling')

        clrevorder = {}
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()

        # Callback for the changelog, used to collect changed files and
        # manifest nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            clrevorder[x] = len(clrevorder)
            changedfiles.update(c[3])
            # record the first changeset introducing this manifest version
            mfs.setdefault(c[0], x)
            return x

        self._verbosenote(_('uncompressed size of bundle content:\n'))
        size = 0
        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets'),
                                reorder=reorder):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (changelog)\n') % size)
        progress(msgbundling, None)

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def lookupmf(x):
            clnode = mfs[x]
            if not fastpathlinkrev or reorder:
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        # record the first changeset introducing this filelog
                        # version
                        fclnodes = fnodes.setdefault(f, {})
                        fclnode = fclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            fclnodes[n] = clnode
            return clnode

        mfnodes = self.prune(mf, mfs, commonrevs)
        size = 0
        for chunk in self.group(mfnodes, mf, lookupmf, units=_('manifests'),
                                reorder=reorder):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (manifests)\n') % size)
        progress(msgbundling, None)

        mfs.clear()
        needed = set(cl.rev(x) for x in clnodes)

        def linknodes(filerevlog, fname):
            if fastpathlinkrev and not reorder:
                llr = filerevlog.linkrev
                def genfilenodes():
                    for r in filerevlog:
                        linkrev = llr(r)
                        if linkrev in needed:
                            yield filerevlog.node(r), cl.node(linkrev)
                return dict(genfilenodes())
            return fnodes.get(fname, {})

        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()
        progress(msgbundling, None)

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

    # The 'source' parameter is useful for extensions
    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        repo = self._repo
        progress = self._progress
        reorder = self._reorder
        msgbundling = _('bundling')

        total = len(changedfiles)
        # for progress output
        msgfiles = _('files')
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise util.Abort(_("empty or missing revlog for %s") % fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in
            # the fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
            if filenodes:
                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                         total=total)
                h = self.fileheader(fname)
                size = len(h)
                yield h
                for chunk in self.group(filenodes, filerevlog, lookupfilelog,
                                        reorder=reorder):
                    size += len(chunk)
                    yield chunk
                self._verbosenote(_('%8.i %s\n') % (size, fname))

    def deltaparent(self, revlog, rev, p1, p2, prev):
        return prev

    def revchunk(self, revlog, rev, prev, linknode):
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = self.deltaparent(revlog, rev, p1, p2, prev)

        prefix = ''
        if revlog.iscensored(base) or revlog.iscensored(rev):
            try:
                delta = revlog.revision(node)
            except error.CensoredNodeError, e:
                delta = e.tombstone
            if base == nullrev:
                prefix = mdiff.trivialdiffheader(len(delta))
            else:
                baselen = revlog.rawsize(base)
                prefix = mdiff.replacediffheader(baselen, len(delta))
        elif base == nullrev:
            delta = revlog.revision(node)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta
    def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
        # do nothing with basenode, it is implicitly the previous one in HG10
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)

class cg2packer(cg1packer):
    version = '02'
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER

    def group(self, nodelist, revlog, lookup, units=None, reorder=None):
        if (revlog._generaldelta and reorder is not True):
            reorder = False
        return super(cg2packer, self).group(nodelist, revlog, lookup,
                                            units=units, reorder=reorder)

    def deltaparent(self, revlog, rev, p1, p2, prev):
        dp = revlog.deltaparent(rev)
        # avoid storing full revisions; pick prev in those cases
        # also pick prev when we can't be sure remote has dp
        if dp == nullrev or (dp != p1 and dp != p2 and dp != prev):
            return prev
        return dp

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
        return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)

packermap = {'01': (cg1packer, cg1unpacker),
             '02': (cg2packer, cg2unpacker)}

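# How a caller might pick an implementation by wire version (a sketch;
# `chunks`, an iterable of chunk strings, is an assumption here — the
# in-tree users are getsubset() and changegroupsubset() below):
#
#     packercls, unpackercls = packermap['02']
#     bundler = packercls(repo, bundlecaps=None)               # cg2packer
#     unbundler = unpackercls(util.chunkbuffer(chunks), 'UN')  # cg2unpacker
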
def _changegroupinfo(repo, nodes, source):
    if repo.ui.verbose or source == 'bundle':
        repo.ui.status(_("%d changesets found\n") % len(nodes))
    if repo.ui.debugflag:
        repo.ui.debug("list of changesets:\n")
        for node in nodes:
            repo.ui.debug("%s\n" % hex(node))

def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know that all linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)

def getsubset(repo, outgoing, bundler, source, fastpath=False, version='01'):
    gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
    return packermap[version][1](util.chunkbuffer(gengroup), 'UN')

def changegroupsubset(repo, roots, heads, source, version='01'):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the roots and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    cl = repo.changelog
    if not roots:
        roots = [nullid]
    # TODO: remove call to nodesbetween.
    csets, roots, heads = cl.nodesbetween(roots, heads)
    discbases = []
    for n in roots:
        discbases.extend([p for p in cl.parents(n) if p != nullid])
    outgoing = discovery.outgoing(cl, discbases, heads)
    bundler = packermap[version][0](repo)
    return getsubset(repo, outgoing, bundler, source, version=version)

def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
                           version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing. Returns a raw changegroup generator."""
    if not outgoing.missing:
        return None
    bundler = packermap[version][0](repo, bundlecaps)
    return getsubsetraw(repo, outgoing, bundler, source)

def getlocalchangegroup(repo, source, outgoing, bundlecaps=None):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing."""
    if not outgoing.missing:
        return None
    bundler = cg1packer(repo, bundlecaps)
    return getsubset(repo, outgoing, bundler, source)

def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(cl, common, heads)

def getchangegroupraw(repo, source, heads=None, common=None, bundlecaps=None,
                      version='01'):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors of common.

    If heads is None, use the local heads. If common is None, use [nullid].

    If version is None, use a version '1' changegroup.

    The nodes in common might not all be known locally due to the way the
    current discovery protocol works. Returns a raw changegroup generator.
    """
    outgoing = _computeoutgoing(repo, heads, common)
    return getlocalchangegroupraw(repo, source, outgoing, bundlecaps=bundlecaps,
                                  version=version)

def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors of common.

    If heads is None, use the local heads. If common is None, use [nullid].

    The nodes in common might not all be known locally due to the way the
    current discovery protocol works.
    """
    outgoing = _computeoutgoing(repo, heads, common)
    return getlocalchangegroup(repo, source, outgoing, bundlecaps=bundlecaps)

def changegroup(repo, basenodes, source):
    # to avoid a race we use changegroupsubset() (issue1320)
    return changegroupsubset(repo, basenodes, repo.heads(), source)

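# As the comment on generatefiles() above notes, 'source' exists for the
# benefit of extensions: it is a string such as 'push', 'pull', 'bundle' or
# 'strip' that reaches the 'preoutgoing' and 'outgoing' hooks and any
# extension wrapping these functions. A hypothetical call (`somenode` is a
# placeholder base node):
#
#     cg = changegroup(repo, [somenode], 'pull')
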
def addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
    revisions = 0
    files = 0
    while True:
        chunkdata = source.filelogheader()
        if not chunkdata:
            break
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        pr()
        fl = repo.file(f)
        o = len(fl)
        try:
            if not fl.addgroup(source, revmap, trp):
                raise util.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError, e:
            raise util.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        files += 1
        if f in needfiles:
            needs = needfiles[f]
            for new in xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise util.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    repo.ui.progress(_('files'), None)

    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise util.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files

def addchangegroup(repo, source, srctype, url, emptyok=False,
                   targetphase=phases.draft):
    """Add the changegroup returned by source.read() to this repo.
    srctype is a string like 'push', 'pull', or 'unbundle'. url is
    the URL of the repo where this changegroup is coming from.

    Return an integer summarizing the change to this repo:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - fewer heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    repo = repo.unfiltered()
    def csmap(x):
        repo.ui.debug("add changeset %s\n" % short(x))
        return len(cl)

    def revmap(x):
        return cl.rev(x)

    if not source:
        return 0

    changesets = files = revisions = 0
    efiles = set()

    tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
    # The transaction could have been created before and already carries
    # source information. In this case we use the top level data. We
    # overwrite the arguments because we need to use the top level values
    # (if they exist) in this function.
    srctype = tr.hookargs.setdefault('source', srctype)
    url = tr.hookargs.setdefault('url', url)

    # write changelog data to temp files so concurrent readers will not see
    # an inconsistent view
    cl = repo.changelog
    cl.delayupdate(tr)
    oldheads = cl.heads()
    try:
        repo.hook('prechangegroup', throw=True, **tr.hookargs)

        trp = weakref.proxy(tr)
        # pull off the changeset group
        repo.ui.status(_("adding changesets\n"))
        clstart = len(cl)
        class prog(object):
            step = _('changesets')
            count = 1
            ui = repo.ui
            total = None
            def __call__(repo):
                repo.ui.progress(repo.step, repo.count, unit=_('chunks'),
                                 total=repo.total)
                repo.count += 1
        pr = prog()
        source.callback = pr

        source.changelogheader()
        srccontent = cl.addgroup(source, csmap, trp)
        if not (srccontent or emptyok):
            raise util.Abort(_("received changelog group is empty"))
        clend = len(cl)
        changesets = clend - clstart
        for c in xrange(clstart, clend):
            efiles.update(repo[c].files())
        efiles = len(efiles)
        repo.ui.progress(_('changesets'), None)

        # pull off the manifest group
        repo.ui.status(_("adding manifests\n"))
        pr.step = _('manifests')
        pr.count = 1
        pr.total = changesets # manifests <= changesets
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        source.manifestheader()
        repo.manifest.addgroup(source, revmap, trp)
        repo.ui.progress(_('manifests'), None)

        needfiles = {}
        if repo.ui.configbool('server', 'validate', default=False):
            # validate incoming csets have their manifests
            for cset in xrange(clstart, clend):
                mfest = repo.changelog.read(repo.changelog.node(cset))[0]
                mfest = repo.manifest.readdelta(mfest)
                # store file nodes we must see
                for f, n in mfest.iteritems():
                    needfiles.setdefault(f, set()).add(n)

        # process the files
        repo.ui.status(_("adding file changes\n"))
        pr.step = _('files')
        pr.count = 1
        pr.total = efiles
        source.callback = None

        newrevs, newfiles = addchangegroupfiles(repo, source, revmap, trp, pr,
                                                needfiles)
        revisions += newrevs
        files += newfiles

        dh = 0
        if oldheads:
            heads = cl.heads()
            dh = len(heads) - len(oldheads)
            for h in heads:
                if h not in oldheads and repo[h].closesbranch():
                    dh -= 1
        htext = ""
        if dh:
            htext = _(" (%+d heads)") % dh

        repo.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, htext))
        repo.invalidatevolatilesets()

        if changesets > 0:
            p = lambda: tr.writepending() and repo.root or ""
            if 'node' not in tr.hookargs:
                tr.hookargs['node'] = hex(cl.node(clstart))
                hookargs = dict(tr.hookargs)
            else:
                hookargs = dict(tr.hookargs)
                hookargs['node'] = hex(cl.node(clstart))
            repo.hook('pretxnchangegroup', throw=True, pending=p, **hookargs)

        added = [cl.node(r) for r in xrange(clstart, clend)]
        publishing = repo.ui.configbool('phases', 'publish', True)
        if srctype in ('push', 'serve'):
            # Old servers can not push the boundary themselves.
            # New servers won't push the boundary if changeset already
            # exists locally as secret
            #
            # We should not use added here but the list of all changes in
            # the bundle
            if publishing:
                phases.advanceboundary(repo, tr, phases.public, srccontent)
            else:
                # Those changesets have been pushed from the outside, their
                # phases are going to be pushed alongside. Therefore
                # `targetphase` is ignored.
                phases.advanceboundary(repo, tr, phases.draft, srccontent)
                phases.retractboundary(repo, tr, phases.draft, added)
        elif srctype != 'strip':
            # publishing only alters behavior during push
            #
            # strip should not touch boundary at all
            phases.retractboundary(repo, tr, targetphase, added)

        if changesets > 0:
            if srctype != 'strip':
                # During strip, branchcache is invalid but the upcoming call
                # to `destroyed` will repair it.
                # In other cases we can safely update the cache on disk.
                branchmap.updatecache(repo.filtered('served'))

            def runhooks():
                # These hooks run when the lock releases, not when the
                # transaction closes. So it's possible for the changelog
                # to have changed since we last saw it.
                if clstart >= len(repo):
                    return

                # forcefully update the on-disk branch cache
                repo.ui.debug("updating the branch cache\n")
                repo.hook("changegroup", **hookargs)

                for n in added:
                    args = hookargs.copy()
                    args['node'] = hex(n)
                    repo.hook("incoming", **args)

                newheads = [h for h in repo.heads() if h not in oldheads]
                repo.ui.log("incoming",
                            "%s incoming changes - new heads: %s\n",
                            len(added),
                            ', '.join([hex(c[:6]) for c in newheads]))

            tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                            lambda tr: repo._afterlock(runhooks))

        tr.close()

    finally:
        tr.release()
    repo.ui.flush()
    # never return 0 here:
    if dh < 0:
        return dh - 1
    else:
        return dh + 1