changegroup: close progress in same function as it's started...
Martin von Zweigbergk
r24901:e9edd537 default
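Before this change, group() started the 'bundling' progress topic but relied on its caller, generate(), to close it, and generatefiles() likewise left its final progress call to generate(). The change moves each closing ui.progress(topic, None) call into the function that opened the topic. A minimal sketch of the rule this enforces (hypothetical items and ui object; ui.progress(topic, None) is the real "close this topic" idiom):

    def bundlethings(ui, things):
        for i, thing in enumerate(things):
            ui.progress('bundling', i + 1, item=thing, total=len(things))
            # ... emit chunks for thing ...
        ui.progress('bundling', None)   # closed where it was opened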
@@ -1,900 +1,896 @@
 # changegroup.py - Mercurial changegroup manipulation functions
 #
 # Copyright 2006 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 import weakref
 from i18n import _
 from node import nullrev, nullid, hex, short
 import mdiff, util, dagutil
 import struct, os, bz2, zlib, tempfile
 import discovery, error, phases, branchmap
 
 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
 
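These two format strings define the fixed-width delta headers parsed and packed below; v2 adds an explicit delta base between p2 and the link node. A quick size check (illustrative, stdlib only):

    import struct
    struct.calcsize("20s20s20s20s")     # 80: node, p1, p2, linknode (v1)
    struct.calcsize("20s20s20s20s20s")  # 100: node, p1, p2, deltabase, linknode (v2)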
 def readexactly(stream, n):
     '''read n bytes from stream.read and abort if less was available'''
     s = stream.read(n)
     if len(s) < n:
         raise util.Abort(_("stream ended unexpectedly"
                            " (got %d bytes, expected %d)")
                          % (len(s), n))
     return s
 
 def getchunk(stream):
     """return the next chunk from stream as a string"""
     d = readexactly(stream, 4)
     l = struct.unpack(">l", d)[0]
     if l <= 4:
         if l:
             raise util.Abort(_("invalid chunk length %d") % l)
         return ""
     return readexactly(stream, l - 4)
 
 def chunkheader(length):
     """return a changegroup chunk header (string)"""
     return struct.pack(">l", length + 4)
 
 def closechunk():
     """return a changegroup chunk header (string) for a zero-length chunk"""
     return struct.pack(">l", 0)
 
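Taken together, these helpers define the framing: every chunk is preceded by a 4-byte big-endian length that counts the header itself, and a zero length terminates a group. A round trip (illustrative, stdlib only):

    import struct, io
    framed = struct.pack(">l", 5 + 4) + "hello"         # what chunkheader() emits
    stream = io.BytesIO(framed + struct.pack(">l", 0))  # closechunk() terminator
    l = struct.unpack(">l", stream.read(4))[0]          # 9
    stream.read(l - 4)                                  # "hello"; a 0 length ends the group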
 def combineresults(results):
     """logic to combine 0 or more addchangegroup results into one"""
     changedheads = 0
     result = 1
     for ret in results:
         # If any changegroup result is 0, return 0
         if ret == 0:
             result = 0
             break
         if ret < -1:
             changedheads += ret + 1
         elif ret > 1:
             changedheads += ret - 1
     if changedheads > 0:
         result = 1 + changedheads
     elif changedheads < 0:
         result = -1 + changedheads
     return result
 
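Worked examples of the combination rule above (hypothetical addchangegroup return codes; see addchangegroup's docstring at the bottom of this file for what the codes mean):

    combineresults([])        # 1: nothing changed any head counts
    combineresults([0, 3])    # 0: any zero result wins
    combineresults([2, 3])    # 4: 1 + (2-1) + (3-1), three heads added overall
    combineresults([-2, -3])  # -4: -1 + (-2+1) + (-3+1), three heads removed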
 class nocompress(object):
     def compress(self, x):
         return x
     def flush(self):
         return ""
 
 bundletypes = {
     "": ("", nocompress), # only when using unbundle on ssh and old http servers
                           # since the unification ssh accepts a header but there
                           # is no capability signaling it.
     "HG20": (), # special-cased below
     "HG10UN": ("HG10UN", nocompress),
     "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
     "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
 }
 
 # hgweb uses this list to communicate its preferred type
 bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
 
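A subtlety in the table above: the 'HG10BZ' entry writes only "HG10" because a bz2 stream already begins with the bytes "BZ", so the file still starts with the six-byte magic "HG10BZ". That is also why decompressor() below has to re-feed "BZ" to the BZ2Decompressor after a reader has consumed the magic. Illustrative check:

    import bz2
    z = bz2.BZ2Compressor()
    body = z.compress("data") + z.flush()
    body[:2]              # "BZ" -- supplied by the compressor itself
    ("HG10" + body)[:6]   # "HG10BZ" -- the full on-disk magic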
 def writebundle(ui, cg, filename, bundletype, vfs=None):
     """Write a bundle file and return its filename.
 
     Existing files will not be overwritten.
     If no filename is specified, a temporary file is created.
     bz2 compression can be turned off.
     The bundle file will be deleted in case of errors.
     """
 
     fh = None
     cleanup = None
     try:
         if filename:
             if vfs:
                 fh = vfs.open(filename, "wb")
             else:
                 fh = open(filename, "wb")
         else:
             fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
             fh = os.fdopen(fd, "wb")
         cleanup = filename
 
         if bundletype == "HG20":
             import bundle2
             bundle = bundle2.bundle20(ui)
             part = bundle.newpart('changegroup', data=cg.getchunks())
             part.addparam('version', cg.version)
             z = nocompress()
             chunkiter = bundle.getchunks()
         else:
             if cg.version != '01':
                 raise util.Abort(_('old bundle types only support v1 '
                                    'changegroups'))
             header, compressor = bundletypes[bundletype]
             fh.write(header)
             z = compressor()
             chunkiter = cg.getchunks()
 
         # parse the changegroup data, otherwise we will block
         # in case of sshrepo because we don't know the end of the stream
 
         # an empty chunkgroup is the end of the changegroup
         # a changegroup has at least 2 chunkgroups (changelog and manifest).
         # after that, an empty chunkgroup is the end of the changegroup
         for chunk in chunkiter:
             fh.write(z.compress(chunk))
         fh.write(z.flush())
         cleanup = None
         return filename
     finally:
         if fh is not None:
             fh.close()
         if cleanup is not None:
             if filename and vfs:
                 vfs.unlink(cleanup)
             else:
                 os.unlink(cleanup)
 
 def decompressor(fh, alg):
     if alg == 'UN':
         return fh
     elif alg == 'GZ':
         def generator(f):
             zd = zlib.decompressobj()
             for chunk in util.filechunkiter(f):
                 yield zd.decompress(chunk)
     elif alg == 'BZ':
         def generator(f):
             zd = bz2.BZ2Decompressor()
             zd.decompress("BZ")
             for chunk in util.filechunkiter(f, 4096):
                 yield zd.decompress(chunk)
     else:
         raise util.Abort("unknown bundle compression '%s'" % alg)
     return util.chunkbuffer(generator(fh))
 
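The 'GZ' branch above decompresses incrementally, one file chunk at a time, instead of buffering the whole stream. The same streaming inverse, reduced to the stdlib calls involved (illustrative):

    import zlib
    comp = zlib.compressobj()                  # what the HG10GZ writer uses
    blob = comp.compress("changegroup bytes") + comp.flush()
    zd = zlib.decompressobj()
    assert zd.decompress(blob) == "changegroup bytes"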
 class cg1unpacker(object):
     deltaheader = _CHANGEGROUPV1_DELTA_HEADER
     deltaheadersize = struct.calcsize(deltaheader)
     version = '01'
     def __init__(self, fh, alg):
         self._stream = decompressor(fh, alg)
         self._type = alg
         self.callback = None
     def compressed(self):
         return self._type != 'UN'
     def read(self, l):
         return self._stream.read(l)
     def seek(self, pos):
         return self._stream.seek(pos)
     def tell(self):
         return self._stream.tell()
     def close(self):
         return self._stream.close()
 
     def chunklength(self):
         d = readexactly(self._stream, 4)
         l = struct.unpack(">l", d)[0]
         if l <= 4:
             if l:
                 raise util.Abort(_("invalid chunk length %d") % l)
             return 0
         if self.callback:
             self.callback()
         return l - 4
 
     def changelogheader(self):
         """v10 does not have a changelog header chunk"""
         return {}
 
     def manifestheader(self):
         """v10 does not have a manifest header chunk"""
         return {}
 
     def filelogheader(self):
         """return the header of the filelogs chunk, v10 only has the filename"""
         l = self.chunklength()
         if not l:
             return {}
         fname = readexactly(self._stream, l)
         return {'filename': fname}
 
     def _deltaheader(self, headertuple, prevnode):
         node, p1, p2, cs = headertuple
         if prevnode is None:
             deltabase = p1
         else:
             deltabase = prevnode
         return node, p1, p2, deltabase, cs
 
     def deltachunk(self, prevnode):
         l = self.chunklength()
         if not l:
             return {}
         headerdata = readexactly(self._stream, self.deltaheadersize)
         header = struct.unpack(self.deltaheader, headerdata)
         delta = readexactly(self._stream, l - self.deltaheadersize)
         node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
         return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                 'deltabase': deltabase, 'delta': delta}
 
     def getchunks(self):
         """returns all the chunks contained in the bundle
 
         Used when you need to forward the binary stream to a file or another
         network API. To do so, it parses the changegroup data; otherwise it
         would block on sshrepo, since it doesn't know where the stream ends.
         """
         # an empty chunkgroup is the end of the changegroup
         # a changegroup has at least 2 chunkgroups (changelog and manifest).
         # after that, an empty chunkgroup is the end of the changegroup
         empty = False
         count = 0
         while not empty or count <= 2:
             empty = True
             count += 1
             while True:
                 chunk = getchunk(self)
                 if not chunk:
                     break
                 empty = False
                 yield chunkheader(len(chunk))
                 pos = 0
                 while pos < len(chunk):
                     next = pos + 2**20
                     yield chunk[pos:next]
                     pos = next
         yield closechunk()
 
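The v1 rule encoded in _deltaheader() above: the delta base is never transmitted; the first delta in a group is against p1, and each later delta chains off the previous chunk. A sketch (bypassing __init__ since no stream is needed; hypothetical 20-byte ids):

    u = cg1unpacker.__new__(cg1unpacker)
    hdr = ("N" * 20, "P" * 20, "Q" * 20, "C" * 20)  # node, p1, p2, cs
    u._deltaheader(hdr, None)[3]      # "P" * 20: first chunk bases on p1
    u._deltaheader(hdr, "X" * 20)[3]  # "X" * 20: later chunks chain on prevnode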
 class cg2unpacker(cg1unpacker):
     deltaheader = _CHANGEGROUPV2_DELTA_HEADER
     deltaheadersize = struct.calcsize(deltaheader)
     version = '02'
 
     def _deltaheader(self, headertuple, prevnode):
         node, p1, p2, deltabase, cs = headertuple
         return node, p1, p2, deltabase, cs
 
 class headerlessfixup(object):
     def __init__(self, fh, h):
         self._h = h
         self._fh = fh
     def read(self, n):
         if self._h:
             d, self._h = self._h[:n], self._h[n:]
             if len(d) < n:
                 d += readexactly(self._fh, n - len(d))
             return d
         return readexactly(self._fh, n)
 
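headerlessfixup exists for readers that had to consume the compression magic in order to choose an algorithm: it splices those bytes back in front of the remaining stream. For instance (illustrative):

    import io
    fh = io.BytesIO("GZdata follows")
    magic = fh.read(2)                  # sniffed to choose a decompressor
    fixed = headerlessfixup(fh, magic)
    fixed.read(6)                       # "GZdata": the magic is back in front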
 class cg1packer(object):
     deltaheader = _CHANGEGROUPV1_DELTA_HEADER
     version = '01'
     def __init__(self, repo, bundlecaps=None):
         """Given a source repo, construct a bundler.
 
         bundlecaps is optional and can be used to specify the set of
         capabilities which can be used to build the bundle.
         """
         # Set of capabilities we can use to build the bundle.
         if bundlecaps is None:
             bundlecaps = set()
         self._bundlecaps = bundlecaps
         self._changelog = repo.changelog
         self._manifest = repo.manifest
         reorder = repo.ui.config('bundle', 'reorder', 'auto')
         if reorder == 'auto':
             reorder = None
         else:
             reorder = util.parsebool(reorder)
         self._repo = repo
         self._reorder = reorder
         self._progress = repo.ui.progress
         if self._repo.ui.verbose and not self._repo.ui.debugflag:
             self._verbosenote = self._repo.ui.note
         else:
             self._verbosenote = lambda s: None
 
     def close(self):
         return closechunk()
 
     def fileheader(self, fname):
         return chunkheader(len(fname)) + fname
 
     def group(self, nodelist, revlog, lookup, units=None, reorder=None):
         """Calculate a delta group, yielding a sequence of changegroup chunks
         (strings).
 
         Given a list of changeset revs, return a set of deltas and
         metadata corresponding to nodes. The first delta is
         first parent(nodelist[0]) -> nodelist[0], the receiver is
         guaranteed to have this parent as it has all history before
         these changesets. In the case the first parent is nullrev the
         changegroup starts with a full revision.
 
         If units is not None, progress detail will be generated; units
         specifies the type of revlog that is touched (changelog, manifest, etc.).
         """
         # if we don't have any revisions touched by these changesets, bail
         if len(nodelist) == 0:
             yield self.close()
             return
 
         # for generaldelta revlogs, we linearize the revs; this will both be
         # much quicker and generate a much smaller bundle
         if (revlog._generaldelta and reorder is not False) or reorder:
             dag = dagutil.revlogdag(revlog)
             revs = set(revlog.rev(n) for n in nodelist)
             revs = dag.linearize(revs)
         else:
             revs = sorted([revlog.rev(n) for n in nodelist])
 
         # add the parent of the first rev
         p = revlog.parentrevs(revs[0])[0]
         revs.insert(0, p)
 
         # build deltas
         total = len(revs) - 1
         msgbundling = _('bundling')
         for r in xrange(len(revs) - 1):
             if units is not None:
                 self._progress(msgbundling, r + 1, unit=units, total=total)
             prev, curr = revs[r], revs[r + 1]
             linknode = lookup(revlog.node(curr))
             for c in self.revchunk(revlog, curr, prev, linknode):
                 yield c
 
+        if units is not None:
+            self._progress(msgbundling, None)
         yield self.close()
 
     # filter any nodes that claim to be part of the known set
     def prune(self, revlog, missing, commonrevs):
         rr, rl = revlog.rev, revlog.linkrev
         return [n for n in missing if rl(rr(n)) not in commonrevs]
 
     def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
         '''yield a sequence of changegroup chunks (strings)'''
         repo = self._repo
         cl = self._changelog
         ml = self._manifest
         reorder = self._reorder
-        progress = self._progress
-
-        # for progress output
-        msgbundling = _('bundling')
 
         clrevorder = {}
         mfs = {} # needed manifests
         fnodes = {} # needed file nodes
         changedfiles = set()
 
         # Callback for the changelog, used to collect changed files and manifest
         # nodes.
         # Returns the linkrev node (identity in the changelog case).
         def lookupcl(x):
             c = cl.read(x)
             clrevorder[x] = len(clrevorder)
             changedfiles.update(c[3])
             # record the first changeset introducing this manifest version
             mfs.setdefault(c[0], x)
             return x
 
         self._verbosenote(_('uncompressed size of bundle content:\n'))
         size = 0
         for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets'),
                                 reorder=reorder):
             size += len(chunk)
             yield chunk
         self._verbosenote(_('%8.i (changelog)\n') % size)
-        progress(msgbundling, None)
 
         # Callback for the manifest, used to collect linkrevs for filelog
         # revisions.
         # Returns the linkrev node (collected in lookupcl).
         def lookupmf(x):
             clnode = mfs[x]
             if not fastpathlinkrev or reorder:
                 mdata = ml.readfast(x)
                 for f, n in mdata.iteritems():
                     if f in changedfiles:
                         # record the first changeset introducing this filelog
                         # version
                         fclnodes = fnodes.setdefault(f, {})
                         fclnode = fclnodes.setdefault(n, clnode)
                         if clrevorder[clnode] < clrevorder[fclnode]:
                             fclnodes[n] = clnode
             return clnode
 
         mfnodes = self.prune(ml, mfs, commonrevs)
         size = 0
         for chunk in self.group(mfnodes, ml, lookupmf, units=_('manifests'),
                                 reorder=reorder):
             size += len(chunk)
             yield chunk
         self._verbosenote(_('%8.i (manifests)\n') % size)
-        progress(msgbundling, None)
 
         mfs.clear()
         clrevs = set(cl.rev(x) for x in clnodes)
 
         def linknodes(filerevlog, fname):
             if fastpathlinkrev and not reorder:
                 llr = filerevlog.linkrev
                 def genfilenodes():
                     for r in filerevlog:
                         linkrev = llr(r)
                         if linkrev in clrevs:
                             yield filerevlog.node(r), cl.node(linkrev)
                 return dict(genfilenodes())
             return fnodes.get(fname, {})
 
         for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                         source):
             yield chunk
 
         yield self.close()
-        progress(msgbundling, None)
 
         if clnodes:
             repo.hook('outgoing', node=hex(clnodes[0]), source=source)
 
     # The 'source' parameter is useful for extensions
     def generatefiles(self, changedfiles, linknodes, commonrevs, source):
         repo = self._repo
         progress = self._progress
         reorder = self._reorder
         msgbundling = _('bundling')
 
         total = len(changedfiles)
         # for progress output
         msgfiles = _('files')
         for i, fname in enumerate(sorted(changedfiles)):
             filerevlog = repo.file(fname)
             if not filerevlog:
                 raise util.Abort(_("empty or missing revlog for %s") % fname)
 
             linkrevnodes = linknodes(filerevlog, fname)
             # Lookup for filenodes, we collected the linkrev nodes above in the
             # fastpath case and with lookupmf in the slowpath case.
             def lookupfilelog(x):
                 return linkrevnodes[x]
 
             filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
             if filenodes:
                 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                          total=total)
                 h = self.fileheader(fname)
                 size = len(h)
                 yield h
                 for chunk in self.group(filenodes, filerevlog, lookupfilelog,
                                         reorder=reorder):
                     size += len(chunk)
                     yield chunk
                 self._verbosenote(_('%8.i %s\n') % (size, fname))
+        progress(msgbundling, None)
 
     def deltaparent(self, revlog, rev, p1, p2, prev):
         return prev
 
     def revchunk(self, revlog, rev, prev, linknode):
         node = revlog.node(rev)
         p1, p2 = revlog.parentrevs(rev)
         base = self.deltaparent(revlog, rev, p1, p2, prev)
 
         prefix = ''
         if revlog.iscensored(base) or revlog.iscensored(rev):
             try:
                 delta = revlog.revision(node)
             except error.CensoredNodeError, e:
                 delta = e.tombstone
             if base == nullrev:
                 prefix = mdiff.trivialdiffheader(len(delta))
             else:
                 baselen = revlog.rawsize(base)
                 prefix = mdiff.replacediffheader(baselen, len(delta))
         elif base == nullrev:
             delta = revlog.revision(node)
             prefix = mdiff.trivialdiffheader(len(delta))
         else:
             delta = revlog.revdiff(base, rev)
         p1n, p2n = revlog.parents(node)
         basenode = revlog.node(base)
         meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
         meta += prefix
         l = len(meta) + len(delta)
         yield chunkheader(l)
         yield meta
         yield delta
     def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
         # do nothing with basenode, it is implicitly the previous one in HG10
         return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
 
 class cg2packer(cg1packer):
     version = '02'
     deltaheader = _CHANGEGROUPV2_DELTA_HEADER
 
     def group(self, nodelist, revlog, lookup, units=None, reorder=None):
         if (revlog._generaldelta and reorder is not True):
             reorder = False
         return super(cg2packer, self).group(nodelist, revlog, lookup,
                                             units=units, reorder=reorder)
 
     def deltaparent(self, revlog, rev, p1, p2, prev):
         dp = revlog.deltaparent(rev)
         # avoid storing full revisions; pick prev in those cases
         # also pick prev when we can't be sure remote has dp
         if dp == nullrev or (dp != p1 and dp != p2 and dp != prev):
             return prev
         return dp
 
     def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
         return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
 
 packermap = {'01': (cg1packer, cg1unpacker),
              '02': (cg2packer, cg2unpacker)}
 
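packermap is the one place that ties a changegroup version string to its packer/unpacker pair; getsubset(), changegroupsubset() and getlocalchangegroupraw() below index it with a negotiated version. For example:

    packer, unpacker = packermap['02']   # cg2packer, cg2unpacker
    # bundler = packer(repo, bundlecaps=None)  # for some repo (hypothetical variable)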
 def _changegroupinfo(repo, nodes, source):
     if repo.ui.verbose or source == 'bundle':
         repo.ui.status(_("%d changesets found\n") % len(nodes))
     if repo.ui.debugflag:
         repo.ui.debug("list of changesets:\n")
         for node in nodes:
             repo.ui.debug("%s\n" % hex(node))
 
 def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
     repo = repo.unfiltered()
     commonrevs = outgoing.common
     csets = outgoing.missing
     heads = outgoing.missingheads
     # We go through the fast path if we get told to, or if all (unfiltered)
     # heads have been requested (since we then know that all linkrevs will
     # be pulled by the client).
     heads.sort()
     fastpathlinkrev = fastpath or (
         repo.filtername is None and heads == sorted(repo.heads()))
 
     repo.hook('preoutgoing', throw=True, source=source)
     _changegroupinfo(repo, csets, source)
     return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
 
 def getsubset(repo, outgoing, bundler, source, fastpath=False, version='01'):
     gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
     return packermap[version][1](util.chunkbuffer(gengroup), 'UN')
 
 def changegroupsubset(repo, roots, heads, source, version='01'):
     """Compute a changegroup consisting of all the nodes that are
     descendants of any of the roots and ancestors of any of the heads.
     Return a chunkbuffer object whose read() method will return
     successive changegroup chunks.
 
     It is fairly complex as determining which filenodes and which
     manifest nodes need to be included for the changeset to be complete
     is non-trivial.
 
     Another wrinkle is doing the reverse, figuring out which changeset in
     the changegroup a particular filenode or manifestnode belongs to.
     """
     cl = repo.changelog
     if not roots:
         roots = [nullid]
     # TODO: remove call to nodesbetween.
     csets, roots, heads = cl.nodesbetween(roots, heads)
     discbases = []
     for n in roots:
         discbases.extend([p for p in cl.parents(n) if p != nullid])
     outgoing = discovery.outgoing(cl, discbases, heads)
     bundler = packermap[version][0](repo)
     return getsubset(repo, outgoing, bundler, source, version=version)
 
 def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
                            version='01'):
     """Like getbundle, but taking a discovery.outgoing as an argument.
 
     This is only implemented for local repos and reuses potentially
     precomputed sets in outgoing. Returns a raw changegroup generator."""
     if not outgoing.missing:
         return None
     bundler = packermap[version][0](repo, bundlecaps)
     return getsubsetraw(repo, outgoing, bundler, source)
 
 def getlocalchangegroup(repo, source, outgoing, bundlecaps=None):
     """Like getbundle, but taking a discovery.outgoing as an argument.
 
     This is only implemented for local repos and reuses potentially
     precomputed sets in outgoing."""
     if not outgoing.missing:
         return None
     bundler = cg1packer(repo, bundlecaps)
     return getsubset(repo, outgoing, bundler, source)
 
 def _computeoutgoing(repo, heads, common):
     """Computes which revs are outgoing given a set of common
     and a set of heads.
 
     This is a separate function so extensions can have access to
     the logic.
 
     Returns a discovery.outgoing object.
     """
     cl = repo.changelog
     if common:
         hasnode = cl.hasnode
         common = [n for n in common if hasnode(n)]
     else:
         common = [nullid]
     if not heads:
         heads = cl.heads()
     return discovery.outgoing(cl, common, heads)
 
 def getchangegroupraw(repo, source, heads=None, common=None, bundlecaps=None,
                       version='01'):
     """Like changegroupsubset, but returns the set difference between the
     ancestors of heads and the ancestors of common.
 
     If heads is None, use the local heads. If common is None, use [nullid].
 
     If version is None, use a version '01' changegroup.
 
     The nodes in common might not all be known locally due to the way the
     current discovery protocol works. Returns a raw changegroup generator.
     """
     outgoing = _computeoutgoing(repo, heads, common)
     return getlocalchangegroupraw(repo, source, outgoing, bundlecaps=bundlecaps,
                                   version=version)
 
 def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None):
     """Like changegroupsubset, but returns the set difference between the
     ancestors of heads and the ancestors of common.
 
     If heads is None, use the local heads. If common is None, use [nullid].
 
     The nodes in common might not all be known locally due to the way the
     current discovery protocol works.
     """
     outgoing = _computeoutgoing(repo, heads, common)
     return getlocalchangegroup(repo, source, outgoing, bundlecaps=bundlecaps)
 
 def changegroup(repo, basenodes, source):
     # to avoid a race we use changegroupsubset() (issue1320)
     return changegroupsubset(repo, basenodes, repo.heads(), source)
 
 def addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
     revisions = 0
     files = 0
     while True:
         chunkdata = source.filelogheader()
         if not chunkdata:
             break
         f = chunkdata["filename"]
         repo.ui.debug("adding %s revisions\n" % f)
         pr()
         fl = repo.file(f)
         o = len(fl)
         try:
             if not fl.addgroup(source, revmap, trp):
                 raise util.Abort(_("received file revlog group is empty"))
         except error.CensoredBaseError, e:
             raise util.Abort(_("received delta base is censored: %s") % e)
         revisions += len(fl) - o
         files += 1
         if f in needfiles:
             needs = needfiles[f]
             for new in xrange(o, len(fl)):
                 n = fl.node(new)
                 if n in needs:
                     needs.remove(n)
                 else:
                     raise util.Abort(
                         _("received spurious file revlog entry"))
             if not needs:
                 del needfiles[f]
     repo.ui.progress(_('files'), None)
 
     for f, needs in needfiles.iteritems():
         fl = repo.file(f)
         for n in needs:
             try:
                 fl.rev(n)
             except error.LookupError:
                 raise util.Abort(
                     _('missing file data for %s:%s - run hg verify') %
                     (f, hex(n)))
 
     return revisions, files
 
 def addchangegroup(repo, source, srctype, url, emptyok=False,
                    targetphase=phases.draft):
     """Add the changegroup returned by source.read() to this repo.
     srctype is a string like 'push', 'pull', or 'unbundle'. url is
     the URL of the repo where this changegroup is coming from.
 
     Return an integer summarizing the change to this repo:
     - nothing changed or no source: 0
     - more heads than before: 1+added heads (2..n)
     - fewer heads than before: -1-removed heads (-2..-n)
     - number of heads stays the same: 1
     """
     repo = repo.unfiltered()
     def csmap(x):
         repo.ui.debug("add changeset %s\n" % short(x))
         return len(cl)
 
     def revmap(x):
         return cl.rev(x)
 
     if not source:
         return 0
 
     changesets = files = revisions = 0
     efiles = set()
 
     tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
     # The transaction could have been created before and already carries source
     # information. In this case we use the top level data. We overwrite the
     # argument because we need to use the top level value (if they exist) in
     # this function.
     srctype = tr.hookargs.setdefault('source', srctype)
     url = tr.hookargs.setdefault('url', url)
 
     # write changelog data to temp files so concurrent readers will not see
     # an inconsistent view
     cl = repo.changelog
     cl.delayupdate(tr)
     oldheads = cl.heads()
     try:
         repo.hook('prechangegroup', throw=True, **tr.hookargs)
 
         trp = weakref.proxy(tr)
         # pull off the changeset group
         repo.ui.status(_("adding changesets\n"))
         clstart = len(cl)
         class prog(object):
             step = _('changesets')
             count = 1
             ui = repo.ui
             total = None
             def __call__(repo):
                 repo.ui.progress(repo.step, repo.count, unit=_('chunks'),
                                  total=repo.total)
                 repo.count += 1
         pr = prog()
         source.callback = pr
 
         source.changelogheader()
         srccontent = cl.addgroup(source, csmap, trp)
         if not (srccontent or emptyok):
             raise util.Abort(_("received changelog group is empty"))
         clend = len(cl)
         changesets = clend - clstart
         for c in xrange(clstart, clend):
             efiles.update(repo[c].files())
         efiles = len(efiles)
         repo.ui.progress(_('changesets'), None)
 
         # pull off the manifest group
         repo.ui.status(_("adding manifests\n"))
         pr.step = _('manifests')
         pr.count = 1
         pr.total = changesets # manifests <= changesets
         # no need to check for empty manifest group here:
         # if the result of the merge of 1 and 2 is the same in 3 and 4,
         # no new manifest will be created and the manifest group will
         # be empty during the pull
         source.manifestheader()
         repo.manifest.addgroup(source, revmap, trp)
         repo.ui.progress(_('manifests'), None)
 
         needfiles = {}
         if repo.ui.configbool('server', 'validate', default=False):
             # validate incoming csets have their manifests
             for cset in xrange(clstart, clend):
                 mfnode = repo.changelog.read(repo.changelog.node(cset))[0]
                 mfest = repo.manifest.readdelta(mfnode)
                 # store file nodes we must see
                 for f, n in mfest.iteritems():
                     needfiles.setdefault(f, set()).add(n)
 
         # process the files
         repo.ui.status(_("adding file changes\n"))
         pr.step = _('files')
         pr.count = 1
         pr.total = efiles
         source.callback = None
 
         newrevs, newfiles = addchangegroupfiles(repo, source, revmap, trp, pr,
                                                 needfiles)
         revisions += newrevs
         files += newfiles
 
         dh = 0
         if oldheads:
             heads = cl.heads()
             dh = len(heads) - len(oldheads)
             for h in heads:
                 if h not in oldheads and repo[h].closesbranch():
                     dh -= 1
         htext = ""
         if dh:
             htext = _(" (%+d heads)") % dh
 
         repo.ui.status(_("added %d changesets"
                          " with %d changes to %d files%s\n")
                        % (changesets, revisions, files, htext))
         repo.invalidatevolatilesets()
 
         if changesets > 0:
             p = lambda: tr.writepending() and repo.root or ""
             if 'node' not in tr.hookargs:
                 tr.hookargs['node'] = hex(cl.node(clstart))
                 hookargs = dict(tr.hookargs)
             else:
                 hookargs = dict(tr.hookargs)
                 hookargs['node'] = hex(cl.node(clstart))
             repo.hook('pretxnchangegroup', throw=True, pending=p, **hookargs)
 
         added = [cl.node(r) for r in xrange(clstart, clend)]
         publishing = repo.ui.configbool('phases', 'publish', True)
         if srctype in ('push', 'serve'):
             # Old servers cannot push the boundary themselves.
             # New servers won't push the boundary if changeset already
             # exists locally as secret
             #
             # We should not use added here but the list of all changes in
             # the bundle
             if publishing:
                 phases.advanceboundary(repo, tr, phases.public, srccontent)
             else:
                 # Those changesets have been pushed from the outside, their
                 # phases are going to be pushed alongside. Therefore
                 # `targetphase` is ignored.
                 phases.advanceboundary(repo, tr, phases.draft, srccontent)
                 phases.retractboundary(repo, tr, phases.draft, added)
         elif srctype != 'strip':
             # publishing only alters behavior during push
             #
             # strip should not touch boundary at all
             phases.retractboundary(repo, tr, targetphase, added)
 
         if changesets > 0:
             if srctype != 'strip':
                 # During strip, branchcache is invalid but the coming call to
                 # `destroyed` will repair it.
                 # In other cases we can safely update the cache on disk.
                 branchmap.updatecache(repo.filtered('served'))
 
             def runhooks():
                 # These hooks run when the lock releases, not when the
                 # transaction closes. So it's possible for the changelog
                 # to have changed since we last saw it.
                 if clstart >= len(repo):
                     return
 
                 # forcefully update the on-disk branch cache
                 repo.ui.debug("updating the branch cache\n")
                 repo.hook("changegroup", **hookargs)
 
                 for n in added:
                     args = hookargs.copy()
                     args['node'] = hex(n)
                     repo.hook("incoming", **args)
 
                 newheads = [h for h in repo.heads() if h not in oldheads]
                 repo.ui.log("incoming",
                             "%s incoming changes - new heads: %s\n",
                             len(added),
                             ', '.join([hex(c[:6]) for c in newheads]))
 
             tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                             lambda tr: repo._afterlock(runhooks))
 
         tr.close()
 
     finally:
         tr.release()
     repo.ui.flush()
     # never return 0 here:
     if dh < 0:
         return dh - 1
     else:
         return dh + 1