changegroup: move chunk extraction into a getchunks method of unbundle10...
Pierre-Yves David
r20999:1e28ec97 default
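For context before reading the diff: after this change, any holder of an unbundle10 object can re-serialize the raw changegroup stream itself through getchunks(), instead of writebundle owning that extraction loop. A minimal sketch of the new consumer side, assuming the in-tree import path mercurial.changegroup; the file names are hypothetical:

    from mercurial import changegroup

    # Hypothetical example: copy a bundle's changegroup payload to a file.
    # readbundle() consumes the 6-byte bundle header and returns an
    # unbundle10; getchunks() then re-emits the framed chunk stream (chunk
    # headers, payload slices and the closing zero-length chunks) verbatim.
    fh = open('incoming.hg', 'rb')
    cg = changegroup.readbundle(fh, 'incoming.hg')
    with open('payload.cg', 'wb') as out:
        for chunk in cg.getchunks():
            out.write(chunk)

This is exactly the shape writebundle takes in the diff below: it feeds each yielded chunk through its compressor instead of writing it raw.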
diff --git a/mercurial/changegroup.py b/mercurial/changegroup.py
--- a/mercurial/changegroup.py
+++ b/mercurial/changegroup.py
@@ -1,746 +1,759 @@
 # changegroup.py - Mercurial changegroup manipulation functions
 #
 # Copyright 2006 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 import weakref
 from i18n import _
 from node import nullrev, nullid, hex, short
 import mdiff, util, dagutil
 import struct, os, bz2, zlib, tempfile
 import discovery, error, phases, branchmap

 _BUNDLE10_DELTA_HEADER = "20s20s20s20s"

 def readexactly(stream, n):
     '''read n bytes from stream.read and abort if less was available'''
     s = stream.read(n)
     if len(s) < n:
         raise util.Abort(_("stream ended unexpectedly"
                            " (got %d bytes, expected %d)")
                          % (len(s), n))
     return s

 def getchunk(stream):
     """return the next chunk from stream as a string"""
     d = readexactly(stream, 4)
     l = struct.unpack(">l", d)[0]
     if l <= 4:
         if l:
             raise util.Abort(_("invalid chunk length %d") % l)
         return ""
     return readexactly(stream, l - 4)

 def chunkheader(length):
     """return a changegroup chunk header (string)"""
     return struct.pack(">l", length + 4)

 def closechunk():
     """return a changegroup chunk header (string) for a zero-length chunk"""
     return struct.pack(">l", 0)

 class nocompress(object):
     def compress(self, x):
         return x
     def flush(self):
         return ""

 bundletypes = {
     "": ("", nocompress), # only when using unbundle on ssh and old http servers
                           # since the unification ssh accepts a header but there
                           # is no capability signaling it.
     "HG10UN": ("HG10UN", nocompress),
     "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
     "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
 }

 # hgweb uses this list to communicate its preferred type
 bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']

 def writebundle(cg, filename, bundletype, vfs=None):
     """Write a bundle file and return its filename.

     Existing files will not be overwritten.
     If no filename is specified, a temporary file is created.
     bz2 compression can be turned off.
     The bundle file will be deleted in case of errors.
     """

     fh = None
     cleanup = None
     try:
         if filename:
             if vfs:
                 fh = vfs.open(filename, "wb")
             else:
                 fh = open(filename, "wb")
         else:
             fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
             fh = os.fdopen(fd, "wb")
         cleanup = filename

         header, compressor = bundletypes[bundletype]
         fh.write(header)
         z = compressor()

         # parse the changegroup data, otherwise we will block
         # in case of sshrepo because we don't know the end of the stream

         # an empty chunkgroup is the end of the changegroup
         # a changegroup has at least 2 chunkgroups (changelog and manifest).
         # after that, an empty chunkgroup is the end of the changegroup
-        empty = False
-        count = 0
-        while not empty or count <= 2:
-            empty = True
-            count += 1
-            while True:
-                chunk = getchunk(cg)
-                if not chunk:
-                    break
-                empty = False
-                fh.write(z.compress(chunkheader(len(chunk))))
-                pos = 0
-                while pos < len(chunk):
-                    next = pos + 2**20
-                    fh.write(z.compress(chunk[pos:next]))
-                    pos = next
-            fh.write(z.compress(closechunk()))
+        for chunk in cg.getchunks():
+            fh.write(z.compress(chunk))
         fh.write(z.flush())
         cleanup = None
         return filename
     finally:
         if fh is not None:
             fh.close()
         if cleanup is not None:
             if filename and vfs:
                 vfs.unlink(cleanup)
             else:
                 os.unlink(cleanup)

 def decompressor(fh, alg):
     if alg == 'UN':
         return fh
     elif alg == 'GZ':
         def generator(f):
             zd = zlib.decompressobj()
             for chunk in util.filechunkiter(f):
                 yield zd.decompress(chunk)
     elif alg == 'BZ':
         def generator(f):
             zd = bz2.BZ2Decompressor()
             zd.decompress("BZ")
             for chunk in util.filechunkiter(f, 4096):
                 yield zd.decompress(chunk)
     else:
         raise util.Abort("unknown bundle compression '%s'" % alg)
     return util.chunkbuffer(generator(fh))

 class unbundle10(object):
     deltaheader = _BUNDLE10_DELTA_HEADER
     deltaheadersize = struct.calcsize(deltaheader)
     def __init__(self, fh, alg):
         self._stream = decompressor(fh, alg)
         self._type = alg
         self.callback = None
     def compressed(self):
         return self._type != 'UN'
     def read(self, l):
         return self._stream.read(l)
     def seek(self, pos):
         return self._stream.seek(pos)
     def tell(self):
         return self._stream.tell()
     def close(self):
         return self._stream.close()

     def chunklength(self):
         d = readexactly(self._stream, 4)
         l = struct.unpack(">l", d)[0]
         if l <= 4:
             if l:
                 raise util.Abort(_("invalid chunk length %d") % l)
             return 0
         if self.callback:
             self.callback()
         return l - 4

     def changelogheader(self):
         """v10 does not have a changelog header chunk"""
         return {}

     def manifestheader(self):
         """v10 does not have a manifest header chunk"""
         return {}

     def filelogheader(self):
         """return the header of the filelogs chunk, v10 only has the filename"""
         l = self.chunklength()
         if not l:
             return {}
         fname = readexactly(self._stream, l)
         return {'filename': fname}

     def _deltaheader(self, headertuple, prevnode):
         node, p1, p2, cs = headertuple
         if prevnode is None:
             deltabase = p1
         else:
             deltabase = prevnode
         return node, p1, p2, deltabase, cs

     def deltachunk(self, prevnode):
         l = self.chunklength()
         if not l:
             return {}
         headerdata = readexactly(self._stream, self.deltaheadersize)
         header = struct.unpack(self.deltaheader, headerdata)
         delta = readexactly(self._stream, l - self.deltaheadersize)
         node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
         return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                 'deltabase': deltabase, 'delta': delta}

+    def getchunks(self):
+        """returns all the chunks contained in the bundle
+
+        Used when you need to forward the binary stream to a file or another
+        network API. To do so, it parses the changegroup data; otherwise it
+        would block in the sshrepo case, not knowing the end of the stream.
+        """
+        # an empty chunkgroup is the end of the changegroup
+        # a changegroup has at least 2 chunkgroups (changelog and manifest).
+        # after that, an empty chunkgroup is the end of the changegroup
+        empty = False
+        count = 0
+        while not empty or count <= 2:
+            empty = True
+            count += 1
+            while True:
+                chunk = getchunk(self)
+                if not chunk:
+                    break
+                empty = False
+                yield chunkheader(len(chunk))
+                pos = 0
+                while pos < len(chunk):
+                    next = pos + 2**20
+                    yield chunk[pos:next]
+                    pos = next
+            yield closechunk()
+
 class headerlessfixup(object):
     def __init__(self, fh, h):
         self._h = h
         self._fh = fh
     def read(self, n):
         if self._h:
             d, self._h = self._h[:n], self._h[n:]
             if len(d) < n:
                 d += readexactly(self._fh, n - len(d))
             return d
         return readexactly(self._fh, n)

 def readbundle(fh, fname, vfs=None):
     header = readexactly(fh, 6)

     if not fname:
         fname = "stream"
         if not header.startswith('HG') and header.startswith('\0'):
             fh = headerlessfixup(fh, header)
             header = "HG10UN"
     elif vfs:
         fname = vfs.join(fname)

     magic, version, alg = header[0:2], header[2:4], header[4:6]

     if magic != 'HG':
         raise util.Abort(_('%s: not a Mercurial bundle') % fname)
     if version != '10':
         raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
     return unbundle10(fh, alg)

 class bundle10(object):
     deltaheader = _BUNDLE10_DELTA_HEADER
     def __init__(self, repo, bundlecaps=None):
         """Given a source repo, construct a bundler.

         bundlecaps is optional and can be used to specify the set of
         capabilities which can be used to build the bundle.
         """
         # Set of capabilities we can use to build the bundle.
         if bundlecaps is None:
             bundlecaps = set()
         self._bundlecaps = bundlecaps
         self._changelog = repo.changelog
         self._manifest = repo.manifest
         reorder = repo.ui.config('bundle', 'reorder', 'auto')
         if reorder == 'auto':
             reorder = None
         else:
             reorder = util.parsebool(reorder)
         self._repo = repo
         self._reorder = reorder
         self._progress = repo.ui.progress
     def close(self):
         return closechunk()

     def fileheader(self, fname):
         return chunkheader(len(fname)) + fname

     def group(self, nodelist, revlog, lookup, units=None, reorder=None):
         """Calculate a delta group, yielding a sequence of changegroup chunks
         (strings).

         Given a list of changeset revs, return a set of deltas and
         metadata corresponding to nodes. The first delta is
         first parent(nodelist[0]) -> nodelist[0], the receiver is
         guaranteed to have this parent as it has all history before
         these changesets. In the case firstparent is nullrev the
         changegroup starts with a full revision.

         If units is not None, progress detail will be generated, units specifies
         the type of revlog that is touched (changelog, manifest, etc.).
         """
         # if we don't have any revisions touched by these changesets, bail
         if len(nodelist) == 0:
             yield self.close()
             return

         # for generaldelta revlogs, we linearize the revs; this will both be
         # much quicker and generate a much smaller bundle
         if (revlog._generaldelta and reorder is not False) or reorder:
             dag = dagutil.revlogdag(revlog)
             revs = set(revlog.rev(n) for n in nodelist)
             revs = dag.linearize(revs)
         else:
             revs = sorted([revlog.rev(n) for n in nodelist])

         # add the parent of the first rev
         p = revlog.parentrevs(revs[0])[0]
         revs.insert(0, p)

         # build deltas
         total = len(revs) - 1
         msgbundling = _('bundling')
         for r in xrange(len(revs) - 1):
             if units is not None:
                 self._progress(msgbundling, r + 1, unit=units, total=total)
             prev, curr = revs[r], revs[r + 1]
             linknode = lookup(revlog.node(curr))
             for c in self.revchunk(revlog, curr, prev, linknode):
                 yield c

         yield self.close()

     # filter any nodes that claim to be part of the known set
     def prune(self, revlog, missing, commonrevs, source):
         rr, rl = revlog.rev, revlog.linkrev
         return [n for n in missing if rl(rr(n)) not in commonrevs]

     def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
         '''yield a sequence of changegroup chunks (strings)'''
         repo = self._repo
         cl = self._changelog
         mf = self._manifest
         reorder = self._reorder
         progress = self._progress

         # for progress output
         msgbundling = _('bundling')

         mfs = {} # needed manifests
         fnodes = {} # needed file nodes
         changedfiles = set()

         # Callback for the changelog, used to collect changed files and manifest
         # nodes.
         # Returns the linkrev node (identity in the changelog case).
         def lookupcl(x):
             c = cl.read(x)
             changedfiles.update(c[3])
             # record the first changeset introducing this manifest version
             mfs.setdefault(c[0], x)
             return x

         # Callback for the manifest, used to collect linkrevs for filelog
         # revisions.
         # Returns the linkrev node (collected in lookupcl).
         def lookupmf(x):
             clnode = mfs[x]
             if not fastpathlinkrev:
                 mdata = mf.readfast(x)
                 for f, n in mdata.iteritems():
                     if f in changedfiles:
                         # record the first changeset introducing this filelog
                         # version
                         fnodes[f].setdefault(n, clnode)
             return clnode

         for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets'),
                                 reorder=reorder):
             yield chunk
         progress(msgbundling, None)

         for f in changedfiles:
             fnodes[f] = {}
         mfnodes = self.prune(mf, mfs, commonrevs, source)
         for chunk in self.group(mfnodes, mf, lookupmf, units=_('manifests'),
                                 reorder=reorder):
             yield chunk
         progress(msgbundling, None)

         mfs.clear()
         needed = set(cl.rev(x) for x in clnodes)

         def linknodes(filerevlog, fname):
             if fastpathlinkrev:
                 llr = filerevlog.linkrev
                 def genfilenodes():
                     for r in filerevlog:
                         linkrev = llr(r)
                         if linkrev in needed:
                             yield filerevlog.node(r), cl.node(linkrev)
                 fnodes[fname] = dict(genfilenodes())
             return fnodes.get(fname, {})

         for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                         source):
             yield chunk

         yield self.close()
         progress(msgbundling, None)

         if clnodes:
             repo.hook('outgoing', node=hex(clnodes[0]), source=source)

     def generatefiles(self, changedfiles, linknodes, commonrevs, source):
         repo = self._repo
         progress = self._progress
         reorder = self._reorder
         msgbundling = _('bundling')

         total = len(changedfiles)
         # for progress output
         msgfiles = _('files')
         for i, fname in enumerate(sorted(changedfiles)):
             filerevlog = repo.file(fname)
             if not filerevlog:
                 raise util.Abort(_("empty or missing revlog for %s") % fname)

             linkrevnodes = linknodes(filerevlog, fname)
             # Lookup for filenodes, we collected the linkrev nodes above in the
             # fastpath case and with lookupmf in the slowpath case.
             def lookupfilelog(x):
                 return linkrevnodes[x]

             filenodes = self.prune(filerevlog, linkrevnodes, commonrevs, source)
             if filenodes:
                 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                          total=total)
                 yield self.fileheader(fname)
                 for chunk in self.group(filenodes, filerevlog, lookupfilelog,
                                         reorder=reorder):
                     yield chunk

     def revchunk(self, revlog, rev, prev, linknode):
         node = revlog.node(rev)
         p1, p2 = revlog.parentrevs(rev)
         base = prev

         prefix = ''
         if base == nullrev:
             delta = revlog.revision(node)
             prefix = mdiff.trivialdiffheader(len(delta))
         else:
             delta = revlog.revdiff(base, rev)
         p1n, p2n = revlog.parents(node)
         basenode = revlog.node(base)
         meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
         meta += prefix
         l = len(meta) + len(delta)
         yield chunkheader(l)
         yield meta
         yield delta
     def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
         # do nothing with basenode, it is implicitly the previous one in HG10
         return struct.pack(self.deltaheader, node, p1n, p2n, linknode)

 def _changegroupinfo(repo, nodes, source):
     if repo.ui.verbose or source == 'bundle':
         repo.ui.status(_("%d changesets found\n") % len(nodes))
     if repo.ui.debugflag:
         repo.ui.debug("list of changesets:\n")
         for node in nodes:
             repo.ui.debug("%s\n" % hex(node))

 def getsubset(repo, outgoing, bundler, source, fastpath=False):
     repo = repo.unfiltered()
     commonrevs = outgoing.common
     csets = outgoing.missing
     heads = outgoing.missingheads
     # We go through the fast path if we get told to, or if all (unfiltered)
     # heads have been requested (since we then know that all linkrevs will
     # be pulled by the client).
     heads.sort()
     fastpathlinkrev = fastpath or (
         repo.filtername is None and heads == sorted(repo.heads()))

     repo.hook('preoutgoing', throw=True, source=source)
     _changegroupinfo(repo, csets, source)
     gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
     return unbundle10(util.chunkbuffer(gengroup), 'UN')

 def changegroupsubset(repo, roots, heads, source):
     """Compute a changegroup consisting of all the nodes that are
     descendants of any of the roots and ancestors of any of the heads.
     Return a chunkbuffer object whose read() method will return
     successive changegroup chunks.

     It is fairly complex as determining which filenodes and which
     manifest nodes need to be included for the changeset to be complete
     is non-trivial.

     Another wrinkle is doing the reverse, figuring out which changeset in
     the changegroup a particular filenode or manifestnode belongs to.
     """
     cl = repo.changelog
     if not roots:
         roots = [nullid]
     # TODO: remove call to nodesbetween.
     csets, roots, heads = cl.nodesbetween(roots, heads)
     discbases = []
     for n in roots:
         discbases.extend([p for p in cl.parents(n) if p != nullid])
     outgoing = discovery.outgoing(cl, discbases, heads)
     bundler = bundle10(repo)
     return getsubset(repo, outgoing, bundler, source)

 def getlocalbundle(repo, source, outgoing, bundlecaps=None):
     """Like getbundle, but taking a discovery.outgoing as an argument.

     This is only implemented for local repos and reuses potentially
     precomputed sets in outgoing."""
     if not outgoing.missing:
         return None
     bundler = bundle10(repo, bundlecaps)
     return getsubset(repo, outgoing, bundler, source)

 def getbundle(repo, source, heads=None, common=None, bundlecaps=None):
     """Like changegroupsubset, but returns the set difference between the
     ancestors of heads and the ancestors common.

     If heads is None, use the local heads. If common is None, use [nullid].

     The nodes in common might not all be known locally due to the way the
     current discovery protocol works.
     """
     cl = repo.changelog
     if common:
         hasnode = cl.hasnode
         common = [n for n in common if hasnode(n)]
     else:
         common = [nullid]
     if not heads:
         heads = cl.heads()
     outgoing = discovery.outgoing(cl, common, heads)
     return getlocalbundle(repo, source, outgoing, bundlecaps=bundlecaps)

 def changegroup(repo, basenodes, source):
     # to avoid a race we use changegroupsubset() (issue1320)
     return changegroupsubset(repo, basenodes, repo.heads(), source)

 def addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
     revisions = 0
     files = 0
     while True:
         chunkdata = source.filelogheader()
         if not chunkdata:
             break
         f = chunkdata["filename"]
         repo.ui.debug("adding %s revisions\n" % f)
         pr()
         fl = repo.file(f)
         o = len(fl)
         if not fl.addgroup(source, revmap, trp):
             raise util.Abort(_("received file revlog group is empty"))
         revisions += len(fl) - o
         files += 1
         if f in needfiles:
             needs = needfiles[f]
             for new in xrange(o, len(fl)):
                 n = fl.node(new)
                 if n in needs:
                     needs.remove(n)
                 else:
                     raise util.Abort(
                         _("received spurious file revlog entry"))
             if not needs:
                 del needfiles[f]
     repo.ui.progress(_('files'), None)

     for f, needs in needfiles.iteritems():
         fl = repo.file(f)
         for n in needs:
             try:
                 fl.rev(n)
             except error.LookupError:
                 raise util.Abort(
                     _('missing file data for %s:%s - run hg verify') %
                     (f, hex(n)))

     return revisions, files

 def addchangegroup(repo, source, srctype, url, emptyok=False):
     """Add the changegroup returned by source.read() to this repo.
     srctype is a string like 'push', 'pull', or 'unbundle'. url is
     the URL of the repo where this changegroup is coming from.

     Return an integer summarizing the change to this repo:
     - nothing changed or no source: 0
     - more heads than before: 1+added heads (2..n)
     - fewer heads than before: -1-removed heads (-2..-n)
     - number of heads stays the same: 1
     """
     repo = repo.unfiltered()
     def csmap(x):
         repo.ui.debug("add changeset %s\n" % short(x))
         return len(cl)

     def revmap(x):
         return cl.rev(x)

     if not source:
         return 0

     repo.hook('prechangegroup', throw=True, source=srctype, url=url)

     changesets = files = revisions = 0
     efiles = set()

     # write changelog data to temp files so concurrent readers will not see
     # inconsistent view
     cl = repo.changelog
     cl.delayupdate()
     oldheads = cl.heads()

     tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
     try:
         trp = weakref.proxy(tr)
         # pull off the changeset group
         repo.ui.status(_("adding changesets\n"))
         clstart = len(cl)
         class prog(object):
             step = _('changesets')
             count = 1
             ui = repo.ui
             total = None
             def __call__(repo):
                 repo.ui.progress(repo.step, repo.count, unit=_('chunks'),
                                  total=repo.total)
                 repo.count += 1
         pr = prog()
         source.callback = pr

         source.changelogheader()
         srccontent = cl.addgroup(source, csmap, trp)
         if not (srccontent or emptyok):
             raise util.Abort(_("received changelog group is empty"))
         clend = len(cl)
         changesets = clend - clstart
         for c in xrange(clstart, clend):
             efiles.update(repo[c].files())
         efiles = len(efiles)
         repo.ui.progress(_('changesets'), None)

         # pull off the manifest group
         repo.ui.status(_("adding manifests\n"))
         pr.step = _('manifests')
         pr.count = 1
         pr.total = changesets # manifests <= changesets
         # no need to check for empty manifest group here:
         # if the result of the merge of 1 and 2 is the same in 3 and 4,
         # no new manifest will be created and the manifest group will
         # be empty during the pull
         source.manifestheader()
         repo.manifest.addgroup(source, revmap, trp)
         repo.ui.progress(_('manifests'), None)

         needfiles = {}
         if repo.ui.configbool('server', 'validate', default=False):
             # validate incoming csets have their manifests
             for cset in xrange(clstart, clend):
                 mfest = repo.changelog.read(repo.changelog.node(cset))[0]
                 mfest = repo.manifest.readdelta(mfest)
                 # store file nodes we must see
                 for f, n in mfest.iteritems():
                     needfiles.setdefault(f, set()).add(n)

         # process the files
         repo.ui.status(_("adding file changes\n"))
         pr.step = _('files')
         pr.count = 1
         pr.total = efiles
         source.callback = None

         newrevs, newfiles = addchangegroupfiles(repo, source, revmap, trp, pr,
                                                 needfiles)
         revisions += newrevs
         files += newfiles

         dh = 0
         if oldheads:
             heads = cl.heads()
             dh = len(heads) - len(oldheads)
             for h in heads:
                 if h not in oldheads and repo[h].closesbranch():
                     dh -= 1
         htext = ""
         if dh:
             htext = _(" (%+d heads)") % dh

         repo.ui.status(_("added %d changesets"
                          " with %d changes to %d files%s\n")
                        % (changesets, revisions, files, htext))
         repo.invalidatevolatilesets()

         if changesets > 0:
             p = lambda: cl.writepending() and repo.root or ""
             repo.hook('pretxnchangegroup', throw=True,
                       node=hex(cl.node(clstart)), source=srctype,
                       url=url, pending=p)

         added = [cl.node(r) for r in xrange(clstart, clend)]
         publishing = repo.ui.configbool('phases', 'publish', True)
         if srctype in ('push', 'serve'):
             # Old servers cannot push the boundary themselves.
             # New servers won't push the boundary if changeset already
             # exists locally as secret
             #
             # We should not use added here but the list of all changes in
             # the bundle
             if publishing:
                 phases.advanceboundary(repo, phases.public, srccontent)
             else:
                 phases.advanceboundary(repo, phases.draft, srccontent)
                 phases.retractboundary(repo, phases.draft, added)
         elif srctype != 'strip':
             # publishing only alters behavior during push
             #
             # strip should not touch boundary at all
             phases.retractboundary(repo, phases.draft, added)

         # make changelog see real files again
         cl.finalize(trp)

         tr.close()

         if changesets > 0:
             if srctype != 'strip':
                 # During strip, branchcache is invalid but the coming call to
                 # `destroyed` will repair it.
                 # In other cases we can safely update the cache on disk.
                 branchmap.updatecache(repo.filtered('served'))
             def runhooks():
                 # These hooks run when the lock releases, not when the
                 # transaction closes. So it's possible for the changelog
                 # to have changed since we last saw it.
                 if clstart >= len(repo):
                     return

                 # forcefully update the on-disk branch cache
                 repo.ui.debug("updating the branch cache\n")
                 repo.hook("changegroup", node=hex(cl.node(clstart)),
                           source=srctype, url=url)

                 for n in added:
                     repo.hook("incoming", node=hex(n), source=srctype,
                               url=url)

                 newheads = [h for h in repo.heads() if h not in oldheads]
                 repo.ui.log("incoming",
                             "%s incoming changes - new heads: %s\n",
                             len(added),
                             ', '.join([hex(c[:6]) for c in newheads]))
             repo._afterlock(runhooks)

     finally:
         tr.release()
     # never return 0 here:
     if dh < 0:
         return dh - 1
     else:
         return dh + 1
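A closing reading aid, not part of the commit: the framing that getchunks() preserves is a 4-byte big-endian length that counts itself, and a zero length closes a chunk group (see getchunk, chunkheader and closechunk at the top of the file). A self-contained sketch of that invariant, with hypothetical helper names frame/unframe:

    import struct

    def frame(payload):
        # a changegroup chunk: ">l" length (which includes these 4 bytes),
        # then the payload itself
        return struct.pack(">l", len(payload) + 4) + payload

    def unframe(buf, pos=0):
        # return (payload, next offset); None stands for a close-chunk
        l = struct.unpack(">l", buf[pos:pos + 4])[0]
        if l == 0:
            return None, pos + 4
        return buf[pos + 4:pos + l], pos + l

    blob = frame(b"hello") + struct.pack(">l", 0)
    assert unframe(blob) == (b"hello", 9)
    assert unframe(blob, 9) == (None, 13)

Because the stream carries no overall length marker, only the empty chunkgroups that terminate it, getchunks() has to parse every chunk header even though it merely forwards the bytes.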