##// END OF EJS Templates
changegroup: add "vfs" argument to "writebundle()" for relative access via vfs...
FUJIWARA Katsunori -
r20976:c20f4898 default
parent child Browse files
Show More
@@ -1,738 +1,744 b''
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import weakref
8 import weakref
9 from i18n import _
9 from i18n import _
10 from node import nullrev, nullid, hex, short
10 from node import nullrev, nullid, hex, short
11 import mdiff, util, dagutil
11 import mdiff, util, dagutil
12 import struct, os, bz2, zlib, tempfile
12 import struct, os, bz2, zlib, tempfile
13 import discovery, error, phases, branchmap
13 import discovery, error, phases, branchmap
14
14
15 _BUNDLE10_DELTA_HEADER = "20s20s20s20s"
15 _BUNDLE10_DELTA_HEADER = "20s20s20s20s"
16
16
def readexactly(stream, n):
    """Read exactly n bytes from stream.read; abort on a short read."""
    data = stream.read(n)
    if len(data) < n:
        # a truncated stream is fatal: the caller's framing would desync
        raise util.Abort(_("stream ended unexpectedly"
                           " (got %d bytes, expected %d)")
                         % (len(data), n))
    return data
25
25
def getchunk(stream):
    """return the next chunk from stream as a string"""
    # the 4-byte big-endian length field counts itself
    header = readexactly(stream, 4)
    length = struct.unpack(">l", header)[0]
    if length <= 4:
        # zero marks the end of a chunk group; any other value below the
        # header size is corrupt
        if length:
            raise util.Abort(_("invalid chunk length %d") % length)
        return ""
    return readexactly(stream, length - 4)
35
35
def chunkheader(length):
    """return a changegroup chunk header (string)"""
    # on the wire the length field includes its own 4 bytes
    return struct.pack(">l", 4 + length)
39
39
def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    # a zero length field terminates the current group of chunks
    return struct.pack(">l", 0)
43
43
class nocompress(object):
    """Pass-through 'compressor' used for uncompressed bundles."""
    def compress(self, x):
        # identity: hand the data back untouched
        return x
    def flush(self):
        # nothing is buffered, so nothing left to emit
        return ""

# map of bundle type name -> (on-disk header, compressor factory)
bundletypes = {
    # "" is only used when using unbundle on ssh and old http servers;
    # since the unification ssh accepts a header but there is no
    # capability signaling it.
    "": ("", nocompress),
    "HG10UN": ("HG10UN", nocompress),
    "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
    "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
}

# hgweb uses this list to communicate its preferred type
bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
61
61
def writebundle(cg, filename, bundletype, vfs=None):
    """Write a bundle file and return its filename.

    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    bz2 compression can be turned off.
    The bundle file will be deleted in case of errors.

    If vfs is given, a non-empty filename is opened (and on failure
    unlinked) relative to that vfs; otherwise plain filesystem paths
    are used.
    """
    fh = None
    cleanup = None
    # Remember whether the file was opened through the vfs. "filename" is
    # reassigned to an absolute path when a temporary file is used, so
    # testing "filename and vfs" at cleanup time would wrongly route a
    # tempfile's absolute path through the (relative) vfs.
    usedvfs = False
    try:
        if filename:
            if vfs:
                usedvfs = True
                fh = vfs.open(filename, "wb")
            else:
                fh = open(filename, "wb")
        else:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, "wb")
        cleanup = filename

        header, compressor = bundletypes[bundletype]
        fh.write(header)
        z = compressor()

        # parse the changegroup data, otherwise we will block
        # in case of sshrepo because we don't know the end of the stream

        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, an empty chunkgroup is the end of the changegroup
        empty = False
        count = 0
        while not empty or count <= 2:
            empty = True
            count += 1
            while True:
                chunk = getchunk(cg)
                if not chunk:
                    break
                empty = False
                fh.write(z.compress(chunkheader(len(chunk))))
                pos = 0
                # feed the compressor in 1MB slices to bound memory use
                while pos < len(chunk):
                    next = pos + 2**20
                    fh.write(z.compress(chunk[pos:next]))
                    pos = next
            fh.write(z.compress(closechunk()))
        fh.write(z.flush())
        # success: disarm the error-cleanup path
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if usedvfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
116
122
def decompressor(fh, alg):
    """Wrap fh so that reads yield decompressed data for the given
    two-letter algorithm code ('UN', 'GZ' or 'BZ')."""
    if alg == 'UN':
        # no compression: hand the stream back untouched
        return fh
    if alg == 'GZ':
        def generator(f):
            zd = zlib.decompressobj()
            for chunk in util.filechunkiter(f):
                yield zd.decompress(chunk)
    elif alg == 'BZ':
        def generator(f):
            zd = bz2.BZ2Decompressor()
            # the stored stream lacks the "BZ" magic; feed it first
            zd.decompress("BZ")
            for chunk in util.filechunkiter(f, 4096):
                yield zd.decompress(chunk)
    else:
        raise util.Abort("unknown bundle compression '%s'" % alg)
    return util.chunkbuffer(generator(fh))
134
140
class unbundle10(object):
    """Reader for a version 10 changegroup stream."""
    deltaheader = _BUNDLE10_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)

    def __init__(self, fh, alg):
        self._stream = decompressor(fh, alg)
        self._type = alg
        self.callback = None

    def compressed(self):
        # anything other than 'UN' went through a decompressor
        return self._type != 'UN'

    def read(self, l):
        return self._stream.read(l)

    def seek(self, pos):
        return self._stream.seek(pos)

    def tell(self):
        return self._stream.tell()

    def close(self):
        return self._stream.close()

    def chunklength(self):
        """Consume the next chunk header and return the payload length."""
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise util.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self.chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, cs = headertuple
        # HG10 deltas are implicitly against the previously sent revision;
        # the very first one is against its first parent
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        return node, p1, p2, deltabase, cs

    def deltachunk(self, prevnode):
        l = self.chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                'deltabase': deltabase, 'delta': delta}
198
204
class headerlessfixup(object):
    """File-like wrapper that replays already-consumed header bytes.

    readbundle() peeks at the first bytes of a stream; when the stream
    turns out to be headerless those bytes are pushed back here so the
    consumer sees the complete stream again.
    """
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh

    def read(self, n):
        if not self._h:
            return readexactly(self._fh, n)
        d, self._h = self._h[:n], self._h[n:]
        if len(d) < n:
            # buffered header exhausted mid-read; top up from the stream
            d += readexactly(self._fh, n - len(d))
        return d
210
216
def readbundle(fh, fname):
    """Sniff a bundle stream's 6-byte header and return an unbundle10
    reader, aborting on anything that is not an HG10 bundle."""
    header = readexactly(fh, 6)

    if not fname:
        fname = "stream"
    # old servers sent raw, headerless HG10UN data; detect that and push
    # the consumed bytes back onto the stream
    if not header.startswith('HG') and header.startswith('\0'):
        fh = headerlessfixup(fh, header)
        header = "HG10UN"

    magic, version, alg = header[:2], header[2:4], header[4:6]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version != '10':
        raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
    return unbundle10(fh, alg)
227
233
class bundle10(object):
    """Builder that turns outgoing revisions into an HG10 changegroup."""
    deltaheader = _BUNDLE10_DELTA_HEADER

    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        self._changelog = repo.changelog
        self._manifest = repo.manifest
        reorder = repo.ui.config('bundle', 'reorder', 'auto')
        # 'auto' -> None lets group() decide per-revlog; anything else is
        # forced on/off by the user
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress

    def close(self):
        # a changegroup section ends with a zero-length chunk
        return closechunk()

    def fileheader(self, fname):
        # filelog sections are introduced by a chunk holding the filename
        return chunkheader(len(fname)) + fname

    def group(self, nodelist, revlog, lookup, units=None, reorder=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated, units
        specifies the type of revlog that is touched (changelog, manifest,
        etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if not nodelist:
            yield self.close()
            return

        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and reorder is not False) or reorder:
            dag = dagutil.revlogdag(revlog)
            revs = dag.linearize(set(revlog.rev(n) for n in nodelist))
        else:
            revs = sorted([revlog.rev(n) for n in nodelist])

        # add the parent of the first rev
        revs.insert(0, revlog.parentrevs(revs[0])[0])

        # build deltas
        total = len(revs) - 1
        msgbundling = _('bundling')
        for r in xrange(len(revs) - 1):
            if units is not None:
                self._progress(msgbundling, r + 1, unit=units, total=total)
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(revlog.node(curr))
            for c in self.revchunk(revlog, curr, prev, linknode):
                yield c

        yield self.close()

    # filter any nodes that claim to be part of the known set
    def prune(self, revlog, missing, commonrevs, source):
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        repo = self._repo
        cl = self._changelog
        mf = self._manifest
        reorder = self._reorder
        progress = self._progress

        # for progress output
        msgbundling = _('bundling')

        mfs = {}            # needed manifests
        fnodes = {}         # needed file nodes
        changedfiles = set()

        # Callback for the changelog, used to collect changed files and
        # manifest nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            changedfiles.update(c[3])
            # record the first changeset introducing this manifest version
            mfs.setdefault(c[0], x)
            return x

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def lookupmf(x):
            clnode = mfs[x]
            if not fastpathlinkrev:
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        # record the first changeset introducing this
                        # filelog version
                        fnodes[f].setdefault(n, clnode)
            return clnode

        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets'),
                                reorder=reorder):
            yield chunk
        progress(msgbundling, None)

        for f in changedfiles:
            fnodes[f] = {}
        mfnodes = self.prune(mf, mfs, commonrevs, source)
        for chunk in self.group(mfnodes, mf, lookupmf, units=_('manifests'),
                                reorder=reorder):
            yield chunk
        progress(msgbundling, None)

        mfs.clear()
        needed = set(cl.rev(x) for x in clnodes)

        def linknodes(filerevlog, fname):
            if fastpathlinkrev:
                # scan the filelog directly: every revision whose linkrev
                # is outgoing belongs in the bundle
                llr = filerevlog.linkrev
                def genfilenodes():
                    for r in filerevlog:
                        linkrev = llr(r)
                        if linkrev in needed:
                            yield filerevlog.node(r), cl.node(linkrev)
                fnodes[fname] = dict(genfilenodes())
            return fnodes.get(fname, {})

        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()
        progress(msgbundling, None)

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        """Yield the chunks for every changed file's filelog section."""
        repo = self._repo
        progress = self._progress
        reorder = self._reorder
        msgbundling = _('bundling')

        total = len(changedfiles)
        # for progress output
        msgfiles = _('files')
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise util.Abort(_("empty or missing revlog for %s") % fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in
            # the fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs,
                                   source)
            if filenodes:
                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                         total=total)
                yield self.fileheader(fname)
                for chunk in self.group(filenodes, filerevlog, lookupfilelog,
                                        reorder=reorder):
                    yield chunk

    def revchunk(self, revlog, rev, prev, linknode):
        """Yield header+delta chunks for a single revision."""
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = prev

        prefix = ''
        if base == nullrev:
            # no usable base: ship the full text as a trivial delta
            delta = revlog.revision(node)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
        # do nothing with basenode, it is implicitly the previous one in HG10
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
433
439
434 def _changegroupinfo(repo, nodes, source):
440 def _changegroupinfo(repo, nodes, source):
435 if repo.ui.verbose or source == 'bundle':
441 if repo.ui.verbose or source == 'bundle':
436 repo.ui.status(_("%d changesets found\n") % len(nodes))
442 repo.ui.status(_("%d changesets found\n") % len(nodes))
437 if repo.ui.debugflag:
443 if repo.ui.debugflag:
438 repo.ui.debug("list of changesets:\n")
444 repo.ui.debug("list of changesets:\n")
439 for node in nodes:
445 for node in nodes:
440 repo.ui.debug("%s\n" % hex(node))
446 repo.ui.debug("%s\n" % hex(node))
441
447
def getsubset(repo, outgoing, bundler, source, fastpath=False):
    """Generate the changegroup for `outgoing` and return it wrapped in an
    uncompressed unbundle10 reader."""
    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know that all linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
    return unbundle10(util.chunkbuffer(gengroup), 'UN')
458
464
def changegroupsubset(repo, roots, heads, source):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the roots and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    cl = repo.changelog
    if not roots:
        roots = [nullid]
    # TODO: remove call to nodesbetween.
    csets, roots, heads = cl.nodesbetween(roots, heads)
    # discovery bases are the non-null parents of the computed roots
    discbases = [p for n in roots
                 for p in cl.parents(n) if p != nullid]
    outgoing = discovery.outgoing(cl, discbases, heads)
    bundler = bundle10(repo)
    return getsubset(repo, outgoing, bundler, source)
483
489
def getlocalbundle(repo, source, outgoing, bundlecaps=None):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing."""
    if not outgoing.missing:
        # nothing to send
        return None
    return getsubset(repo, outgoing, bundle10(repo, bundlecaps), source)
493
499
def getbundle(repo, source, heads=None, common=None, bundlecaps=None):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors common.

    If heads is None, use the local heads. If common is None, use [nullid].

    The nodes in common might not all be known locally due to the way the
    current discovery protocol works.
    """
    cl = repo.changelog
    if common:
        # drop common nodes we do not actually know about
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    outgoing = discovery.outgoing(cl, common, heads)
    return getlocalbundle(repo, source, outgoing, bundlecaps=bundlecaps)
513
519
def changegroup(repo, basenodes, source):
    """Return a changegroup rooted at basenodes covering all local heads.

    Delegates to changegroupsubset() to avoid a race (issue1320).
    """
    heads = repo.heads()
    return changegroupsubset(repo, basenodes, heads, source)
517
523
def addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
    """Consume file revlog groups from source and add them to the repo.

    pr is a progress callback invoked once per filelog; trp is the
    transaction proxy passed through to revlog.addgroup().  Entries are
    removed from needfiles as their nodes arrive; any node still listed
    afterwards must already exist locally or we abort.

    Returns a (revisions, files) pair: the number of file revisions
    added and the number of distinct filelogs touched.
    """
    revcount = filecount = 0
    while True:
        header = source.filelogheader()
        if not header:
            break
        fname = header["filename"]
        repo.ui.debug("adding %s revisions\n" % fname)
        pr()
        flog = repo.file(fname)
        oldlen = len(flog)
        if not flog.addgroup(source, revmap, trp):
            raise util.Abort(_("received file revlog group is empty"))
        revcount += len(flog) - oldlen
        filecount += 1
        needs = needfiles.get(fname)
        if needs is not None:
            # tick off every newly-added node; anything unexpected aborts
            for idx in xrange(oldlen, len(flog)):
                try:
                    needs.remove(flog.node(idx))
                except KeyError:
                    raise util.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[fname]
    repo.ui.progress(_('files'), None)

    # whatever we still expected but did not receive must exist locally
    for fname, needs in needfiles.iteritems():
        flog = repo.file(fname)
        for node in needs:
            try:
                flog.rev(node)
            except error.LookupError:
                raise util.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (fname, hex(node)))

    return revcount, filecount
558
564
def addchangegroup(repo, source, srctype, url, emptyok=False):
    """Add the changegroup returned by source.read() to this repo.
    srctype is a string like 'push', 'pull', or 'unbundle'. url is
    the URL of the repo where this changegroup is coming from.

    Return an integer summarizing the change to this repo:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - fewer heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    # operate on the unfiltered view so every added changeset is visible
    repo = repo.unfiltered()
    def csmap(x):
        repo.ui.debug("add changeset %s\n" % short(x))
        return len(cl)

    def revmap(x):
        return cl.rev(x)

    if not source:
        return 0

    repo.hook('prechangegroup', throw=True, source=srctype, url=url)

    changesets = files = revisions = 0
    efiles = set()

    # write changelog data to temp files so concurrent readers will not see
    # inconsistent view
    cl = repo.changelog
    cl.delayupdate()
    oldheads = cl.heads()

    tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
    try:
        trp = weakref.proxy(tr)
        # pull off the changeset group
        repo.ui.status(_("adding changesets\n"))
        clstart = len(cl)
        # minimal progress reporter handed to the unbundler; note that
        # __call__ deliberately names its self-parameter "repo"
        class prog(object):
            step = _('changesets')
            count = 1
            ui = repo.ui
            total = None
            def __call__(repo):
                repo.ui.progress(repo.step, repo.count, unit=_('chunks'),
                                 total=repo.total)
                repo.count += 1
        pr = prog()
        source.callback = pr

        source.changelogheader()
        srccontent = cl.addgroup(source, csmap, trp)
        if not (srccontent or emptyok):
            raise util.Abort(_("received changelog group is empty"))
        clend = len(cl)
        changesets = clend - clstart
        # efiles counts the distinct files touched by the incoming
        # changesets; used below as the files-progress total
        for c in xrange(clstart, clend):
            efiles.update(repo[c].files())
        efiles = len(efiles)
        repo.ui.progress(_('changesets'), None)

        # pull off the manifest group
        repo.ui.status(_("adding manifests\n"))
        pr.step = _('manifests')
        pr.count = 1
        pr.total = changesets # manifests <= changesets
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        source.manifestheader()
        repo.manifest.addgroup(source, revmap, trp)
        repo.ui.progress(_('manifests'), None)

        needfiles = {}
        if repo.ui.configbool('server', 'validate', default=False):
            # validate incoming csets have their manifests
            for cset in xrange(clstart, clend):
                mfest = repo.changelog.read(repo.changelog.node(cset))[0]
                mfest = repo.manifest.readdelta(mfest)
                # store file nodes we must see
                for f, n in mfest.iteritems():
                    needfiles.setdefault(f, set()).add(n)

        # process the files
        repo.ui.status(_("adding file changes\n"))
        pr.step = _('files')
        pr.count = 1
        pr.total = efiles
        source.callback = None

        newrevs, newfiles = addchangegroupfiles(repo, source, revmap, trp, pr,
                                                needfiles)
        revisions += newrevs
        files += newfiles

        # dh = head-count delta, ignoring new heads that close a branch
        dh = 0
        if oldheads:
            heads = cl.heads()
            dh = len(heads) - len(oldheads)
            for h in heads:
                if h not in oldheads and repo[h].closesbranch():
                    dh -= 1
        htext = ""
        if dh:
            htext = _(" (%+d heads)") % dh

        repo.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, htext))
        repo.invalidatevolatilesets()

        if changesets > 0:
            # expose pending (not yet finalized) changelog data to hooks
            p = lambda: cl.writepending() and repo.root or ""
            repo.hook('pretxnchangegroup', throw=True,
                      node=hex(cl.node(clstart)), source=srctype,
                      url=url, pending=p)

        added = [cl.node(r) for r in xrange(clstart, clend)]
        publishing = repo.ui.configbool('phases', 'publish', True)
        if srctype in ('push', 'serve'):
            # Old servers can not push the boundary themselves.
            # New servers won't push the boundary if changeset already
            # exists locally as secret
            #
            # We should not use added here but the list of all change in
            # the bundle
            if publishing:
                phases.advanceboundary(repo, phases.public, srccontent)
            else:
                phases.advanceboundary(repo, phases.draft, srccontent)
                phases.retractboundary(repo, phases.draft, added)
        elif srctype != 'strip':
            # publishing only alter behavior during push
            #
            # strip should not touch boundary at all
            phases.retractboundary(repo, phases.draft, added)

        # make changelog see real files again
        cl.finalize(trp)

        tr.close()

        if changesets > 0:
            if srctype != 'strip':
                # During strip, branchcache is invalid but coming call to
                # `destroyed` will repair it.
                # In other case we can safely update cache on disk.
                branchmap.updatecache(repo.filtered('served'))
            def runhooks():
                # These hooks run when the lock releases, not when the
                # transaction closes. So it's possible for the changelog
                # to have changed since we last saw it.
                if clstart >= len(repo):
                    return

                # forcefully update the on-disk branch cache
                repo.ui.debug("updating the branch cache\n")
                repo.hook("changegroup", node=hex(cl.node(clstart)),
                          source=srctype, url=url)

                for n in added:
                    repo.hook("incoming", node=hex(n), source=srctype,
                              url=url)

                newheads = [h for h in repo.heads() if h not in oldheads]
                repo.ui.log("incoming",
                            "%s incoming changes - new heads: %s\n",
                            len(added),
                            ', '.join([hex(c[:6]) for c in newheads]))
            repo._afterlock(runhooks)

    finally:
        tr.release()
    # never return 0 here:
    if dh < 0:
        return dh - 1
    else:
        return dh + 1
General Comments 0
You need to be logged in to leave comments. Login now