addchangegroup: call `prechangegroup` hook after transaction retrieval...
Pierre-Yves David
r22969:805c18b2 (default branch)
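The actual change is easy to miss in the full-file view below: the `prechangegroup` hook used to be invoked before the changelog was switched to delayed updates and before the transaction was opened; it is now invoked inside the `try:` block, immediately after `repo.transaction()` returns, so the hook only fires once the transaction exists. A condensed sketch of the new ordering in `addchangegroup` (context trimmed from the diff below; only the emphasis comment is added):

    def addchangegroup(repo, source, srctype, url, emptyok=False,
                       targetphase=phases.draft):
        ...
        cl = repo.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            # moved by this changeset: the hook now runs with the
            # transaction already open
            repo.hook('prechangegroup', throw=True, source=srctype, url=url)

            trp = weakref.proxy(tr)
            ...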
@@ -1,762 +1,762 @@
# changegroup.py - Mercurial changegroup manipulation functions
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import weakref
from i18n import _
from node import nullrev, nullid, hex, short
import mdiff, util, dagutil
import struct, os, bz2, zlib, tempfile
import discovery, error, phases, branchmap

_CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"

def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    s = stream.read(n)
    if len(s) < n:
        raise util.Abort(_("stream ended unexpectedly"
                           " (got %d bytes, expected %d)")
                         % (len(s), n))
    return s

def getchunk(stream):
    """return the next chunk from stream as a string"""
    d = readexactly(stream, 4)
    l = struct.unpack(">l", d)[0]
    if l <= 4:
        if l:
            raise util.Abort(_("invalid chunk length %d") % l)
        return ""
    return readexactly(stream, l - 4)

def chunkheader(length):
    """return a changegroup chunk header (string)"""
    return struct.pack(">l", length + 4)

def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    return struct.pack(">l", 0)

class nocompress(object):
    def compress(self, x):
        return x
    def flush(self):
        return ""

bundletypes = {
    "": ("", nocompress), # only when using unbundle on ssh and old http servers
                          # since the unification ssh accepts a header but there
                          # is no capability signaling it.
    "HG10UN": ("HG10UN", nocompress),
    "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
    "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
}

# hgweb uses this list to communicate its preferred type
bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']

def writebundle(cg, filename, bundletype, vfs=None):
    """Write a bundle file and return its filename.

    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    bz2 compression can be turned off.
    The bundle file will be deleted in case of errors.
    """

    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                fh = open(filename, "wb")
        else:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, "wb")
        cleanup = filename

        header, compressor = bundletypes[bundletype]
        fh.write(header)
        z = compressor()

        # parse the changegroup data, otherwise we will block
        # in case of sshrepo because we don't know the end of the stream

        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, an empty chunkgroup is the end of the changegroup
        for chunk in cg.getchunks():
            fh.write(z.compress(chunk))
        fh.write(z.flush())
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)

def decompressor(fh, alg):
    if alg == 'UN':
        return fh
    elif alg == 'GZ':
        def generator(f):
            zd = zlib.decompressobj()
            for chunk in util.filechunkiter(f):
                yield zd.decompress(chunk)
    elif alg == 'BZ':
        def generator(f):
            zd = bz2.BZ2Decompressor()
            zd.decompress("BZ")
            for chunk in util.filechunkiter(f, 4096):
                yield zd.decompress(chunk)
    else:
        raise util.Abort("unknown bundle compression '%s'" % alg)
    return util.chunkbuffer(generator(fh))

class cg1unpacker(object):
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    def __init__(self, fh, alg):
        self._stream = decompressor(fh, alg)
        self._type = alg
        self.callback = None
    def compressed(self):
        return self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def chunklength(self):
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise util.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self.chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        return node, p1, p2, deltabase, cs

    def deltachunk(self, prevnode):
        l = self.chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                'deltabase': deltabase, 'delta': delta}

    def getchunks(self):
        """returns all the chunks contains in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parse the changegroup data, otherwise it will
        block in case of sshrepo because it don't know the end of the stream.
        """
        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, an empty chunkgroup is the end of the changegroup
        empty = False
        count = 0
        while not empty or count <= 2:
            empty = True
            count += 1
            while True:
                chunk = getchunk(self)
                if not chunk:
                    break
                empty = False
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

class headerlessfixup(object):
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        if self._h:
            d, self._h = self._h[:n], self._h[n:]
            if len(d) < n:
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)

class cg1packer(object):
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        self._changelog = repo.changelog
        self._manifest = repo.manifest
        reorder = repo.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress
    def close(self):
        return closechunk()

    def fileheader(self, fname):
        return chunkheader(len(fname)) + fname

    def group(self, nodelist, revlog, lookup, units=None, reorder=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated, units specifies
        the type of revlog that is touched (changelog, manifest, etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self.close()
            return

        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and reorder is not False) or reorder:
            dag = dagutil.revlogdag(revlog)
            revs = set(revlog.rev(n) for n in nodelist)
            revs = dag.linearize(revs)
        else:
            revs = sorted([revlog.rev(n) for n in nodelist])

        # add the parent of the first rev
        p = revlog.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        total = len(revs) - 1
        msgbundling = _('bundling')
        for r in xrange(len(revs) - 1):
            if units is not None:
                self._progress(msgbundling, r + 1, unit=units, total=total)
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(revlog.node(curr))
            for c in self.revchunk(revlog, curr, prev, linknode):
                yield c

        yield self.close()

    # filter any nodes that claim to be part of the known set
    def prune(self, revlog, missing, commonrevs, source):
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        repo = self._repo
        cl = self._changelog
        mf = self._manifest
        reorder = self._reorder
        progress = self._progress

        # for progress output
        msgbundling = _('bundling')

        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()

        # Callback for the changelog, used to collect changed files and manifest
        # nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            changedfiles.update(c[3])
            # record the first changeset introducing this manifest version
            mfs.setdefault(c[0], x)
            return x

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def lookupmf(x):
            clnode = mfs[x]
            if not fastpathlinkrev:
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        # record the first changeset introducing this filelog
                        # version
                        fnodes[f].setdefault(n, clnode)
            return clnode

        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets'),
                                reorder=reorder):
            yield chunk
        progress(msgbundling, None)

        for f in changedfiles:
            fnodes[f] = {}
        mfnodes = self.prune(mf, mfs, commonrevs, source)
        for chunk in self.group(mfnodes, mf, lookupmf, units=_('manifests'),
                                reorder=reorder):
            yield chunk
        progress(msgbundling, None)

        mfs.clear()
        needed = set(cl.rev(x) for x in clnodes)

        def linknodes(filerevlog, fname):
            if fastpathlinkrev:
                llr = filerevlog.linkrev
                def genfilenodes():
                    for r in filerevlog:
                        linkrev = llr(r)
                        if linkrev in needed:
                            yield filerevlog.node(r), cl.node(linkrev)
                fnodes[fname] = dict(genfilenodes())
            return fnodes.get(fname, {})

        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()
        progress(msgbundling, None)

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        repo = self._repo
        progress = self._progress
        reorder = self._reorder
        msgbundling = _('bundling')

        total = len(changedfiles)
        # for progress output
        msgfiles = _('files')
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise util.Abort(_("empty or missing revlog for %s") % fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs, source)
            if filenodes:
                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                         total=total)
                yield self.fileheader(fname)
                for chunk in self.group(filenodes, filerevlog, lookupfilelog,
                                        reorder=reorder):
                    yield chunk

    def revchunk(self, revlog, rev, prev, linknode):
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = prev

        prefix = ''
        if base == nullrev:
            delta = revlog.revision(node)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta
    def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
        # do nothing with basenode, it is implicitly the previous one in HG10
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)

def _changegroupinfo(repo, nodes, source):
    if repo.ui.verbose or source == 'bundle':
        repo.ui.status(_("%d changesets found\n") % len(nodes))
        if repo.ui.debugflag:
            repo.ui.debug("list of changesets:\n")
            for node in nodes:
                repo.ui.debug("%s\n" % hex(node))

def getsubset(repo, outgoing, bundler, source, fastpath=False):
    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered
    # heads have been requested (since we then know there all linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
    return cg1unpacker(util.chunkbuffer(gengroup), 'UN')

def changegroupsubset(repo, roots, heads, source):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the roots and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    cl = repo.changelog
    if not roots:
        roots = [nullid]
    # TODO: remove call to nodesbetween.
    csets, roots, heads = cl.nodesbetween(roots, heads)
    discbases = []
    for n in roots:
        discbases.extend([p for p in cl.parents(n) if p != nullid])
    outgoing = discovery.outgoing(cl, discbases, heads)
    bundler = cg1packer(repo)
    return getsubset(repo, outgoing, bundler, source)

def getlocalchangegroup(repo, source, outgoing, bundlecaps=None):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing."""
    if not outgoing.missing:
        return None
    bundler = cg1packer(repo, bundlecaps)
    return getsubset(repo, outgoing, bundler, source)

def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(cl, common, heads)

def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors common.

    If heads is None, use the local heads. If common is None, use [nullid].

    The nodes in common might not all be known locally due to the way the
    current discovery protocol works.
    """
    outgoing = _computeoutgoing(repo, heads, common)
    return getlocalchangegroup(repo, source, outgoing, bundlecaps=bundlecaps)

def changegroup(repo, basenodes, source):
    # to avoid a race we use changegroupsubset() (issue1320)
    return changegroupsubset(repo, basenodes, repo.heads(), source)

def addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
    revisions = 0
    files = 0
    while True:
        chunkdata = source.filelogheader()
        if not chunkdata:
            break
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        pr()
        fl = repo.file(f)
        o = len(fl)
        if not fl.addgroup(source, revmap, trp):
            raise util.Abort(_("received file revlog group is empty"))
        revisions += len(fl) - o
        files += 1
        if f in needfiles:
            needs = needfiles[f]
            for new in xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise util.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    repo.ui.progress(_('files'), None)

    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise util.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files

def addchangegroup(repo, source, srctype, url, emptyok=False,
                   targetphase=phases.draft):
    """Add the changegroup returned by source.read() to this repo.
    srctype is a string like 'push', 'pull', or 'unbundle'. url is
    the URL of the repo where this changegroup is coming from.

    Return an integer summarizing the change to this repo:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - fewer heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    repo = repo.unfiltered()
    def csmap(x):
        repo.ui.debug("add changeset %s\n" % short(x))
        return len(cl)

    def revmap(x):
        return cl.rev(x)

    if not source:
        return 0

-    repo.hook('prechangegroup', throw=True, source=srctype, url=url)
-
    changesets = files = revisions = 0
    efiles = set()

    # write changelog data to temp files so concurrent readers will not see
    # inconsistent view
    cl = repo.changelog
    cl.delayupdate()
    oldheads = cl.heads()

    tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
    try:
+        repo.hook('prechangegroup', throw=True, source=srctype, url=url)
+
        trp = weakref.proxy(tr)
        # pull off the changeset group
        repo.ui.status(_("adding changesets\n"))
        clstart = len(cl)
        class prog(object):
            step = _('changesets')
            count = 1
            ui = repo.ui
            total = None
            def __call__(repo):
                repo.ui.progress(repo.step, repo.count, unit=_('chunks'),
                                 total=repo.total)
                repo.count += 1
        pr = prog()
        source.callback = pr

        source.changelogheader()
        srccontent = cl.addgroup(source, csmap, trp)
        if not (srccontent or emptyok):
            raise util.Abort(_("received changelog group is empty"))
        clend = len(cl)
        changesets = clend - clstart
        for c in xrange(clstart, clend):
            efiles.update(repo[c].files())
        efiles = len(efiles)
        repo.ui.progress(_('changesets'), None)

        # pull off the manifest group
        repo.ui.status(_("adding manifests\n"))
        pr.step = _('manifests')
        pr.count = 1
        pr.total = changesets # manifests <= changesets
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        source.manifestheader()
        repo.manifest.addgroup(source, revmap, trp)
        repo.ui.progress(_('manifests'), None)

        needfiles = {}
        if repo.ui.configbool('server', 'validate', default=False):
            # validate incoming csets have their manifests
            for cset in xrange(clstart, clend):
                mfest = repo.changelog.read(repo.changelog.node(cset))[0]
                mfest = repo.manifest.readdelta(mfest)
                # store file nodes we must see
                for f, n in mfest.iteritems():
                    needfiles.setdefault(f, set()).add(n)

        # process the files
        repo.ui.status(_("adding file changes\n"))
        pr.step = _('files')
        pr.count = 1
        pr.total = efiles
        source.callback = None

        newrevs, newfiles = addchangegroupfiles(repo, source, revmap, trp, pr,
                                                needfiles)
        revisions += newrevs
        files += newfiles

        dh = 0
        if oldheads:
            heads = cl.heads()
            dh = len(heads) - len(oldheads)
            for h in heads:
                if h not in oldheads and repo[h].closesbranch():
                    dh -= 1
        htext = ""
        if dh:
            htext = _(" (%+d heads)") % dh

        repo.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, htext))
        repo.invalidatevolatilesets()

        if changesets > 0:
            p = lambda: cl.writepending() and repo.root or ""
            if 'node' not in tr.hookargs:
                tr.hookargs['node'] = hex(cl.node(clstart))
                hookargs = dict(tr.hookargs)
            else:
                hookargs = dict(tr.hookargs)
                hookargs['node'] = hex(cl.node(clstart))
            repo.hook('pretxnchangegroup', throw=True, source=srctype,
                      url=url, pending=p, **hookargs)

        added = [cl.node(r) for r in xrange(clstart, clend)]
        publishing = repo.ui.configbool('phases', 'publish', True)
        if srctype in ('push', 'serve'):
            # Old servers can not push the boundary themselves.
            # New servers won't push the boundary if changeset already
            # exists locally as secret
            #
            # We should not use added here but the list of all change in
            # the bundle
            if publishing:
                phases.advanceboundary(repo, tr, phases.public, srccontent)
            else:
                # Those changesets have been pushed from the outside, their
                # phases are going to be pushed alongside. Therefor
                # `targetphase` is ignored.
                phases.advanceboundary(repo, tr, phases.draft, srccontent)
                phases.retractboundary(repo, tr, phases.draft, added)
        elif srctype != 'strip':
            # publishing only alter behavior during push
            #
            # strip should not touch boundary at all
            phases.retractboundary(repo, tr, targetphase, added)

        # make changelog see real files again
        cl.finalize(trp)

        tr.close()

        if changesets > 0:
            if srctype != 'strip':
                # During strip, branchcache is invalid but coming call to
                # `destroyed` will repair it.
                # In other case we can safely update cache on disk.
                branchmap.updatecache(repo.filtered('served'))

            def runhooks():
                # These hooks run when the lock releases, not when the
                # transaction closes. So it's possible for the changelog
                # to have changed since we last saw it.
                if clstart >= len(repo):
                    return

                # forcefully update the on-disk branch cache
                repo.ui.debug("updating the branch cache\n")
                repo.hook("changegroup", source=srctype, url=url,
                          **hookargs)

                for n in added:
                    args = hookargs.copy()
                    args['node'] = hex(n)
                    repo.hook("incoming", source=srctype, url=url, **args)

                newheads = [h for h in repo.heads() if h not in oldheads]
                repo.ui.log("incoming",
                            "%s incoming changes - new heads: %s\n",
                            len(added),
                            ', '.join([hex(c[:6]) for c in newheads]))
            repo._afterlock(runhooks)

    finally:
        tr.release()
    # never return 0 here:
    if dh < 0:
        return dh - 1
    else:
        return dh + 1
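For readers wiring up the hooks this code fires (`prechangegroup`, `pretxnchangegroup`, `changegroup`, `incoming`), a minimal sketch of an in-process hook follows; the module name `myhooks`, the function name, and the status message are hypothetical and not part of this changeset.

    # myhooks.py (hypothetical module)
    def prechangegroup_hook(ui, repo, hooktype, **kwargs):
        """Runs before a changegroup is added; with this changeset it now
        fires after the transaction has been opened."""
        ui.status("about to add a changegroup from %s\n"
                  % kwargs.get('url', 'unknown'))
        # a truthy return value (or an exception) fails the hook; because the
        # call site above uses throw=True, that aborts the incoming changegroup
        return False

    # enabled from an hgrc along these lines:
    #   [hooks]
    #   prechangegroup.notify = python:myhooks.prechangegroup_hook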