##// END OF EJS Templates
changegroup: allow use of different cg#packer in getchangegroupraw...
Sune Foldager -
r23178:5e895ed5 default
parent child Browse files
Show More
@@ -1,793 +1,798 b''
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import weakref
8 import weakref
9 from i18n import _
9 from i18n import _
10 from node import nullrev, nullid, hex, short
10 from node import nullrev, nullid, hex, short
11 import mdiff, util, dagutil
11 import mdiff, util, dagutil
12 import struct, os, bz2, zlib, tempfile
12 import struct, os, bz2, zlib, tempfile
13 import discovery, error, phases, branchmap
13 import discovery, error, phases, branchmap
14
14
15 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
15 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
16
16
def readexactly(stream, n):
    """Read exactly n bytes from stream.read; abort when the stream is short."""
    data = stream.read(n)
    if len(data) >= n:
        return data
    # a short read means the peer hung up or the bundle is truncated
    raise util.Abort(_("stream ended unexpectedly"
                       " (got %d bytes, expected %d)")
                     % (len(data), n))
25
25
def getchunk(stream):
    """Return the next length-prefixed chunk from stream as a string.

    An end-of-group marker (length 0) yields the empty string; a length
    in the impossible 1..4 range aborts.
    """
    header = readexactly(stream, 4)
    (length,) = struct.unpack(">l", header)
    if length > 4:
        # the 4-byte prefix counts itself, so the payload is length - 4
        return readexactly(stream, length - 4)
    if length:
        raise util.Abort(_("invalid chunk length %d") % length)
    return ""
35
35
def chunkheader(length):
    """Encode a payload length as a changegroup chunk header string.

    The stored value counts the 4-byte prefix itself, hence the +4.
    """
    return struct.pack(">l", 4 + length)
39
39
def closechunk():
    """Return the zero-length chunk header that terminates a chunk group."""
    return struct.pack(">l", 0)
43
43
class nocompress(object):
    """Pass-through "compressor" used when a bundle is stored uncompressed."""
    def compress(self, x):
        # identity: hand the data back untouched
        return x
    def flush(self):
        # nothing is buffered, so there is nothing left to emit
        return ""
49
49
# bundle type name -> (stream header written to the file, compressor factory).
# NOTE(review): "HG10BZ" deliberately stores header "HG10": the bz2 stream
# itself begins with the bytes 'BZ', which complete the on-disk marker (the
# BZ branch of decompressor() re-feeds 'BZ' before decompressing).
bundletypes = {
    "": ("", nocompress), # only when using unbundle on ssh and old http servers
                          # since the unification ssh accepts a header but there
                          # is no capability signaling it.
    "HG10UN": ("HG10UN", nocompress),
    "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
    "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
}

# hgweb uses this list to communicate its preferred type
bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
61
61
def writebundle(cg, filename, bundletype, vfs=None):
    """Write a bundle file and return its filename.

    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    bz2 compression can be turned off.
    The bundle file will be deleted in case of errors.
    """
    fileobj = None
    todelete = None
    try:
        if not filename:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fileobj = os.fdopen(fd, "wb")
        elif vfs:
            fileobj = vfs.open(filename, "wb")
        else:
            fileobj = open(filename, "wb")
        todelete = filename

        header, compressor = bundletypes[bundletype]
        fileobj.write(header)
        z = compressor()

        # Stream every chunk through the compressor.  Iterating getchunks()
        # also parses the changegroup data; otherwise we would block on
        # sshrepo streams whose end cannot be detected.
        # (an empty chunkgroup ends the changegroup; a changegroup has at
        # least 2 chunkgroups, changelog and manifest, before that applies)
        for chunk in cg.getchunks():
            fileobj.write(z.compress(chunk))
        fileobj.write(z.flush())
        todelete = None  # success: keep the file
        return filename
    finally:
        if fileobj is not None:
            fileobj.close()
        if todelete is not None:
            if filename and vfs:
                vfs.unlink(todelete)
            else:
                os.unlink(todelete)
107
107
def decompressor(fh, alg):
    """Wrap fh in a decompressing reader according to alg ('UN', 'GZ', 'BZ').

    'UN' returns fh unchanged; the other algorithms return a
    util.chunkbuffer over a lazily decompressing generator.
    """
    if alg == 'UN':
        # stream is already raw bytes
        return fh
    if alg == 'GZ':
        def generator(f):
            zd = zlib.decompressobj()
            for chunk in util.filechunkiter(f):
                yield zd.decompress(chunk)
    elif alg == 'BZ':
        def generator(f):
            zd = bz2.BZ2Decompressor()
            # the stored stream omits the 'BZ' magic; feed it back first
            zd.decompress("BZ")
            for chunk in util.filechunkiter(f, 4096):
                yield zd.decompress(chunk)
    else:
        raise util.Abort("unknown bundle compression '%s'" % alg)
    return util.chunkbuffer(generator(fh))
125
125
class cg1unpacker(object):
    """Unpacker for version-01 ('HG10') changegroup streams.

    Wraps a (possibly compressed) file-like object and exposes the
    changegroup piece by piece: chunk lengths, per-part headers and
    delta chunks.  Chunks are delimited by 4-byte big-endian length
    prefixes that include the prefix itself (see chunkheader()).
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    # fixed-size binary delta header: four 20-byte nodes ("20s20s20s20s")
    deltaheadersize = struct.calcsize(deltaheader)
    def __init__(self, fh, alg):
        # fh: raw input stream; alg: compression tag ('UN', 'GZ' or 'BZ')
        self._stream = decompressor(fh, alg)
        self._type = alg
        # optional no-argument hook fired for every non-empty chunk read
        self.callback = None
    def compressed(self):
        """Return True when the source stream was compressed."""
        return self._type != 'UN'
    def read(self, l):
        """Read l bytes from the (decompressed) stream."""
        return self._stream.read(l)
    def seek(self, pos):
        """Delegate seek to the underlying stream."""
        return self._stream.seek(pos)
    def tell(self):
        """Delegate tell to the underlying stream."""
        return self._stream.tell()
    def close(self):
        """Close the underlying stream."""
        return self._stream.close()

    def chunklength(self):
        """Read the next length prefix and return the payload size in bytes.

        Returns 0 at an end-of-group marker and aborts on a corrupt
        length.  Fires self.callback, if set, for every non-empty chunk.
        """
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            # lengths 1..4 are impossible (the prefix counts itself);
            # zero marks the end of a chunk group
            if l:
                raise util.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self.chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        """Expand a raw header tuple into (node, p1, p2, deltabase, cs).

        In cg1 the delta base is implicit: the node emitted just before
        this one, or the first parent for the first delta of a group.
        """
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        return node, p1, p2, deltabase, cs

    def deltachunk(self, prevnode):
        """Read one delta chunk; return it as a dict, or {} at group end."""
        l = self.chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        # the rest of the chunk after the fixed header is the delta payload
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                'deltabase': deltabase, 'delta': delta}

    def getchunks(self):
        """returns all the chunks contains in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parse the changegroup data, otherwise it will
        block in case of sshrepo because it don't know the end of the stream.
        """
        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, an empty chunkgroup is the end of the changegroup
        empty = False
        count = 0
        while not empty or count <= 2:
            empty = True
            count += 1
            while True:
                chunk = getchunk(self)
                if not chunk:
                    break
                empty = False
                # re-emit the chunk with its length prefix restored
                yield chunkheader(len(chunk))
                pos = 0
                # hand the payload out in 1MB slices
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            # close each group with an empty chunk
            yield closechunk()
217
217
class headerlessfixup(object):
    """File-like wrapper that re-prepends an already-consumed header.

    Reads are satisfied from the saved header string first; once it is
    exhausted, they fall through to the underlying file object.
    """
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        if not self._h:
            # header fully consumed: plain pass-through
            return readexactly(self._fh, n)
        d, self._h = self._h[:n], self._h[n:]
        if len(d) < n:
            # header ran out mid-read; top up from the real stream
            d += readexactly(self._fh, n - len(d))
        return d
229
229
class cg1packer(object):
    """Packer producing version-01 ('HG10') changegroup chunk streams.

    generate() yields the changegroup as a sequence of strings: the
    changelog group, the manifest group, then one group per changed
    file, each group terminated by an empty chunk (closechunk()).
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        self._changelog = repo.changelog
        self._manifest = repo.manifest
        # bundle.reorder config: 'auto' becomes None so group() can decide
        # per-revlog (based on generaldelta); anything else is forced boolean
        reorder = repo.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress
    def close(self):
        """Return the empty chunk that terminates a chunk group."""
        return closechunk()

    def fileheader(self, fname):
        """Return the header chunk announcing filelog fname."""
        return chunkheader(len(fname)) + fname

    def group(self, nodelist, revlog, lookup, units=None, reorder=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated, units specifies
        the type of revlog that is touched (changelog, manifest, etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self.close()
            return

        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and reorder is not False) or reorder:
            dag = dagutil.revlogdag(revlog)
            revs = set(revlog.rev(n) for n in nodelist)
            revs = dag.linearize(revs)
        else:
            revs = sorted([revlog.rev(n) for n in nodelist])

        # add the parent of the first rev; it serves as the first delta base
        p = revlog.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        total = len(revs) - 1
        msgbundling = _('bundling')
        for r in xrange(len(revs) - 1):
            if units is not None:
                self._progress(msgbundling, r + 1, unit=units, total=total)
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(revlog.node(curr))
            for c in self.revchunk(revlog, curr, prev, linknode):
                yield c

        yield self.close()

    # filter any nodes that claim to be part of the known set
    def prune(self, revlog, missing, commonrevs, source):
        """Return the subset of missing whose linkrev is not in commonrevs."""
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        repo = self._repo
        cl = self._changelog
        mf = self._manifest
        reorder = self._reorder
        progress = self._progress

        # for progress output
        msgbundling = _('bundling')

        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()

        # Callback for the changelog, used to collect changed files and manifest
        # nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            changedfiles.update(c[3])
            # record the first changeset introducing this manifest version
            mfs.setdefault(c[0], x)
            return x

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def lookupmf(x):
            clnode = mfs[x]
            if not fastpathlinkrev:
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        # record the first changeset introducing this filelog
                        # version
                        fnodes[f].setdefault(n, clnode)
            return clnode

        # 1) changelog group
        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets'),
                                reorder=reorder):
            yield chunk
        progress(msgbundling, None)

        # 2) manifest group (pruned to what the receiver is missing)
        for f in changedfiles:
            fnodes[f] = {}
        mfnodes = self.prune(mf, mfs, commonrevs, source)
        for chunk in self.group(mfnodes, mf, lookupmf, units=_('manifests'),
                                reorder=reorder):
            yield chunk
        progress(msgbundling, None)

        mfs.clear()
        needed = set(cl.rev(x) for x in clnodes)

        # Resolve the filenode -> linkrev-node mapping for one file; in the
        # fastpath case it is computed here, otherwise lookupmf filled fnodes.
        def linknodes(filerevlog, fname):
            if fastpathlinkrev:
                llr = filerevlog.linkrev
                def genfilenodes():
                    for r in filerevlog:
                        linkrev = llr(r)
                        if linkrev in needed:
                            yield filerevlog.node(r), cl.node(linkrev)
                fnodes[fname] = dict(genfilenodes())
            return fnodes.get(fname, {})

        # 3) one group per changed file
        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()
        progress(msgbundling, None)

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        """Yield a file header plus delta group for every changed file
        that still has nodes to send after pruning."""
        repo = self._repo
        progress = self._progress
        reorder = self._reorder
        msgbundling = _('bundling')

        total = len(changedfiles)
        # for progress output
        msgfiles = _('files')
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise util.Abort(_("empty or missing revlog for %s") % fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs, source)
            if filenodes:
                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                         total=total)
                yield self.fileheader(fname)
                for chunk in self.group(filenodes, filerevlog, lookupfilelog,
                                        reorder=reorder):
                    yield chunk

    def revchunk(self, revlog, rev, prev, linknode):
        """Yield the pieces (length header, meta, delta) for one revision.

        The delta base is prev, the revision emitted just before this
        one; a nullrev base emits the full text with a trivial diff
        header instead.
        """
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)  # NOTE(review): unused; kept as-is
        base = prev

        prefix = ''
        if base == nullrev:
            delta = revlog.revision(node)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta
    def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
        """Pack the cg1 delta header: node, p1, p2 and linknode."""
        # do nothing with basenode, it is implicitly the previous one in HG10
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
435
435
# changegroup version string -> (packer class, unpacker class); callers
# select a wire format by version key (see getlocalchangegroupraw)
packermap = {'01': (cg1packer, cg1unpacker)}
437
437
def _changegroupinfo(repo, nodes, source):
    """Report how many changesets were found and, when debugging, list them."""
    ui = repo.ui
    if ui.verbose or source == 'bundle':
        ui.status(_("%d changesets found\n") % len(nodes))
    if ui.debugflag:
        ui.debug("list of changesets:\n")
        for n in nodes:
            ui.debug("%s\n" % hex(n))
445
445
def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
    """Feed the outgoing set through bundler; return the raw chunk generator."""
    repo = repo.unfiltered()
    csets = outgoing.missing
    heads = outgoing.missingheads
    heads.sort()
    # We go through the fast path if we get told to, or if all (unfiltered
    # heads have been requested (since we then know there all linkrevs will
    # be pulled by the client).
    usefastpath = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(outgoing.common, csets, usefastpath, source)
461
461
def getsubset(repo, outgoing, bundler, source, fastpath=False):
    """Bundle the outgoing revisions and wrap them in an uncompressed unpacker."""
    chunks = getsubsetraw(repo, outgoing, bundler, source, fastpath)
    return cg1unpacker(util.chunkbuffer(chunks), 'UN')
465
465
def changegroupsubset(repo, roots, heads, source):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the roots and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    cl = repo.changelog
    if not roots:
        roots = [nullid]
    # TODO: remove call to nodesbetween.
    csets, roots, heads = cl.nodesbetween(roots, heads)
    # discovery bases: every non-null parent of a root
    discbases = [p for n in roots for p in cl.parents(n) if p != nullid]
    outgoing = discovery.outgoing(cl, discbases, heads)
    return getsubset(repo, outgoing, cg1packer(repo), source)
490
490
def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
                           version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing. Returns a raw changegroup generator.

    version selects the changegroup packer class via packermap.
    """
    if not outgoing.missing:
        return None
    packerclass = packermap[version][0]
    bundler = packerclass(repo, bundlecaps)
    return getsubsetraw(repo, outgoing, bundler, source)
500
501
def getlocalchangegroup(repo, source, outgoing, bundlecaps=None):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing.

    Returns a changegroup unpacker wrapping the subset, or None when
    outgoing contains no missing nodes.
    """
    if not outgoing.missing:
        return None
    bundler = cg1packer(repo, bundlecaps)
    return getsubset(repo, outgoing, bundler, source)
510
511
def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    If common is empty/None, [nullid] is used; if heads is empty/None,
    all changelog heads are used.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        hasnode = cl.hasnode
        # drop nodes the local changelog does not know about
        common = [n for n in common if hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(cl, common, heads)
529
530
def getchangegroupraw(repo, source, heads=None, common=None, bundlecaps=None,
                      version='01'):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors common.

    If heads is None, use the local heads. If common is None, use [nullid].

    version selects the changegroup format/packer (defaults to '01').

    The nodes in common might not all be known locally due to the way the
    current discovery protocol works. Returns a raw changegroup generator.
    """
    outgoing = _computeoutgoing(repo, heads, common)
    return getlocalchangegroupraw(repo, source, outgoing, bundlecaps=bundlecaps,
                                  version=version)
541
546
def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors common.

    If heads is None, use the local heads. If common is None, use [nullid].

    The nodes in common might not all be known locally due to the way the
    current discovery protocol works.
    """
    outgoing = _computeoutgoing(repo, heads, common)
    return getlocalchangegroup(repo, source, outgoing, bundlecaps=bundlecaps)
553
558
def changegroup(repo, basenodes, source):
    """Return a changegroup of all revisions descending from basenodes.

    Delegates to changegroupsubset() against the current local heads to
    avoid a race between computing heads and reading them (issue1320).
    """
    return changegroupsubset(repo, basenodes, repo.heads(), source)
557
562
def addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
    """Apply the filelog portion of a changegroup stream to the repo.

    source - changegroup stream; one filelogheader/chunk group per file
    revmap - maps a node in the stream to its local revision number
    trp    - (weak proxy to the) active transaction
    pr     - progress callback, invoked once per file
    needfiles - dict of filename -> set of file nodes that incoming
        changesets require; consumed (mutated) as nodes arrive

    Raises util.Abort on an empty or spurious file revlog group, or when
    a required file node is still missing after the stream is exhausted.

    Returns (revisions, files): counts of file revisions and of files
    that received new revisions.
    """
    revisions = 0
    files = 0
    while True:
        chunkdata = source.filelogheader()
        if not chunkdata:
            # end of the file groups
            break
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        pr()
        fl = repo.file(f)
        o = len(fl)
        if not fl.addgroup(source, revmap, trp):
            raise util.Abort(_("received file revlog group is empty"))
        revisions += len(fl) - o
        files += 1
        if f in needfiles:
            needs = needfiles[f]
            # tick off every newly-added node that was expected; anything
            # unexpected means the stream is corrupt
            for new in xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise util.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    repo.ui.progress(_('files'), None)

    # whatever is left in needfiles must already exist locally, otherwise
    # the incoming changesets reference data we do not have
    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise util.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files
598
603
def addchangegroup(repo, source, srctype, url, emptyok=False,
                   targetphase=phases.draft):
    """Add the changegroup returned by source.read() to this repo.
    srctype is a string like 'push', 'pull', or 'unbundle'. url is
    the URL of the repo where this changegroup is coming from.

    Return an integer summarizing the change to this repo:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - fewer heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    repo = repo.unfiltered()
    def csmap(x):
        repo.ui.debug("add changeset %s\n" % short(x))
        return len(cl)

    def revmap(x):
        return cl.rev(x)

    if not source:
        return 0

    changesets = files = revisions = 0
    efiles = set()

    # write changelog data to temp files so concurrent readers will not see
    # inconsistent view
    cl = repo.changelog
    cl.delayupdate()
    oldheads = cl.heads()

    tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
    # The transaction could have been created before and already carries source
    # information. In this case we use the top level data. We overwrite the
    # argument because we need to use the top level value (if they exist) in
    # this function.
    srctype = tr.hookargs.setdefault('source', srctype)
    url = tr.hookargs.setdefault('url', url)
    try:
        repo.hook('prechangegroup', throw=True, **tr.hookargs)

        trp = weakref.proxy(tr)
        # pull off the changeset group
        repo.ui.status(_("adding changesets\n"))
        clstart = len(cl)
        class prog(object):
            # minimal callable progress reporter passed to the unbundler;
            # note __call__ deliberately names its receiver 'repo' (it is
            # really 'self') and counts chunks as they are consumed
            step = _('changesets')
            count = 1
            ui = repo.ui
            total = None
            def __call__(repo):
                repo.ui.progress(repo.step, repo.count, unit=_('chunks'),
                                 total=repo.total)
                repo.count += 1
        pr = prog()
        source.callback = pr

        source.changelogheader()
        srccontent = cl.addgroup(source, csmap, trp)
        if not (srccontent or emptyok):
            raise util.Abort(_("received changelog group is empty"))
        clend = len(cl)
        changesets = clend - clstart
        for c in xrange(clstart, clend):
            efiles.update(repo[c].files())
        # efiles becomes the count of distinct files touched, used as the
        # progress total for the file phase below
        efiles = len(efiles)
        repo.ui.progress(_('changesets'), None)

        # pull off the manifest group
        repo.ui.status(_("adding manifests\n"))
        pr.step = _('manifests')
        pr.count = 1
        pr.total = changesets # manifests <= changesets
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        source.manifestheader()
        repo.manifest.addgroup(source, revmap, trp)
        repo.ui.progress(_('manifests'), None)

        needfiles = {}
        if repo.ui.configbool('server', 'validate', default=False):
            # validate incoming csets have their manifests
            for cset in xrange(clstart, clend):
                mfest = repo.changelog.read(repo.changelog.node(cset))[0]
                mfest = repo.manifest.readdelta(mfest)
                # store file nodes we must see
                for f, n in mfest.iteritems():
                    needfiles.setdefault(f, set()).add(n)

        # process the files
        repo.ui.status(_("adding file changes\n"))
        pr.step = _('files')
        pr.count = 1
        pr.total = efiles
        source.callback = None

        newrevs, newfiles = addchangegroupfiles(repo, source, revmap, trp, pr,
                                                needfiles)
        revisions += newrevs
        files += newfiles

        # compute the head delta (dh), ignoring new heads that close a
        # branch; this drives both the status message and the return value
        dh = 0
        if oldheads:
            heads = cl.heads()
            dh = len(heads) - len(oldheads)
            for h in heads:
                if h not in oldheads and repo[h].closesbranch():
                    dh -= 1
        htext = ""
        if dh:
            htext = _(" (%+d heads)") % dh

        repo.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, htext))
        repo.invalidatevolatilesets()

        if changesets > 0:
            p = lambda: cl.writepending() and repo.root or ""
            if 'node' not in tr.hookargs:
                tr.hookargs['node'] = hex(cl.node(clstart))
                hookargs = dict(tr.hookargs)
            else:
                hookargs = dict(tr.hookargs)
                hookargs['node'] = hex(cl.node(clstart))
            repo.hook('pretxnchangegroup', throw=True, pending=p, **hookargs)

        added = [cl.node(r) for r in xrange(clstart, clend)]
        publishing = repo.ui.configbool('phases', 'publish', True)
        if srctype in ('push', 'serve'):
            # Old servers can not push the boundary themselves.
            # New servers won't push the boundary if changeset already
            # exists locally as secret
            #
            # We should not use added here but the list of all change in
            # the bundle
            if publishing:
                phases.advanceboundary(repo, tr, phases.public, srccontent)
            else:
                # Those changesets have been pushed from the outside, their
                # phases are going to be pushed alongside. Therefor
                # `targetphase` is ignored.
                phases.advanceboundary(repo, tr, phases.draft, srccontent)
                phases.retractboundary(repo, tr, phases.draft, added)
        elif srctype != 'strip':
            # publishing only alter behavior during push
            #
            # strip should not touch boundary at all
            phases.retractboundary(repo, tr, targetphase, added)

        # make changelog see real files again
        cl.finalize(trp)

        tr.close()

        if changesets > 0:
            if srctype != 'strip':
                # During strip, branchcache is invalid but coming call to
                # `destroyed` will repair it.
                # In other case we can safely update cache on disk.
                branchmap.updatecache(repo.filtered('served'))

            def runhooks():
                # These hooks run when the lock releases, not when the
                # transaction closes. So it's possible for the changelog
                # to have changed since we last saw it.
                if clstart >= len(repo):
                    return

                # forcefully update the on-disk branch cache
                repo.ui.debug("updating the branch cache\n")
                repo.hook("changegroup", **hookargs)

                for n in added:
                    args = hookargs.copy()
                    args['node'] = hex(n)
                    repo.hook("incoming", **args)

                newheads = [h for h in repo.heads() if h not in oldheads]
                repo.ui.log("incoming",
                            "%s incoming changes - new heads: %s\n",
                            len(added),
                            ', '.join([hex(c[:6]) for c in newheads]))
            repo._afterlock(runhooks)

    finally:
        tr.release()
    # never return 0 here:
    if dh < 0:
        return dh - 1
    else:
        return dh + 1
General Comments 0
You need to be logged in to leave comments. Login now