changegroup: add a "packermap" dictionary to track different packer versions...
Pierre-Yves David
r23168:a92ba36a default
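The change itself is small: alongside the existing cg1packer/cg1unpacker classes, the module gains a module-level table keyed by changegroup version string, so both halves of a format can be resolved from one lookup. A minimal sketch of the intended pattern (the `repo` object and the stream `fh` are assumptions for illustration):

packer, unpacker = packermap['01']       # classes, not instances
bundler = packer(repo, bundlecaps=None)  # build side
# ... transmit the generated chunks ...
cg = unpacker(fh, 'UN')                  # receive side; 'UN' = uncompressed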
@@ -1,766 +1,768 @@
# changegroup.py - Mercurial changegroup manipulation functions
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import weakref
from i18n import _
from node import nullrev, nullid, hex, short
import mdiff, util, dagutil
import struct, os, bz2, zlib, tempfile
import discovery, error, phases, branchmap

_CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"

def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    s = stream.read(n)
    if len(s) < n:
        raise util.Abort(_("stream ended unexpectedly"
                           " (got %d bytes, expected %d)")
                         % (len(s), n))
    return s

def getchunk(stream):
    """return the next chunk from stream as a string"""
    d = readexactly(stream, 4)
    l = struct.unpack(">l", d)[0]
    if l <= 4:
        if l:
            raise util.Abort(_("invalid chunk length %d") % l)
        return ""
    return readexactly(stream, l - 4)

def chunkheader(length):
    """return a changegroup chunk header (string)"""
    return struct.pack(">l", length + 4)

def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    return struct.pack(">l", 0)

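The three helpers above define the framing used throughout the file: a chunk is a 4-byte big-endian length that counts itself, followed by the payload, and a zero length terminates a group. A round-trip sketch using the module's own getchunk (illustrative only, with an in-memory stream):

import struct
from StringIO import StringIO

payload = "hello"
framed = struct.pack(">l", len(payload) + 4) + payload  # same as chunkheader()
framed += struct.pack(">l", 0)                          # same as closechunk()

stream = StringIO(framed)
assert getchunk(stream) == "hello"  # header says 9, so 5 payload bytes follow
assert getchunk(stream) == ""       # zero-length chunk ends the group
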
class nocompress(object):
    def compress(self, x):
        return x
    def flush(self):
        return ""

bundletypes = {
    "": ("", nocompress), # only when using unbundle on ssh and old http servers
                          # since the unification ssh accepts a header but there
                          # is no capability signaling it.
    "HG10UN": ("HG10UN", nocompress),
    "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
    "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
}

# hgweb uses this list to communicate its preferred type
bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']

def writebundle(cg, filename, bundletype, vfs=None):
    """Write a bundle file and return its filename.

    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    bz2 compression can be turned off.
    The bundle file will be deleted in case of errors.
    """

    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                fh = open(filename, "wb")
        else:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, "wb")
        cleanup = filename

        header, compressor = bundletypes[bundletype]
        fh.write(header)
        z = compressor()

        # parse the changegroup data, otherwise we will block
        # in case of sshrepo because we don't know the end of the stream

        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, an empty chunkgroup is the end of the changegroup
        for chunk in cg.getchunks():
            fh.write(z.compress(chunk))
        fh.write(z.flush())
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)

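A hedged usage sketch: produce an outgoing changegroup with getchangegroup (defined further down in this file) and write it to disk as a bz2-compressed HG10 bundle. The `repo` object and destination path are assumptions for illustration:

cg = getchangegroup(repo, 'bundle', heads=None, common=None)
if cg is not None:                       # None means nothing is outgoing
    path = writebundle(cg, '/tmp/out.hg', 'HG10BZ')
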
def decompressor(fh, alg):
    if alg == 'UN':
        return fh
    elif alg == 'GZ':
        def generator(f):
            zd = zlib.decompressobj()
            for chunk in util.filechunkiter(f):
                yield zd.decompress(chunk)
    elif alg == 'BZ':
        def generator(f):
            zd = bz2.BZ2Decompressor()
            zd.decompress("BZ")
            for chunk in util.filechunkiter(f, 4096):
                yield zd.decompress(chunk)
    else:
        raise util.Abort("unknown bundle compression '%s'" % alg)
    return util.chunkbuffer(generator(fh))

class cg1unpacker(object):
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    def __init__(self, fh, alg):
        self._stream = decompressor(fh, alg)
        self._type = alg
        self.callback = None
    def compressed(self):
        return self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def chunklength(self):
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise util.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self.chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        return node, p1, p2, deltabase, cs

    def deltachunk(self, prevnode):
        l = self.chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                'deltabase': deltabase, 'delta': delta}

    def getchunks(self):
        """return all the chunks contained in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parses the changegroup data; otherwise it
        would block in the sshrepo case because it doesn't know where the
        stream ends.
        """
        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, an empty chunkgroup is the end of the changegroup
        empty = False
        count = 0
        while not empty or count <= 2:
            empty = True
            count += 1
            while True:
                chunk = getchunk(self)
                if not chunk:
                    break
                empty = False
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

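A minimal consumption sketch for the unpacker above (assuming `fh` is a file object positioned just past the 6-byte bundle header, and 'GZ' matches that header): drain the changelog group by walking delta chunks until the empty terminator, the same loop shape revlog.addgroup uses.

unpacker = cg1unpacker(fh, 'GZ')
unpacker.changelogheader()     # no-op header read for version 01
prev = None
while True:
    chunkdata = unpacker.deltachunk(prev)
    if not chunkdata:          # empty dict: end of this chunkgroup
        break
    prev = chunkdata['node']   # next delta defaults to this node as its base
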
class headerlessfixup(object):
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        if self._h:
            d, self._h = self._h[:n], self._h[n:]
            if len(d) < n:
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)

class cg1packer(object):
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        self._changelog = repo.changelog
        self._manifest = repo.manifest
        reorder = repo.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress
    def close(self):
        return closechunk()

    def fileheader(self, fname):
        return chunkheader(len(fname)) + fname

    def group(self, nodelist, revlog, lookup, units=None, reorder=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated; units
        specifies the type of revlog that is touched (changelog, manifest,
        etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self.close()
            return

        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and reorder is not False) or reorder:
            dag = dagutil.revlogdag(revlog)
            revs = set(revlog.rev(n) for n in nodelist)
            revs = dag.linearize(revs)
        else:
            revs = sorted([revlog.rev(n) for n in nodelist])

        # add the parent of the first rev
        p = revlog.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        total = len(revs) - 1
        msgbundling = _('bundling')
        for r in xrange(len(revs) - 1):
            if units is not None:
                self._progress(msgbundling, r + 1, unit=units, total=total)
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(revlog.node(curr))
            for c in self.revchunk(revlog, curr, prev, linknode):
                yield c

        yield self.close()

    # filter any nodes that claim to be part of the known set
    def prune(self, revlog, missing, commonrevs, source):
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        repo = self._repo
        cl = self._changelog
        mf = self._manifest
        reorder = self._reorder
        progress = self._progress

        # for progress output
        msgbundling = _('bundling')

        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()

        # Callback for the changelog, used to collect changed files and
        # manifest nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            changedfiles.update(c[3])
            # record the first changeset introducing this manifest version
            mfs.setdefault(c[0], x)
            return x

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def lookupmf(x):
            clnode = mfs[x]
            if not fastpathlinkrev:
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        # record the first changeset introducing this filelog
                        # version
                        fnodes[f].setdefault(n, clnode)
            return clnode

        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets'),
                                reorder=reorder):
            yield chunk
        progress(msgbundling, None)

        for f in changedfiles:
            fnodes[f] = {}
        mfnodes = self.prune(mf, mfs, commonrevs, source)
        for chunk in self.group(mfnodes, mf, lookupmf, units=_('manifests'),
                                reorder=reorder):
            yield chunk
        progress(msgbundling, None)

        mfs.clear()
        needed = set(cl.rev(x) for x in clnodes)

        def linknodes(filerevlog, fname):
            if fastpathlinkrev:
                llr = filerevlog.linkrev
                def genfilenodes():
                    for r in filerevlog:
                        linkrev = llr(r)
                        if linkrev in needed:
                            yield filerevlog.node(r), cl.node(linkrev)
                fnodes[fname] = dict(genfilenodes())
            return fnodes.get(fname, {})

        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()
        progress(msgbundling, None)

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        repo = self._repo
        progress = self._progress
        reorder = self._reorder
        msgbundling = _('bundling')

        total = len(changedfiles)
        # for progress output
        msgfiles = _('files')
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise util.Abort(_("empty or missing revlog for %s") % fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes; we collected the linkrev nodes above in
            # the fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs,
                                   source)
            if filenodes:
                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                         total=total)
                yield self.fileheader(fname)
                for chunk in self.group(filenodes, filerevlog, lookupfilelog,
                                        reorder=reorder):
                    yield chunk

    def revchunk(self, revlog, rev, prev, linknode):
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = prev

        prefix = ''
        if base == nullrev:
            delta = revlog.revision(node)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta
    def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
        # do nothing with basenode, it is implicitly the previous one in HG10
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)

+packermap = {'01': (cg1packer, cg1unpacker)}
+
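The two added lines above are the whole change: packermap is the extension point this changeset introduces, so later format revisions can register a version key next to '01'. A hedged sketch of how a hypothetical version '02' might be registered (cg2packer and cg2unpacker do not exist in this revision; the wider header is an invented placeholder):

class cg2packer(cg1packer):      # hypothetical subclass with a new header
    deltaheader = "20s20s20s20s20s"

class cg2unpacker(cg1unpacker):  # hypothetical matching unpacker
    deltaheader = "20s20s20s20s20s"
    deltaheadersize = struct.calcsize(deltaheader)

packermap['02'] = (cg2packer, cg2unpacker)
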
def _changegroupinfo(repo, nodes, source):
    if repo.ui.verbose or source == 'bundle':
        repo.ui.status(_("%d changesets found\n") % len(nodes))
    if repo.ui.debugflag:
        repo.ui.debug("list of changesets:\n")
        for node in nodes:
            repo.ui.debug("%s\n" % hex(node))

def getsubset(repo, outgoing, bundler, source, fastpath=False):
    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know all linkrevs will be
    # pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
    return cg1unpacker(util.chunkbuffer(gengroup), 'UN')

def changegroupsubset(repo, roots, heads, source):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the roots and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    cl = repo.changelog
    if not roots:
        roots = [nullid]
    # TODO: remove call to nodesbetween.
    csets, roots, heads = cl.nodesbetween(roots, heads)
    discbases = []
    for n in roots:
        discbases.extend([p for p in cl.parents(n) if p != nullid])
    outgoing = discovery.outgoing(cl, discbases, heads)
    bundler = cg1packer(repo)
    return getsubset(repo, outgoing, bundler, source)

def getlocalchangegroup(repo, source, outgoing, bundlecaps=None):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing."""
    if not outgoing.missing:
        return None
    bundler = cg1packer(repo, bundlecaps)
    return getsubset(repo, outgoing, bundler, source)

def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(cl, common, heads)

def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors of common.

    If heads is None, use the local heads. If common is None, use [nullid].

    The nodes in common might not all be known locally due to the way the
    current discovery protocol works.
    """
    outgoing = _computeoutgoing(repo, heads, common)
    return getlocalchangegroup(repo, source, outgoing, bundlecaps=bundlecaps)

def changegroup(repo, basenodes, source):
    # to avoid a race we use changegroupsubset() (issue1320)
    return changegroupsubset(repo, basenodes, repo.heads(), source)

def addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
    revisions = 0
    files = 0
    while True:
        chunkdata = source.filelogheader()
        if not chunkdata:
            break
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        pr()
        fl = repo.file(f)
        o = len(fl)
        if not fl.addgroup(source, revmap, trp):
            raise util.Abort(_("received file revlog group is empty"))
        revisions += len(fl) - o
        files += 1
        if f in needfiles:
            needs = needfiles[f]
            for new in xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise util.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    repo.ui.progress(_('files'), None)

    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise util.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files

def addchangegroup(repo, source, srctype, url, emptyok=False,
                   targetphase=phases.draft):
    """Add the changegroup returned by source.read() to this repo.
    srctype is a string like 'push', 'pull', or 'unbundle'. url is
    the URL of the repo where this changegroup is coming from.

    Return an integer summarizing the change to this repo:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - fewer heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    repo = repo.unfiltered()
    def csmap(x):
        repo.ui.debug("add changeset %s\n" % short(x))
        return len(cl)

    def revmap(x):
        return cl.rev(x)

    if not source:
        return 0

    changesets = files = revisions = 0
    efiles = set()

    # write changelog data to temp files so concurrent readers will not see
    # an inconsistent view
    cl = repo.changelog
    cl.delayupdate()
    oldheads = cl.heads()

    tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
    # The transaction could have been created before and already carries
    # source information. In this case we use the top level data. We overwrite
    # the argument because we need to use the top level value (if it exists)
    # in this function.
    srctype = tr.hookargs.setdefault('source', srctype)
    url = tr.hookargs.setdefault('url', url)
    try:
        repo.hook('prechangegroup', throw=True, **tr.hookargs)

        trp = weakref.proxy(tr)
        # pull off the changeset group
        repo.ui.status(_("adding changesets\n"))
        clstart = len(cl)
        class prog(object):
            step = _('changesets')
            count = 1
            ui = repo.ui
            total = None
            def __call__(repo):
                repo.ui.progress(repo.step, repo.count, unit=_('chunks'),
                                 total=repo.total)
                repo.count += 1
        pr = prog()
        source.callback = pr

        source.changelogheader()
        srccontent = cl.addgroup(source, csmap, trp)
        if not (srccontent or emptyok):
            raise util.Abort(_("received changelog group is empty"))
        clend = len(cl)
        changesets = clend - clstart
        for c in xrange(clstart, clend):
            efiles.update(repo[c].files())
        efiles = len(efiles)
        repo.ui.progress(_('changesets'), None)

        # pull off the manifest group
        repo.ui.status(_("adding manifests\n"))
        pr.step = _('manifests')
        pr.count = 1
        pr.total = changesets # manifests <= changesets
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        source.manifestheader()
        repo.manifest.addgroup(source, revmap, trp)
        repo.ui.progress(_('manifests'), None)

        needfiles = {}
        if repo.ui.configbool('server', 'validate', default=False):
            # validate incoming csets have their manifests
            for cset in xrange(clstart, clend):
                mfest = repo.changelog.read(repo.changelog.node(cset))[0]
                mfest = repo.manifest.readdelta(mfest)
                # store file nodes we must see
                for f, n in mfest.iteritems():
                    needfiles.setdefault(f, set()).add(n)

        # process the files
        repo.ui.status(_("adding file changes\n"))
        pr.step = _('files')
        pr.count = 1
        pr.total = efiles
        source.callback = None

        newrevs, newfiles = addchangegroupfiles(repo, source, revmap, trp, pr,
                                                needfiles)
        revisions += newrevs
        files += newfiles

        dh = 0
        if oldheads:
            heads = cl.heads()
            dh = len(heads) - len(oldheads)
            for h in heads:
                if h not in oldheads and repo[h].closesbranch():
                    dh -= 1
        htext = ""
        if dh:
            htext = _(" (%+d heads)") % dh

        repo.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, htext))
        repo.invalidatevolatilesets()

        if changesets > 0:
            p = lambda: cl.writepending() and repo.root or ""
            if 'node' not in tr.hookargs:
                tr.hookargs['node'] = hex(cl.node(clstart))
                hookargs = dict(tr.hookargs)
            else:
                hookargs = dict(tr.hookargs)
                hookargs['node'] = hex(cl.node(clstart))
            repo.hook('pretxnchangegroup', throw=True, pending=p, **hookargs)

        added = [cl.node(r) for r in xrange(clstart, clend)]
        publishing = repo.ui.configbool('phases', 'publish', True)
        if srctype in ('push', 'serve'):
            # Old servers cannot push the boundary themselves.
            # New servers won't push the boundary if changeset already
            # exists locally as secret
            #
            # We should not use added here but the list of all changes in
            # the bundle
            if publishing:
                phases.advanceboundary(repo, tr, phases.public, srccontent)
            else:
                # Those changesets have been pushed from the outside, their
                # phases are going to be pushed alongside. Therefore
                # `targetphase` is ignored.
                phases.advanceboundary(repo, tr, phases.draft, srccontent)
                phases.retractboundary(repo, tr, phases.draft, added)
        elif srctype != 'strip':
            # publishing only alters behavior during push
            #
            # strip should not touch boundary at all
            phases.retractboundary(repo, tr, targetphase, added)

        # make changelog see real files again
        cl.finalize(trp)

        tr.close()

        if changesets > 0:
            if srctype != 'strip':
                # During strip, the branchcache is invalid but the coming call
                # to `destroyed` will repair it.
                # In the other cases we can safely update the cache on disk.
                branchmap.updatecache(repo.filtered('served'))

            def runhooks():
                # These hooks run when the lock releases, not when the
                # transaction closes. So it's possible for the changelog
                # to have changed since we last saw it.
                if clstart >= len(repo):
                    return

                # forcefully update the on-disk branch cache
                repo.ui.debug("updating the branch cache\n")
                repo.hook("changegroup", **hookargs)

                for n in added:
                    args = hookargs.copy()
                    args['node'] = hex(n)
                    repo.hook("incoming", **args)

                newheads = [h for h in repo.heads() if h not in oldheads]
                repo.ui.log("incoming",
                            "%s incoming changes - new heads: %s\n",
                            len(added),
                            ', '.join([hex(c[:6]) for c in newheads]))
            repo._afterlock(runhooks)

    finally:
        tr.release()
    # never return 0 here:
    if dh < 0:
        return dh - 1
    else:
        return dh + 1
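The return contract of addchangegroup is easy to misread: the head delta dh is shifted one step away from zero so callers can distinguish "nothing happened" (0) from "heads unchanged" (1). A hedged decoding sketch, where `repo`, `source`, and `url` are assumed caller-supplied values:

ret = addchangegroup(repo, source, 'pull', url)
if ret == 0:
    pass                       # nothing changed, or no source
elif ret > 0:
    added_heads = ret - 1      # 1 means the head count is unchanged
else:
    removed_heads = -ret - 1   # e.g. -2 means one head went away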