# changegroup.py - Mercurial changegroup manipulation functions
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import os
import struct
import tempfile
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    nullrev,
    short,
)

from . import (
    branchmap,
    dagutil,
    discovery,
    error,
    mdiff,
    phases,
    util,
)

_CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
_CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
_CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"

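# A quick orientation (illustrative only, not used by the code below): each
# delta chunk starts with one of the packed headers above, so the header
# sizes are fixed per changegroup version.
#
#   >>> import struct
#   >>> struct.calcsize("20s20s20s20s")       # cg1: node, p1, p2, linknode
#   80
#   >>> struct.calcsize("20s20s20s20s20s")    # cg2: adds the delta base node
#   100
#   >>> struct.calcsize(">20s20s20s20s20sH")  # cg3: adds 16-bit revlog flags
#   102
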
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    s = stream.read(n)
    if len(s) < n:
        raise error.Abort(_("stream ended unexpectedly"
                            " (got %d bytes, expected %d)")
                          % (len(s), n))
    return s

def getchunk(stream):
    """return the next chunk from stream as a string"""
    d = readexactly(stream, 4)
    l = struct.unpack(">l", d)[0]
    if l <= 4:
        if l:
            raise error.Abort(_("invalid chunk length %d") % l)
        return ""
    return readexactly(stream, l - 4)

def chunkheader(length):
    """return a changegroup chunk header (string)"""
    return struct.pack(">l", length + 4)

def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    return struct.pack(">l", 0)
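
# An illustrative sketch of the framing the three helpers above implement
# (example data assumed, not part of the module): a chunk is a big-endian
# 4-byte length that counts itself, followed by the payload; a zero length
# terminates a group of chunks.
#
#   >>> chunkheader(len('payload'))  # 7 bytes of payload + 4-byte header
#   '\x00\x00\x00\x0b'
#   >>> closechunk()
#   '\x00\x00\x00\x00'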

def combineresults(results):
    """logic to combine 0 or more addchangegroup results into one"""
    changedheads = 0
    result = 1
    for ret in results:
        # If any changegroup result is 0, return 0
        if ret == 0:
            result = 0
            break
        if ret < -1:
            changedheads += ret + 1
        elif ret > 1:
            changedheads += ret - 1
    if changedheads > 0:
        result = 1 + changedheads
    elif changedheads < 0:
        result = -1 + changedheads
    return result
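
# A worked example (hypothetical inputs) of the arithmetic above: a result of
# 3 means two heads were added, -2 means one head was removed, so combining
# them nets one added head, encoded as 1 + 1 = 2.
#
#   >>> combineresults([3, -2])
#   2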

def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                fh = open(filename, "wb")
        else:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, "wb")
        cleanup = filename
        for c in chunks:
            fh.write(c)
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)

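# A hypothetical usage sketch (the unpacker and ui objects are assumed to
# exist; this is not code from this module): spooling a changegroup stream
# to a temporary bundle file.
#
#   fname = writechunks(ui, unpacker.getchunks(), None)
#   # ... hand fname to a consumer, then unlink it ...
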
class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        if alg == 'UN':
            alg = None # get more modern without breaking too much
        if alg not in util.decompressors:
            raise error.Abort(_('unknown stream compression type: %s')
                              % alg)
        if alg == 'BZ':
            alg = '_truncatedBZ'
        self._stream = util.decompressors[alg](fh)
        self._type = alg
        self.extras = extras or {}
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                'deltabase': deltabase, 'delta': delta, 'flags': flags}

    def getchunks(self):
        """returns all the chunks contained in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parses the changegroup data: otherwise it
        would block on an sshrepo, since it does not know where the stream
        ends.
        """
        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, changegroup versions 1 and 2 have a series of groups
        # with one group per file. changegroup 3 has a series of directory
        # manifests before the files.
        count = 0
        emptycount = 0
        while emptycount < self._grouplistcount:
            empty = True
            count += 1
            while True:
                chunk = getchunk(self)
                if not chunk:
                    if empty and count > 2:
                        emptycount += 1
                    break
                empty = False
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        # We know that we'll never have more manifests than we had
        # changesets.
        self.callback = prog(_('manifests'), numchanges)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        repo.manifest.addgroup(self, revmap, trp)
        repo.ui.progress(_('manifests'), None)
        self.callback = None

    def apply(self, repo, srctype, url, emptyok=False,
              targetphase=phases.draft, expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        try:
            with repo.transaction("\n".join([srctype,
                                             util.hidepassword(url)])) as tr:
                # The transaction could have been created before and already
                # carries source information. In this case we use the top
                # level data. We overwrite the argument because we need to use
                # the top level value (if it exists) in this function.
                srctype = tr.hookargs.setdefault('source', srctype)
                url = tr.hookargs.setdefault('url', url)
                repo.hook('prechangegroup', throw=True, **tr.hookargs)

                # write changelog data to temp files so concurrent readers
                # will not see an inconsistent view
                cl = repo.changelog
                cl.delayupdate(tr)
                oldheads = cl.heads()

                trp = weakref.proxy(tr)
                # pull off the changeset group
                repo.ui.status(_("adding changesets\n"))
                clstart = len(cl)
                class prog(object):
                    def __init__(self, step, total):
                        self._step = step
                        self._total = total
                        self._count = 1
                    def __call__(self):
                        repo.ui.progress(self._step, self._count,
                                         unit=_('chunks'), total=self._total)
                        self._count += 1
                self.callback = prog(_('changesets'), expectedtotal)

                efiles = set()
                def onchangelog(cl, node):
                    efiles.update(cl.readfiles(node))

                self.changelogheader()
                srccontent = cl.addgroup(self, csmap, trp,
                                         addrevisioncb=onchangelog)
                efiles = len(efiles)

                if not (srccontent or emptyok):
                    raise error.Abort(_("received changelog group is empty"))
                clend = len(cl)
                changesets = clend - clstart
                repo.ui.progress(_('changesets'), None)
                self.callback = None

                # pull off the manifest group
                repo.ui.status(_("adding manifests\n"))
                self._unpackmanifests(repo, revmap, trp, prog, changesets)

                needfiles = {}
                if repo.ui.configbool('server', 'validate', default=False):
                    # validate incoming csets have their manifests
                    for cset in xrange(clstart, clend):
                        mfnode = repo.changelog.read(
                            repo.changelog.node(cset))[0]
                        mfest = repo.manifest.readdelta(mfnode)
                        # store file nodes we must see
                        for f, n in mfest.iteritems():
                            needfiles.setdefault(f, set()).add(n)

                # process the files
                repo.ui.status(_("adding file changes\n"))
                newrevs, newfiles = _addchangegroupfiles(
                    repo, self, revmap, trp, efiles, needfiles)
                revisions += newrevs
                files += newfiles

                dh = 0
                if oldheads:
                    heads = cl.heads()
                    dh = len(heads) - len(oldheads)
                    for h in heads:
                        if h not in oldheads and repo[h].closesbranch():
                            dh -= 1
                htext = ""
                if dh:
                    htext = _(" (%+d heads)") % dh

                repo.ui.status(_("added %d changesets"
                                 " with %d changes to %d files%s\n")
                               % (changesets, revisions, files, htext))
                repo.invalidatevolatilesets()

                if changesets > 0:
                    if 'node' not in tr.hookargs:
                        tr.hookargs['node'] = hex(cl.node(clstart))
                        tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                        hookargs = dict(tr.hookargs)
                    else:
                        hookargs = dict(tr.hookargs)
                        hookargs['node'] = hex(cl.node(clstart))
                        hookargs['node_last'] = hex(cl.node(clend - 1))
                    repo.hook('pretxnchangegroup', throw=True, **hookargs)

                added = [cl.node(r) for r in xrange(clstart, clend)]
                publishing = repo.publishing()
                if srctype in ('push', 'serve'):
                    # Old servers can not push the boundary themselves.
                    # New servers won't push the boundary if changeset already
                    # exists locally as secret
                    #
                    # We should not use added here but the list of all
                    # changes in the bundle
                    if publishing:
                        phases.advanceboundary(repo, tr, phases.public,
                                               srccontent)
                    else:
                        # Those changesets have been pushed from the
                        # outside, and their phases are going to be
                        # pushed alongside. Therefore `targetphase` is
                        # ignored.
                        phases.advanceboundary(repo, tr, phases.draft,
                                               srccontent)
                        phases.retractboundary(repo, tr, phases.draft, added)
                elif srctype != 'strip':
                    # publishing only alters behavior during push
                    #
                    # strip should not touch the boundary at all
                    phases.retractboundary(repo, tr, targetphase, added)

                if changesets > 0:
                    if srctype != 'strip':
                        # During strip, branchcache is invalid but the
                        # upcoming call to `destroyed` will repair it.
                        # In other cases we can safely update the cache
                        # on disk.
                        branchmap.updatecache(repo.filtered('served'))

                    def runhooks():
                        # These hooks run when the lock releases, not when the
                        # transaction closes. So it's possible for the changelog
                        # to have changed since we last saw it.
                        if clstart >= len(repo):
                            return

                        # forcefully update the on-disk branch cache
                        repo.ui.debug("updating the branch cache\n")
                        repo.hook("changegroup", **hookargs)

                        for n in added:
                            args = hookargs.copy()
                            args['node'] = hex(n)
                            del args['node_last']
                            repo.hook("incoming", **args)

                        newheads = [h for h in repo.heads()
                                    if h not in oldheads]
                        repo.ui.log("incoming",
                                    "%s incoming changes - new heads: %s\n",
                                    len(added),
                                    ', '.join([hex(c[:6]) for c in newheads]))

                    tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                    lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

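# A hypothetical usage sketch for the unpacker above (the repo setup and file
# name are assumptions, not provided by this module): read an uncompressed
# v1 changegroup from disk and apply it.
#
#   fh = open('changes.cg1', 'rb')
#   unbundler = cg1unpacker(fh, 'UN')  # 'UN' selects no decompression
#   with repo.lock():
#       result = unbundler.apply(repo, 'pull', 'file:changes.cg1')
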
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs = headertuple
        flags = 0
        return node, p1, p2, deltabase, cs, flags

class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
                                                  numchanges)
        while True:
            chunkdata = self.filelogheader()
            if not chunkdata:
                break
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % d)
            dirlog = repo.manifest.dirlog(d)
            if not dirlog.addgroup(self, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))

class headerlessfixup(object):
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        if self._h:
            d, self._h = self._h[:n], self._h[n:]
            if len(d) < n:
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)

class cg1packer(object):
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    version = '01'
    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        # experimental config: bundle.reorder
        reorder = repo.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None

    def close(self):
        return closechunk()

    def fileheader(self, fname):
        return chunkheader(len(fname)) + fname

    # Extracted both for clarity and for overriding in extensions.
    def _sortgroup(self, revlog, nodelist, lookup):
        """Sort nodes for change group and turn them into revnums."""
        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and self._reorder is None) or self._reorder:
            dag = dagutil.revlogdag(revlog)
            return dag.linearize(set(revlog.rev(n) for n in nodelist))
        else:
            return sorted([revlog.rev(n) for n in nodelist])

    def group(self, nodelist, revlog, lookup, units=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0]; the receiver is
        guaranteed to have this parent, as it has all of the history
        before these changesets. If firstparent is nullrev, the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated; units
        specifies the type of revlog that is touched (changelog, manifest,
        etc.).
558 """
558 """
559 # if we don't have any revisions touched by these changesets, bail
559 # if we don't have any revisions touched by these changesets, bail
560 if len(nodelist) == 0:
560 if len(nodelist) == 0:
561 yield self.close()
561 yield self.close()
562 return
562 return
563
563
564 revs = self._sortgroup(revlog, nodelist, lookup)
564 revs = self._sortgroup(revlog, nodelist, lookup)
565
565
566 # add the parent of the first rev
566 # add the parent of the first rev
567 p = revlog.parentrevs(revs[0])[0]
567 p = revlog.parentrevs(revs[0])[0]
568 revs.insert(0, p)
568 revs.insert(0, p)
569
569
570 # build deltas
570 # build deltas
571 total = len(revs) - 1
571 total = len(revs) - 1
572 msgbundling = _('bundling')
572 msgbundling = _('bundling')
573 for r in xrange(len(revs) - 1):
573 for r in xrange(len(revs) - 1):
574 if units is not None:
574 if units is not None:
575 self._progress(msgbundling, r + 1, unit=units, total=total)
575 self._progress(msgbundling, r + 1, unit=units, total=total)
576 prev, curr = revs[r], revs[r + 1]
576 prev, curr = revs[r], revs[r + 1]
577 linknode = lookup(revlog.node(curr))
577 linknode = lookup(revlog.node(curr))
578 for c in self.revchunk(revlog, curr, prev, linknode):
578 for c in self.revchunk(revlog, curr, prev, linknode):
579 yield c
579 yield c
580
580
581 if units is not None:
581 if units is not None:
582 self._progress(msgbundling, None)
582 self._progress(msgbundling, None)
583 yield self.close()
583 yield self.close()
584
584
585 # filter any nodes that claim to be part of the known set
585 # filter any nodes that claim to be part of the known set
586 def prune(self, revlog, missing, commonrevs):
586 def prune(self, revlog, missing, commonrevs):
587 rr, rl = revlog.rev, revlog.linkrev
587 rr, rl = revlog.rev, revlog.linkrev
588 return [n for n in missing if rl(rr(n)) not in commonrevs]
588 return [n for n in missing if rl(rr(n)) not in commonrevs]
589
589
590 def _packmanifests(self, dir, mfnodes, lookuplinknode):
590 def _packmanifests(self, dir, mfnodes, lookuplinknode):
591 """Pack flat manifests into a changegroup stream."""
591 """Pack flat manifests into a changegroup stream."""
592 assert not dir
592 assert not dir
593 for chunk in self.group(mfnodes, self._repo.manifest,
593 for chunk in self.group(mfnodes, self._repo.manifest,
594 lookuplinknode, units=_('manifests')):
594 lookuplinknode, units=_('manifests')):
595 yield chunk
595 yield chunk
596
596
597 def _manifestsdone(self):
597 def _manifestsdone(self):
598 return ''
598 return ''
599
599
600 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
600 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
601 '''yield a sequence of changegroup chunks (strings)'''
601 '''yield a sequence of changegroup chunks (strings)'''
602 repo = self._repo
602 repo = self._repo
603 cl = repo.changelog
603 cl = repo.changelog
604
604
605 clrevorder = {}
605 clrevorder = {}
606 mfs = {} # needed manifests
606 mfs = {} # needed manifests
607 fnodes = {} # needed file nodes
607 fnodes = {} # needed file nodes
608 changedfiles = set()
608 changedfiles = set()
609
609
610 # Callback for the changelog, used to collect changed files and manifest
610 # Callback for the changelog, used to collect changed files and manifest
611 # nodes.
611 # nodes.
612 # Returns the linkrev node (identity in the changelog case).
612 # Returns the linkrev node (identity in the changelog case).
613 def lookupcl(x):
613 def lookupcl(x):
614 c = cl.read(x)
614 c = cl.read(x)
615 clrevorder[x] = len(clrevorder)
615 clrevorder[x] = len(clrevorder)
616 n = c[0]
616 n = c[0]
617 # record the first changeset introducing this manifest version
617 # record the first changeset introducing this manifest version
618 mfs.setdefault(n, x)
618 mfs.setdefault(n, x)
619 # Record a complete list of potentially-changed files in
619 # Record a complete list of potentially-changed files in
620 # this manifest.
620 # this manifest.
621 changedfiles.update(c[3])
621 changedfiles.update(c[3])
622 return x
622 return x
623
623
624 self._verbosenote(_('uncompressed size of bundle content:\n'))
624 self._verbosenote(_('uncompressed size of bundle content:\n'))
625 size = 0
625 size = 0
626 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
626 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
627 size += len(chunk)
627 size += len(chunk)
628 yield chunk
628 yield chunk
629 self._verbosenote(_('%8.i (changelog)\n') % size)
629 self._verbosenote(_('%8.i (changelog)\n') % size)
630
630
631 # We need to make sure that the linkrev in the changegroup refers to
631 # We need to make sure that the linkrev in the changegroup refers to
632 # the first changeset that introduced the manifest or file revision.
632 # the first changeset that introduced the manifest or file revision.
633 # The fastpath is usually safer than the slowpath, because the filelogs
633 # The fastpath is usually safer than the slowpath, because the filelogs
634 # are walked in revlog order.
634 # are walked in revlog order.
635 #
635 #
636 # When taking the slowpath with reorder=None and the manifest revlog
636 # When taking the slowpath with reorder=None and the manifest revlog
637 # uses generaldelta, the manifest may be walked in the "wrong" order.
637 # uses generaldelta, the manifest may be walked in the "wrong" order.
638 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
638 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
639 # cc0ff93d0c0c).
639 # cc0ff93d0c0c).
640 #
640 #
641 # When taking the fastpath, we are only vulnerable to reordering
641 # When taking the fastpath, we are only vulnerable to reordering
642 # of the changelog itself. The changelog never uses generaldelta, so
642 # of the changelog itself. The changelog never uses generaldelta, so
643 # it is only reordered when reorder=True. To handle this case, we
643 # it is only reordered when reorder=True. To handle this case, we
644 # simply take the slowpath, which already has the 'clrevorder' logic.
644 # simply take the slowpath, which already has the 'clrevorder' logic.
645 # This was also fixed in cc0ff93d0c0c.
645 # This was also fixed in cc0ff93d0c0c.
646 fastpathlinkrev = fastpathlinkrev and not self._reorder
646 fastpathlinkrev = fastpathlinkrev and not self._reorder
647 # Treemanifests don't work correctly with fastpathlinkrev
647 # Treemanifests don't work correctly with fastpathlinkrev
648 # either, because we don't discover which directory nodes to
648 # either, because we don't discover which directory nodes to
649 # send along with files. This could probably be fixed.
649 # send along with files. This could probably be fixed.
650 fastpathlinkrev = fastpathlinkrev and (
650 fastpathlinkrev = fastpathlinkrev and (
651 'treemanifest' not in repo.requirements)
651 'treemanifest' not in repo.requirements)
652
652
653 for chunk in self.generatemanifests(commonrevs, clrevorder,
653 for chunk in self.generatemanifests(commonrevs, clrevorder,
654 fastpathlinkrev, mfs, fnodes):
654 fastpathlinkrev, mfs, fnodes):
655 yield chunk
655 yield chunk
656 mfs.clear()
656 mfs.clear()
657 clrevs = set(cl.rev(x) for x in clnodes)
657 clrevs = set(cl.rev(x) for x in clnodes)
658
658
659 if not fastpathlinkrev:
659 if not fastpathlinkrev:
660 def linknodes(unused, fname):
660 def linknodes(unused, fname):
661 return fnodes.get(fname, {})
661 return fnodes.get(fname, {})
662 else:
662 else:
663 cln = cl.node
663 cln = cl.node
664 def linknodes(filerevlog, fname):
664 def linknodes(filerevlog, fname):
665 llr = filerevlog.linkrev
665 llr = filerevlog.linkrev
666 fln = filerevlog.node
666 fln = filerevlog.node
667 revs = ((r, llr(r)) for r in filerevlog)
667 revs = ((r, llr(r)) for r in filerevlog)
668 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
668 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
669
669
670 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
670 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
671 source):
671 source):
672 yield chunk
672 yield chunk
673
673
674 yield self.close()
674 yield self.close()
675
675
676 if clnodes:
676 if clnodes:
677 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
677 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
678
678
679 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
679 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
680 fnodes):
680 fnodes):
681 repo = self._repo
681 repo = self._repo
682 dirlog = repo.manifest.dirlog
682 dirlog = repo.manifest.dirlog
683 tmfnodes = {'': mfs}
683 tmfnodes = {'': mfs}
684
684
685 # Callback for the manifest, used to collect linkrevs for filelog
685 # Callback for the manifest, used to collect linkrevs for filelog
686 # revisions.
686 # revisions.
687 # Returns the linkrev node (collected in lookupcl).
687 # Returns the linkrev node (collected in lookupcl).
688 def makelookupmflinknode(dir):
688 def makelookupmflinknode(dir):
689 if fastpathlinkrev:
689 if fastpathlinkrev:
690 assert not dir
690 assert not dir
691 return mfs.__getitem__
691 return mfs.__getitem__
692
692
693 def lookupmflinknode(x):
693 def lookupmflinknode(x):
694 """Callback for looking up the linknode for manifests.
694 """Callback for looking up the linknode for manifests.
695
695
696 Returns the linkrev node for the specified manifest.
696 Returns the linkrev node for the specified manifest.
697
697
698 SIDE EFFECT:
698 SIDE EFFECT:
699
699
700 1) fclnodes gets populated with the list of relevant
700 1) fclnodes gets populated with the list of relevant
701 file nodes if we're not using fastpathlinkrev
701 file nodes if we're not using fastpathlinkrev
702 2) When treemanifests are in use, collects treemanifest nodes
702 2) When treemanifests are in use, collects treemanifest nodes
703 to send
703 to send
704
704
705 Note that this means manifests must be completely sent to
705 Note that this means manifests must be completely sent to
706 the client before you can trust the list of files and
706 the client before you can trust the list of files and
707 treemanifests to send.
707 treemanifests to send.
708 """
708 """
709 clnode = tmfnodes[dir][x]
709 clnode = tmfnodes[dir][x]
710 mdata = dirlog(dir).readshallowfast(x)
710 mdata = dirlog(dir).readshallowfast(x)
711 for p, n, fl in mdata.iterentries():
711 for p, n, fl in mdata.iterentries():
712 if fl == 't': # subdirectory manifest
712 if fl == 't': # subdirectory manifest
713 subdir = dir + p + '/'
713 subdir = dir + p + '/'
714 tmfclnodes = tmfnodes.setdefault(subdir, {})
714 tmfclnodes = tmfnodes.setdefault(subdir, {})
715 tmfclnode = tmfclnodes.setdefault(n, clnode)
715 tmfclnode = tmfclnodes.setdefault(n, clnode)
716 if clrevorder[clnode] < clrevorder[tmfclnode]:
716 if clrevorder[clnode] < clrevorder[tmfclnode]:
717 tmfclnodes[n] = clnode
717 tmfclnodes[n] = clnode
718 else:
718 else:
719 f = dir + p
719 f = dir + p
720 fclnodes = fnodes.setdefault(f, {})
720 fclnodes = fnodes.setdefault(f, {})
721 fclnode = fclnodes.setdefault(n, clnode)
721 fclnode = fclnodes.setdefault(n, clnode)
722 if clrevorder[clnode] < clrevorder[fclnode]:
722 if clrevorder[clnode] < clrevorder[fclnode]:
723 fclnodes[n] = clnode
723 fclnodes[n] = clnode
724 return clnode
724 return clnode
725 return lookupmflinknode
725 return lookupmflinknode
726
726
727 size = 0
727 size = 0
728 while tmfnodes:
728 while tmfnodes:
729 dir = min(tmfnodes)
729 dir = min(tmfnodes)
730 nodes = tmfnodes[dir]
730 nodes = tmfnodes[dir]
731 prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
731 prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
732 if not dir or prunednodes:
732 if not dir or prunednodes:
733 for x in self._packmanifests(dir, prunednodes,
733 for x in self._packmanifests(dir, prunednodes,
734 makelookupmflinknode(dir)):
734 makelookupmflinknode(dir)):
735 size += len(x)
735 size += len(x)
736 yield x
736 yield x
737 del tmfnodes[dir]
737 del tmfnodes[dir]
738 self._verbosenote(_('%8.i (manifests)\n') % size)
738 self._verbosenote(_('%8.i (manifests)\n') % size)
739 yield self._manifestsdone()
739 yield self._manifestsdone()
740
740
741 # The 'source' parameter is useful for extensions
741 # The 'source' parameter is useful for extensions
742 def generatefiles(self, changedfiles, linknodes, commonrevs, source):
742 def generatefiles(self, changedfiles, linknodes, commonrevs, source):
743 repo = self._repo
743 repo = self._repo
744 progress = self._progress
744 progress = self._progress
745 msgbundling = _('bundling')
745 msgbundling = _('bundling')
746
746
747 total = len(changedfiles)
747 total = len(changedfiles)
748 # for progress output
748 # for progress output
749 msgfiles = _('files')
749 msgfiles = _('files')
750 for i, fname in enumerate(sorted(changedfiles)):
750 for i, fname in enumerate(sorted(changedfiles)):
751 filerevlog = repo.file(fname)
751 filerevlog = repo.file(fname)
752 if not filerevlog:
752 if not filerevlog:
753 raise error.Abort(_("empty or missing revlog for %s") % fname)
753 raise error.Abort(_("empty or missing revlog for %s") % fname)
754
754
755 linkrevnodes = linknodes(filerevlog, fname)
755 linkrevnodes = linknodes(filerevlog, fname)
            # Lookup table for filenodes; we collected the linkrev nodes
            # above in the fastpath case and with lookupmf in the slowpath
            # case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
            if filenodes:
                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                         total=total)
                h = self.fileheader(fname)
                size = len(h)
                yield h
                for chunk in self.group(filenodes, filerevlog, lookupfilelog):
                    size += len(chunk)
                    yield chunk
                self._verbosenote(_('%8.i %s\n') % (size, fname))
        progress(msgbundling, None)

    def deltaparent(self, revlog, rev, p1, p2, prev):
        return prev

    def revchunk(self, revlog, rev, prev, linknode):
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = self.deltaparent(revlog, rev, p1, p2, prev)

        prefix = ''
        if revlog.iscensored(base) or revlog.iscensored(rev):
            try:
                delta = revlog.revision(node)
            except error.CensoredNodeError as e:
                delta = e.tombstone
            if base == nullrev:
                prefix = mdiff.trivialdiffheader(len(delta))
            else:
                baselen = revlog.rawsize(base)
                prefix = mdiff.replacediffheader(baselen, len(delta))
        elif base == nullrev:
            delta = revlog.revision(node)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        flags = revlog.flags(rev)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta
    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # do nothing with basenode, it is implicitly the previous one in HG10
        # do nothing with flags, it is implicitly 0 for cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)

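# A sketch of what one group produced by cg1packer.group()/revchunk() looks
# like on the wire (a summary under the framing above, not normative):
#
#   for each revision in the group:
#       chunkheader(len(header) + len(delta))  # 4-byte big-endian length
#       header = builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
#       delta                                  # mdiff against the delta base
#   closechunk()                               # zero length ends the group
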
812 class cg2packer(cg1packer):
812 class cg2packer(cg1packer):
813 version = '02'
813 version = '02'
814 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
814 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
815
815
816 def __init__(self, repo, bundlecaps=None):
816 def __init__(self, repo, bundlecaps=None):
817 super(cg2packer, self).__init__(repo, bundlecaps)
817 super(cg2packer, self).__init__(repo, bundlecaps)
818 if self._reorder is None:
818 if self._reorder is None:
819 # Since generaldelta is directly supported by cg2, reordering
819 # Since generaldelta is directly supported by cg2, reordering
820 # generally doesn't help, so we disable it by default (treating
820 # generally doesn't help, so we disable it by default (treating
821 # bundle.reorder=auto just like bundle.reorder=False).
821 # bundle.reorder=auto just like bundle.reorder=False).
822 self._reorder = False
822 self._reorder = False
823
823
824 def deltaparent(self, revlog, rev, p1, p2, prev):
824 def deltaparent(self, revlog, rev, p1, p2, prev):
825 dp = revlog.deltaparent(rev)
825 dp = revlog.deltaparent(rev)
826 # avoid storing full revisions; pick prev in those cases
826 # avoid storing full revisions; pick prev in those cases
827 # also pick prev when we can't be sure remote has dp
827 # also pick prev when we can't be sure remote has dp
828 if dp == nullrev or (dp != p1 and dp != p2 and dp != prev):
828 if dp == nullrev or (dp != p1 and dp != p2 and dp != prev):
829 return prev
829 return prev
830 return dp
830 return dp
831
831
832 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
832 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
833 # Do nothing with flags, it is implicitly 0 in cg1 and cg2
833 # Do nothing with flags, it is implicitly 0 in cg1 and cg2
834 return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
834 return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
835
835
836 class cg3packer(cg2packer):
836 class cg3packer(cg2packer):
837 version = '03'
837 version = '03'
838 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
838 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
839
839
840 def _packmanifests(self, dir, mfnodes, lookuplinknode):
840 def _packmanifests(self, dir, mfnodes, lookuplinknode):
841 if dir:
841 if dir:
842 yield self.fileheader(dir)
842 yield self.fileheader(dir)
843 for chunk in self.group(mfnodes, self._repo.manifest.dirlog(dir),
843 for chunk in self.group(mfnodes, self._repo.manifest.dirlog(dir),
844 lookuplinknode, units=_('manifests')):
844 lookuplinknode, units=_('manifests')):
845 yield chunk
845 yield chunk
846
846
847 def _manifestsdone(self):
847 def _manifestsdone(self):
848 return self.close()
848 return self.close()
849
849
850 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
850 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
851 return struct.pack(
851 return struct.pack(
852 self.deltaheader, node, p1n, p2n, basenode, linknode, flags)
852 self.deltaheader, node, p1n, p2n, basenode, linknode, flags)
853
853
854 _packermap = {'01': (cg1packer, cg1unpacker),
854 _packermap = {'01': (cg1packer, cg1unpacker),
855 # cg2 adds support for exchanging generaldelta
855 # cg2 adds support for exchanging generaldelta
856 '02': (cg2packer, cg2unpacker),
856 '02': (cg2packer, cg2unpacker),
857 # cg3 adds support for exchanging revlog flags and treemanifests
857 # cg3 adds support for exchanging revlog flags and treemanifests
858 '03': (cg3packer, cg3unpacker),
858 '03': (cg3packer, cg3unpacker),
859 }
859 }
860
860
def allsupportedversions(ui):
    versions = set(_packermap.keys())
    versions.discard('03')
    if (ui.configbool('experimental', 'changegroup3') or
        ui.configbool('experimental', 'treemanifest')):
        versions.add('03')
    return versions

# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    versions = allsupportedversions(repo.ui)
    if 'treemanifest' in repo.requirements:
        versions.add('03')
    return versions

# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    versions = allsupportedversions(repo.ui)
    if 'treemanifest' in repo.requirements:
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        versions.discard('01')
        versions.discard('02')
        versions.add('03')
    return versions

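# Illustration only (not part of the original module): a sketch of how a
# sender might use the helpers above to pick a changegroup version. It
# assumes `repo` is an open local repository and `remoteversions` is the set
# of versions advertised by the peer; both the helper name and
# `remoteversions` are hypothetical.
def _example_pickversion(repo, remoteversions):
    # Intersect what we can create with what the peer says it can apply,
    # then prefer the newest ('03' > '02' > '01' sorts correctly as strings).
    common = supportedoutgoingversions(repo) & remoteversions
    if not common:
        raise error.Abort(_('no common changegroup version'))
    return max(common)
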
def safeversion(repo):
    # Finds the smallest version that it's safe to assume clients of the repo
    # will support. For example, all hg versions that support generaldelta also
    # support changegroup 02.
    versions = supportedoutgoingversions(repo)
    if 'generaldelta' in repo.requirements:
        versions.discard('01')
    assert versions
    return min(versions)

def getbundler(version, repo, bundlecaps=None):
    assert version in supportedoutgoingversions(repo)
    return _packermap[version][0](repo, bundlecaps)

def getunbundler(version, fh, alg, extras=None):
    return _packermap[version][1](fh, alg, extras=extras)

def _changegroupinfo(repo, nodes, source):
    if repo.ui.verbose or source == 'bundle':
        repo.ui.status(_("%d changesets found\n") % len(nodes))
    if repo.ui.debugflag:
        repo.ui.debug("list of changesets:\n")
        for node in nodes:
            repo.ui.debug("%s\n" % hex(node))

def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know that all linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)

def getsubset(repo, outgoing, bundler, source, fastpath=False):
    gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
    return getunbundler(bundler.version, util.chunkbuffer(gengroup), None,
                        {'clcount': len(outgoing.missing)})

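# Illustration only: getsubset wires a packer to the matching unpacker, so a
# caller gets back a readable changegroup object instead of raw chunks. A
# minimal sketch, assuming `repo` is a local repository and `outgoing` a
# populated discovery.outgoing instance; the helper name is hypothetical.
def _example_build_cg2(repo, outgoing):
    bundler = getbundler('02', repo)
    cg = getsubset(repo, outgoing, bundler, 'example')
    # cg is a cg2unpacker; cg.read(n) returns the next n encoded bytes.
    return cg
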
def changegroupsubset(repo, roots, heads, source, version='01'):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the roots and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
-    cl = repo.changelog
-    if not roots:
-        roots = [nullid]
-    discbases = []
-    for n in roots:
-        discbases.extend([p for p in cl.parents(n) if p != nullid])
-    # TODO: remove call to nodesbetween.
-    csets, roots, heads = cl.nodesbetween(roots, heads)
-    included = set(csets)
-    discbases = [n for n in discbases if n not in included]
-    outgoing = discovery.outgoing(cl, discbases, heads)
+    outgoing = discovery.outgoingbetween(repo, roots, heads)
    bundler = getbundler(version, repo)
    return getsubset(repo, outgoing, bundler, source)

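# Illustration only: a sketch of calling changegroupsubset directly, assuming
# `repo` is a local repository. Passing the repo's own heads reproduces what
# the changegroup() wrapper below does; the helper name is hypothetical.
def _example_subset(repo, roots):
    cg = changegroupsubset(repo, roots, repo.heads(), 'example')
    return cg.read(4096)  # first bytes of the encoded stream
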
def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
                           version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing. Returns a raw changegroup generator."""
    if not outgoing.missing:
        return None
    bundler = getbundler(version, repo, bundlecaps)
    return getsubsetraw(repo, outgoing, bundler, source)

def getlocalchangegroup(repo, source, outgoing, bundlecaps=None,
                        version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing."""
    if not outgoing.missing:
        return None
    bundler = getbundler(version, repo, bundlecaps)
    return getsubset(repo, outgoing, bundler, source)

def computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(cl, common, heads)

def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None,
                   version='01'):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors of common.

    If heads is None, use the local heads. If common is None, use [nullid].

    The nodes in common might not all be known locally due to the way the
    current discovery protocol works.
    """
    outgoing = computeoutgoing(repo, heads, common)
    return getlocalchangegroup(repo, source, outgoing, bundlecaps=bundlecaps,
                               version=version)

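# Illustration only: a pull-style sketch of getchangegroup, assuming `repo`
# is local and `commonheads` are nodes both sides already share (e.g. from
# discovery.findcommonincoming). The helper name is hypothetical.
def _example_pull_cg(repo, commonheads):
    cg = getchangegroup(repo, 'pull', heads=None, common=commonheads,
                        version='02')
    # None means the outgoing set was empty and there is nothing to send.
    return cg
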
def changegroup(repo, basenodes, source):
    # to avoid a race we use changegroupsubset() (issue1320)
    return changegroupsubset(repo, basenodes, repo.heads(), source)

def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    revisions = 0
    files = 0
    while True:
        chunkdata = source.filelogheader()
        if not chunkdata:
            break
        files += 1
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        repo.ui.progress(_('files'), files, unit=_('files'),
                         total=expectedfiles)
        fl = repo.file(f)
        o = len(fl)
        try:
            if not fl.addgroup(source, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        if f in needfiles:
            needs = needfiles[f]
            for new in xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    repo.ui.progress(_('files'), None)

    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files
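
# Illustration only: the shape of the `needfiles` argument consumed above. It
# maps a file path to the set of filenode hashes the incoming changesets
# reference; _addchangegroupfiles checks that every one of them arrives. The
# node values here are hypothetical 20-byte stand-ins.
_example_needfiles = {
    'foo/bar.txt': set(['\x11' * 20, '\x22' * 20]),
    'baz.py': set(['\x33' * 20]),
}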
@@ -1,417 +1,438 @@
# discovery.py - protocol changeset discovery functions
#
# Copyright 2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from .i18n import _
from .node import (
    nullid,
    short,
)

from . import (
    bookmarks,
    branchmap,
    error,
    obsolete,
    phases,
    setdiscovery,
    treediscovery,
    util,
)

def findcommonincoming(repo, remote, heads=None, force=False):
    """Return a tuple (common, anyincoming, heads) used to identify the common
    subset of nodes between repo and remote.

    "common" is a list of (at least) the heads of the common subset.
    "anyincoming" is testable as a boolean indicating if any nodes are missing
    locally. If remote does not support getbundle, this actually is a list of
    roots of the nodes that would be incoming, to be supplied to
    changegroupsubset. No code except for pull should be relying on this fact
    any longer.
    "heads" is either the supplied heads, or else the remote's heads.

    If you pass heads and they are all known locally, the response lists just
    these heads in "common" and in "heads".

    Please use findcommonoutgoing to compute the set of outgoing nodes to give
    extensions a good hook into outgoing.
    """

    if not remote.capable('getbundle'):
        return treediscovery.findcommonincoming(repo, remote, heads, force)

    if heads:
        allknown = True
        knownnode = repo.changelog.hasnode # no nodemap until it is filtered
        for h in heads:
            if not knownnode(h):
                allknown = False
                break
        if allknown:
            return (heads, False, heads)

    res = setdiscovery.findcommonheads(repo.ui, repo, remote,
                                       abortwhenunrelated=not force)
    common, anyinc, srvheads = res
    return (list(common), anyinc, heads or list(srvheads))

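# Illustration only: a sketch of driving findcommonincoming by hand, assuming
# a Mercurial of this vintage on sys.path and a reachable remote URL (the URL
# and the helper name are hypothetical).
def _example_incoming(repo):
    from mercurial import hg
    remote = hg.peer(repo.ui, {}, 'http://example.com/hg/repo')
    common, anyinc, srvheads = findcommonincoming(repo, remote)
    if anyinc:
        repo.ui.status('remote has changesets we lack\n')
    return common, srvheads
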
class outgoing(object):
    '''Represents the set of nodes present in a local repo but not in a
    (possibly) remote one.

    Members:

    missing is a list of all nodes present in local but not in remote.
    common is a list of all nodes shared between the two repos.
    excluded is the list of missing changesets that shouldn't be sent remotely.
    missingheads is the list of heads of missing.
    commonheads is the list of heads of common.

    The sets are computed on demand from the heads, unless provided upfront
    by discovery.'''

    def __init__(self, revlog, commonheads, missingheads):
        self.commonheads = commonheads
        self.missingheads = missingheads
        self._revlog = revlog
        self._common = None
        self._missing = None
        self.excluded = []

    def _computecommonmissing(self):
        sets = self._revlog.findcommonmissing(self.commonheads,
                                              self.missingheads)
        self._common, self._missing = sets

    @util.propertycache
    def common(self):
        if self._common is None:
            self._computecommonmissing()
        return self._common

    @util.propertycache
    def missing(self):
        if self._missing is None:
            self._computecommonmissing()
        return self._missing

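# Illustration only: constructing an outgoing instance directly. The heads
# lists are hypothetical; common/missing are then derived lazily from the
# changelog on first attribute access.
def _example_outgoing(repo, commonheads, missingheads):
    og = outgoing(repo.changelog, commonheads, missingheads)
    return len(og.missing)  # triggers _computecommonmissing()
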
+def outgoingbetween(repo, roots, heads):
+    """create an ``outgoing`` consisting of nodes between roots and heads
+
+    The ``missing`` nodes will be descendants of any of the ``roots`` and
+    ancestors of any of the ``heads``, both of which are given as lists
+    of binary nodes.
+    """
+    cl = repo.changelog
+    if not roots:
+        roots = [nullid]
+    discbases = []
+    for n in roots:
+        discbases.extend([p for p in cl.parents(n) if p != nullid])
+    # TODO remove call to nodesbetween.
+    # TODO populate attributes on outgoing instance instead of setting
+    # discbases.
+    csets, roots, heads = cl.nodesbetween(roots, heads)
+    included = set(csets)
+    discbases = [n for n in discbases if n not in included]
+    return outgoing(cl, discbases, heads)

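# Illustration only: outgoingbetween mirrors the root/head interface of
# changegroupsubset. Passing empty roots appears to select everything
# ancestral to the given heads; the helper name is hypothetical.
def _example_between(repo):
    og = outgoingbetween(repo, [], repo.heads())
    return og.missing  # nodes between the null revision and the heads
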
def findcommonoutgoing(repo, other, onlyheads=None, force=False,
                       commoninc=None, portable=False):
    '''Return an outgoing instance to identify the nodes present in repo but
    not in other.

    If onlyheads is given, only nodes ancestral to nodes in onlyheads
    (inclusive) are included. If you already know the local repo's heads,
    passing them in onlyheads is faster than letting them be recomputed here.

    If commoninc is given, it must be the result of a prior call to
    findcommonincoming(repo, other, force) to avoid recomputing it here.

    If portable is given, compute more conservative common and missingheads,
    to make bundles created from the instance more portable.'''
    # declare an empty outgoing object to be filled later
    og = outgoing(repo.changelog, None, None)

    # get common set if not provided
    if commoninc is None:
        commoninc = findcommonincoming(repo, other, force=force)
    og.commonheads, _any, _hds = commoninc

    # compute outgoing
    mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
    if not mayexclude:
        og.missingheads = onlyheads or repo.heads()
    elif onlyheads is None:
        # use visible heads as it should be cached
        og.missingheads = repo.filtered("served").heads()
        og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
    else:
        # compute common, missing and exclude secret stuff
        sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
        og._common, allmissing = sets
        og._missing = missing = []
        og.excluded = excluded = []
        for node in allmissing:
            ctx = repo[node]
            if ctx.phase() >= phases.secret or ctx.extinct():
                excluded.append(node)
            else:
                missing.append(node)
        if len(missing) == len(allmissing):
            missingheads = onlyheads
        else: # update missing heads
            missingheads = phases.newheads(repo, onlyheads, excluded)
        og.missingheads = missingheads
    if portable:
        # recompute common and missingheads as if -r<rev> had been given for
        # each head of missing, and --base <rev> for each head of the proper
        # ancestors of missing
        og._computecommonmissing()
        cl = repo.changelog
        missingrevs = set(cl.rev(n) for n in og._missing)
        og._common = set(cl.ancestors(missingrevs)) - missingrevs
        commonheads = set(og.commonheads)
        og.missingheads = [h for h in og.missingheads if h not in commonheads]

    return og

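# Illustration only: the usual push-side pairing of the two discovery entry
# points, assuming `repo` is local and `url` reachable (both the URL and the
# helper name are hypothetical).
def _example_push_discovery(repo, url):
    from mercurial import hg
    other = hg.peer(repo.ui, {}, url)
    commoninc = findcommonincoming(repo, other, force=False)
    og = findcommonoutgoing(repo, other, commoninc=commoninc)
    return og.missing  # nodes a push would send
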
def _headssummary(repo, remote, outgoing):
    """compute a summary of branch and heads status before and after push

    return {'branch': ([remoteheads], [newheads], [unsyncedheads])} mapping

    - branch: the branch name
    - remoteheads: the list of remote heads known locally
                   None if the branch is new
    - newheads: the new remote heads (known locally) with outgoing pushed
    - unsyncedheads: the list of remote heads unknown locally.
    """
    cl = repo.changelog
    headssum = {}
    # A. Create set of branches involved in the push.
    branches = set(repo[n].branch() for n in outgoing.missing)
    remotemap = remote.branchmap()
    newbranches = branches - set(remotemap)
    branches.difference_update(newbranches)

    # B. Register remote heads.
    remotebranches = set()
    for branch, heads in remote.branchmap().iteritems():
        remotebranches.add(branch)
        known = []
        unsynced = []
        knownnode = cl.hasnode # do not use nodemap until it is filtered
        for h in heads:
            if knownnode(h):
                known.append(h)
            else:
                unsynced.append(h)
        headssum[branch] = (known, list(known), unsynced)
    # C. Add new branch data.
    missingctx = list(repo[n] for n in outgoing.missing)
    touchedbranches = set()
    for ctx in missingctx:
        branch = ctx.branch()
        touchedbranches.add(branch)
        if branch not in headssum:
            headssum[branch] = (None, [], [])

    # D. Drop data about untouched branches.
    for branch in remotebranches - touchedbranches:
        del headssum[branch]

    # E. Update newmap with outgoing changes.
    # This will possibly add new heads and remove existing ones.
    newmap = branchmap.branchcache((branch, heads[1])
                                   for branch, heads in headssum.iteritems()
                                   if heads[0] is not None)
    newmap.update(repo, (ctx.rev() for ctx in missingctx))
    for branch, newheads in newmap.iteritems():
        headssum[branch][1][:] = newheads
    return headssum

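# Illustration only: the shape of the mapping _headssummary returns. The node
# values are hypothetical 20-byte stand-ins.
_n1, _n2, _n3 = '\x11' * 20, '\x22' * 20, '\x33' * 20
_example_headssum = {
    # existing branch: one remote head, push would add a second one
    'default': ([_n1], [_n1, _n2], []),
    # branch unknown to the remote: no remote heads yet
    'stable': (None, [_n3], []),
}
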
def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
    """Compute branchmapsummary for repo without branchmap support"""

    # 1-4b. old servers: Check for new topological heads.
    # Construct {old,new}map with branch = None (topological branch).
    # (code based on update)
    knownnode = repo.changelog.hasnode # no nodemap until it is filtered
    oldheads = set(h for h in remoteheads if knownnode(h))
    # all nodes in outgoing.missing are children of either:
    # - an element of oldheads
    # - another element of outgoing.missing
    # - nullrev
    # This explains why the new heads are very simple to compute.
    r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
    newheads = list(c.node() for c in r)
    # set some unsynced head to issue the "unsynced changes" warning
    if inc:
        unsynced = set([None])
    else:
        unsynced = set()
    return {None: (oldheads, newheads, unsynced)}

def _nowarnheads(pushop):
    # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    localbookmarks = repo._bookmarks
    remotebookmarks = remote.listkeys('bookmarks')
    bookmarkedheads = set()

    # internal config: bookmarks.pushing
    newbookmarks = [localbookmarks.expandname(b)
                    for b in pushop.ui.configlist('bookmarks', 'pushing')]

    for bm in localbookmarks:
        rnode = remotebookmarks.get(bm)
        if rnode and rnode in repo:
            lctx, rctx = repo[bm], repo[rnode]
            if bookmarks.validdest(repo, rctx, lctx):
                bookmarkedheads.add(lctx.node())
        else:
            if bm in newbookmarks and bm not in remotebookmarks:
                bookmarkedheads.add(repo[bm].node())

    return bookmarkedheads

def checkheads(pushop):
    """Check that a push won't add any outgoing head

    raise Abort error and display ui message as needed.
    """

    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    outgoing = pushop.outgoing
    remoteheads = pushop.remoteheads
    newbranch = pushop.newbranch
    inc = bool(pushop.incoming)

    # Check for each named branch if we're creating new remote heads.
    # To be a remote head after push, node must be either:
    # - unknown locally
    # - a local outgoing head descended from update
    # - a remote head that's known locally and not
    #   ancestral to an outgoing head
    if remoteheads == [nullid]:
        # remote is empty, nothing to check.
        return

    if remote.capable('branchmap'):
        headssum = _headssummary(repo, remote, outgoing)
    else:
        headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
    newbranches = [branch for branch, heads in headssum.iteritems()
                   if heads[0] is None]
    # 1. Check for new branches on the remote.
    if newbranches and not newbranch: # new branch requires --new-branch
        branchnames = ', '.join(sorted(newbranches))
        raise error.Abort(_("push creates new remote branches: %s!")
                          % branchnames,
                          hint=_("use 'hg push --new-branch' to create"
                                 " new remote branches"))

    # 2. Find heads that we need not warn about
    nowarnheads = _nowarnheads(pushop)

    # 3. Check for new heads.
    # If there are more heads after the push than before, a suitable
    # error message, depending on unsynced status, is displayed.
    errormsg = None
    # If there is no obsstore, allfuturecommon won't be used, so no
    # need to compute it.
    if repo.obsstore:
        allmissing = set(outgoing.missing)
        cctx = repo.set('%ld', outgoing.common)
        allfuturecommon = set(c.node() for c in cctx)
        allfuturecommon.update(allmissing)
    for branch, heads in sorted(headssum.iteritems()):
        remoteheads, newheads, unsyncedheads = heads
        candidate_newhs = set(newheads)
        # add unsynced data
        if remoteheads is None:
            oldhs = set()
        else:
            oldhs = set(remoteheads)
        oldhs.update(unsyncedheads)
        candidate_newhs.update(unsyncedheads)
        dhs = None # delta heads, the new heads on branch
        discardedheads = set()
        if not repo.obsstore:
            newhs = candidate_newhs
        else:
            # remove future heads which are actually obsoleted by another
            # pushed element:
            #
            # XXX as above, there are several cases this code does not handle
            # XXX properly
            #
            # (1) if <nh> is public, it won't be affected by obsolete markers
            #     and a new head is created
            #
            # (2) if the new heads have ancestors which are not obsolete and
            #     not ancestors of any other heads we will have a new head too.
            #
            # These two cases will be easy to handle for known changesets but
            # much more tricky for unsynced changes.
            #
            # In addition, this code is confused by prune as it only looks for
            # successors of the heads (none if pruned) leading to issue4354
            newhs = set()
            for nh in candidate_newhs:
                if nh in repo and repo[nh].phase() <= phases.public:
                    newhs.add(nh)
                else:
                    for suc in obsolete.allsuccessors(repo.obsstore, [nh]):
                        if suc != nh and suc in allfuturecommon:
                            discardedheads.add(nh)
                            break
                    else:
                        newhs.add(nh)
        unsynced = sorted(h for h in unsyncedheads if h not in discardedheads)
        if unsynced:
            if None in unsynced:
                # old remote, no heads data
                heads = None
            elif len(unsynced) <= 4 or repo.ui.verbose:
                heads = ' '.join(short(h) for h in unsynced)
            else:
                heads = (' '.join(short(h) for h in unsynced[:4]) +
                         ' ' + _("and %s others") % (len(unsynced) - 4))
            if heads is None:
                repo.ui.status(_("remote has heads that are "
                                 "not known locally\n"))
            elif branch is None:
                repo.ui.status(_("remote has heads that are "
                                 "not known locally: %s\n") % heads)
            else:
                repo.ui.status(_("remote has heads on branch '%s' that are "
                                 "not known locally: %s\n") % (branch, heads))
        if remoteheads is None:
            if len(newhs) > 1:
                dhs = list(newhs)
                if errormsg is None:
                    errormsg = (_("push creates new branch '%s' "
                                  "with multiple heads") % (branch))
                    hint = _("merge or"
                             " see \"hg help push\" for details about"
                             " pushing new heads")
        elif len(newhs) > len(oldhs):
            # remove bookmarked or existing remote heads from the new heads list
            dhs = sorted(newhs - nowarnheads - oldhs)
        if dhs:
            if errormsg is None:
                if branch not in ('default', None):
                    errormsg = _("push creates new remote head %s "
                                 "on branch '%s'!") % (short(dhs[0]), branch)
                elif repo[dhs[0]].bookmarks():
                    errormsg = _("push creates new remote head %s "
                                 "with bookmark '%s'!") % (
                                 short(dhs[0]), repo[dhs[0]].bookmarks()[0])
                else:
                    errormsg = _("push creates new remote head %s!"
                                 ) % short(dhs[0])
                if unsyncedheads:
                    hint = _("pull and merge or"
                             " see \"hg help push\" for details about"
                             " pushing new heads")
                else:
                    hint = _("merge or"
                             " see \"hg help push\" for details about"
                             " pushing new heads")
            if branch is None:
                repo.ui.note(_("new remote heads:\n"))
            else:
                repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
            for h in dhs:
                repo.ui.note((" %s\n") % short(h))
    if errormsg:
        raise error.Abort(errormsg, hint=hint)