@@ -1,436 +1,450 @@
|
1 | 1 | # discovery.py - protocol changeset discovery functions |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2010 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | from .i18n import _ |
|
11 | 11 | from .node import ( |
|
12 | 12 | nullid, |
|
13 | 13 | short, |
|
14 | 14 | ) |
|
15 | 15 | |
|
16 | 16 | from . import ( |
|
17 | 17 | bookmarks, |
|
18 | 18 | branchmap, |
|
19 | 19 | error, |
|
20 | 20 | obsolete, |
|
21 | 21 | phases, |
|
22 | 22 | setdiscovery, |
|
23 | 23 | treediscovery, |
|
24 | 24 | util, |
|
25 | 25 | ) |
|
26 | 26 | |
|
27 | 27 | def findcommonincoming(repo, remote, heads=None, force=False): |
|
28 | 28 | """Return a tuple (common, anyincoming, heads) used to identify the common |
|
29 | 29 | subset of nodes between repo and remote. |
|
30 | 30 | |
|
31 | 31 | "common" is a list of (at least) the heads of the common subset. |
|
32 | 32 | "anyincoming" is testable as a boolean indicating if any nodes are missing |
|
33 | 33 | locally. If remote does not support getbundle, this actually is a list of |
|
34 | 34 | roots of the nodes that would be incoming, to be supplied to |
|
35 | 35 | changegroupsubset. No code except for pull should be relying on this fact |
|
36 | 36 | any longer. |
|
37 | 37 | "heads" is either the supplied heads, or else the remote's heads. |
|
38 | 38 | |
|
39 | 39 | If you pass heads and they are all known locally, the response lists just |
|
40 | 40 | these heads in "common" and in "heads". |
|
41 | 41 | |
|
42 | 42 | Please use findcommonoutgoing to compute the set of outgoing nodes to give |
|
43 | 43 | extensions a good hook into outgoing. |
|
44 | 44 | """ |
|
45 | 45 | |
|
46 | 46 | if not remote.capable('getbundle'): |
|
47 | 47 | return treediscovery.findcommonincoming(repo, remote, heads, force) |
|
48 | 48 | |
|
49 | 49 | if heads: |
|
50 | 50 | allknown = True |
|
51 | 51 | knownnode = repo.changelog.hasnode # no nodemap until it is filtered |
|
52 | 52 | for h in heads: |
|
53 | 53 | if not knownnode(h): |
|
54 | 54 | allknown = False |
|
55 | 55 | break |
|
56 | 56 | if allknown: |
|
57 | 57 | return (heads, False, heads) |
|
58 | 58 | |
|
59 | 59 | res = setdiscovery.findcommonheads(repo.ui, repo, remote, |
|
60 | 60 | abortwhenunrelated=not force) |
|
61 | 61 | common, anyinc, srvheads = res |
|
62 | 62 | return (list(common), anyinc, heads or list(srvheads)) |
|
63 | 63 | |
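For orientation, a minimal sketch of driving findcommonincoming from the API. The repository and peer setup is an assumption for illustration (the URL is hypothetical), not part of this change:

    from mercurial import hg, ui as uimod

    ui = uimod.ui.load()           # hg >= 4.1; older releases construct ui.ui()
    repo = hg.repository(ui, '.')
    remote = hg.peer(ui, {}, 'https://example.com/repo')  # hypothetical URL

    common, anyinc, heads = findcommonincoming(repo, remote)
    if anyinc:
        ui.status('remote has changesets missing locally\n')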
|
64 | 64 | class outgoing(object): |
|
65 | 65 | '''Represents the set of nodes present in a local repo but not in a |
|
66 | 66 | (possibly) remote one. |
|
67 | 67 | |
|
68 | 68 | Members: |
|
69 | 69 | |
|
70 | 70 | missing is a list of all nodes present in local but not in remote. |
|
71 | 71 | common is a list of all nodes shared between the two repos. |
|
72 | 72 | excluded is the list of missing changesets that shouldn't be sent remotely. |
|
73 | 73 | missingheads is the list of heads of missing. |
|
74 | 74 | commonheads is the list of heads of common. |
|
75 | 75 | |
|
76 | 76 | The sets are computed on demand from the heads, unless provided upfront |
|
77 | 77 | by discovery.''' |
|
78 | 78 | |
|
79 | 79 | def __init__(self, repo, commonheads=None, missingheads=None, |
|
80 | 80 | missingroots=None): |
|
81 | 81 | # at least one of them must not be set |
|
82 | 82 | assert None in (commonheads, missingroots) |
|
83 | 83 | cl = repo.changelog |
|
84 | 84 | if missingheads is None: |
|
85 | 85 | missingheads = cl.heads() |
|
86 | 86 | if missingroots: |
|
87 | 87 | discbases = [] |
|
88 | 88 | for n in missingroots: |
|
89 | 89 | discbases.extend([p for p in cl.parents(n) if p != nullid]) |
|
90 | 90 | # TODO remove call to nodesbetween. |
|
91 | 91 | # TODO populate attributes on outgoing instance instead of setting |
|
92 | 92 | # discbases. |
|
93 | 93 | csets, roots, heads = cl.nodesbetween(missingroots, missingheads) |
|
94 | 94 | included = set(csets) |
|
95 | 95 | missingheads = heads |
|
96 | 96 | commonheads = [n for n in discbases if n not in included] |
|
97 | 97 | elif not commonheads: |
|
98 | 98 | commonheads = [nullid] |
|
99 | 99 | self.commonheads = commonheads |
|
100 | 100 | self.missingheads = missingheads |
|
101 | 101 | self._revlog = cl |
|
102 | 102 | self._common = None |
|
103 | 103 | self._missing = None |
|
104 | 104 | self.excluded = [] |
|
105 | 105 | |
|
106 | 106 | def _computecommonmissing(self): |
|
107 | 107 | sets = self._revlog.findcommonmissing(self.commonheads, |
|
108 | 108 | self.missingheads) |
|
109 | 109 | self._common, self._missing = sets |
|
110 | 110 | |
|
111 | 111 | @util.propertycache |
|
112 | 112 | def common(self): |
|
113 | 113 | if self._common is None: |
|
114 | 114 | self._computecommonmissing() |
|
115 | 115 | return self._common |
|
116 | 116 | |
|
117 | 117 | @util.propertycache |
|
118 | 118 | def missing(self): |
|
119 | 119 | if self._missing is None: |
|
120 | 120 | self._computecommonmissing() |
|
121 | 121 | return self._missing |
|
122 | 122 | |
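An illustrative sketch of the constructor contract asserted in __init__: at most one of commonheads/missingroots may be supplied (the revision used here is a placeholder):

    node = repo['tip'].node()  # placeholder revision

    # heads-based description: nodes not ancestral to commonheads are missing
    og1 = outgoing(repo, commonheads=[node])

    # roots-based description: commonheads is derived from the roots' parents
    og2 = outgoing(repo, missingroots=[node])

    # passing both would trip the assertion; the missing/common lists are
    # computed lazily on first attribute access
    og1.missing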
|
123 | 123 | def findcommonoutgoing(repo, other, onlyheads=None, force=False, |
|
124 | 124 | commoninc=None, portable=False): |
|
125 | 125 | '''Return an outgoing instance to identify the nodes present in repo but |
|
126 | 126 | not in other. |
|
127 | 127 | |
|
128 | 128 | If onlyheads is given, only nodes ancestral to nodes in onlyheads |
|
129 | 129 | (inclusive) are included. If you already know the local repo's heads, |
|
130 | 130 | passing them in onlyheads is faster than letting them be recomputed here. |
|
131 | 131 | |
|
132 | 132 | If commoninc is given, it must be the result of a prior call to |
|
133 | 133 | findcommonincoming(repo, other, force) to avoid recomputing it here. |
|
134 | 134 | |
|
135 | 135 | If portable is given, compute more conservative common and missingheads, |
|
136 | 136 | to make bundles created from the instance more portable.''' |
|
137 | 137 | # declare an empty outgoing object to be filled later |
|
138 | 138 | og = outgoing(repo, None, None) |
|
139 | 139 | |
|
140 | 140 | # get common set if not provided |
|
141 | 141 | if commoninc is None: |
|
142 | 142 | commoninc = findcommonincoming(repo, other, force=force) |
|
143 | 143 | og.commonheads, _any, _hds = commoninc |
|
144 | 144 | |
|
145 | 145 | # compute outgoing |
|
146 | 146 | mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore) |
|
147 | 147 | if not mayexclude: |
|
148 | 148 | og.missingheads = onlyheads or repo.heads() |
|
149 | 149 | elif onlyheads is None: |
|
150 | 150 | # use visible heads as it should be cached |
|
151 | 151 | og.missingheads = repo.filtered("served").heads() |
|
152 | 152 | og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')] |
|
153 | 153 | else: |
|
154 | 154 | # compute common, missing and exclude secret stuff |
|
155 | 155 | sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads) |
|
156 | 156 | og._common, allmissing = sets |
|
157 | 157 | og._missing = missing = [] |
|
158 | 158 | og.excluded = excluded = [] |
|
159 | 159 | for node in allmissing: |
|
160 | 160 | ctx = repo[node] |
|
161 | 161 | if ctx.phase() >= phases.secret or ctx.extinct(): |
|
162 | 162 | excluded.append(node) |
|
163 | 163 | else: |
|
164 | 164 | missing.append(node) |
|
165 | 165 | if len(missing) == len(allmissing): |
|
166 | 166 | missingheads = onlyheads |
|
167 | 167 | else: # update missing heads |
|
168 | 168 | missingheads = phases.newheads(repo, onlyheads, excluded) |
|
169 | 169 | og.missingheads = missingheads |
|
170 | 170 | if portable: |
|
171 | 171 | # recompute common and missingheads as if -r<rev> had been given for |
|
172 | 172 | # each head of missing, and --base <rev> for each head of the proper |
|
173 | 173 | # ancestors of missing |
|
174 | 174 | og._computecommonmissing() |
|
175 | 175 | cl = repo.changelog |
|
176 | 176 | missingrevs = set(cl.rev(n) for n in og._missing) |
|
177 | 177 | og._common = set(cl.ancestors(missingrevs)) - missingrevs |
|
178 | 178 | commonheads = set(og.commonheads) |
|
179 | 179 | og.missingheads = [h for h in og.missingheads if h not in commonheads] |
|
180 | 180 | |
|
181 | 181 | return og |
|
182 | 182 | |
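A usage sketch tying the two discovery entry points together, reusing a prior findcommonincoming result as the docstring recommends (repo and remote as in the earlier sketch):

    commoninc = findcommonincoming(repo, remote)
    og = findcommonoutgoing(repo, remote, commoninc=commoninc, portable=True)
    for node in og.missing:
        repo.ui.status('would send %s\n' % short(node))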
|
183 | 183 | def _headssummary(repo, remote, outgoing): |
|
184 | 184 | """compute a summary of branch and heads status before and after push |
|
185 | 185 | |
|
186 | 186 | return {'branch': ([remoteheads], [newheads], [unsyncedheads])} mapping |
|
187 | 187 | |
|
188 | 188 | - branch: the branch name |
|
189 | 189 | - remoteheads: the list of remote heads known locally |
|
190 | 190 | None if the branch is new |
|
191 | 191 | - newheads: the new remote heads (known locally) with outgoing pushed |
|
192 | 192 | - unsyncedheads: the list of remote heads unknown locally. |
|
193 | 193 | """ |
|
194 | 194 | cl = repo.changelog |
|
195 | 195 | headssum = {} |
|
196 | 196 | # A. Create set of branches involved in the push. |
|
197 | 197 | branches = set(repo[n].branch() for n in outgoing.missing) |
|
198 | 198 | remotemap = remote.branchmap() |
|
199 | 199 | newbranches = branches - set(remotemap) |
|
200 | 200 | branches.difference_update(newbranches) |
|
201 | 201 | |
|
202 | 202 | # B. Register remote heads. |
|
203 | 203 | remotebranches = set() |
|
204 | 204 | for branch, heads in remote.branchmap().iteritems(): |
|
205 | 205 | remotebranches.add(branch) |
|
206 | 206 | known = [] |
|
207 | 207 | unsynced = [] |
|
208 | 208 | knownnode = cl.hasnode # do not use nodemap until it is filtered |
|
209 | 209 | for h in heads: |
|
210 | 210 | if knownnode(h): |
|
211 | 211 | known.append(h) |
|
212 | 212 | else: |
|
213 | 213 | unsynced.append(h) |
|
214 | 214 | headssum[branch] = (known, list(known), unsynced) |
|
215 | 215 | # C. Add new branch data. |
|
216 | 216 | missingctx = list(repo[n] for n in outgoing.missing) |
|
217 | 217 | touchedbranches = set() |
|
218 | 218 | for ctx in missingctx: |
|
219 | 219 | branch = ctx.branch() |
|
220 | 220 | touchedbranches.add(branch) |
|
221 | 221 | if branch not in headssum: |
|
222 | 222 | headssum[branch] = (None, [], []) |
|
223 | 223 | |
|
224 | 224 | # D. Drop data about untouched branches: |
|
225 | 225 | for branch in remotebranches - touchedbranches: |
|
226 | 226 | del headssum[branch] |
|
227 | 227 | |
|
228 | 228 | # E. Update newmap with outgoing changes. |
|
229 | 229 | # This will possibly add new heads and remove existing ones. |
|
230 | 230 | newmap = branchmap.branchcache((branch, heads[1]) |
|
231 | 231 | for branch, heads in headssum.iteritems() |
|
232 | 232 | if heads[0] is not None) |
|
233 | 233 | newmap.update(repo, (ctx.rev() for ctx in missingctx)) |
|
234 | 234 | for branch, newheads in newmap.iteritems(): |
|
235 | 235 | headssum[branch][1][:] = newheads |
|
236 | 236 | return headssum |
|
237 | 237 | |
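To make the mapping concrete, a hedged sketch of consuming the summary (Python 2 iteritems, matching the code above; repo, remote and outgoing are assumed from a surrounding push operation):

    headssum = _headssummary(repo, remote, outgoing)
    for branch, (remoteheads, newheads, unsyncedheads) in sorted(
            headssum.iteritems()):
        if remoteheads is None:
            repo.ui.write('%s: new branch on the remote\n' % branch)
        elif len(newheads) > len(remoteheads):
            repo.ui.write('%s: push would add a new head\n' % branch)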
|
238 | 238 | def _oldheadssummary(repo, remoteheads, outgoing, inc=False): |
|
239 | 239 | """Compute branchmapsummary for repo without branchmap support""" |
|
240 | 240 | |
|
241 | 241 | # 1-4b. old servers: Check for new topological heads. |
|
242 | 242 | # Construct {old,new}map with branch = None (topological branch). |
|
243 | 243 | # (code based on update) |
|
244 | 244 | knownnode = repo.changelog.hasnode # no nodemap until it is filtered |
|
245 | 245 | oldheads = set(h for h in remoteheads if knownnode(h)) |
|
246 | 246 | # all nodes in outgoing.missing are children of either: |
|
247 | 247 | # - an element of oldheads |
|
248 | 248 | # - another element of outgoing.missing |
|
249 | 249 | # - nullrev |
|
250 | 250 | # This explains why the new heads are very simple to compute. |
|
251 | 251 | r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing) |
|
252 | 252 | newheads = list(c.node() for c in r) |
|
253 | 253 | # set some unsynced head to issue the "unsynced changes" warning |
|
254 | 254 | if inc: |
|
255 | 255 | unsynced = set([None]) |
|
256 | 256 | else: |
|
257 | 257 | unsynced = set() |
|
258 | 258 | return {None: (oldheads, newheads, unsynced)} |
|
259 | 259 | |
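A small sketch of the degenerate summary this returns for old servers; remoteheads and outgoing are assumed from a surrounding push operation:

    summary = _oldheadssummary(repo, remoteheads, outgoing, inc=True)
    oldheads, newheads, unsynced = summary[None]  # single topological "branch"
    # inc=True plants a None placeholder so checkheads can emit the
    # "unsynced changes" warning even though no real head data exists
    assert unsynced == set([None])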
|
260 | 260 | def _nowarnheads(pushop): |
|
261 | 261 | # Compute newly pushed bookmarks. We don't warn about bookmarked heads. |
|
262 | 262 | repo = pushop.repo.unfiltered() |
|
263 | 263 | remote = pushop.remote |
|
264 | 264 | localbookmarks = repo._bookmarks |
|
265 | 265 | remotebookmarks = remote.listkeys('bookmarks') |
|
266 | 266 | bookmarkedheads = set() |
|
267 | 267 | |
|
268 | 268 | # internal config: bookmarks.pushing |
|
269 | 269 | newbookmarks = [localbookmarks.expandname(b) |
|
270 | 270 | for b in pushop.ui.configlist('bookmarks', 'pushing')] |
|
271 | 271 | |
|
272 | 272 | for bm in localbookmarks: |
|
273 | 273 | rnode = remotebookmarks.get(bm) |
|
274 | 274 | if rnode and rnode in repo: |
|
275 | 275 | lctx, rctx = repo[bm], repo[rnode] |
|
276 | 276 | if bookmarks.validdest(repo, rctx, lctx): |
|
277 | 277 | bookmarkedheads.add(lctx.node()) |
|
278 | 278 | else: |
|
279 | 279 | if bm in newbookmarks and bm not in remotebookmarks: |
|
280 | 280 | bookmarkedheads.add(repo[bm].node()) |
|
281 | 281 | |
|
282 | 282 | return bookmarkedheads |
|
283 | 283 | |
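For illustration, a sketch of how the push machinery feeds this helper: exchange.push records the bookmarks being pushed under the internal bookmarks.pushing config, so their heads are exempted from the new-head warning (the bookmark name here is hypothetical):

    # exchange.push does the equivalent with pushop.bookmarks
    pushop.ui.setconfig('bookmarks', 'pushing', ['feature-bm'], 'push')
    nowarn = _nowarnheads(pushop)  # nodes of bookmarked heads to skip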
|
284 | 284 | def checkheads(pushop): |
|
285 | 285 | """Check that a push won't add any outgoing head |
|
286 | 286 | |
|
287 | 287 | Raises an Abort error and displays a ui message as needed. |
|
288 | 288 | """ |
|
289 | 289 | |
|
290 | 290 | repo = pushop.repo.unfiltered() |
|
291 | 291 | remote = pushop.remote |
|
292 | 292 | outgoing = pushop.outgoing |
|
293 | 293 | remoteheads = pushop.remoteheads |
|
294 | 294 | newbranch = pushop.newbranch |
|
295 | 295 | inc = bool(pushop.incoming) |
|
296 | 296 | |
|
297 | 297 | # Check for each named branch if we're creating new remote heads. |
|
298 | 298 | # To be a remote head after push, node must be either: |
|
299 | 299 | # - unknown locally |
|
300 | 300 | # - a local outgoing head descended from update |
|
301 | 301 | # - a remote head that's known locally and not |
|
302 | 302 | # ancestral to an outgoing head |
|
303 | 303 | if remoteheads == [nullid]: |
|
304 | 304 | # remote is empty, nothing to check. |
|
305 | 305 | return |
|
306 | 306 | |
|
307 | 307 | if remote.capable('branchmap'): |
|
308 | 308 | headssum = _headssummary(repo, remote, outgoing) |
|
309 | 309 | else: |
|
310 | 310 | headssum = _oldheadssummary(repo, remoteheads, outgoing, inc) |
|
311 | 311 | newbranches = [branch for branch, heads in headssum.iteritems() |
|
312 | 312 | if heads[0] is None] |
|
313 | 313 | # 1. Check for new branches on the remote. |
|
314 | 314 | if newbranches and not newbranch: # new branch requires --new-branch |
|
315 | 315 | branchnames = ', '.join(sorted(newbranches)) |
|
316 | 316 | raise error.Abort(_("push creates new remote branches: %s!") |
|
317 | 317 | % branchnames, |
|
318 | 318 | hint=_("use 'hg push --new-branch' to create" |
|
319 | 319 | " new remote branches")) |
|
320 | 320 | |
|
321 | 321 | # 2. Find heads that we need not warn about |
|
322 | 322 | nowarnheads = _nowarnheads(pushop) |
|
323 | 323 | |
|
324 | 324 | # 3. Check for new heads. |
|
325 | 325 | # If there are more heads after the push than before, a suitable |
|
326 | 326 | # error message, depending on unsynced status, is displayed. |
|
327 | 327 | errormsg = None |
|
328 | 328 | # If there is no obsstore, allfuturecommon won't be used, so no |
|
329 | 329 | # need to compute it. |
|
330 | 330 | if repo.obsstore: |
|
331 | 331 | allmissing = set(outgoing.missing) |
|
332 | 332 | cctx = repo.set('%ld', outgoing.common) |
|
333 | 333 | allfuturecommon = set(c.node() for c in cctx) |
|
334 | 334 | allfuturecommon.update(allmissing) |
|
335 | 335 | for branch, heads in sorted(headssum.iteritems()): |
|
336 | 336 | remoteheads, newheads, unsyncedheads = heads |
|
337 | 337 | candidate_newhs = set(newheads) |
|
338 | 338 | # add unsynced data |
|
339 | 339 | if remoteheads is None: |
|
340 | 340 | oldhs = set() |
|
341 | 341 | else: |
|
342 | 342 | oldhs = set(remoteheads) |
|
343 | 343 | oldhs.update(unsyncedheads) |
|
344 | 344 | candidate_newhs.update(unsyncedheads) |
|
345 | 345 | dhs = None # delta heads, the new heads on branch |
|
346 | discardedheads = set() | |
|
347 | 346 | if not repo.obsstore: |
|
347 | discardedheads = set() | |
|
348 | 348 | newhs = candidate_newhs |
|
349 | 349 | else: |
|
350 | # remove future heads which are actually obsoleted by another | |
|
351 | # pushed element: | |
|
352 | # | |
|
353 | # XXX as above, there are several cases this code does not handle | |
|
354 | # XXX properly | |
|
355 | # | |
|
356 | # (1) if <nh> is public, it won't be affected by obsolete markers | |
|
357 | # and a new head is created | |
|
358 | # | |
|
359 | # (2) if the new heads have ancestors which are not obsolete and | |
|
360 | # not ancestors of any other heads we will have a new head too. | |
|
361 | # | |
|
362 | # These two cases will be easy to handle for known changesets but | |
|
363 | # much more tricky for unsynced changes. | |
|
364 | # | |
|
365 | # In addition, this code is confused by prune as it only looks for | |
|
366 | # successors of the heads (none if pruned), leading to issue4354 | |
|
367 | newhs = set() | |
|
368 | for nh in candidate_newhs: | |
|
369 | if nh in repo and repo[nh].phase() <= phases.public: | |
|
370 | newhs.add(nh) | |
|
371 | else: | |
|
372 | for suc in obsolete.allsuccessors(repo.obsstore, [nh]): | |
|
373 | if suc != nh and suc in allfuturecommon: | |
|
374 | discardedheads.add(nh) | |
|
375 | break | |
|
376 | else: | |
|
377 | newhs.add(nh) | |
|
350 | newhs, discardedheads = _postprocessobsolete(pushop, | |
|
351 | allfuturecommon, | |
|
352 | candidate_newhs) | |
|
378 | 353 | unsynced = sorted(h for h in unsyncedheads if h not in discardedheads) |
|
379 | 354 | if unsynced: |
|
380 | 355 | if None in unsynced: |
|
381 | 356 | # old remote, no heads data |
|
382 | 357 | heads = None |
|
383 | 358 | elif len(unsynced) <= 4 or repo.ui.verbose: |
|
384 | 359 | heads = ' '.join(short(h) for h in unsynced) |
|
385 | 360 | else: |
|
386 | 361 | heads = (' '.join(short(h) for h in unsynced[:4]) + |
|
387 | 362 | ' ' + _("and %s others") % (len(unsynced) - 4)) |
|
388 | 363 | if heads is None: |
|
389 | 364 | repo.ui.status(_("remote has heads that are " |
|
390 | 365 | "not known locally\n")) |
|
391 | 366 | elif branch is None: |
|
392 | 367 | repo.ui.status(_("remote has heads that are " |
|
393 | 368 | "not known locally: %s\n") % heads) |
|
394 | 369 | else: |
|
395 | 370 | repo.ui.status(_("remote has heads on branch '%s' that are " |
|
396 | 371 | "not known locally: %s\n") % (branch, heads)) |
|
397 | 372 | if remoteheads is None: |
|
398 | 373 | if len(newhs) > 1: |
|
399 | 374 | dhs = list(newhs) |
|
400 | 375 | if errormsg is None: |
|
401 | 376 | errormsg = (_("push creates new branch '%s' " |
|
402 | 377 | "with multiple heads") % (branch)) |
|
403 | 378 | hint = _("merge or" |
|
404 | 379 | " see 'hg help push' for details about" |
|
405 | 380 | " pushing new heads") |
|
406 | 381 | elif len(newhs) > len(oldhs): |
|
407 | 382 | # remove bookmarked or existing remote heads from the new heads list |
|
408 | 383 | dhs = sorted(newhs - nowarnheads - oldhs) |
|
409 | 384 | if dhs: |
|
410 | 385 | if errormsg is None: |
|
411 | 386 | if branch not in ('default', None): |
|
412 | 387 | errormsg = _("push creates new remote head %s " |
|
413 | 388 | "on branch '%s'!") % (short(dhs[0]), branch) |
|
414 | 389 | elif repo[dhs[0]].bookmarks(): |
|
415 | 390 | errormsg = _("push creates new remote head %s " |
|
416 | 391 | "with bookmark '%s'!") % ( |
|
417 | 392 | short(dhs[0]), repo[dhs[0]].bookmarks()[0]) |
|
418 | 393 | else: |
|
419 | 394 | errormsg = _("push creates new remote head %s!" |
|
420 | 395 | ) % short(dhs[0]) |
|
421 | 396 | if unsyncedheads: |
|
422 | 397 | hint = _("pull and merge or" |
|
423 | 398 | " see 'hg help push' for details about" |
|
424 | 399 | " pushing new heads") |
|
425 | 400 | else: |
|
426 | 401 | hint = _("merge or" |
|
427 | 402 | " see 'hg help push' for details about" |
|
428 | 403 | " pushing new heads") |
|
429 | 404 | if branch is None: |
|
430 | 405 | repo.ui.note(_("new remote heads:\n")) |
|
431 | 406 | else: |
|
432 | 407 | repo.ui.note(_("new remote heads on branch '%s':\n") % branch) |
|
433 | 408 | for h in dhs: |
|
434 | 409 | repo.ui.note((" %s\n") % short(h)) |
|
435 | 410 | if errormsg: |
|
436 | 411 | raise error.Abort(errormsg, hint=hint) |
|
412 | ||
|
413 | def _postprocessobsolete(pushop, futurecommon, candidate_newhs): | |
|
414 | """post process the list of new heads with obsolescence information | |
|
415 | ||
|
416 | Exists as a subfunction to contain the complexity and allow extensions to | |
|
417 | experiment with smarter logic. | |
|
418 | Returns (newheads, discarded_heads) tuple | |
|
419 | """ | |
|
420 | # remove future heads which are actually obsoleted by another | |
|
421 | # pushed element: | |
|
422 | # | |
|
423 | # XXX as above, there are several cases this code does not handle | |
|
424 | # XXX properly | |
|
425 | # | |
|
426 | # (1) if <nh> is public, it won't be affected by obsolete markers | |
|
427 | # and a new head is created | |
|
428 | # | |
|
429 | # (2) if the new heads have ancestors which are not obsolete and | |
|
430 | # not ancestors of any other heads we will have a new head too. | |
|
431 | # | |
|
432 | # These two cases will be easy to handle for known changesets but | |
|
433 | # much more tricky for unsynced changes. | |
|
434 | # | |
|
435 | # In addition, this code is confused by prune as it only looks for | |
|
436 | # successors of the heads (none if pruned), leading to issue4354 | |
|
437 | repo = pushop.repo | |
|
438 | newhs = set() | |
|
439 | discarded = set() | |
|
440 | for nh in candidate_newhs: | |
|
441 | if nh in repo and repo[nh].phase() <= phases.public: | |
|
442 | newhs.add(nh) | |
|
443 | else: | |
|
444 | for suc in obsolete.allsuccessors(repo.obsstore, [nh]): | |
|
445 | if suc != nh and suc in futurecommon: | |
|
446 | discarded.add(nh) | |
|
447 | break | |
|
448 | else: | |
|
449 | newhs.add(nh) | |
|
450 | return newhs, discarded |
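Since the docstring explicitly invites extensions to experiment with smarter logic, a sketch of wrapping the newly extracted hook from an extension (the wrapper and its logging behavior are hypothetical):

    from mercurial import discovery, extensions

    def _loggingpostprocess(orig, pushop, futurecommon, candidate_newhs):
        # delegate to the default logic, then report what was discarded
        newhs, discarded = orig(pushop, futurecommon, candidate_newhs)
        pushop.ui.debug('discarded %d obsoleted candidate heads\n'
                        % len(discarded))
        return newhs, discarded

    def extsetup(ui):
        extensions.wrapfunction(discovery, '_postprocessobsolete',
                                _loggingpostprocess)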