discovery: port _postprocessobsolete() changes from evolve, add tests...
av6
r49537:053a5bf5 default
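All of the cases below exercise a server that refuses any push which would leave more than one head on a named branch, combined with obsolescence markers that can keep a superseded branch visible. The enforcement comes from the updated push-checkheads-util.sh at the end of this changeset, which appends the following to server/.hg/hgrc when a test calls "setuprepos single-head":

    [experimental]
    # enforce a single head per branch
    single-head-per-branch = yes

When the check fails, the transaction is rolled back server-side with "abort: rejecting multiple heads on branch ..." as cases A-2 and A-5 show; when every extra head is fully superseded by the pushed content, the push is accepted (cases A-1, A-3 and A-4).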
@@ -0,0 +1,114 b''
1 =========================================
2 Testing single head enforcement: Case A-1
3 =========================================
4
5 A repository is set to only accept a single head per name (typically a named
6 branch). However, obsolete changesets can make this enforcement more
7 complicated, because they can be kept visible by other changesets on other
8 branches.
9
10 This case is part of a series of tests checking this behavior.
11
12 Category A: Involving obsolescence
13 TestCase 1: A fully obsolete branch kept visible by another one
14
15 .. old-state:
16 ..
17 .. * 2 changesets on branch default
18 .. * 2 changesets on branch Z on top of them
19 ..
20 .. new-state:
21 ..
22 .. * 2 changesets on branch Z at the same location
23 .. * 2 changesets on branch default superseding the other ones
24 ..
25 .. expected-result:
26 ..
27 .. * only one head detected
28 ..
29 .. graph-summary:
30 ..
31 .. D ● (branch Z)
32 .. |
33 .. C ● (branch Z)
34 .. |
35 .. B ø⇠◔ B'
36 .. | |
37 .. A ø⇠◔ A'
38 .. |/
39 .. ●
40
41 $ . $TESTDIR/testlib/push-checkheads-util.sh
42
43 $ cat >> $HGRCPATH << EOF
44 > [command-templates]
45 > log = "{node|short} [{branch}] ({phase}): {desc}\n"
46 > EOF
47
48 Test setup
49 ----------
50
51 $ mkdir A1
52 $ cd A1
53 $ setuprepos single-head
54 creating basic server and client repo
55 updating to branch default
56 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
57 $ cd client
58 $ mkcommit B0
59 $ hg branch Z
60 marked working directory as branch Z
61 (branches are permanent and global, did you want a bookmark?)
62 $ mkcommit C0
63 $ mkcommit D0
64 $ hg push --new-branch
65 pushing to $TESTTMP/A1/server
66 searching for changes
67 adding changesets
68 adding manifests
69 adding file changes
70 added 3 changesets with 3 changes to 3 files
71 $ hg up 0
72 0 files updated, 0 files merged, 4 files removed, 0 files unresolved
73 $ mkcommit A1
74 created new head
75 $ mkcommit B1
76 $ hg debugobsolete `getid "desc(A0)"` `getid "desc(A1)"`
77 1 new obsolescence markers
78 obsoleted 1 changesets
79 3 new orphan changesets
80 $ hg debugobsolete `getid "desc(B0)"` `getid "desc(B1)"`
81 1 new obsolescence markers
82 obsoleted 1 changesets
83 $ hg heads
84 262c8c798096 [default] (draft): B1
85 cdf1dbb37a67 [Z] (draft): D0
86 $ hg log -G --hidden
87 @ 262c8c798096 [default] (draft): B1
88 |
89 o f6082bc4ffef [default] (draft): A1
90 |
91 | * cdf1dbb37a67 [Z] (draft): D0
92 | |
93 | * 3213e3e16c67 [Z] (draft): C0
94 | |
95 | x d73caddc5533 [default] (draft): B0
96 | |
97 | x 8aaa48160adc [default] (draft): A0
98 |/
99 o 1e4be0697311 [default] (public): root
100
101
102 Actual testing
103 --------------
104
105 $ hg push -r 'desc("B1")'
106 pushing to $TESTTMP/A1/server
107 searching for changes
108 adding changesets
109 adding manifests
110 adding file changes
111 added 2 changesets with 2 changes to 2 files (+1 heads)
112 2 new obsolescence markers
113 obsoleted 2 changesets
114 2 new orphan changesets
@@ -0,0 +1,113 b''
1 =========================================
2 Testing single head enforcement: Case A-2
3 =========================================
4
5 A repository is set to only accept a single head per name (typically a named
6 branch). However, obsolete changesets can make this enforcement more
7 complicated, because they can be kept visible by other changesets on other
8 branches.
9
10 This case is part of a series of tests checking this behavior.
11
12 Category A: Involving obsolescence
13 TestCase 2: A branch is split in two, effectively creating two heads
14
15 .. old-state:
16 ..
17 .. * 2 changesets on branch default
18 .. * 2 changesets on branch Z on top of them
19 ..
20 .. new-state:
21 ..
22 .. * 2 changesets on branch Z at the same location
23 .. * 1 changeset on branch default unchanged
24 .. * 1 changeset on branch default superseding the other one
25 ..
26 .. expected-result:
27 ..
28 .. * two heads detected
29 ..
30 .. graph-summary:
31 ..
32 .. D ● (branch Z)
33 .. |
34 .. C ● (branch Z)
35 .. |
36 .. B ø⇠◔ B'
37 .. | |
38 .. A ● |
39 .. |/
40 .. ●
41
42 $ . $TESTDIR/testlib/push-checkheads-util.sh
43
44 $ cat >> $HGRCPATH << EOF
45 > [command-templates]
46 > log = "{node|short} [{branch}] ({phase}): {desc}\n"
47 > EOF
48
49 Test setup
50 ----------
51
52 $ mkdir A2
53 $ cd A2
54 $ setuprepos single-head
55 creating basic server and client repo
56 updating to branch default
57 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
58 $ cd client
59 $ mkcommit B0
60 $ hg branch Z
61 marked working directory as branch Z
62 (branches are permanent and global, did you want a bookmark?)
63 $ mkcommit C0
64 $ mkcommit D0
65 $ hg push --new-branch
66 pushing to $TESTTMP/A2/server
67 searching for changes
68 adding changesets
69 adding manifests
70 adding file changes
71 added 3 changesets with 3 changes to 3 files
72 $ hg up 0
73 0 files updated, 0 files merged, 4 files removed, 0 files unresolved
74 $ mkcommit B1
75 created new head
76 $ hg debugobsolete `getid "desc(B0)"` `getid "desc(B1)"`
77 1 new obsolescence markers
78 obsoleted 1 changesets
79 2 new orphan changesets
80 $ hg heads
81 25c56d33e4c4 [default] (draft): B1
82 cdf1dbb37a67 [Z] (draft): D0
83 8aaa48160adc [default] (draft): A0
84 $ hg log -G --hidden
85 @ 25c56d33e4c4 [default] (draft): B1
86 |
87 | * cdf1dbb37a67 [Z] (draft): D0
88 | |
89 | * 3213e3e16c67 [Z] (draft): C0
90 | |
91 | x d73caddc5533 [default] (draft): B0
92 | |
93 | o 8aaa48160adc [default] (draft): A0
94 |/
95 o 1e4be0697311 [default] (public): root
96
97
98 Actual testing
99 --------------
100
101 (force push to make sure we get the changeset on the remote)
102
103 $ hg push -r 'desc("B1")' --force
104 pushing to $TESTTMP/A2/server
105 searching for changes
106 adding changesets
107 adding manifests
108 adding file changes
109 transaction abort!
110 rollback completed
111 abort: rejecting multiple heads on branch "default"
112 (2 heads: 8aaa48160adc 25c56d33e4c4)
113 [255]
@@ -0,0 +1,120 b''
1 =========================================
2 Testing single head enforcement: Case A-3
3 =========================================
4
5 A repository is set to only accept a single head per name (typically a named
6 branch). However, obsolete changesets can make this enforcement more
7 complicated, because they can be kept visible by other changesets on other
8 branches.
9
10 This case is part of a series of tests checking this behavior.
11
12 Category A: Involving obsolescence
13 TestCase 3: Full superseding of a branch interleaved with another
14
15 .. old-state:
16 ..
17 .. * 2 changesets on branch default
18 .. * 2 changesets on branch Z interleaved with the other
19 ..
20 .. new-state:
21 ..
22 .. * 2 changesets on branch Z at the same location
23 .. * 2 changesets on branch default superseding the other ones
24 ..
25 .. expected-result:
26 ..
27 .. * only one head detected
28 ..
29 .. graph-summary:
30 ..
31 .. D ● (branch Z)
32 .. |
33 .. C ø⇠◔ C'
34 .. | |
35 .. B ● | (branch Z)
36 .. | |
37 .. A ø⇠◔ A'
38 .. |/
39 .. ●
40
41 $ . $TESTDIR/testlib/push-checkheads-util.sh
42
43 $ cat >> $HGRCPATH << EOF
44 > [command-templates]
45 > log = "{node|short} [{branch}] ({phase}): {desc}\n"
46 > EOF
47
48 Test setup
49 ----------
50
51 $ mkdir A3
52 $ cd A3
53 $ setuprepos single-head
54 creating basic server and client repo
55 updating to branch default
56 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
57 $ cd client
58 $ hg branch Z
59 marked working directory as branch Z
60 (branches are permanent and global, did you want a bookmark?)
61 $ mkcommit B0
62 $ hg branch default --force
63 marked working directory as branch default
64 $ mkcommit C0
65 created new head
66 $ hg branch Z --force
67 marked working directory as branch Z
68 $ mkcommit D0
69 created new head
70 $ hg push --new-branch
71 pushing to $TESTTMP/A3/server
72 searching for changes
73 adding changesets
74 adding manifests
75 adding file changes
76 added 3 changesets with 3 changes to 3 files
77 $ hg up 0
78 0 files updated, 0 files merged, 4 files removed, 0 files unresolved
79 $ mkcommit A1
80 created new head
81 $ mkcommit C1
82 $ hg debugobsolete `getid "desc(A0)"` `getid "desc(A1)"`
83 1 new obsolescence markers
84 obsoleted 1 changesets
85 3 new orphan changesets
86 $ hg debugobsolete `getid "desc(C0)"` `getid "desc(C1)"`
87 1 new obsolescence markers
88 obsoleted 1 changesets
89 $ hg heads
90 0c76bc104656 [default] (draft): C1
91 78578c4306ce [Z] (draft): D0
92 $ hg log -G --hidden
93 @ 0c76bc104656 [default] (draft): C1
94 |
95 o f6082bc4ffef [default] (draft): A1
96 |
97 | * 78578c4306ce [Z] (draft): D0
98 | |
99 | x afc55ba2ce61 [default] (draft): C0
100 | |
101 | * 93e5c1321ece [Z] (draft): B0
102 | |
103 | x 8aaa48160adc [default] (draft): A0
104 |/
105 o 1e4be0697311 [default] (public): root
106
107
108 Actual testing
109 --------------
110
111 $ hg push -r 'desc("C1")'
112 pushing to $TESTTMP/A3/server
113 searching for changes
114 adding changesets
115 adding manifests
116 adding file changes
117 added 2 changesets with 2 changes to 2 files (+1 heads)
118 2 new obsolescence markers
119 obsoleted 2 changesets
120 2 new orphan changesets
@@ -0,0 +1,117 b''
1 =========================================
2 Testing single head enforcement: Case A-4
3 =========================================
4
5 A repository is set to only accept a single head per name (typically a named
6 branch). However, obsolete changesets can make this enforcement more
7 complicated, because they can be kept visible by other changesets on other
8 branches.
9
10 This case is part of a series of tests checking this behavior.
11
12 Category A: Involving obsolescence
13 TestCase 4: Partial rewrite of a branch to deinterleave it
14
15 .. old-state:
16 ..
17 .. * 2 changesets on branch default
18 .. * 2 changesets on branch Z interleaved with the other one
19 ..
20 .. new-state:
21 ..
22 .. * 2 changesets on branch Z at the same location
23 .. * 1 changeset on default untouched (the lower one)
24 .. * 1 changeset on default moved on top of the other one
25 ..
26 .. expected-result:
27 ..
28 .. * only one head detected
29 ..
30 .. graph-summary:
31 ..
32 .. D ● (branch Z)
33 .. |
34 .. C ø⇠◔ C'
35 .. | |
36 .. B ● | (branch Z)
37 .. |/
38 .. A ●
39 .. |
40 .. ●
41
42 $ . $TESTDIR/testlib/push-checkheads-util.sh
43
44 $ cat >> $HGRCPATH << EOF
45 > [command-templates]
46 > log = "{node|short} [{branch}] ({phase}): {desc}\n"
47 > EOF
48
49 Test setup
50 ----------
51
52 $ mkdir A4
53 $ cd A4
54 $ setuprepos single-head
55 creating basic server and client repo
56 updating to branch default
57 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
58 $ cd client
59 $ hg branch Z
60 marked working directory as branch Z
61 (branches are permanent and global, did you want a bookmark?)
62 $ mkcommit B0
63 $ hg branch default --force
64 marked working directory as branch default
65 $ mkcommit C0
66 created new head
67 $ hg branch Z --force
68 marked working directory as branch Z
69 $ mkcommit D0
70 created new head
71 $ hg push --new-branch
72 pushing to $TESTTMP/A4/server
73 searching for changes
74 adding changesets
75 adding manifests
76 adding file changes
77 added 3 changesets with 3 changes to 3 files
78 $ hg up 'desc("A0")'
79 0 files updated, 0 files merged, 3 files removed, 0 files unresolved
80 $ mkcommit C1
81 created new head
82 $ hg debugobsolete `getid "desc(C0)"` `getid "desc(C1)"`
83 1 new obsolescence markers
84 obsoleted 1 changesets
85 1 new orphan changesets
86 $ hg heads
87 cfe9ed94fa4a [default] (draft): C1
88 78578c4306ce [Z] (draft): D0
89 $ hg log -G --hidden
90 @ cfe9ed94fa4a [default] (draft): C1
91 |
92 | * 78578c4306ce [Z] (draft): D0
93 | |
94 | x afc55ba2ce61 [default] (draft): C0
95 | |
96 | o 93e5c1321ece [Z] (draft): B0
97 |/
98 o 8aaa48160adc [default] (draft): A0
99 |
100 o 1e4be0697311 [default] (public): root
101
102
103 Actual testing
104 --------------
105
106 (force push to make sure we get the changeset on the remote)
107
108 $ hg push -r 'desc("C1")' --force
109 pushing to $TESTTMP/A4/server
110 searching for changes
111 adding changesets
112 adding manifests
113 adding file changes
114 added 1 changesets with 1 changes to 1 files (+1 heads)
115 1 new obsolescence markers
116 obsoleted 1 changesets
117 1 new orphan changesets
@@ -0,0 +1,108 b''
1 =========================================
2 Testing single head enforcement: Case A-5
3 =========================================
4
5 A repository is set to only accept a single head per name (typically a named
6 branch). However, obsolete changesets can make this enforcement more
7 complicated, because they can be kept visible by other changesets on other
8 branches.
9
10 This case is part of a series of tests checking this behavior.
11
12 Category A: Involving obsolescence
13 TestCase 5: Obsoleting a merge reveals two heads
14
15 .. old-state:
16 ..
17 .. * 3 changesets on branch default (2 on their own branch + 1 merge)
18 .. * 1 changeset on branch Z (child of the merge)
19 ..
20 .. new-state:
21 ..
22 .. * 2 changesets on branch default (merge is obsolete) each a head
23 .. * 1 changeset on branch Z keeping the merge visible
24 ..
25 .. expected-result:
26 ..
27 .. * 2 heads detected (because we skip the merge)
28 ..
29 .. graph-summary:
30 ..
31 .. C ● (branch Z)
32 .. |
33 .. M ⊗
34 .. |\
35 .. A ● ● B
36 .. |/
37 .. ●
38
39 $ . $TESTDIR/testlib/push-checkheads-util.sh
40
41 $ cat >> $HGRCPATH << EOF
42 > [command-templates]
43 > log = "{node|short} [{branch}] ({phase}): {desc}\n"
44 > EOF
45
46 Test setup
47 ----------
48
49 $ mkdir A5
50 $ cd A5
51 $ setuprepos single-head
52 creating basic server and client repo
53 updating to branch default
54 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
55 $ cd client
56 $ hg up 0
57 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
58 $ mkcommit B0
59 created new head
60 $ hg merge
61 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
62 (branch merge, don't forget to commit)
63 $ hg ci -m 'M0'
64 $ hg branch Z
65 marked working directory as branch Z
66 (branches are permanent and global, did you want a bookmark?)
67 $ mkcommit C0
68 $ hg push --new-branch
69 pushing to $TESTTMP/A5/server
70 searching for changes
71 adding changesets
72 adding manifests
73 adding file changes
74 added 3 changesets with 2 changes to 2 files
75 $ hg debugobsolete `getid "desc(M0)"` --record-parents
76 1 new obsolescence markers
77 obsoleted 1 changesets
78 1 new orphan changesets
79 $ hg heads
80 61c95483cc12 [Z] (draft): C0
81 74ff5441d343 [default] (draft): B0
82 8aaa48160adc [default] (draft): A0
83 $ hg log -G --hidden
84 @ 61c95483cc12 [Z] (draft): C0
85 |
86 x 14d3d4d41d1a [default] (draft): M0
87 |\
88 | o 74ff5441d343 [default] (draft): B0
89 | |
90 o | 8aaa48160adc [default] (draft): A0
91 |/
92 o 1e4be0697311 [default] (public): root
93
94
95 Actual testing
96 --------------
97
98 (force push to make sure we get the changeset on the remote)
99
100 $ hg push -r 'desc("C0")' --force
101 pushing to $TESTTMP/A5/server
102 searching for changes
103 no changes found
104 transaction abort!
105 rollback completed
106 abort: rejecting multiple heads on branch "default"
107 (2 heads: 8aaa48160adc 74ff5441d343)
108 [255]
@@ -1,617 +1,622 b''
1 # discovery.py - protocol changeset discovery functions
1 # discovery.py - protocol changeset discovery functions
2 #
2 #
3 # Copyright 2010 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2010 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import functools
10 import functools
11
11
12 from .i18n import _
12 from .i18n import _
13 from .node import (
13 from .node import (
14 hex,
14 hex,
15 short,
15 short,
16 )
16 )
17
17
18 from . import (
18 from . import (
19 bookmarks,
19 bookmarks,
20 branchmap,
20 branchmap,
21 error,
21 error,
22 obsolete,
22 phases,
23 phases,
23 pycompat,
24 pycompat,
24 scmutil,
25 scmutil,
25 setdiscovery,
26 setdiscovery,
26 treediscovery,
27 treediscovery,
27 util,
28 util,
28 )
29 )
29
30
30
31
31 def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
32 def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
32 """Return a tuple (common, anyincoming, heads) used to identify the common
33 """Return a tuple (common, anyincoming, heads) used to identify the common
33 subset of nodes between repo and remote.
34 subset of nodes between repo and remote.
34
35
35 "common" is a list of (at least) the heads of the common subset.
36 "common" is a list of (at least) the heads of the common subset.
36 "anyincoming" is testable as a boolean indicating if any nodes are missing
37 "anyincoming" is testable as a boolean indicating if any nodes are missing
37 locally. If remote does not support getbundle, this actually is a list of
38 locally. If remote does not support getbundle, this actually is a list of
38 roots of the nodes that would be incoming, to be supplied to
39 roots of the nodes that would be incoming, to be supplied to
39 changegroupsubset. No code except for pull should be relying on this fact
40 changegroupsubset. No code except for pull should be relying on this fact
40 any longer.
41 any longer.
41 "heads" is either the supplied heads, or else the remote's heads.
42 "heads" is either the supplied heads, or else the remote's heads.
42 "ancestorsof" if not None, restrict the discovery to a subset defined by
43 "ancestorsof" if not None, restrict the discovery to a subset defined by
43 these nodes. Changeset outside of this set won't be considered (but may
44 these nodes. Changeset outside of this set won't be considered (but may
44 still appear in "common").
45 still appear in "common").
45
46
46 If you pass heads and they are all known locally, the response lists just
47 If you pass heads and they are all known locally, the response lists just
47 these heads in "common" and in "heads".
48 these heads in "common" and in "heads".
48
49
49 Please use findcommonoutgoing to compute the set of outgoing nodes to give
50 Please use findcommonoutgoing to compute the set of outgoing nodes to give
50 extensions a good hook into outgoing.
51 extensions a good hook into outgoing.
51 """
52 """
52
53
53 if not remote.capable(b'getbundle'):
54 if not remote.capable(b'getbundle'):
54 return treediscovery.findcommonincoming(repo, remote, heads, force)
55 return treediscovery.findcommonincoming(repo, remote, heads, force)
55
56
56 if heads:
57 if heads:
57 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
58 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
58 if all(knownnode(h) for h in heads):
59 if all(knownnode(h) for h in heads):
59 return (heads, False, heads)
60 return (heads, False, heads)
60
61
61 res = setdiscovery.findcommonheads(
62 res = setdiscovery.findcommonheads(
62 repo.ui,
63 repo.ui,
63 repo,
64 repo,
64 remote,
65 remote,
65 abortwhenunrelated=not force,
66 abortwhenunrelated=not force,
66 ancestorsof=ancestorsof,
67 ancestorsof=ancestorsof,
67 )
68 )
68 common, anyinc, srvheads = res
69 common, anyinc, srvheads = res
69 if heads and not anyinc:
70 if heads and not anyinc:
70 # server could be lying on the advertised heads
71 # server could be lying on the advertised heads
71 has_node = repo.changelog.hasnode
72 has_node = repo.changelog.hasnode
72 anyinc = any(not has_node(n) for n in heads)
73 anyinc = any(not has_node(n) for n in heads)
73 return (list(common), anyinc, heads or list(srvheads))
74 return (list(common), anyinc, heads or list(srvheads))
74
75
75
76
76 class outgoing(object):
77 class outgoing(object):
77 """Represents the result of a findcommonoutgoing() call.
78 """Represents the result of a findcommonoutgoing() call.
78
79
79 Members:
80 Members:
80
81
81 ancestorsof is a list of the nodes whose ancestors are included in the
82 ancestorsof is a list of the nodes whose ancestors are included in the
82 outgoing operation.
83 outgoing operation.
83
84
84 missing is a list of those ancestors of ancestorsof that are present in
85 missing is a list of those ancestors of ancestorsof that are present in
85 local but not in remote.
86 local but not in remote.
86
87
87 common is a set containing revs common between the local and the remote
88 common is a set containing revs common between the local and the remote
88 repository (at least all of those that are ancestors of ancestorsof).
89 repository (at least all of those that are ancestors of ancestorsof).
89
90
90 commonheads is the list of heads of common.
91 commonheads is the list of heads of common.
91
92
92 excluded is the list of missing changesets that shouldn't be sent
93 excluded is the list of missing changesets that shouldn't be sent
93 remotely.
94 remotely.
94
95
95 Some members are computed on demand from the heads, unless provided upfront
96 Some members are computed on demand from the heads, unless provided upfront
96 by discovery."""
97 by discovery."""
97
98
98 def __init__(
99 def __init__(
99 self, repo, commonheads=None, ancestorsof=None, missingroots=None
100 self, repo, commonheads=None, ancestorsof=None, missingroots=None
100 ):
101 ):
101 # at least one of them must not be set
102 # at least one of them must not be set
102 assert None in (commonheads, missingroots)
103 assert None in (commonheads, missingroots)
103 cl = repo.changelog
104 cl = repo.changelog
104 if ancestorsof is None:
105 if ancestorsof is None:
105 ancestorsof = cl.heads()
106 ancestorsof = cl.heads()
106 if missingroots:
107 if missingroots:
107 discbases = []
108 discbases = []
108 for n in missingroots:
109 for n in missingroots:
109 discbases.extend([p for p in cl.parents(n) if p != repo.nullid])
110 discbases.extend([p for p in cl.parents(n) if p != repo.nullid])
110 # TODO remove call to nodesbetween.
111 # TODO remove call to nodesbetween.
111 # TODO populate attributes on outgoing instance instead of setting
112 # TODO populate attributes on outgoing instance instead of setting
112 # discbases.
113 # discbases.
113 csets, roots, heads = cl.nodesbetween(missingroots, ancestorsof)
114 csets, roots, heads = cl.nodesbetween(missingroots, ancestorsof)
114 included = set(csets)
115 included = set(csets)
115 ancestorsof = heads
116 ancestorsof = heads
116 commonheads = [n for n in discbases if n not in included]
117 commonheads = [n for n in discbases if n not in included]
117 elif not commonheads:
118 elif not commonheads:
118 commonheads = [repo.nullid]
119 commonheads = [repo.nullid]
119 self.commonheads = commonheads
120 self.commonheads = commonheads
120 self.ancestorsof = ancestorsof
121 self.ancestorsof = ancestorsof
121 self._revlog = cl
122 self._revlog = cl
122 self._common = None
123 self._common = None
123 self._missing = None
124 self._missing = None
124 self.excluded = []
125 self.excluded = []
125
126
126 def _computecommonmissing(self):
127 def _computecommonmissing(self):
127 sets = self._revlog.findcommonmissing(
128 sets = self._revlog.findcommonmissing(
128 self.commonheads, self.ancestorsof
129 self.commonheads, self.ancestorsof
129 )
130 )
130 self._common, self._missing = sets
131 self._common, self._missing = sets
131
132
132 @util.propertycache
133 @util.propertycache
133 def common(self):
134 def common(self):
134 if self._common is None:
135 if self._common is None:
135 self._computecommonmissing()
136 self._computecommonmissing()
136 return self._common
137 return self._common
137
138
138 @util.propertycache
139 @util.propertycache
139 def missing(self):
140 def missing(self):
140 if self._missing is None:
141 if self._missing is None:
141 self._computecommonmissing()
142 self._computecommonmissing()
142 return self._missing
143 return self._missing
143
144
144
145
145 def findcommonoutgoing(
146 def findcommonoutgoing(
146 repo, other, onlyheads=None, force=False, commoninc=None, portable=False
147 repo, other, onlyheads=None, force=False, commoninc=None, portable=False
147 ):
148 ):
148 """Return an outgoing instance to identify the nodes present in repo but
149 """Return an outgoing instance to identify the nodes present in repo but
149 not in other.
150 not in other.
150
151
151 If onlyheads is given, only nodes ancestral to nodes in onlyheads
152 If onlyheads is given, only nodes ancestral to nodes in onlyheads
152 (inclusive) are included. If you already know the local repo's heads,
153 (inclusive) are included. If you already know the local repo's heads,
153 passing them in onlyheads is faster than letting them be recomputed here.
154 passing them in onlyheads is faster than letting them be recomputed here.
154
155
155 If commoninc is given, it must be the result of a prior call to
156 If commoninc is given, it must be the result of a prior call to
156 findcommonincoming(repo, other, force) to avoid recomputing it here.
157 findcommonincoming(repo, other, force) to avoid recomputing it here.
157
158
158 If portable is given, compute more conservative common and ancestorsof,
159 If portable is given, compute more conservative common and ancestorsof,
159 to make bundles created from the instance more portable."""
160 to make bundles created from the instance more portable."""
160 # declare an empty outgoing object to be filled later
161 # declare an empty outgoing object to be filled later
161 og = outgoing(repo, None, None)
162 og = outgoing(repo, None, None)
162
163
163 # get common set if not provided
164 # get common set if not provided
164 if commoninc is None:
165 if commoninc is None:
165 commoninc = findcommonincoming(
166 commoninc = findcommonincoming(
166 repo, other, force=force, ancestorsof=onlyheads
167 repo, other, force=force, ancestorsof=onlyheads
167 )
168 )
168 og.commonheads, _any, _hds = commoninc
169 og.commonheads, _any, _hds = commoninc
169
170
170 # compute outgoing
171 # compute outgoing
171 mayexclude = repo._phasecache.phaseroots[phases.secret] or repo.obsstore
172 mayexclude = repo._phasecache.phaseroots[phases.secret] or repo.obsstore
172 if not mayexclude:
173 if not mayexclude:
173 og.ancestorsof = onlyheads or repo.heads()
174 og.ancestorsof = onlyheads or repo.heads()
174 elif onlyheads is None:
175 elif onlyheads is None:
175 # use visible heads as it should be cached
176 # use visible heads as it should be cached
176 og.ancestorsof = repo.filtered(b"served").heads()
177 og.ancestorsof = repo.filtered(b"served").heads()
177 og.excluded = [ctx.node() for ctx in repo.set(b'secret() or extinct()')]
178 og.excluded = [ctx.node() for ctx in repo.set(b'secret() or extinct()')]
178 else:
179 else:
179 # compute common, missing and exclude secret stuff
180 # compute common, missing and exclude secret stuff
180 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
181 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
181 og._common, allmissing = sets
182 og._common, allmissing = sets
182 og._missing = missing = []
183 og._missing = missing = []
183 og.excluded = excluded = []
184 og.excluded = excluded = []
184 for node in allmissing:
185 for node in allmissing:
185 ctx = repo[node]
186 ctx = repo[node]
186 if ctx.phase() >= phases.secret or ctx.extinct():
187 if ctx.phase() >= phases.secret or ctx.extinct():
187 excluded.append(node)
188 excluded.append(node)
188 else:
189 else:
189 missing.append(node)
190 missing.append(node)
190 if len(missing) == len(allmissing):
191 if len(missing) == len(allmissing):
191 ancestorsof = onlyheads
192 ancestorsof = onlyheads
192 else: # update missing heads
193 else: # update missing heads
193 ancestorsof = phases.newheads(repo, onlyheads, excluded)
194 ancestorsof = phases.newheads(repo, onlyheads, excluded)
194 og.ancestorsof = ancestorsof
195 og.ancestorsof = ancestorsof
195 if portable:
196 if portable:
196 # recompute common and ancestorsof as if -r<rev> had been given for
197 # recompute common and ancestorsof as if -r<rev> had been given for
197 # each head of missing, and --base <rev> for each head of the proper
198 # each head of missing, and --base <rev> for each head of the proper
198 # ancestors of missing
199 # ancestors of missing
199 og._computecommonmissing()
200 og._computecommonmissing()
200 cl = repo.changelog
201 cl = repo.changelog
201 missingrevs = {cl.rev(n) for n in og._missing}
202 missingrevs = {cl.rev(n) for n in og._missing}
202 og._common = set(cl.ancestors(missingrevs)) - missingrevs
203 og._common = set(cl.ancestors(missingrevs)) - missingrevs
203 commonheads = set(og.commonheads)
204 commonheads = set(og.commonheads)
204 og.ancestorsof = [h for h in og.ancestorsof if h not in commonheads]
205 og.ancestorsof = [h for h in og.ancestorsof if h not in commonheads]
205
206
206 return og
207 return og
207
208
208
209
209 def _headssummary(pushop):
210 def _headssummary(pushop):
210 """compute a summary of branch and heads status before and after push
211 """compute a summary of branch and heads status before and after push
211
212
212 return {'branch': ([remoteheads], [newheads],
213 return {'branch': ([remoteheads], [newheads],
213 [unsyncedheads], [discardedheads])} mapping
214 [unsyncedheads], [discardedheads])} mapping
214
215
215 - branch: the branch name,
216 - branch: the branch name,
216 - remoteheads: the list of remote heads known locally
217 - remoteheads: the list of remote heads known locally
217 None if the branch is new,
218 None if the branch is new,
218 - newheads: the new remote heads (known locally) with outgoing pushed,
219 - newheads: the new remote heads (known locally) with outgoing pushed,
219 - unsyncedheads: the list of remote heads unknown locally,
220 - unsyncedheads: the list of remote heads unknown locally,
220 - discardedheads: the list of heads made obsolete by the push.
221 - discardedheads: the list of heads made obsolete by the push.
221 """
222 """
222 repo = pushop.repo.unfiltered()
223 repo = pushop.repo.unfiltered()
223 remote = pushop.remote
224 remote = pushop.remote
224 outgoing = pushop.outgoing
225 outgoing = pushop.outgoing
225 cl = repo.changelog
226 cl = repo.changelog
226 headssum = {}
227 headssum = {}
227 missingctx = set()
228 missingctx = set()
228 # A. Create set of branches involved in the push.
229 # A. Create set of branches involved in the push.
229 branches = set()
230 branches = set()
230 for n in outgoing.missing:
231 for n in outgoing.missing:
231 ctx = repo[n]
232 ctx = repo[n]
232 missingctx.add(ctx)
233 missingctx.add(ctx)
233 branches.add(ctx.branch())
234 branches.add(ctx.branch())
234
235
235 with remote.commandexecutor() as e:
236 with remote.commandexecutor() as e:
236 remotemap = e.callcommand(b'branchmap', {}).result()
237 remotemap = e.callcommand(b'branchmap', {}).result()
237
238
238 knownnode = cl.hasnode # do not use nodemap until it is filtered
239 knownnode = cl.hasnode # do not use nodemap until it is filtered
239 # A. register remote heads of branches which are in outgoing set
240 # A. register remote heads of branches which are in outgoing set
240 for branch, heads in pycompat.iteritems(remotemap):
241 for branch, heads in pycompat.iteritems(remotemap):
241 # don't add head info about branches which we don't have locally
242 # don't add head info about branches which we don't have locally
242 if branch not in branches:
243 if branch not in branches:
243 continue
244 continue
244 known = []
245 known = []
245 unsynced = []
246 unsynced = []
246 for h in heads:
247 for h in heads:
247 if knownnode(h):
248 if knownnode(h):
248 known.append(h)
249 known.append(h)
249 else:
250 else:
250 unsynced.append(h)
251 unsynced.append(h)
251 headssum[branch] = (known, list(known), unsynced)
252 headssum[branch] = (known, list(known), unsynced)
252
253
253 # B. add new branch data
254 # B. add new branch data
254 for branch in branches:
255 for branch in branches:
255 if branch not in headssum:
256 if branch not in headssum:
256 headssum[branch] = (None, [], [])
257 headssum[branch] = (None, [], [])
257
258
258 # C. Update newmap with outgoing changes.
259 # C. Update newmap with outgoing changes.
259 # This will possibly add new heads and remove existing ones.
260 # This will possibly add new heads and remove existing ones.
260 newmap = branchmap.remotebranchcache(
261 newmap = branchmap.remotebranchcache(
261 repo,
262 repo,
262 (
263 (
263 (branch, heads[1])
264 (branch, heads[1])
264 for branch, heads in pycompat.iteritems(headssum)
265 for branch, heads in pycompat.iteritems(headssum)
265 if heads[0] is not None
266 if heads[0] is not None
266 ),
267 ),
267 )
268 )
268 newmap.update(repo, (ctx.rev() for ctx in missingctx))
269 newmap.update(repo, (ctx.rev() for ctx in missingctx))
269 for branch, newheads in pycompat.iteritems(newmap):
270 for branch, newheads in pycompat.iteritems(newmap):
270 headssum[branch][1][:] = newheads
271 headssum[branch][1][:] = newheads
271 for branch, items in pycompat.iteritems(headssum):
272 for branch, items in pycompat.iteritems(headssum):
272 for l in items:
273 for l in items:
273 if l is not None:
274 if l is not None:
274 l.sort()
275 l.sort()
275 headssum[branch] = items + ([],)
276 headssum[branch] = items + ([],)
276
277
277 # If there are no obsstore, no post processing are needed.
277 # If there is no obsstore, no post processing is needed.
278 # If there is no obsstore, no post processing is needed.
279 if repo.obsstore:
279 torev = repo.changelog.rev
280 torev = repo.changelog.rev
280 futureheads = {torev(h) for h in outgoing.ancestorsof}
281 futureheads = {torev(h) for h in outgoing.ancestorsof}
281 futureheads |= {torev(h) for h in outgoing.commonheads}
282 futureheads |= {torev(h) for h in outgoing.commonheads}
282 allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
283 allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
283 for branch, heads in sorted(pycompat.iteritems(headssum)):
284 for branch, heads in sorted(pycompat.iteritems(headssum)):
284 remoteheads, newheads, unsyncedheads, placeholder = heads
285 remoteheads, newheads, unsyncedheads, placeholder = heads
285 result = _postprocessobsolete(pushop, allfuturecommon, newheads)
286 result = _postprocessobsolete(pushop, allfuturecommon, newheads)
286 headssum[branch] = (
287 headssum[branch] = (
287 remoteheads,
288 remoteheads,
288 sorted(result[0]),
289 sorted(result[0]),
289 unsyncedheads,
290 unsyncedheads,
290 sorted(result[1]),
291 sorted(result[1]),
291 )
292 )
292 return headssum
293 return headssum
293
294
294
295
295 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
296 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
296 """Compute branchmapsummary for repo without branchmap support"""
297 """Compute branchmapsummary for repo without branchmap support"""
297
298
298 # 1-4b. old servers: Check for new topological heads.
299 # 1-4b. old servers: Check for new topological heads.
299 # Construct {old,new}map with branch = None (topological branch).
300 # Construct {old,new}map with branch = None (topological branch).
300 # (code based on update)
301 # (code based on update)
301 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
302 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
302 oldheads = sorted(h for h in remoteheads if knownnode(h))
303 oldheads = sorted(h for h in remoteheads if knownnode(h))
303 # all nodes in outgoing.missing are children of either:
304 # all nodes in outgoing.missing are children of either:
304 # - an element of oldheads
305 # - an element of oldheads
305 # - another element of outgoing.missing
306 # - another element of outgoing.missing
306 # - nullrev
307 # - nullrev
307 # This explains why the new heads are very simple to compute.
308 # This explains why the new heads are very simple to compute.
308 r = repo.set(b'heads(%ln + %ln)', oldheads, outgoing.missing)
309 r = repo.set(b'heads(%ln + %ln)', oldheads, outgoing.missing)
309 newheads = sorted(c.node() for c in r)
310 newheads = sorted(c.node() for c in r)
310 # set some unsynced head to issue the "unsynced changes" warning
311 # set some unsynced head to issue the "unsynced changes" warning
311 if inc:
312 if inc:
312 unsynced = [None]
313 unsynced = [None]
313 else:
314 else:
314 unsynced = []
315 unsynced = []
315 return {None: (oldheads, newheads, unsynced, [])}
316 return {None: (oldheads, newheads, unsynced, [])}
316
317
317
318
318 def _nowarnheads(pushop):
319 def _nowarnheads(pushop):
319 # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
320 # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
320 repo = pushop.repo.unfiltered()
321 repo = pushop.repo.unfiltered()
321 remote = pushop.remote
322 remote = pushop.remote
322 localbookmarks = repo._bookmarks
323 localbookmarks = repo._bookmarks
323
324
324 with remote.commandexecutor() as e:
325 with remote.commandexecutor() as e:
325 remotebookmarks = e.callcommand(
326 remotebookmarks = e.callcommand(
326 b'listkeys',
327 b'listkeys',
327 {
328 {
328 b'namespace': b'bookmarks',
329 b'namespace': b'bookmarks',
329 },
330 },
330 ).result()
331 ).result()
331
332
332 bookmarkedheads = set()
333 bookmarkedheads = set()
333
334
334 # internal config: bookmarks.pushing
335 # internal config: bookmarks.pushing
335 newbookmarks = [
336 newbookmarks = [
336 localbookmarks.expandname(b)
337 localbookmarks.expandname(b)
337 for b in pushop.ui.configlist(b'bookmarks', b'pushing')
338 for b in pushop.ui.configlist(b'bookmarks', b'pushing')
338 ]
339 ]
339
340
340 for bm in localbookmarks:
341 for bm in localbookmarks:
341 rnode = remotebookmarks.get(bm)
342 rnode = remotebookmarks.get(bm)
342 if rnode and rnode in repo:
343 if rnode and rnode in repo:
343 lctx, rctx = repo[localbookmarks[bm]], repo[rnode]
344 lctx, rctx = repo[localbookmarks[bm]], repo[rnode]
344 if bookmarks.validdest(repo, rctx, lctx):
345 if bookmarks.validdest(repo, rctx, lctx):
345 bookmarkedheads.add(lctx.node())
346 bookmarkedheads.add(lctx.node())
346 else:
347 else:
347 if bm in newbookmarks and bm not in remotebookmarks:
348 if bm in newbookmarks and bm not in remotebookmarks:
348 bookmarkedheads.add(localbookmarks[bm])
349 bookmarkedheads.add(localbookmarks[bm])
349
350
350 return bookmarkedheads
351 return bookmarkedheads
351
352
352
353
353 def checkheads(pushop):
354 def checkheads(pushop):
354 """Check that a push won't add any outgoing head
355 """Check that a push won't add any outgoing head
355
356
356 Raise a StateError and display a ui message as needed.
357 Raise a StateError and display a ui message as needed.
357 """
358 """
358
359
359 repo = pushop.repo.unfiltered()
360 repo = pushop.repo.unfiltered()
360 remote = pushop.remote
361 remote = pushop.remote
361 outgoing = pushop.outgoing
362 outgoing = pushop.outgoing
362 remoteheads = pushop.remoteheads
363 remoteheads = pushop.remoteheads
363 newbranch = pushop.newbranch
364 newbranch = pushop.newbranch
364 inc = bool(pushop.incoming)
365 inc = bool(pushop.incoming)
365
366
366 # Check for each named branch if we're creating new remote heads.
367 # Check for each named branch if we're creating new remote heads.
367 # To be a remote head after push, node must be either:
368 # To be a remote head after push, node must be either:
368 # - unknown locally
369 # - unknown locally
369 # - a local outgoing head descended from update
370 # - a local outgoing head descended from update
370 # - a remote head that's known locally and not
371 # - a remote head that's known locally and not
371 # ancestral to an outgoing head
372 # ancestral to an outgoing head
372 if remoteheads == [repo.nullid]:
373 if remoteheads == [repo.nullid]:
373 # remote is empty, nothing to check.
374 # remote is empty, nothing to check.
374 return
375 return
375
376
376 if remote.capable(b'branchmap'):
377 if remote.capable(b'branchmap'):
377 headssum = _headssummary(pushop)
378 headssum = _headssummary(pushop)
378 else:
379 else:
379 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
380 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
380 pushop.pushbranchmap = headssum
381 pushop.pushbranchmap = headssum
381 newbranches = [
382 newbranches = [
382 branch
383 branch
383 for branch, heads in pycompat.iteritems(headssum)
384 for branch, heads in pycompat.iteritems(headssum)
384 if heads[0] is None
385 if heads[0] is None
385 ]
386 ]
386 # 1. Check for new branches on the remote.
387 # 1. Check for new branches on the remote.
387 if newbranches and not newbranch: # new branch requires --new-branch
388 if newbranches and not newbranch: # new branch requires --new-branch
388 branchnames = b', '.join(sorted(newbranches))
389 branchnames = b', '.join(sorted(newbranches))
389 # Calculate how many of the new branches are closed branches
390 # Calculate how many of the new branches are closed branches
390 closedbranches = set()
391 closedbranches = set()
391 for tag, heads, tip, isclosed in repo.branchmap().iterbranches():
392 for tag, heads, tip, isclosed in repo.branchmap().iterbranches():
392 if isclosed:
393 if isclosed:
393 closedbranches.add(tag)
394 closedbranches.add(tag)
394 closedbranches = closedbranches & set(newbranches)
395 closedbranches = closedbranches & set(newbranches)
395 if closedbranches:
396 if closedbranches:
396 errmsg = _(b"push creates new remote branches: %s (%d closed)") % (
397 errmsg = _(b"push creates new remote branches: %s (%d closed)") % (
397 branchnames,
398 branchnames,
398 len(closedbranches),
399 len(closedbranches),
399 )
400 )
400 else:
401 else:
401 errmsg = _(b"push creates new remote branches: %s") % branchnames
402 errmsg = _(b"push creates new remote branches: %s") % branchnames
402 hint = _(b"use 'hg push --new-branch' to create new remote branches")
403 hint = _(b"use 'hg push --new-branch' to create new remote branches")
403 raise error.StateError(errmsg, hint=hint)
404 raise error.StateError(errmsg, hint=hint)
404
405
405 # 2. Find heads that we need not warn about
406 # 2. Find heads that we need not warn about
406 nowarnheads = _nowarnheads(pushop)
407 nowarnheads = _nowarnheads(pushop)
407
408
408 # 3. Check for new heads.
409 # 3. Check for new heads.
409 # If there are more heads after the push than before, a suitable
410 # If there are more heads after the push than before, a suitable
410 # error message, depending on unsynced status, is displayed.
411 # error message, depending on unsynced status, is displayed.
411 errormsg = None
412 errormsg = None
412 for branch, heads in sorted(pycompat.iteritems(headssum)):
413 for branch, heads in sorted(pycompat.iteritems(headssum)):
413 remoteheads, newheads, unsyncedheads, discardedheads = heads
414 remoteheads, newheads, unsyncedheads, discardedheads = heads
414 # add unsynced data
415 # add unsynced data
415 if remoteheads is None:
416 if remoteheads is None:
416 oldhs = set()
417 oldhs = set()
417 else:
418 else:
418 oldhs = set(remoteheads)
419 oldhs = set(remoteheads)
419 oldhs.update(unsyncedheads)
420 oldhs.update(unsyncedheads)
420 dhs = None # delta heads, the new heads on branch
421 dhs = None # delta heads, the new heads on branch
421 newhs = set(newheads)
422 newhs = set(newheads)
422 newhs.update(unsyncedheads)
423 newhs.update(unsyncedheads)
423 if unsyncedheads:
424 if unsyncedheads:
424 if None in unsyncedheads:
425 if None in unsyncedheads:
425 # old remote, no heads data
426 # old remote, no heads data
426 heads = None
427 heads = None
427 else:
428 else:
428 heads = scmutil.nodesummaries(repo, unsyncedheads)
429 heads = scmutil.nodesummaries(repo, unsyncedheads)
429 if heads is None:
430 if heads is None:
430 repo.ui.status(
431 repo.ui.status(
431 _(b"remote has heads that are not known locally\n")
432 _(b"remote has heads that are not known locally\n")
432 )
433 )
433 elif branch is None:
434 elif branch is None:
434 repo.ui.status(
435 repo.ui.status(
435 _(b"remote has heads that are not known locally: %s\n")
436 _(b"remote has heads that are not known locally: %s\n")
436 % heads
437 % heads
437 )
438 )
438 else:
439 else:
439 repo.ui.status(
440 repo.ui.status(
440 _(
441 _(
441 b"remote has heads on branch '%s' that are "
442 b"remote has heads on branch '%s' that are "
442 b"not known locally: %s\n"
443 b"not known locally: %s\n"
443 )
444 )
444 % (branch, heads)
445 % (branch, heads)
445 )
446 )
446 if remoteheads is None:
447 if remoteheads is None:
447 if len(newhs) > 1:
448 if len(newhs) > 1:
448 dhs = list(newhs)
449 dhs = list(newhs)
449 if errormsg is None:
450 if errormsg is None:
450 errormsg = (
451 errormsg = (
451 _(b"push creates new branch '%s' with multiple heads")
452 _(b"push creates new branch '%s' with multiple heads")
452 % branch
453 % branch
453 )
454 )
454 hint = _(
455 hint = _(
455 b"merge or"
456 b"merge or"
456 b" see 'hg help push' for details about"
457 b" see 'hg help push' for details about"
457 b" pushing new heads"
458 b" pushing new heads"
458 )
459 )
459 elif len(newhs) > len(oldhs):
460 elif len(newhs) > len(oldhs):
460 # remove bookmarked or existing remote heads from the new heads list
461 # remove bookmarked or existing remote heads from the new heads list
461 dhs = sorted(newhs - nowarnheads - oldhs)
462 dhs = sorted(newhs - nowarnheads - oldhs)
462 if dhs:
463 if dhs:
463 if errormsg is None:
464 if errormsg is None:
464 if branch not in (b'default', None):
465 if branch not in (b'default', None):
465 errormsg = _(
466 errormsg = _(
466 b"push creates new remote head %s on branch '%s'"
467 b"push creates new remote head %s on branch '%s'"
467 ) % (
468 ) % (
468 short(dhs[0]),
469 short(dhs[0]),
469 branch,
470 branch,
470 )
471 )
471 elif repo[dhs[0]].bookmarks():
472 elif repo[dhs[0]].bookmarks():
472 errormsg = _(
473 errormsg = _(
473 b"push creates new remote head %s "
474 b"push creates new remote head %s "
474 b"with bookmark '%s'"
475 b"with bookmark '%s'"
475 ) % (short(dhs[0]), repo[dhs[0]].bookmarks()[0])
476 ) % (short(dhs[0]), repo[dhs[0]].bookmarks()[0])
476 else:
477 else:
477 errormsg = _(b"push creates new remote head %s") % short(
478 errormsg = _(b"push creates new remote head %s") % short(
478 dhs[0]
479 dhs[0]
479 )
480 )
480 if unsyncedheads:
481 if unsyncedheads:
481 hint = _(
482 hint = _(
482 b"pull and merge or"
483 b"pull and merge or"
483 b" see 'hg help push' for details about"
484 b" see 'hg help push' for details about"
484 b" pushing new heads"
485 b" pushing new heads"
485 )
486 )
486 else:
487 else:
487 hint = _(
488 hint = _(
488 b"merge or"
489 b"merge or"
489 b" see 'hg help push' for details about"
490 b" see 'hg help push' for details about"
490 b" pushing new heads"
491 b" pushing new heads"
491 )
492 )
492 if branch is None:
493 if branch is None:
493 repo.ui.note(_(b"new remote heads:\n"))
494 repo.ui.note(_(b"new remote heads:\n"))
494 else:
495 else:
495 repo.ui.note(_(b"new remote heads on branch '%s':\n") % branch)
496 repo.ui.note(_(b"new remote heads on branch '%s':\n") % branch)
496 for h in dhs:
497 for h in dhs:
497 repo.ui.note(b" %s\n" % short(h))
498 repo.ui.note(b" %s\n" % short(h))
498 if errormsg:
499 if errormsg:
499 raise error.StateError(errormsg, hint=hint)
500 raise error.StateError(errormsg, hint=hint)
500
501
501
502
502 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
503 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
503 """post process the list of new heads with obsolescence information
504 """post process the list of new heads with obsolescence information
504
505
505 Exists as a sub-function to contain the complexity and allow extensions to
506 Exists as a sub-function to contain the complexity and allow extensions to
506 experiment with smarter logic.
507 experiment with smarter logic.
507
508
508 Returns (newheads, discarded_heads) tuple
509 Returns (newheads, discarded_heads) tuple
509 """
510 """
510 # known issue
511 # known issue
511 #
512 #
512 # * We "silently" skip processing on all changesets unknown locally
513 # * We "silently" skip processing on all changesets unknown locally
513 #
514 #
514 # * if <nh> is public on the remote, it won't be affected by obsolete
515 # * if <nh> is public on the remote, it won't be affected by obsolete
515 # markers and a new head is created
516 # markers and a new head is created
516
517
517 # define various utilities and containers
518 # define various utilities and containers
518 repo = pushop.repo
519 repo = pushop.repo
519 unfi = repo.unfiltered()
520 unfi = repo.unfiltered()
520 torev = unfi.changelog.index.get_rev
521 torev = unfi.changelog.index.get_rev
521 public = phases.public
522 public = phases.public
522 getphase = unfi._phasecache.phase
523 getphase = unfi._phasecache.phase
523 ispublic = lambda r: getphase(unfi, r) == public
524 ispublic = lambda r: getphase(unfi, r) == public
524 ispushed = lambda n: torev(n) in futurecommon
525 ispushed = lambda n: torev(n) in futurecommon
525 hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
526 hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
526 successorsmarkers = unfi.obsstore.successors
527 successorsmarkers = unfi.obsstore.successors
527 newhs = set() # final set of new heads
528 newhs = set() # final set of new heads
528 discarded = set() # new head of fully replaced branch
529 discarded = set() # new head of fully replaced branch
529
530
530 localcandidate = set() # candidate heads known locally
531 localcandidate = set() # candidate heads known locally
531 unknownheads = set() # candidate heads unknown locally
532 unknownheads = set() # candidate heads unknown locally
532 for h in candidate_newhs:
533 for h in candidate_newhs:
533 if h in unfi:
534 if h in unfi:
534 localcandidate.add(h)
535 localcandidate.add(h)
535 else:
536 else:
536 if successorsmarkers.get(h) is not None:
537 if successorsmarkers.get(h) is not None:
537 msg = (
538 msg = (
538 b'checkheads: remote head unknown locally has'
539 b'checkheads: remote head unknown locally has'
539 b' local marker: %s\n'
540 b' local marker: %s\n'
540 )
541 )
541 repo.ui.debug(msg % hex(h))
542 repo.ui.debug(msg % hex(h))
542 unknownheads.add(h)
543 unknownheads.add(h)
543
544
544 # fast path the simple case
545 # fast path the simple case
545 if len(localcandidate) == 1:
546 if len(localcandidate) == 1:
546 return unknownheads | set(candidate_newhs), set()
547 return unknownheads | set(candidate_newhs), set()
547
548
549 obsrevs = obsolete.getrevs(unfi, b'obsolete')
550 futurenonobsolete = frozenset(futurecommon) - obsrevs
551
548 # actually process branch replacement
552 # actually process branch replacement
549 while localcandidate:
553 while localcandidate:
550 nh = localcandidate.pop()
554 nh = localcandidate.pop()
555 r = torev(nh)
551 current_branch = unfi[nh].branch()
556 current_branch = unfi[nh].branch()
552 # run this check early to skip the evaluation of the whole branch
557 # run this check early to skip the evaluation of the whole branch
553 if torev(nh) in futurecommon or ispublic(torev(nh)):
558 if ispublic(r) or r not in obsrevs:
554 newhs.add(nh)
559 newhs.add(nh)
555 continue
560 continue
556
561
557 # Get all revs/nodes on the branch exclusive to this head
562 # Get all revs/nodes on the branch exclusive to this head
558 # (already filtered heads are "ignored"))
563 # (already filtered heads are "ignored"))
559 branchrevs = unfi.revs(
564 branchrevs = unfi.revs(
560 b'only(%n, (%ln+%ln))', nh, localcandidate, newhs
565 b'only(%n, (%ln+%ln))', nh, localcandidate, newhs
561 )
566 )
562
567
563 branchnodes = []
568 branchnodes = []
564 for r in branchrevs:
569 for r in branchrevs:
565 c = unfi[r]
570 c = unfi[r]
566 if c.branch() == current_branch:
571 if c.branch() == current_branch:
567 branchnodes.append(c.node())
572 branchnodes.append(c.node())
568
573
569 # The branch won't be hidden on the remote if
574 # The branch won't be hidden on the remote if
570 # * any part of it is public,
575 # * any part of it is public,
571 # * any part of it is considered part of the result by previous logic,
576 # * any part of it is considered part of the result by previous logic,
572 # * if we have no markers to push to obsolete it.
577 # * if we have no markers to push to obsolete it.
573 if (
578 if (
574 any(ispublic(r) for r in branchrevs)
579 any(ispublic(r) for r in branchrevs)
575 or any(torev(n) in futurecommon for n in branchnodes)
580 or any(torev(n) in futurenonobsolete for n in branchnodes)
576 or any(not hasoutmarker(n) for n in branchnodes)
581 or any(not hasoutmarker(n) for n in branchnodes)
577 ):
582 ):
578 newhs.add(nh)
583 newhs.add(nh)
579 else:
584 else:
580 # note: there is a corner case if there is a merge in the branch.
585 # note: there is a corner case if there is a merge in the branch.
581 # we might end up with -more- heads. However, these heads are not
586 # we might end up with -more- heads. However, these heads are not
582 # "added" by the push, but more by the "removal" on the remote so I
587 # "added" by the push, but more by the "removal" on the remote so I
583 # think it is okay to ignore them,
588 # think it is okay to ignore them,
584 discarded.add(nh)
589 discarded.add(nh)
585 newhs |= unknownheads
590 newhs |= unknownheads
586 return newhs, discarded
591 return newhs, discarded
587
592
588
593
589 def pushingmarkerfor(obsstore, ispushed, node):
594 def pushingmarkerfor(obsstore, ispushed, node):
590 """true if some markers are to be pushed for node
595 """true if some markers are to be pushed for node
591
596
592 We cannot just look in to the pushed obsmarkers from the pushop because
597 We cannot just look in to the pushed obsmarkers from the pushop because
593 discovery might have filtered relevant markers. In addition listing all
598 discovery might have filtered relevant markers. In addition listing all
594 markers relevant to all changesets in the pushed set would be too expensive
599 markers relevant to all changesets in the pushed set would be too expensive
595 (O(len(repo)))
600 (O(len(repo)))
596
601
597 (note: there are caching opportunities in this function, but it would require
602 (note: there are caching opportunities in this function, but it would require
598 a two-dimensional stack.)
603 a two-dimensional stack.)
599 """
604 """
600 successorsmarkers = obsstore.successors
605 successorsmarkers = obsstore.successors
601 stack = [node]
606 stack = [node]
602 seen = set(stack)
607 seen = set(stack)
603 while stack:
608 while stack:
604 current = stack.pop()
609 current = stack.pop()
605 if ispushed(current):
610 if ispushed(current):
606 return True
611 return True
607 markers = successorsmarkers.get(current, ())
612 markers = successorsmarkers.get(current, ())
608 # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
613 # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
609 for m in markers:
614 for m in markers:
610 nexts = m[1] # successors
615 nexts = m[1] # successors
611 if not nexts: # this is a prune marker
616 if not nexts: # this is a prune marker
612 nexts = m[5] or () # parents
617 nexts = m[5] or () # parents
613 for n in nexts:
618 for n in nexts:
614 if n not in seen:
619 if n not in seen:
615 seen.add(n)
620 seen.add(n)
616 stack.append(n)
621 stack.append(n)
617 return False
622 return False
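The core of the logic ported from evolve above is that obsolete revisions no longer count as surviving content: a candidate head that is public or not obsolete is kept outright, and obsolete revisions are removed from the future-common set before deciding whether a branch is fully replaced. A minimal sketch of that filtering, separate from _postprocessobsolete() itself and with repo, futurecommon and candidate_revs as hypothetical inputs:

    # Sketch only: illustrates the obsolete-filtering step introduced above.
    from mercurial import obsolete

    def filter_obsolete_candidates(repo, futurecommon, candidate_revs):
        unfi = repo.unfiltered()
        # revision numbers currently considered obsolete in this repository
        obsrevs = obsolete.getrevs(unfi, b'obsolete')
        # "future common" content that will actually remain visible remotely
        futurenonobsolete = frozenset(futurecommon) - obsrevs
        # candidate heads that are not themselves obsolete survive the push
        surviving = {r for r in candidate_revs if r not in obsrevs}
        return futurenonobsolete, surviving

In _postprocessobsolete() this appears as the new obsrevs and futurenonobsolete sets and the "ispublic(r) or r not in obsrevs" fast-path check.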
@@ -1,44 +1,50 b''
1 # setup config and various utility to test new heads checks on push
1 # setup config and various utility to test new heads checks on push
2
2
3 cat >> $HGRCPATH <<EOF
3 cat >> $HGRCPATH <<EOF
4 [command-templates]
4 [command-templates]
5 # simpler log output
5 # simpler log output
6 log ="{node|short} ({phase}): {desc}\n"
6 log ="{node|short} ({phase}): {desc}\n"
7
7
8 [phases]
8 [phases]
9 # non publishing server
9 # non publishing server
10 publish=False
10 publish=False
11
11
12 [extensions]
12 [extensions]
13 # we need to strip some changeset for some test cases
13 # we need to strip some changeset for some test cases
14 strip=
14 strip=
15
15
16 [experimental]
16 [experimental]
17 # enable evolution
17 # enable evolution
18 evolution=all
18 evolution=all
19
19
20 [alias]
20 [alias]
21 # fix date used to create obsolete markers.
21 # fix date used to create obsolete markers.
22 debugobsolete=debugobsolete -d '0 0'
22 debugobsolete=debugobsolete -d '0 0'
23 EOF
23 EOF
24
24
25 mkcommit() {
25 mkcommit() {
26 echo "$1" > "$1"
26 echo "$1" > "$1"
27 hg add "$1"
27 hg add "$1"
28 hg ci -m "$1"
28 hg ci -m "$1"
29 }
29 }
30
30
31 getid() {
31 getid() {
32 hg log --hidden --template '{node}\n' --rev "$1"
32 hg log --hidden --template '{node}\n' --rev "$1"
33 }
33 }
34
34
35 setuprepos() {
35 setuprepos() {
36 echo creating basic server and client repo
36 echo creating basic server and client repo
37 hg init server
37 hg init server
38 cd server
38 cd server
39 mkcommit root
39 mkcommit root
40 hg phase --public .
40 hg phase --public .
41 mkcommit A0
41 mkcommit A0
42 cd ..
42 cd ..
43 hg clone server client
43 hg clone server client
44
45 if [ "$1" = "single-head" ]; then
46 echo >> "server/.hg/hgrc" "[experimental]"
47 echo >> "server/.hg/hgrc" "# enforce a single head per branch"
48 echo >> "server/.hg/hgrc" "single-head-per-branch = yes"
49 fi
44 }
50 }
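A test opts into the strict server by passing the new argument when it calls this helper, as every case above does:

    . $TESTDIR/testlib/push-checkheads-util.sh
    setuprepos single-head

Without the argument the helper behaves as before and no single-head enforcement is written to the server configuration.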