fix push of moved bookmark when creating new branch heads...
Sune Foldager
r17043:6f89c3f0 default
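
The change below reworks the bookmark check in checkheads() (discovery.py): instead of resolving each bookmark on the remote with remote[bm], the new code fetches the remote's bookmark table via listkeys('bookmarks') only when a branch actually gains heads, and runs the ancestor test entirely in the local repo, skipping any bookmark whose remote target is not known locally. A node that merely fast-forwards a remote bookmark is then subtracted from the new-head candidates, so pushing a moved bookmark no longer trips the "push creates new remote head" abort. A minimal sketch of that predicate, with repo and remote as stand-ins for the real repository and peer objects rather than the exact Mercurial API:

    def bookmarkedheads(repo, remote):
        # Collect local nodes that merely advance a bookmark the remote
        # already carries; checkheads() subtracts these from the set of
        # would-be new heads.
        heads = set()
        remotebookmarks = remote.listkeys('bookmarks')  # {name: hex node}
        for bm in repo._bookmarks:
            rnode = remotebookmarks.get(bm)
            # The remote target must be known locally: the ancestor test
            # then runs in the local repo, which also works for HTTP
            # peers whose revisions cannot be inspected directly.
            if rnode and rnode in repo:
                lctx, rctx = repo[bm], repo[rnode]
                if rctx == lctx.ancestor(rctx):   # remote side is an ancestor
                    heads.add(lctx.node())        # a plain fast-forward move
        return heads
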
@@ -1,264 +1,268 @@
1 1 # discovery.py - protocol changeset discovery functions
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, short
9 9 from i18n import _
10 10 import util, setdiscovery, treediscovery, phases
11 11
12 12 def findcommonincoming(repo, remote, heads=None, force=False):
13 13 """Return a tuple (common, anyincoming, heads) used to identify the common
14 14 subset of nodes between repo and remote.
15 15
16 16 "common" is a list of (at least) the heads of the common subset.
17 17 "anyincoming" is testable as a boolean indicating if any nodes are missing
18 18 locally. If remote does not support getbundle, this actually is a list of
19 19 roots of the nodes that would be incoming, to be supplied to
20 20 changegroupsubset. No code except for pull should be relying on this fact
21 21 any longer.
22 22 "heads" is either the supplied heads, or else the remote's heads.
23 23
24 24 If you pass heads and they are all known locally, the response lists just
25 25 these heads in "common" and in "heads".
26 26
27 27 Please use findcommonoutgoing to compute the set of outgoing nodes to give
28 28 extensions a good hook into outgoing.
29 29 """
30 30
31 31 if not remote.capable('getbundle'):
32 32 return treediscovery.findcommonincoming(repo, remote, heads, force)
33 33
34 34 if heads:
35 35 allknown = True
36 36 nm = repo.changelog.nodemap
37 37 for h in heads:
38 38 if nm.get(h) is None:
39 39 allknown = False
40 40 break
41 41 if allknown:
42 42 return (heads, False, heads)
43 43
44 44 res = setdiscovery.findcommonheads(repo.ui, repo, remote,
45 45 abortwhenunrelated=not force)
46 46 common, anyinc, srvheads = res
47 47 return (list(common), anyinc, heads or list(srvheads))
48 48
49 49 class outgoing(object):
50 50 '''Represents the set of nodes present in a local repo but not in a
51 51 (possibly) remote one.
52 52
53 53 Members:
54 54
55 55 missing is a list of all nodes present in local but not in remote.
56 56 common is a list of all nodes shared between the two repos.
57 57 excluded is the list of missing changesets that shouldn't be sent remotely.
58 58 missingheads is the list of heads of missing.
59 59 commonheads is the list of heads of common.
60 60
61 61 The sets are computed on demand from the heads, unless provided upfront
62 62 by discovery.'''
63 63
64 64 def __init__(self, revlog, commonheads, missingheads):
65 65 self.commonheads = commonheads
66 66 self.missingheads = missingheads
67 67 self._revlog = revlog
68 68 self._common = None
69 69 self._missing = None
70 70 self.excluded = []
71 71
72 72 def _computecommonmissing(self):
73 73 sets = self._revlog.findcommonmissing(self.commonheads,
74 74 self.missingheads)
75 75 self._common, self._missing = sets
76 76
77 77 @util.propertycache
78 78 def common(self):
79 79 if self._common is None:
80 80 self._computecommonmissing()
81 81 return self._common
82 82
83 83 @util.propertycache
84 84 def missing(self):
85 85 if self._missing is None:
86 86 self._computecommonmissing()
87 87 return self._missing
88 88
89 89 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
90 90 commoninc=None, portable=False):
91 91 '''Return an outgoing instance to identify the nodes present in repo but
92 92 not in other.
93 93
94 94 If onlyheads is given, only nodes ancestral to nodes in onlyheads
95 95 (inclusive) are included. If you already know the local repo's heads,
96 96 passing them in onlyheads is faster than letting them be recomputed here.
97 97
98 98 If commoninc is given, it must be the result of a prior call to
99 99 findcommonincoming(repo, other, force) to avoid recomputing it here.
100 100
101 101 If portable is given, compute more conservative common and missingheads,
102 102 to make bundles created from the instance more portable.'''
103 103 # declare an empty outgoing object to be filled later
104 104 og = outgoing(repo.changelog, None, None)
105 105
106 106 # get common set if not provided
107 107 if commoninc is None:
108 108 commoninc = findcommonincoming(repo, other, force=force)
109 109 og.commonheads, _any, _hds = commoninc
110 110
111 111 # compute outgoing
112 112 if not repo._phasecache.phaseroots[phases.secret]:
113 113 og.missingheads = onlyheads or repo.heads()
114 114 elif onlyheads is None:
115 115 # use visible heads as it should be cached
116 116 og.missingheads = phases.visibleheads(repo)
117 117 og.excluded = [ctx.node() for ctx in repo.set('secret()')]
118 118 else:
119 119 # compute common, missing and exclude secret stuff
120 120 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
121 121 og._common, allmissing = sets
122 122 og._missing = missing = []
123 123 og.excluded = excluded = []
124 124 for node in allmissing:
125 125 if repo[node].phase() >= phases.secret:
126 126 excluded.append(node)
127 127 else:
128 128 missing.append(node)
129 129 if excluded:
130 130 # update missing heads
131 131 missingheads = phases.newheads(repo, onlyheads, excluded)
132 132 else:
133 133 missingheads = onlyheads
134 134 og.missingheads = missingheads
135 135
136 136 if portable:
137 137 # recompute common and missingheads as if -r<rev> had been given for
138 138 # each head of missing, and --base <rev> for each head of the proper
139 139 # ancestors of missing
140 140 og._computecommonmissing()
141 141 cl = repo.changelog
142 142 missingrevs = set(cl.rev(n) for n in og._missing)
143 143 og._common = set(cl.ancestors(missingrevs)) - missingrevs
144 144 commonheads = set(og.commonheads)
145 145 og.missingheads = [h for h in og.missingheads if h not in commonheads]
146 146
147 147 return og
148 148
149 149 def checkheads(repo, remote, outgoing, remoteheads, newbranch=False, inc=False):
150 150 """Check that a push won't add any outgoing head
151 151
152 152 raise Abort error and display ui message as needed.
153 153 """
154 154 if remoteheads == [nullid]:
155 155 # remote is empty, nothing to check.
156 156 return
157 157
158 158 cl = repo.changelog
159 159 if remote.capable('branchmap'):
160 160 # Check for each named branch if we're creating new remote heads.
161 161 # To be a remote head after push, node must be either:
162 162 # - unknown locally
163 163 # - a local outgoing head descended from update
164 164 # - a remote head that's known locally and not
165 165 # ancestral to an outgoing head
166 166
167 167 # 1. Create set of branches involved in the push.
168 168 branches = set(repo[n].branch() for n in outgoing.missing)
169 169
170 170 # 2. Check for new branches on the remote.
171 171 if remote.local():
172 172 remotemap = phases.visiblebranchmap(remote)
173 173 else:
174 174 remotemap = remote.branchmap()
175 175 newbranches = branches - set(remotemap)
176 176 if newbranches and not newbranch: # new branch requires --new-branch
177 177 branchnames = ', '.join(sorted(newbranches))
178 178 raise util.Abort(_("push creates new remote branches: %s!")
179 179 % branchnames,
180 180 hint=_("use 'hg push --new-branch' to create"
181 181 " new remote branches"))
182 182 branches.difference_update(newbranches)
183 183
184 184 # 3. Construct the initial oldmap and newmap dicts.
185 185 # They contain information about the remote heads before and
186 186 # after the push, respectively.
187 187 # Heads not found locally are not included in either dict,
188 188 # since they won't be affected by the push.
189 189 # unsynced contains all branches with incoming changesets.
190 190 oldmap = {}
191 191 newmap = {}
192 192 unsynced = set()
193 193 for branch in branches:
194 194 remotebrheads = remotemap[branch]
195 195 prunedbrheads = [h for h in remotebrheads if h in cl.nodemap]
196 196 oldmap[branch] = prunedbrheads
197 197 newmap[branch] = list(prunedbrheads)
198 198 if len(remotebrheads) > len(prunedbrheads):
199 199 unsynced.add(branch)
200 200
201 201 # 4. Update newmap with outgoing changes.
202 202 # This will possibly add new heads and remove existing ones.
203 203 ctxgen = (repo[n] for n in outgoing.missing)
204 204 repo._updatebranchcache(newmap, ctxgen)
205 205
206 206 else:
207 207 # 1-4b. old servers: Check for new topological heads.
208 208 # Construct {old,new}map with branch = None (topological branch).
209 209 # (code based on _updatebranchcache)
210 210 oldheadrevs = set(cl.rev(h) for h in remoteheads if h in cl.nodemap)
211 211 missingrevs = [cl.rev(node) for node in outgoing.missing]
212 212 newheadrevs = oldheadrevs.union(missingrevs)
213 213 if len(newheadrevs) > 1:
214 214 for latest in sorted(missingrevs, reverse=True):
215 215 if latest not in newheadrevs:
216 216 continue
217 217 reachable = cl.ancestors([latest], min(newheadrevs))
218 218 newheadrevs.difference_update(reachable)
219 219 branches = set([None])
220 220 newmap = {None: [cl.node(rev) for rev in newheadrevs]}
221 221 oldmap = {None: [cl.node(rev) for rev in oldheadrevs]}
222 222 unsynced = inc and branches or set()
223 223
224 224 # 5. Check for new heads.
225 225 # If there are more heads after the push than before, a suitable
226 226 # error message, depending on unsynced status, is displayed.
227 227 error = None
228 remotebookmarks = remote.listkeys('bookmarks')
229 228 localbookmarks = repo._bookmarks
230 229
231 230 for branch in branches:
232 231 newhs = set(newmap[branch])
233 232 oldhs = set(oldmap[branch])
234 233 dhs = None
235 234 if len(newhs) > len(oldhs):
236 235 # strip updates to existing remote heads from the new heads list
237 bookmarkedheads = set([repo[bm].node() for bm in localbookmarks
238 if bm in remotebookmarks and
239 remote[bm] == repo[bm].ancestor(remote[bm])])
236 remotebookmarks = remote.listkeys('bookmarks')
237 bookmarkedheads = set()
238 for bm in localbookmarks:
239 rnode = remotebookmarks.get(bm)
240 if rnode and rnode in repo:
241 lctx, rctx = repo[bm], repo[rnode]
242 if rctx == lctx.ancestor(rctx):
243 bookmarkedheads.add(lctx.node())
240 244 dhs = list(newhs - bookmarkedheads - oldhs)
241 245 if dhs:
242 246 if error is None:
243 247 if branch not in ('default', None):
244 248 error = _("push creates new remote head %s "
245 249 "on branch '%s'!") % (short(dhs[0]), branch)
246 250 else:
247 251 error = _("push creates new remote head %s!"
248 252 ) % short(dhs[0])
249 253 if branch in unsynced:
250 254 hint = _("you should pull and merge or "
251 255 "use push -f to force")
252 256 else:
253 257 hint = _("did you forget to merge? "
254 258 "use push -f to force")
255 259 if branch is not None:
256 260 repo.ui.note(_("new remote heads on branch '%s'\n") % branch)
257 261 for h in dhs:
258 262 repo.ui.note(_("new remote head %s\n") % short(h))
259 263 if error:
260 264 raise util.Abort(error, hint=hint)
261 265
262 266 # 6. Check for unsynced changes on involved branches.
263 267 if unsynced:
264 268 repo.ui.warn(_("note: unsynced remote changes!\n"))
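
The ancestor test drives the whole check: a local node only fast-forwards a remote bookmark when the bookmark's current target is among the node's ancestors, whereas a sibling of that target is a genuinely new head. A toy illustration in pure Python (no Mercurial imports; the node names are invented):

    # hypothetical mini-DAG: node -> single parent
    parents = {
        'base': None,
        'r_bm': 'base',    # current remote bookmark target
        'local': 'r_bm',   # local bookmark moved forward: a fast-forward
        'other': 'base',   # sibling of the target: a new head
    }

    def is_ancestor(anc, node):
        # Walk the parent chain; real repos use revlog ancestor
        # computation, but the criterion is the same.
        while node is not None:
            if node == anc:
                return True
            node = parents[node]
        return False

    assert is_ancestor('r_bm', 'local')       # not counted as a new head
    assert not is_ancestor('r_bm', 'other')   # triggers the new-head abort
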
@@ -1,254 +1,264 @@
1 1 $ "$TESTDIR/hghave" serve || exit 80
2 2
3 3 initialize
4 4
5 5 $ hg init a
6 6 $ cd a
7 7 $ echo 'test' > test
8 8 $ hg commit -Am'test'
9 9 adding test
10 10
11 11 set bookmarks
12 12
13 13 $ hg bookmark X
14 14 $ hg bookmark Y
15 15 $ hg bookmark Z
16 16
17 17 import bookmark by name
18 18
19 19 $ hg init ../b
20 20 $ cd ../b
21 21 $ hg book Y
22 22 $ hg book
23 23 * Y -1:000000000000
24 24 $ hg pull ../a
25 25 pulling from ../a
26 26 requesting all changes
27 27 adding changesets
28 28 adding manifests
29 29 adding file changes
30 30 added 1 changesets with 1 changes to 1 files
31 31 updating bookmark Y
32 32 adding remote bookmark X
33 33 adding remote bookmark Z
34 34 (run 'hg update' to get a working copy)
35 35 $ hg bookmarks
36 36 X 0:4e3505fd9583
37 37 Y 0:4e3505fd9583
38 38 Z 0:4e3505fd9583
39 39 $ hg debugpushkey ../a namespaces
40 40 bookmarks
41 41 phases
42 42 namespaces
43 43 $ hg debugpushkey ../a bookmarks
44 44 Y 4e3505fd95835d721066b76e75dbb8cc554d7f77
45 45 X 4e3505fd95835d721066b76e75dbb8cc554d7f77
46 46 Z 4e3505fd95835d721066b76e75dbb8cc554d7f77
47 47 $ hg pull -B X ../a
48 48 pulling from ../a
49 49 no changes found
50 50 importing bookmark X
51 51 $ hg bookmark
52 52 X 0:4e3505fd9583
53 53 Y 0:4e3505fd9583
54 54 Z 0:4e3505fd9583
55 55
56 56 export bookmark by name
57 57
58 58 $ hg bookmark W
59 59 $ hg bookmark foo
60 60 $ hg bookmark foobar
61 61 $ hg push -B W ../a
62 62 pushing to ../a
63 63 searching for changes
64 64 no changes found
65 65 exporting bookmark W
66 66 [1]
67 67 $ hg -R ../a bookmarks
68 68 W -1:000000000000
69 69 X 0:4e3505fd9583
70 70 Y 0:4e3505fd9583
71 71 * Z 0:4e3505fd9583
72 72
73 73 delete a remote bookmark
74 74
75 75 $ hg book -d W
76 76 $ hg push -B W ../a
77 77 pushing to ../a
78 78 searching for changes
79 79 no changes found
80 80 deleting remote bookmark W
81 81 [1]
82 82
83 83 push/pull name that doesn't exist
84 84
85 85 $ hg push -B badname ../a
86 86 pushing to ../a
87 87 searching for changes
88 88 no changes found
89 89 bookmark badname does not exist on the local or remote repository!
90 90 [2]
91 91 $ hg pull -B anotherbadname ../a
92 92 pulling from ../a
93 93 abort: remote bookmark anotherbadname not found!
94 94 [255]
95 95
96 96 divergent bookmarks
97 97
98 98 $ cd ../a
99 99 $ echo c1 > f1
100 100 $ hg ci -Am1
101 101 adding f1
102 102 $ hg book -f X
103 103 $ hg book
104 104 * X 1:0d2164f0ce0d
105 105 Y 0:4e3505fd9583
106 106 Z 1:0d2164f0ce0d
107 107
108 108 $ cd ../b
109 109 $ hg up
110 110 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
111 111 updating bookmark foobar
112 112 $ echo c2 > f2
113 113 $ hg ci -Am2
114 114 adding f2
115 115 $ hg book -f X
116 116 $ hg book
117 117 * X 1:9b140be10808
118 118 Y 0:4e3505fd9583
119 119 Z 0:4e3505fd9583
120 120 foo -1:000000000000
121 121 foobar 1:9b140be10808
122 122
123 123 $ hg pull --config paths.foo=../a foo
124 124 pulling from $TESTTMP/a (glob)
125 125 searching for changes
126 126 adding changesets
127 127 adding manifests
128 128 adding file changes
129 129 added 1 changesets with 1 changes to 1 files (+1 heads)
130 130 divergent bookmark X stored as X@foo
131 131 updating bookmark Z
132 132 (run 'hg heads' to see heads, 'hg merge' to merge)
133 133 $ hg book
134 134 * X 1:9b140be10808
135 135 X@foo 2:0d2164f0ce0d
136 136 Y 0:4e3505fd9583
137 137 Z 2:0d2164f0ce0d
138 138 foo -1:000000000000
139 139 foobar 1:9b140be10808
140 140 $ hg push -f ../a
141 141 pushing to ../a
142 142 searching for changes
143 143 adding changesets
144 144 adding manifests
145 145 adding file changes
146 146 added 1 changesets with 1 changes to 1 files (+1 heads)
147 147 $ hg -R ../a book
148 148 * X 1:0d2164f0ce0d
149 149 Y 0:4e3505fd9583
150 150 Z 1:0d2164f0ce0d
151 151
152 152 update a remote bookmark from a non-head to a head
153 153
154 154 $ hg up -q Y
155 155 $ echo c3 > f2
156 156 $ hg ci -Am3
157 157 adding f2
158 158 created new head
159 159 $ hg push ../a
160 160 pushing to ../a
161 161 searching for changes
162 162 adding changesets
163 163 adding manifests
164 164 adding file changes
165 165 added 1 changesets with 1 changes to 1 files (+1 heads)
166 166 updating bookmark Y
167 167 $ hg -R ../a book
168 168 * X 1:0d2164f0ce0d
169 169 Y 3:f6fc62dde3c0
170 170 Z 1:0d2164f0ce0d
171 171
172 172 diverging a remote bookmark fails
173 173
174 174 $ hg up -q 4e3505fd9583
175 175 $ echo c4 > f2
176 176 $ hg ci -Am4
177 177 adding f2
178 178 created new head
179 179 $ hg book -f Y
180 $ hg push ../a
181 pushing to ../a
180
181 $ cat <<EOF > ../a/.hg/hgrc
182 > [web]
183 > push_ssl = false
184 > allow_push = *
185 > EOF
186
187 $ hg -R ../a serve -p $HGPORT2 -d --pid-file=../hg2.pid
188 $ cat ../hg2.pid >> $DAEMON_PIDS
189
190 $ hg push http://localhost:$HGPORT2/
191 pushing to http://localhost:$HGPORT2/
182 192 searching for changes
183 193 abort: push creates new remote head 4efff6d98829!
184 194 (did you forget to merge? use push -f to force)
185 195 [255]
186 196 $ hg -R ../a book
187 197 * X 1:0d2164f0ce0d
188 198 Y 3:f6fc62dde3c0
189 199 Z 1:0d2164f0ce0d
190 200
191 201 hgweb
192 202
193 203 $ cat <<EOF > .hg/hgrc
194 204 > [web]
195 205 > push_ssl = false
196 206 > allow_push = *
197 207 > EOF
198 208
199 209 $ hg serve -p $HGPORT -d --pid-file=../hg.pid -E errors.log
200 210 $ cat ../hg.pid >> $DAEMON_PIDS
201 211 $ cd ../a
202 212
203 213 $ hg debugpushkey http://localhost:$HGPORT/ namespaces
204 214 bookmarks
205 215 phases
206 216 namespaces
207 217 $ hg debugpushkey http://localhost:$HGPORT/ bookmarks
208 218 Y 4efff6d98829d9c824c621afd6e3f01865f5439f
209 219 foobar 9b140be1080824d768c5a4691a564088eede71f9
210 220 Z 0d2164f0ce0d8f1d6f94351eba04b794909be66c
211 221 foo 0000000000000000000000000000000000000000
212 222 X 9b140be1080824d768c5a4691a564088eede71f9
213 223 $ hg out -B http://localhost:$HGPORT/
214 224 comparing with http://localhost:$HGPORT/
215 225 searching for changed bookmarks
216 226 no changed bookmarks found
217 227 [1]
218 228 $ hg push -B Z http://localhost:$HGPORT/
219 229 pushing to http://localhost:$HGPORT/
220 230 searching for changes
221 231 no changes found
222 232 exporting bookmark Z
223 233 [1]
224 234 $ hg book -d Z
225 235 $ hg in -B http://localhost:$HGPORT/
226 236 comparing with http://localhost:$HGPORT/
227 237 searching for changed bookmarks
228 238 Z 0d2164f0ce0d
229 239 foo 000000000000
230 240 foobar 9b140be10808
231 241 $ hg pull -B Z http://localhost:$HGPORT/
232 242 pulling from http://localhost:$HGPORT/
233 243 no changes found
234 244 adding remote bookmark foobar
235 245 adding remote bookmark Z
236 246 adding remote bookmark foo
237 247 divergent bookmark X stored as X@1
238 248 importing bookmark Z
239 249 $ hg clone http://localhost:$HGPORT/ cloned-bookmarks
240 250 requesting all changes
241 251 adding changesets
242 252 adding manifests
243 253 adding file changes
244 254 added 5 changesets with 5 changes to 3 files (+3 heads)
245 255 updating to branch default
246 256 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
247 257 $ hg -R cloned-bookmarks bookmarks
248 258 X 1:9b140be10808
249 259 Y 4:4efff6d98829
250 260 Z 2:0d2164f0ce0d
251 261 foo -1:000000000000
252 262 foobar 1:9b140be10808
253 263
254 264 $ cd ..
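
The test change in the second hunk (the bookmark push/pull test) mirrors the code fix: the "diverging a remote bookmark fails" case now serves repository a over HTTP (push_ssl = false, allow_push = *) on $HGPORT2 and pushes there instead of pushing to ../a directly. The head check therefore runs against a peer whose bookmarks are only reachable through listkeys, which is exactly the path the reworked checkheads() exercises, and it still aborts with "push creates new remote head 4efff6d98829".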