bookmarks: remove changectx() method from bmstore (API)...
Augie Fackler
r43248:e3bb2a58 default
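The change is mechanical: bmstore stops manufacturing changectx objects, and callers look the node up in the store and index the repository themselves. A minimal before/after sketch of the caller-side pattern (illustrative code, not part of the commit; `repo` stands for any localrepository and `mark` for any bookmark name):

    # before this change: the store resolved a name to a changectx
    ctx = repo._bookmarks.changectx(mark)
    prevhex = ctx.hex()

    # after this change: the store is a plain name -> binary-node map,
    # so callers build the changectx (or hex string) themselves
    from mercurial import node as nodemod
    ctx = repo[repo._bookmarks[mark]]
    prevhex = nodemod.hex(repo._bookmarks[mark])

The first hunk below is a caller in the infinitepush extension's bundle-parts code; the second is mercurial/bookmarks.py itself, where the method is deleted.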
@@ -1,115 +1,116 @@
 # Copyright 2017 Facebook, Inc.
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 from mercurial.i18n import _

 from mercurial import (
     bundle2,
     changegroup,
     error,
     extensions,
+    node as nodemod,
     revsetlang,
     util,
 )

 from . import common

 isremotebooksenabled = common.isremotebooksenabled

 scratchbranchparttype = 'b2x:infinitepush'

 def getscratchbranchparts(repo, peer, outgoing, ui, bookmark):
     if not outgoing.missing:
         raise error.Abort(_('no commits to push'))

     if scratchbranchparttype not in bundle2.bundle2caps(peer):
         raise error.Abort(_('no server support for %r') % scratchbranchparttype)

     _validaterevset(repo, revsetlang.formatspec('%ln', outgoing.missing),
                     bookmark)

     supportedversions = changegroup.supportedoutgoingversions(repo)
     # Explicitly avoid using '01' changegroup version in infinitepush to
     # support general delta
     supportedversions.discard('01')
     cgversion = min(supportedversions)
     _handlelfs(repo, outgoing.missing)
     cg = changegroup.makestream(repo, outgoing, cgversion, 'push')

     params = {}
     params['cgversion'] = cgversion
     if bookmark:
         params['bookmark'] = bookmark
         # 'prevbooknode' is necessary for pushkey reply part
         params['bookprevnode'] = ''
         bookmarks = repo._bookmarks
         if bookmark in bookmarks:
-            params['bookprevnode'] = bookmarks.changectx(bookmark).hex()
+            params['bookprevnode'] = nodemod.hex(bookmarks[bookmark])

     # Do not send pushback bundle2 part with bookmarks if remotenames extension
     # is enabled. It will be handled manually in `_push()`
     if not isremotebooksenabled(ui):
         params['pushbackbookmarks'] = '1'

     parts = []

     # .upper() marks this as a mandatory part: server will abort if there's no
     # handler
     parts.append(bundle2.bundlepart(
         scratchbranchparttype.upper(),
         advisoryparams=params.iteritems(),
         data=cg))

     return parts

 def _validaterevset(repo, revset, bookmark):
     """Abort if the revs to be pushed aren't valid for a scratch branch."""
     if not repo.revs(revset):
         raise error.Abort(_('nothing to push'))
     if bookmark:
         # Allow bundle with many heads only if no bookmark is specified
         heads = repo.revs('heads(%r)', revset)
         if len(heads) > 1:
             raise error.Abort(
                 _('cannot push more than one head to a scratch branch'))

 def _handlelfs(repo, missing):
     '''Special case if lfs is enabled

     If lfs is enabled then we need to call prepush hook
     to make sure large files are uploaded to lfs
     '''
     try:
         lfsmod = extensions.find('lfs')
         lfsmod.wrapper.uploadblobsfromrevs(repo, missing)
     except KeyError:
         # Ignore if lfs extension is not enabled
         return

 class copiedpart(object):
     """a copy of unbundlepart content that can be consumed later"""

     def __init__(self, part):
         # copy "public properties"
         self.type = part.type
         self.id = part.id
         self.mandatory = part.mandatory
         self.mandatoryparams = part.mandatoryparams
         self.advisoryparams = part.advisoryparams
         self.params = part.params
         self.mandatorykeys = part.mandatorykeys
         # copy the buffer
         self._io = util.stringio(part.read())

     def consume(self):
         return

     def read(self, size=None):
         if size is None:
             return self._io.read()
         else:
             return self._io.read(size)
@@ -1,959 +1,955 @@
 # Mercurial bookmark support code
 #
 # Copyright 2008 David Soria Parra <dsp@php.net>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import errno
 import struct

 from .i18n import _
 from .node import (
     bin,
     hex,
     short,
     wdirid,
 )
 from . import (
     encoding,
     error,
     obsutil,
     pycompat,
     scmutil,
     txnutil,
     util,
 )

 # label constants
 # until 3.5, bookmarks.current was the advertised name, not
 # bookmarks.active, so we must use both to avoid breaking old
 # custom styles
 activebookmarklabel = 'bookmarks.active bookmarks.current'

 BOOKMARKS_IN_STORE_REQUIREMENT = 'bookmarksinstore'

 def bookmarksinstore(repo):
     return BOOKMARKS_IN_STORE_REQUIREMENT in repo.requirements

 def bookmarksvfs(repo):
     return repo.svfs if bookmarksinstore(repo) else repo.vfs

 def _getbkfile(repo):
     """Hook so that extensions that mess with the store can hook bm storage.

     For core, this just handles wether we should see pending
     bookmarks or the committed ones. Other extensions (like share)
     may need to tweak this behavior further.
     """
     fp, pending = txnutil.trypending(repo.root, bookmarksvfs(repo), 'bookmarks')
     return fp

 class bmstore(object):
     r"""Storage for bookmarks.

     This object should do all bookmark-related reads and writes, so
     that it's fairly simple to replace the storage underlying
     bookmarks without having to clone the logic surrounding
     bookmarks. This type also should manage the active bookmark, if
     any.

     This particular bmstore implementation stores bookmarks as
     {hash}\s{name}\n (the same format as localtags) in
     .hg/bookmarks. The mapping is stored as {name: nodeid}.
     """

     def __init__(self, repo):
         self._repo = repo
         self._refmap = refmap = {} # refspec: node
         self._nodemap = nodemap = {} # node: sorted([refspec, ...])
         self._clean = True
         self._aclean = True
         nm = repo.changelog.nodemap
         tonode = bin # force local lookup
         try:
             with _getbkfile(repo) as bkfile:
                 for line in bkfile:
                     line = line.strip()
                     if not line:
                         continue
                     try:
                         sha, refspec = line.split(' ', 1)
                         node = tonode(sha)
                         if node in nm:
                             refspec = encoding.tolocal(refspec)
                             refmap[refspec] = node
                             nrefs = nodemap.get(node)
                             if nrefs is None:
                                 nodemap[node] = [refspec]
                             else:
                                 nrefs.append(refspec)
                                 if nrefs[-2] > refspec:
                                     # bookmarks weren't sorted before 4.5
                                     nrefs.sort()
                     except (TypeError, ValueError):
                         # TypeError:
                         # - bin(...)
                         # ValueError:
                         # - node in nm, for non-20-bytes entry
                         # - split(...), for string without ' '
                         bookmarkspath = '.hg/bookmarks'
                         if bookmarksinstore(repo):
                             bookmarkspath = '.hg/store/bookmarks'
                         repo.ui.warn(_('malformed line in %s: %r\n')
                                      % (bookmarkspath, pycompat.bytestr(line)))
         except IOError as inst:
             if inst.errno != errno.ENOENT:
                 raise
         self._active = _readactive(repo, self)

     @property
     def active(self):
         return self._active

     @active.setter
     def active(self, mark):
         if mark is not None and mark not in self._refmap:
             raise AssertionError('bookmark %s does not exist!' % mark)

         self._active = mark
         self._aclean = False

     def __len__(self):
         return len(self._refmap)

     def __iter__(self):
         return iter(self._refmap)

     def iteritems(self):
         return self._refmap.iteritems()

     def items(self):
         return self._refmap.items()

     # TODO: maybe rename to allnames()?
     def keys(self):
         return self._refmap.keys()

     # TODO: maybe rename to allnodes()? but nodes would have to be deduplicated
     # could be self._nodemap.keys()
     def values(self):
         return self._refmap.values()

     def __contains__(self, mark):
         return mark in self._refmap

     def __getitem__(self, mark):
         return self._refmap[mark]

     def get(self, mark, default=None):
         return self._refmap.get(mark, default)

     def _set(self, mark, node):
         self._clean = False
         if mark in self._refmap:
             self._del(mark)
         self._refmap[mark] = node
         nrefs = self._nodemap.get(node)
         if nrefs is None:
             self._nodemap[node] = [mark]
         else:
             nrefs.append(mark)
             nrefs.sort()

     def _del(self, mark):
         self._clean = False
         node = self._refmap.pop(mark)
         nrefs = self._nodemap[node]
         if len(nrefs) == 1:
             assert nrefs[0] == mark
             del self._nodemap[node]
         else:
             nrefs.remove(mark)

     def names(self, node):
         """Return a sorted list of bookmarks pointing to the specified node"""
         return self._nodemap.get(node, [])

-    def changectx(self, mark):
-        node = self._refmap[mark]
-        return self._repo[node]
-
     def applychanges(self, repo, tr, changes):
         """Apply a list of changes to bookmarks
         """
         bmchanges = tr.changes.get('bookmarks')
         for name, node in changes:
             old = self._refmap.get(name)
             if node is None:
                 self._del(name)
             else:
                 self._set(name, node)
             if bmchanges is not None:
                 # if a previous value exist preserve the "initial" value
                 previous = bmchanges.get(name)
                 if previous is not None:
                     old = previous[0]
                 bmchanges[name] = (old, node)
         self._recordchange(tr)

     def _recordchange(self, tr):
         """record that bookmarks have been changed in a transaction

         The transaction is then responsible for updating the file content."""
         location = '' if bookmarksinstore(self._repo) else 'plain'
         tr.addfilegenerator('bookmarks', ('bookmarks',), self._write,
                             location=location)
         tr.hookargs['bookmark_moved'] = '1'

     def _writerepo(self, repo):
         """Factored out for extensibility"""
         rbm = repo._bookmarks
         if rbm.active not in self._refmap:
             rbm.active = None
             rbm._writeactive()

         if bookmarksinstore(repo):
             vfs = repo.svfs
             lock = repo.lock()
         else:
             vfs = repo.vfs
             lock = repo.wlock()
         with lock:
             with vfs('bookmarks', 'w', atomictemp=True, checkambig=True) as f:
                 self._write(f)

     def _writeactive(self):
         if self._aclean:
             return
         with self._repo.wlock():
             if self._active is not None:
                 with self._repo.vfs('bookmarks.current', 'w', atomictemp=True,
                                     checkambig=True) as f:
                     f.write(encoding.fromlocal(self._active))
             else:
                 self._repo.vfs.tryunlink('bookmarks.current')
         self._aclean = True

     def _write(self, fp):
         for name, node in sorted(self._refmap.iteritems()):
             fp.write("%s %s\n" % (hex(node), encoding.fromlocal(name)))
         self._clean = True
         self._repo.invalidatevolatilesets()

     def expandname(self, bname):
         if bname == '.':
             if self.active:
                 return self.active
             else:
                 raise error.RepoLookupError(_("no active bookmark"))
         return bname

     def checkconflict(self, mark, force=False, target=None):
         """check repo for a potential clash of mark with an existing bookmark,
         branch, or hash

         If target is supplied, then check that we are moving the bookmark
         forward.

         If force is supplied, then forcibly move the bookmark to a new commit
         regardless if it is a move forward.

         If divergent bookmark are to be deleted, they will be returned as list.
         """
         cur = self._repo['.'].node()
         if mark in self._refmap and not force:
             if target:
                 if self._refmap[mark] == target and target == cur:
                     # re-activating a bookmark
                     return []
                 rev = self._repo[target].rev()
                 anc = self._repo.changelog.ancestors([rev])
-                bmctx = self.changectx(mark)
+                bmctx = self._repo[self[mark]]
                 divs = [self._refmap[b] for b in self._refmap
                         if b.split('@', 1)[0] == mark.split('@', 1)[0]]

                 # allow resolving a single divergent bookmark even if moving
                 # the bookmark across branches when a revision is specified
                 # that contains a divergent bookmark
                 if bmctx.rev() not in anc and target in divs:
                     return divergent2delete(self._repo, [target], mark)

                 deletefrom = [b for b in divs
                               if self._repo[b].rev() in anc or b == target]
                 delbms = divergent2delete(self._repo, deletefrom, mark)
                 if validdest(self._repo, bmctx, self._repo[target]):
                     self._repo.ui.status(
                         _("moving bookmark '%s' forward from %s\n") %
                         (mark, short(bmctx.node())))
                     return delbms
             raise error.Abort(_("bookmark '%s' already exists "
                                 "(use -f to force)") % mark)
         if ((mark in self._repo.branchmap() or
              mark == self._repo.dirstate.branch()) and not force):
             raise error.Abort(
                 _("a bookmark cannot have the name of an existing branch"))
         if len(mark) > 3 and not force:
             try:
                 shadowhash = scmutil.isrevsymbol(self._repo, mark)
             except error.LookupError: # ambiguous identifier
                 shadowhash = False
             if shadowhash:
                 self._repo.ui.warn(
                     _("bookmark %s matches a changeset hash\n"
                       "(did you leave a -r out of an 'hg bookmark' "
                       "command?)\n")
                     % mark)
         return []

 def _readactive(repo, marks):
     """
     Get the active bookmark. We can have an active bookmark that updates
     itself as we commit. This function returns the name of that bookmark.
     It is stored in .hg/bookmarks.current
     """
     # No readline() in osutil.posixfile, reading everything is
     # cheap.
     content = repo.vfs.tryread('bookmarks.current')
     mark = encoding.tolocal((content.splitlines() or [''])[0])
     if mark == '' or mark not in marks:
         mark = None
     return mark

 def activate(repo, mark):
     """
     Set the given bookmark to be 'active', meaning that this bookmark will
     follow new commits that are made.
     The name is recorded in .hg/bookmarks.current
     """
     repo._bookmarks.active = mark
     repo._bookmarks._writeactive()

 def deactivate(repo):
     """
     Unset the active bookmark in this repository.
     """
     repo._bookmarks.active = None
     repo._bookmarks._writeactive()

 def isactivewdirparent(repo):
     """
     Tell whether the 'active' bookmark (the one that follows new commits)
     points to one of the parents of the current working directory (wdir).

     While this is normally the case, it can on occasion be false; for example,
     immediately after a pull, the active bookmark can be moved to point
     to a place different than the wdir. This is solved by running `hg update`.
     """
     mark = repo._activebookmark
     marks = repo._bookmarks
     parents = [p.node() for p in repo[None].parents()]
     return (mark in marks and marks[mark] in parents)

 def divergent2delete(repo, deletefrom, bm):
     """find divergent versions of bm on nodes in deletefrom.

     the list of bookmark to delete."""
     todelete = []
     marks = repo._bookmarks
     divergent = [b for b in marks if b.split('@', 1)[0] == bm.split('@', 1)[0]]
     for mark in divergent:
         if mark == '@' or '@' not in mark:
             # can't be divergent by definition
             continue
         if mark and marks[mark] in deletefrom:
             if mark != bm:
                 todelete.append(mark)
     return todelete

 def headsforactive(repo):
     """Given a repo with an active bookmark, return divergent bookmark nodes.

     Args:
       repo: A repository with an active bookmark.

     Returns:
       A list of binary node ids that is the full list of other
       revisions with bookmarks divergent from the active bookmark. If
       there were no divergent bookmarks, then this list will contain
       only one entry.
     """
     if not repo._activebookmark:
         raise ValueError(
             'headsforactive() only makes sense with an active bookmark')
     name = repo._activebookmark.split('@', 1)[0]
     heads = []
     for mark, n in repo._bookmarks.iteritems():
         if mark.split('@', 1)[0] == name:
             heads.append(n)
     return heads

 def calculateupdate(ui, repo):
     '''Return a tuple (activemark, movemarkfrom) indicating the active bookmark
     and where to move the active bookmark from, if needed.'''
     checkout, movemarkfrom = None, None
     activemark = repo._activebookmark
     if isactivewdirparent(repo):
         movemarkfrom = repo['.'].node()
     elif activemark:
         ui.status(_("updating to active bookmark %s\n") % activemark)
         checkout = activemark
     return (checkout, movemarkfrom)

 def update(repo, parents, node):
     deletefrom = parents
     marks = repo._bookmarks
     active = marks.active
     if not active:
         return False

     bmchanges = []
     if marks[active] in parents:
         new = repo[node]
-        divs = [marks.changectx(b) for b in marks
+        divs = [repo[marks[b]] for b in marks
                 if b.split('@', 1)[0] == active.split('@', 1)[0]]
         anc = repo.changelog.ancestors([new.rev()])
         deletefrom = [b.node() for b in divs if b.rev() in anc or b == new]
-        if validdest(repo, marks.changectx(active), new):
+        if validdest(repo, repo[marks[active]], new):
420 bmchanges.append((active, new.node()))
416 bmchanges.append((active, new.node()))
421
417
422 for bm in divergent2delete(repo, deletefrom, active):
418 for bm in divergent2delete(repo, deletefrom, active):
423 bmchanges.append((bm, None))
419 bmchanges.append((bm, None))
424
420
425 if bmchanges:
421 if bmchanges:
426 with repo.lock(), repo.transaction('bookmark') as tr:
422 with repo.lock(), repo.transaction('bookmark') as tr:
427 marks.applychanges(repo, tr, bmchanges)
423 marks.applychanges(repo, tr, bmchanges)
428 return bool(bmchanges)
424 return bool(bmchanges)
429
425
430 def listbinbookmarks(repo):
426 def listbinbookmarks(repo):
431 # We may try to list bookmarks on a repo type that does not
427 # We may try to list bookmarks on a repo type that does not
432 # support it (e.g., statichttprepository).
428 # support it (e.g., statichttprepository).
433 marks = getattr(repo, '_bookmarks', {})
429 marks = getattr(repo, '_bookmarks', {})
434
430
435 hasnode = repo.changelog.hasnode
431 hasnode = repo.changelog.hasnode
436 for k, v in marks.iteritems():
432 for k, v in marks.iteritems():
437 # don't expose local divergent bookmarks
433 # don't expose local divergent bookmarks
438 if hasnode(v) and ('@' not in k or k.endswith('@')):
434 if hasnode(v) and ('@' not in k or k.endswith('@')):
439 yield k, v
435 yield k, v
440
436
441 def listbookmarks(repo):
437 def listbookmarks(repo):
442 d = {}
438 d = {}
443 for book, node in listbinbookmarks(repo):
439 for book, node in listbinbookmarks(repo):
444 d[book] = hex(node)
440 d[book] = hex(node)
445 return d
441 return d
446
442
447 def pushbookmark(repo, key, old, new):
443 def pushbookmark(repo, key, old, new):
448 if bookmarksinstore(repo):
444 if bookmarksinstore(repo):
449 wlock = util.nullcontextmanager()
445 wlock = util.nullcontextmanager()
450 else:
446 else:
451 wlock = repo.wlock()
447 wlock = repo.wlock()
452 with wlock, repo.lock(), repo.transaction('bookmarks') as tr:
448 with wlock, repo.lock(), repo.transaction('bookmarks') as tr:
453 marks = repo._bookmarks
449 marks = repo._bookmarks
454 existing = hex(marks.get(key, ''))
450 existing = hex(marks.get(key, ''))
455 if existing != old and existing != new:
451 if existing != old and existing != new:
456 return False
452 return False
457 if new == '':
453 if new == '':
458 changes = [(key, None)]
454 changes = [(key, None)]
459 else:
455 else:
460 if new not in repo:
456 if new not in repo:
461 return False
457 return False
462 changes = [(key, repo[new].node())]
458 changes = [(key, repo[new].node())]
463 marks.applychanges(repo, tr, changes)
459 marks.applychanges(repo, tr, changes)
464 return True
460 return True
465
461
466 def comparebookmarks(repo, srcmarks, dstmarks, targets=None):
462 def comparebookmarks(repo, srcmarks, dstmarks, targets=None):
467 '''Compare bookmarks between srcmarks and dstmarks
463 '''Compare bookmarks between srcmarks and dstmarks
468
464
469 This returns tuple "(addsrc, adddst, advsrc, advdst, diverge,
465 This returns tuple "(addsrc, adddst, advsrc, advdst, diverge,
470 differ, invalid)", each are list of bookmarks below:
466 differ, invalid)", each are list of bookmarks below:
471
467
472 :addsrc: added on src side (removed on dst side, perhaps)
468 :addsrc: added on src side (removed on dst side, perhaps)
473 :adddst: added on dst side (removed on src side, perhaps)
469 :adddst: added on dst side (removed on src side, perhaps)
474 :advsrc: advanced on src side
470 :advsrc: advanced on src side
475 :advdst: advanced on dst side
471 :advdst: advanced on dst side
476 :diverge: diverge
472 :diverge: diverge
477 :differ: changed, but changeset referred on src is unknown on dst
473 :differ: changed, but changeset referred on src is unknown on dst
478 :invalid: unknown on both side
474 :invalid: unknown on both side
479 :same: same on both side
475 :same: same on both side
480
476
481 Each elements of lists in result tuple is tuple "(bookmark name,
477 Each elements of lists in result tuple is tuple "(bookmark name,
482 changeset ID on source side, changeset ID on destination
478 changeset ID on source side, changeset ID on destination
483 side)". Each changeset ID is a binary node or None.
479 side)". Each changeset ID is a binary node or None.
484
480
485 Changeset IDs of tuples in "addsrc", "adddst", "differ" or
481 Changeset IDs of tuples in "addsrc", "adddst", "differ" or
486 "invalid" list may be unknown for repo.
482 "invalid" list may be unknown for repo.
487
483
488 If "targets" is specified, only bookmarks listed in it are
484 If "targets" is specified, only bookmarks listed in it are
489 examined.
485 examined.
490 '''
486 '''
491
487
492 if targets:
488 if targets:
493 bset = set(targets)
489 bset = set(targets)
494 else:
490 else:
495 srcmarkset = set(srcmarks)
491 srcmarkset = set(srcmarks)
496 dstmarkset = set(dstmarks)
492 dstmarkset = set(dstmarks)
497 bset = srcmarkset | dstmarkset
493 bset = srcmarkset | dstmarkset
498
494
499 results = ([], [], [], [], [], [], [], [])
495 results = ([], [], [], [], [], [], [], [])
500 addsrc = results[0].append
496 addsrc = results[0].append
501 adddst = results[1].append
497 adddst = results[1].append
502 advsrc = results[2].append
498 advsrc = results[2].append
503 advdst = results[3].append
499 advdst = results[3].append
504 diverge = results[4].append
500 diverge = results[4].append
505 differ = results[5].append
501 differ = results[5].append
506 invalid = results[6].append
502 invalid = results[6].append
507 same = results[7].append
503 same = results[7].append
508
504
509 for b in sorted(bset):
505 for b in sorted(bset):
510 if b not in srcmarks:
506 if b not in srcmarks:
511 if b in dstmarks:
507 if b in dstmarks:
512 adddst((b, None, dstmarks[b]))
508 adddst((b, None, dstmarks[b]))
513 else:
509 else:
514 invalid((b, None, None))
510 invalid((b, None, None))
515 elif b not in dstmarks:
511 elif b not in dstmarks:
516 addsrc((b, srcmarks[b], None))
512 addsrc((b, srcmarks[b], None))
517 else:
513 else:
518 scid = srcmarks[b]
514 scid = srcmarks[b]
519 dcid = dstmarks[b]
515 dcid = dstmarks[b]
520 if scid == dcid:
516 if scid == dcid:
521 same((b, scid, dcid))
517 same((b, scid, dcid))
522 elif scid in repo and dcid in repo:
518 elif scid in repo and dcid in repo:
523 sctx = repo[scid]
519 sctx = repo[scid]
524 dctx = repo[dcid]
520 dctx = repo[dcid]
525 if sctx.rev() < dctx.rev():
521 if sctx.rev() < dctx.rev():
526 if validdest(repo, sctx, dctx):
522 if validdest(repo, sctx, dctx):
527 advdst((b, scid, dcid))
523 advdst((b, scid, dcid))
528 else:
524 else:
529 diverge((b, scid, dcid))
525 diverge((b, scid, dcid))
530 else:
526 else:
531 if validdest(repo, dctx, sctx):
527 if validdest(repo, dctx, sctx):
532 advsrc((b, scid, dcid))
528 advsrc((b, scid, dcid))
533 else:
529 else:
534 diverge((b, scid, dcid))
530 diverge((b, scid, dcid))
535 else:
531 else:
536 # it is too expensive to examine in detail, in this case
532 # it is too expensive to examine in detail, in this case
537 differ((b, scid, dcid))
533 differ((b, scid, dcid))
538
534
539 return results
535 return results
540
536
541 def _diverge(ui, b, path, localmarks, remotenode):
537 def _diverge(ui, b, path, localmarks, remotenode):
542 '''Return appropriate diverged bookmark for specified ``path``
538 '''Return appropriate diverged bookmark for specified ``path``
543
539
544 This returns None, if it is failed to assign any divergent
540 This returns None, if it is failed to assign any divergent
545 bookmark name.
541 bookmark name.
546
542
547 This reuses already existing one with "@number" suffix, if it
543 This reuses already existing one with "@number" suffix, if it
548 refers ``remotenode``.
544 refers ``remotenode``.
549 '''
545 '''
550 if b == '@':
546 if b == '@':
551 b = ''
547 b = ''
552 # try to use an @pathalias suffix
548 # try to use an @pathalias suffix
553 # if an @pathalias already exists, we overwrite (update) it
549 # if an @pathalias already exists, we overwrite (update) it
554 if path.startswith("file:"):
550 if path.startswith("file:"):
555 path = util.url(path).path
551 path = util.url(path).path
556 for p, u in ui.configitems("paths"):
552 for p, u in ui.configitems("paths"):
557 if u.startswith("file:"):
553 if u.startswith("file:"):
558 u = util.url(u).path
554 u = util.url(u).path
559 if path == u:
555 if path == u:
560 return '%s@%s' % (b, p)
556 return '%s@%s' % (b, p)
561
557
562 # assign a unique "@number" suffix newly
558 # assign a unique "@number" suffix newly
563 for x in range(1, 100):
559 for x in range(1, 100):
564 n = '%s@%d' % (b, x)
560 n = '%s@%d' % (b, x)
565 if n not in localmarks or localmarks[n] == remotenode:
561 if n not in localmarks or localmarks[n] == remotenode:
566 return n
562 return n
567
563
568 return None
564 return None
569
565
570 def unhexlifybookmarks(marks):
566 def unhexlifybookmarks(marks):
571 binremotemarks = {}
567 binremotemarks = {}
572 for name, node in marks.items():
568 for name, node in marks.items():
573 binremotemarks[name] = bin(node)
569 binremotemarks[name] = bin(node)
574 return binremotemarks
570 return binremotemarks
575
571
576 _binaryentry = struct.Struct('>20sH')
572 _binaryentry = struct.Struct('>20sH')
577
573
578 def binaryencode(bookmarks):
574 def binaryencode(bookmarks):
579 """encode a '(bookmark, node)' iterable into a binary stream
575 """encode a '(bookmark, node)' iterable into a binary stream
580
576
581 the binary format is:
577 the binary format is:
582
578
583 <node><bookmark-length><bookmark-name>
579 <node><bookmark-length><bookmark-name>
584
580
585 :node: is a 20 bytes binary node,
581 :node: is a 20 bytes binary node,
586 :bookmark-length: an unsigned short,
582 :bookmark-length: an unsigned short,
587 :bookmark-name: the name of the bookmark (of length <bookmark-length>)
583 :bookmark-name: the name of the bookmark (of length <bookmark-length>)
588
584
589 wdirid (all bits set) will be used as a special value for "missing"
585 wdirid (all bits set) will be used as a special value for "missing"
590 """
586 """
591 binarydata = []
587 binarydata = []
592 for book, node in bookmarks:
588 for book, node in bookmarks:
593 if not node: # None or ''
589 if not node: # None or ''
594 node = wdirid
590 node = wdirid
595 binarydata.append(_binaryentry.pack(node, len(book)))
591 binarydata.append(_binaryentry.pack(node, len(book)))
596 binarydata.append(book)
592 binarydata.append(book)
597 return ''.join(binarydata)
593 return ''.join(binarydata)
598
594
599 def binarydecode(stream):
595 def binarydecode(stream):
600 """decode a binary stream into an '(bookmark, node)' iterable
596 """decode a binary stream into an '(bookmark, node)' iterable
601
597
602 the binary format is:
598 the binary format is:
603
599
604 <node><bookmark-length><bookmark-name>
600 <node><bookmark-length><bookmark-name>
605
601
606 :node: is a 20 bytes binary node,
602 :node: is a 20 bytes binary node,
607 :bookmark-length: an unsigned short,
603 :bookmark-length: an unsigned short,
608 :bookmark-name: the name of the bookmark (of length <bookmark-length>))
604 :bookmark-name: the name of the bookmark (of length <bookmark-length>))
609
605
610 wdirid (all bits set) will be used as a special value for "missing"
606 wdirid (all bits set) will be used as a special value for "missing"
611 """
607 """
612 entrysize = _binaryentry.size
608 entrysize = _binaryentry.size
613 books = []
609 books = []
614 while True:
610 while True:
615 entry = stream.read(entrysize)
611 entry = stream.read(entrysize)
616 if len(entry) < entrysize:
612 if len(entry) < entrysize:
617 if entry:
613 if entry:
618 raise error.Abort(_('bad bookmark stream'))
614 raise error.Abort(_('bad bookmark stream'))
619 break
615 break
620 node, length = _binaryentry.unpack(entry)
616 node, length = _binaryentry.unpack(entry)
621 bookmark = stream.read(length)
617 bookmark = stream.read(length)
622 if len(bookmark) < length:
618 if len(bookmark) < length:
623 if entry:
619 if entry:
624 raise error.Abort(_('bad bookmark stream'))
620 raise error.Abort(_('bad bookmark stream'))
625 if node == wdirid:
621 if node == wdirid:
626 node = None
622 node = None
627 books.append((bookmark, node))
623 books.append((bookmark, node))
628 return books
624 return books
629
625
630 def updatefromremote(ui, repo, remotemarks, path, trfunc, explicit=()):
626 def updatefromremote(ui, repo, remotemarks, path, trfunc, explicit=()):
631 ui.debug("checking for updated bookmarks\n")
627 ui.debug("checking for updated bookmarks\n")
632 localmarks = repo._bookmarks
628 localmarks = repo._bookmarks
633 (addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same
629 (addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same
634 ) = comparebookmarks(repo, remotemarks, localmarks)
630 ) = comparebookmarks(repo, remotemarks, localmarks)
635
631
636 status = ui.status
632 status = ui.status
637 warn = ui.warn
633 warn = ui.warn
638 if ui.configbool('ui', 'quietbookmarkmove'):
634 if ui.configbool('ui', 'quietbookmarkmove'):
639 status = warn = ui.debug
635 status = warn = ui.debug
640
636
641 explicit = set(explicit)
637 explicit = set(explicit)
642 changed = []
638 changed = []
643 for b, scid, dcid in addsrc:
639 for b, scid, dcid in addsrc:
644 if scid in repo: # add remote bookmarks for changes we already have
640 if scid in repo: # add remote bookmarks for changes we already have
645 changed.append((b, scid, status,
641 changed.append((b, scid, status,
646 _("adding remote bookmark %s\n") % (b)))
642 _("adding remote bookmark %s\n") % (b)))
647 elif b in explicit:
643 elif b in explicit:
648 explicit.remove(b)
644 explicit.remove(b)
649 ui.warn(_("remote bookmark %s points to locally missing %s\n")
645 ui.warn(_("remote bookmark %s points to locally missing %s\n")
650 % (b, hex(scid)[:12]))
646 % (b, hex(scid)[:12]))
651
647
652 for b, scid, dcid in advsrc:
648 for b, scid, dcid in advsrc:
653 changed.append((b, scid, status,
649 changed.append((b, scid, status,
654 _("updating bookmark %s\n") % (b)))
650 _("updating bookmark %s\n") % (b)))
655 # remove normal movement from explicit set
651 # remove normal movement from explicit set
656 explicit.difference_update(d[0] for d in changed)
652 explicit.difference_update(d[0] for d in changed)
657
653
658 for b, scid, dcid in diverge:
654 for b, scid, dcid in diverge:
659 if b in explicit:
655 if b in explicit:
660 explicit.discard(b)
656 explicit.discard(b)
661 changed.append((b, scid, status,
657 changed.append((b, scid, status,
662 _("importing bookmark %s\n") % (b)))
658 _("importing bookmark %s\n") % (b)))
663 else:
659 else:
664 db = _diverge(ui, b, path, localmarks, scid)
660 db = _diverge(ui, b, path, localmarks, scid)
665 if db:
661 if db:
666 changed.append((db, scid, warn,
662 changed.append((db, scid, warn,
667 _("divergent bookmark %s stored as %s\n") %
663 _("divergent bookmark %s stored as %s\n") %
668 (b, db)))
664 (b, db)))
669 else:
665 else:
670 warn(_("warning: failed to assign numbered name "
666 warn(_("warning: failed to assign numbered name "
671 "to divergent bookmark %s\n") % (b))
667 "to divergent bookmark %s\n") % (b))
672 for b, scid, dcid in adddst + advdst:
668 for b, scid, dcid in adddst + advdst:
673 if b in explicit:
669 if b in explicit:
674 explicit.discard(b)
670 explicit.discard(b)
675 changed.append((b, scid, status,
671 changed.append((b, scid, status,
676 _("importing bookmark %s\n") % (b)))
672 _("importing bookmark %s\n") % (b)))
677 for b, scid, dcid in differ:
673 for b, scid, dcid in differ:
678 if b in explicit:
674 if b in explicit:
679 explicit.remove(b)
675 explicit.remove(b)
680 ui.warn(_("remote bookmark %s points to locally missing %s\n")
676 ui.warn(_("remote bookmark %s points to locally missing %s\n")
681 % (b, hex(scid)[:12]))
677 % (b, hex(scid)[:12]))
682
678
683 if changed:
679 if changed:
684 tr = trfunc()
680 tr = trfunc()
685 changes = []
681 changes = []
686 for b, node, writer, msg in sorted(changed):
682 for b, node, writer, msg in sorted(changed):
687 changes.append((b, node))
683 changes.append((b, node))
688 writer(msg)
684 writer(msg)
689 localmarks.applychanges(repo, tr, changes)
685 localmarks.applychanges(repo, tr, changes)
690
686
691 def incoming(ui, repo, peer):
687 def incoming(ui, repo, peer):
692 '''Show bookmarks incoming from other to repo
688 '''Show bookmarks incoming from other to repo
693 '''
689 '''
694 ui.status(_("searching for changed bookmarks\n"))
690 ui.status(_("searching for changed bookmarks\n"))
695
691
696 with peer.commandexecutor() as e:
692 with peer.commandexecutor() as e:
697 remotemarks = unhexlifybookmarks(e.callcommand('listkeys', {
693 remotemarks = unhexlifybookmarks(e.callcommand('listkeys', {
698 'namespace': 'bookmarks',
694 'namespace': 'bookmarks',
699 }).result())
695 }).result())
700
696
701 r = comparebookmarks(repo, remotemarks, repo._bookmarks)
697 r = comparebookmarks(repo, remotemarks, repo._bookmarks)
702 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
698 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
703
699
704 incomings = []
700 incomings = []
705 if ui.debugflag:
701 if ui.debugflag:
706 getid = lambda id: id
702 getid = lambda id: id
707 else:
703 else:
708 getid = lambda id: id[:12]
704 getid = lambda id: id[:12]
709 if ui.verbose:
705 if ui.verbose:
710 def add(b, id, st):
706 def add(b, id, st):
711 incomings.append(" %-25s %s %s\n" % (b, getid(id), st))
707 incomings.append(" %-25s %s %s\n" % (b, getid(id), st))
712 else:
708 else:
713 def add(b, id, st):
709 def add(b, id, st):
714 incomings.append(" %-25s %s\n" % (b, getid(id)))
710 incomings.append(" %-25s %s\n" % (b, getid(id)))
715 for b, scid, dcid in addsrc:
711 for b, scid, dcid in addsrc:
716 # i18n: "added" refers to a bookmark
712 # i18n: "added" refers to a bookmark
717 add(b, hex(scid), _('added'))
713 add(b, hex(scid), _('added'))
718 for b, scid, dcid in advsrc:
714 for b, scid, dcid in advsrc:
719 # i18n: "advanced" refers to a bookmark
715 # i18n: "advanced" refers to a bookmark
720 add(b, hex(scid), _('advanced'))
716 add(b, hex(scid), _('advanced'))
721 for b, scid, dcid in diverge:
717 for b, scid, dcid in diverge:
722 # i18n: "diverged" refers to a bookmark
718 # i18n: "diverged" refers to a bookmark
723 add(b, hex(scid), _('diverged'))
719 add(b, hex(scid), _('diverged'))
724 for b, scid, dcid in differ:
720 for b, scid, dcid in differ:
725 # i18n: "changed" refers to a bookmark
721 # i18n: "changed" refers to a bookmark
726 add(b, hex(scid), _('changed'))
722 add(b, hex(scid), _('changed'))
727
723
728 if not incomings:
724 if not incomings:
729 ui.status(_("no changed bookmarks found\n"))
725 ui.status(_("no changed bookmarks found\n"))
730 return 1
726 return 1
731
727
732 for s in sorted(incomings):
728 for s in sorted(incomings):
733 ui.write(s)
729 ui.write(s)
734
730
735 return 0
731 return 0
736
732
737 def outgoing(ui, repo, other):
733 def outgoing(ui, repo, other):
738 '''Show bookmarks outgoing from repo to other
734 '''Show bookmarks outgoing from repo to other
739 '''
735 '''
740 ui.status(_("searching for changed bookmarks\n"))
736 ui.status(_("searching for changed bookmarks\n"))
741
737
742 remotemarks = unhexlifybookmarks(other.listkeys('bookmarks'))
738 remotemarks = unhexlifybookmarks(other.listkeys('bookmarks'))
743 r = comparebookmarks(repo, repo._bookmarks, remotemarks)
739 r = comparebookmarks(repo, repo._bookmarks, remotemarks)
744 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
740 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
745
741
746 outgoings = []
742 outgoings = []
747 if ui.debugflag:
743 if ui.debugflag:
748 getid = lambda id: id
744 getid = lambda id: id
749 else:
745 else:
750 getid = lambda id: id[:12]
746 getid = lambda id: id[:12]
751 if ui.verbose:
747 if ui.verbose:
752 def add(b, id, st):
748 def add(b, id, st):
753 outgoings.append(" %-25s %s %s\n" % (b, getid(id), st))
749 outgoings.append(" %-25s %s %s\n" % (b, getid(id), st))
754 else:
750 else:
755 def add(b, id, st):
751 def add(b, id, st):
756 outgoings.append(" %-25s %s\n" % (b, getid(id)))
752 outgoings.append(" %-25s %s\n" % (b, getid(id)))
757 for b, scid, dcid in addsrc:
753 for b, scid, dcid in addsrc:
758 # i18n: "added refers to a bookmark
754 # i18n: "added refers to a bookmark
759 add(b, hex(scid), _('added'))
755 add(b, hex(scid), _('added'))
760 for b, scid, dcid in adddst:
756 for b, scid, dcid in adddst:
761 # i18n: "deleted" refers to a bookmark
757 # i18n: "deleted" refers to a bookmark
762 add(b, ' ' * 40, _('deleted'))
758 add(b, ' ' * 40, _('deleted'))
763 for b, scid, dcid in advsrc:
759 for b, scid, dcid in advsrc:
764 # i18n: "advanced" refers to a bookmark
760 # i18n: "advanced" refers to a bookmark
765 add(b, hex(scid), _('advanced'))
761 add(b, hex(scid), _('advanced'))
766 for b, scid, dcid in diverge:
762 for b, scid, dcid in diverge:
767 # i18n: "diverged" refers to a bookmark
763 # i18n: "diverged" refers to a bookmark
768 add(b, hex(scid), _('diverged'))
764 add(b, hex(scid), _('diverged'))
769 for b, scid, dcid in differ:
765 for b, scid, dcid in differ:
770 # i18n: "changed" refers to a bookmark
766 # i18n: "changed" refers to a bookmark
771 add(b, hex(scid), _('changed'))
767 add(b, hex(scid), _('changed'))
772
768
773 if not outgoings:
769 if not outgoings:
774 ui.status(_("no changed bookmarks found\n"))
770 ui.status(_("no changed bookmarks found\n"))
775 return 1
771 return 1
776
772
777 for s in sorted(outgoings):
773 for s in sorted(outgoings):
778 ui.write(s)
774 ui.write(s)
779
775
780 return 0
776 return 0
781
777
782 def summary(repo, peer):
778 def summary(repo, peer):
783 '''Compare bookmarks between repo and other for "hg summary" output
779 '''Compare bookmarks between repo and other for "hg summary" output
784
780
785 This returns "(# of incoming, # of outgoing)" tuple.
781 This returns "(# of incoming, # of outgoing)" tuple.
786 '''
782 '''
787 with peer.commandexecutor() as e:
783 with peer.commandexecutor() as e:
788 remotemarks = unhexlifybookmarks(e.callcommand('listkeys', {
784 remotemarks = unhexlifybookmarks(e.callcommand('listkeys', {
789 'namespace': 'bookmarks',
785 'namespace': 'bookmarks',
790 }).result())
786 }).result())
791
787
792 r = comparebookmarks(repo, remotemarks, repo._bookmarks)
788 r = comparebookmarks(repo, remotemarks, repo._bookmarks)
793 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
789 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
794 return (len(addsrc), len(adddst))
790 return (len(addsrc), len(adddst))
795
791
796 def validdest(repo, old, new):
792 def validdest(repo, old, new):
797 """Is the new bookmark destination a valid update from the old one"""
793 """Is the new bookmark destination a valid update from the old one"""
798 repo = repo.unfiltered()
794 repo = repo.unfiltered()
799 if old == new:
795 if old == new:
800 # Old == new -> nothing to update.
796 # Old == new -> nothing to update.
801 return False
797 return False
802 elif not old:
798 elif not old:
803 # old is nullrev, anything is valid.
799 # old is nullrev, anything is valid.
804 # (new != nullrev has been excluded by the previous check)
800 # (new != nullrev has been excluded by the previous check)
805 return True
801 return True
806 elif repo.obsstore:
802 elif repo.obsstore:
807 return new.node() in obsutil.foreground(repo, [old.node()])
803 return new.node() in obsutil.foreground(repo, [old.node()])
808 else:
804 else:
809 # still an independent clause as it is lazier (and therefore faster)
805 # still an independent clause as it is lazier (and therefore faster)
810 return old.isancestorof(new)
806 return old.isancestorof(new)
811
807
def checkformat(repo, mark):
    """return a valid version of a potential bookmark name

    Raises an abort error if the bookmark name is not valid.
    """
    mark = mark.strip()
    if not mark:
        raise error.Abort(_("bookmark names cannot consist entirely of "
                            "whitespace"))
    scmutil.checknewlabel(repo, mark, 'bookmark')
    return mark

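# Sketch of the intended usage (assumed inputs, not from the original
# source): callers keep the return value, since checkformat() normalizes as
# well as validates.
#
#     mark = checkformat(repo, '  my-book ')   # -> 'my-book'
#     mark = checkformat(repo, '   ')          # raises error.Abort
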
def delete(repo, tr, names):
    """remove a mark from the bookmark store

    Raises an abort error if mark does not exist.
    """
    marks = repo._bookmarks
    changes = []
    for mark in names:
        if mark not in marks:
            raise error.Abort(_("bookmark '%s' does not exist") % mark)
        if mark == repo._activebookmark:
            deactivate(repo)
        changes.append((mark, None))
    marks.applychanges(repo, tr, changes)

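# Sketch (hypothetical caller): bookmark mutations go through a transaction,
# so a delete typically looks like this. The lock/transaction pattern follows
# common Mercurial usage but is an assumption here, not part of this module.
#
#     with repo.wlock(), repo.lock(), repo.transaction('bookmark') as tr:
#         delete(repo, tr, ['stale-book'])
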
def rename(repo, tr, old, new, force=False, inactive=False):
    """rename a bookmark from old to new

    If force is specified, then the new name can overwrite an existing
    bookmark.

    If inactive is specified, then do not activate the new bookmark.

    Raises an abort error if old is not in the bookmark store.
    """
    marks = repo._bookmarks
    mark = checkformat(repo, new)
    if old not in marks:
        raise error.Abort(_("bookmark '%s' does not exist") % old)
    changes = []
    for bm in marks.checkconflict(mark, force):
        changes.append((bm, None))
    changes.extend([(mark, marks[old]), (old, None)])
    marks.applychanges(repo, tr, changes)
    if repo._activebookmark == old and not inactive:
        activate(repo, mark)

def addbookmarks(repo, tr, names, rev=None, force=False, inactive=False):
    """add a list of bookmarks

    If force is specified, then the new name can overwrite an existing
    bookmark.

    If inactive is specified, then do not activate any bookmark. Otherwise, the
    first bookmark is activated.

    Raises an abort error if a bookmark name is not valid or conflicts with an
    existing bookmark while force is not given.
    """
    marks = repo._bookmarks
    cur = repo['.'].node()
    newact = None
    changes = []
    hiddenrev = None

    # unhide revs if any
    if rev:
        repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')

    for mark in names:
        mark = checkformat(repo, mark)
        if newact is None:
            newact = mark
        if inactive and mark == repo._activebookmark:
            deactivate(repo)
            return
        tgt = cur
        if rev:
            ctx = scmutil.revsingle(repo, rev)
            if ctx.hidden():
                hiddenrev = ctx.hex()[:12]
            tgt = ctx.node()
        for bm in marks.checkconflict(mark, force, tgt):
            changes.append((bm, None))
        changes.append((mark, tgt))

    if hiddenrev:
        repo.ui.warn(_("bookmarking hidden changeset %s\n") % hiddenrev)

        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, "%s" % hiddenrev, ctx)
            repo.ui.warn("(%s)\n" % msg)

    marks.applychanges(repo, tr, changes)
    if not inactive and cur == marks[newact] and not rev:
        activate(repo, newact)
    elif cur != tgt and newact == repo._activebookmark:
        deactivate(repo)

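# Sketch (assumed caller, not in the original source): creating a bookmark on
# a specific revision, mirroring roughly what the bookmark command does.
#
#     with repo.wlock(), repo.lock(), repo.transaction('bookmark') as tr:
#         addbookmarks(repo, tr, ['feature-x'], rev='tip', inactive=True)
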
def _printbookmarks(ui, repo, fm, bmarks):
    """private method to print bookmarks

    Provides a way for extensions to control how bookmarks are printed (e.g.
    prepend or append names)
    """
    hexfn = fm.hexfunc
    if len(bmarks) == 0 and fm.isplain():
        ui.status(_("no bookmarks set\n"))
    for bmark, (n, prefix, label) in sorted(bmarks.iteritems()):
        fm.startitem()
        fm.context(repo=repo)
        if not ui.quiet:
            fm.plain(' %s ' % prefix, label=label)
        fm.write('bookmark', '%s', bmark, label=label)
        pad = " " * (25 - encoding.colwidth(bmark))
        fm.condwrite(not ui.quiet, 'rev node', pad + ' %d:%s',
                     repo.changelog.rev(n), hexfn(n), label=label)
        fm.data(active=(activebookmarklabel in label))
        fm.plain('\n')

def printbookmarks(ui, repo, fm, names=None):
    """print bookmarks by the given formatter

    Provides a way for extensions to control how bookmarks are printed.
    """
    marks = repo._bookmarks
    bmarks = {}
    for bmark in (names or marks):
        if bmark not in marks:
            raise error.Abort(_("bookmark '%s' does not exist") % bmark)
        active = repo._activebookmark
        if bmark == active:
            prefix, label = '*', activebookmarklabel
        else:
            prefix, label = ' ', ''

        bmarks[bmark] = (marks[bmark], prefix, label)
    _printbookmarks(ui, repo, fm, bmarks)

def preparehookargs(name, old, new):
    if new is None:
        new = ''
    if old is None:
        old = ''
    return {'bookmark': name,
            'node': hex(new),
            'oldnode': hex(old)}
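
# Sketch (assumed wiring, not in this module): preparehookargs() feeds the
# per-bookmark transaction hooks; a caller applying a change would pass its
# result as hook keyword arguments, roughly:
#
#     args = preparehookargs('mybook', oldnode, newnode)
#     repo.hook('pretxnclose-bookmark', throw=True, **args)
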
@@ -1,538 +1,538 b''
# discovery.py - protocol changeset discovery functions
#
# Copyright 2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import functools

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)

from . import (
    bookmarks,
    branchmap,
    error,
    phases,
    scmutil,
    setdiscovery,
    treediscovery,
    util,
)

def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
    """Return a tuple (common, anyincoming, heads) used to identify the common
    subset of nodes between repo and remote.

    "common" is a list of (at least) the heads of the common subset.
    "anyincoming" is testable as a boolean indicating if any nodes are missing
      locally. If remote does not support getbundle, this actually is a list of
      roots of the nodes that would be incoming, to be supplied to
      changegroupsubset. No code except for pull should be relying on this fact
      any longer.
    "heads" is either the supplied heads, or else the remote's heads.
    "ancestorsof", if not None, restricts the discovery to a subset defined by
      these nodes. Changesets outside of this set won't be considered (and
      won't appear in "common")

    If you pass heads and they are all known locally, the response lists just
    these heads in "common" and in "heads".

    Please use findcommonoutgoing to compute the set of outgoing nodes to give
    extensions a good hook into outgoing.
    """

    if not remote.capable('getbundle'):
        return treediscovery.findcommonincoming(repo, remote, heads, force)

    if heads:
        knownnode = repo.changelog.hasnode # no nodemap until it is filtered
        if all(knownnode(h) for h in heads):
            return (heads, False, heads)

    res = setdiscovery.findcommonheads(repo.ui, repo, remote,
                                       abortwhenunrelated=not force,
                                       ancestorsof=ancestorsof)
    common, anyinc, srvheads = res
    return (list(common), anyinc, heads or list(srvheads))

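# Sketch (hypothetical pull-side caller): the three-tuple contract described
# in the docstring above, in use. `remote` is assumed to be a peer object.
#
#     common, anyinc, rheads = findcommonincoming(repo, remote)
#     if not anyinc:
#         repo.ui.status(_("no changes found\n"))
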
class outgoing(object):
    '''Represents the set of nodes present in a local repo but not in a
    (possibly) remote one.

    Members:

    missing is a list of all nodes present in local but not in remote.
    common is a list of all nodes shared between the two repos.
    excluded is the list of missing changesets that shouldn't be sent remotely.
    missingheads is the list of heads of missing.
    commonheads is the list of heads of common.

    The sets are computed on demand from the heads, unless provided upfront
    by discovery.'''

    def __init__(self, repo, commonheads=None, missingheads=None,
                 missingroots=None):
        # at least one of them must not be set
        assert None in (commonheads, missingroots)
        cl = repo.changelog
        if missingheads is None:
            missingheads = cl.heads()
        if missingroots:
            discbases = []
            for n in missingroots:
                discbases.extend([p for p in cl.parents(n) if p != nullid])
            # TODO remove call to nodesbetween.
            # TODO populate attributes on outgoing instance instead of setting
            # discbases.
            csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
            included = set(csets)
            missingheads = heads
            commonheads = [n for n in discbases if n not in included]
        elif not commonheads:
            commonheads = [nullid]
        self.commonheads = commonheads
        self.missingheads = missingheads
        self._revlog = cl
        self._common = None
        self._missing = None
        self.excluded = []

    def _computecommonmissing(self):
        sets = self._revlog.findcommonmissing(self.commonheads,
                                              self.missingheads)
        self._common, self._missing = sets

    @util.propertycache
    def common(self):
        if self._common is None:
            self._computecommonmissing()
        return self._common

    @util.propertycache
    def missing(self):
        if self._missing is None:
            self._computecommonmissing()
        return self._missing

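# Sketch (assumed usage, not from the original source): an outgoing instance
# built from known common heads; the missing/common sets are then computed
# lazily from the changelog, e.g. when a changegroup is generated.
# `knowncommon` is a hypothetical name, and changegroup would need importing.
#
#     og = outgoing(repo, commonheads=knowncommon)
#     cg = changegroup.makestream(repo, og, '02', 'push')
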
def findcommonoutgoing(repo, other, onlyheads=None, force=False,
                       commoninc=None, portable=False):
    '''Return an outgoing instance to identify the nodes present in repo but
    not in other.

    If onlyheads is given, only nodes ancestral to nodes in onlyheads
    (inclusive) are included. If you already know the local repo's heads,
    passing them in onlyheads is faster than letting them be recomputed here.

    If commoninc is given, it must be the result of a prior call to
    findcommonincoming(repo, other, force) to avoid recomputing it here.

    If portable is given, compute more conservative common and missingheads,
    to make bundles created from the instance more portable.'''
    # declare an empty outgoing object to be filled later
    og = outgoing(repo, None, None)

    # get common set if not provided
    if commoninc is None:
        commoninc = findcommonincoming(repo, other, force=force,
                                       ancestorsof=onlyheads)
    og.commonheads, _any, _hds = commoninc

    # compute outgoing
    mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
    if not mayexclude:
        og.missingheads = onlyheads or repo.heads()
    elif onlyheads is None:
        # use visible heads as it should be cached
        og.missingheads = repo.filtered("served").heads()
        og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
    else:
        # compute common, missing and exclude secret stuff
        sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
        og._common, allmissing = sets
        og._missing = missing = []
        og.excluded = excluded = []
        for node in allmissing:
            ctx = repo[node]
            if ctx.phase() >= phases.secret or ctx.extinct():
                excluded.append(node)
            else:
                missing.append(node)
        if len(missing) == len(allmissing):
            missingheads = onlyheads
        else: # update missing heads
            missingheads = phases.newheads(repo, onlyheads, excluded)
        og.missingheads = missingheads
    if portable:
        # recompute common and missingheads as if -r<rev> had been given for
        # each head of missing, and --base <rev> for each head of the proper
        # ancestors of missing
        og._computecommonmissing()
        cl = repo.changelog
        missingrevs = set(cl.rev(n) for n in og._missing)
        og._common = set(cl.ancestors(missingrevs)) - missingrevs
        commonheads = set(og.commonheads)
        og.missingheads = [h for h in og.missingheads if h not in commonheads]

    return og

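# Sketch (hypothetical push-side caller): how push code typically obtains the
# nodes to send; `revs` and `force` are assumed to come from command options.
#
#     og = findcommonoutgoing(repo, other, onlyheads=revs, force=force)
#     if not og.missing:
#         repo.ui.status(_("no changes found\n"))
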
def _headssummary(pushop):
    """compute a summary of branch and heads status before and after push

    return {'branch': ([remoteheads], [newheads],
                       [unsyncedheads], [discardedheads])} mapping

    - branch: the branch name,
    - remoteheads: the list of remote heads known locally
                   None if the branch is new,
    - newheads: the new remote heads (known locally) with outgoing pushed,
    - unsyncedheads: the list of remote heads unknown locally,
    - discardedheads: the list of heads made obsolete by the push.
    """
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    outgoing = pushop.outgoing
    cl = repo.changelog
    headssum = {}
    missingctx = set()
    # A. Create set of branches involved in the push.
    branches = set()
    for n in outgoing.missing:
        ctx = repo[n]
        missingctx.add(ctx)
        branches.add(ctx.branch())

    with remote.commandexecutor() as e:
        remotemap = e.callcommand('branchmap', {}).result()

    knownnode = cl.hasnode # do not use nodemap until it is filtered
    # A. register remote heads of branches which are in outgoing set
    for branch, heads in remotemap.iteritems():
        # don't add head info about branches which we don't have locally
        if branch not in branches:
            continue
        known = []
        unsynced = []
        for h in heads:
            if knownnode(h):
                known.append(h)
            else:
                unsynced.append(h)
        headssum[branch] = (known, list(known), unsynced)

    # B. add new branch data
    for branch in branches:
        if branch not in headssum:
            headssum[branch] = (None, [], [])

    # C. Update newmap with outgoing changes.
    # This will possibly add new heads and remove existing ones.
    newmap = branchmap.remotebranchcache((branch, heads[1])
                                         for branch, heads in headssum.iteritems()
                                         if heads[0] is not None)
    newmap.update(repo, (ctx.rev() for ctx in missingctx))
    for branch, newheads in newmap.iteritems():
        headssum[branch][1][:] = newheads
    for branch, items in headssum.iteritems():
        for l in items:
            if l is not None:
                l.sort()
        headssum[branch] = items + ([],)

    # If there is no obsstore, no post processing is needed.
    if repo.obsstore:
        torev = repo.changelog.rev
        futureheads = set(torev(h) for h in outgoing.missingheads)
        futureheads |= set(torev(h) for h in outgoing.commonheads)
        allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
        for branch, heads in sorted(headssum.iteritems()):
            remoteheads, newheads, unsyncedheads, placeholder = heads
            result = _postprocessobsolete(pushop, allfuturecommon, newheads)
            headssum[branch] = (remoteheads, sorted(result[0]), unsyncedheads,
                                sorted(result[1]))
    return headssum

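# Illustrative shape of the returned mapping (made-up nodes, following the
# docstring above): one entry per branch involved in the push.
#
#     {'default': ([oldhead], [oldhead, newhead], [], []),
#      'feature': (None, [newhead2], [], [])}   # branch new on the remote
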
def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
    """Compute branchmapsummary for repo without branchmap support"""

    # 1-4b. old servers: Check for new topological heads.
    # Construct {old,new}map with branch = None (topological branch).
    # (code based on update)
    knownnode = repo.changelog.hasnode # no nodemap until it is filtered
    oldheads = sorted(h for h in remoteheads if knownnode(h))
    # all nodes in outgoing.missing are children of either:
    # - an element of oldheads
    # - another element of outgoing.missing
    # - nullrev
    # This explains why the new heads are very simple to compute.
    r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
    newheads = sorted(c.node() for c in r)
    # set some unsynced head to issue the "unsynced changes" warning
    if inc:
        unsynced = [None]
    else:
        unsynced = []
    return {None: (oldheads, newheads, unsynced, [])}

def _nowarnheads(pushop):
    # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    localbookmarks = repo._bookmarks

    with remote.commandexecutor() as e:
        remotebookmarks = e.callcommand('listkeys', {
            'namespace': 'bookmarks',
        }).result()

    bookmarkedheads = set()

    # internal config: bookmarks.pushing
    newbookmarks = [localbookmarks.expandname(b)
                    for b in pushop.ui.configlist('bookmarks', 'pushing')]

    for bm in localbookmarks:
        rnode = remotebookmarks.get(bm)
        if rnode and rnode in repo:
-            lctx, rctx = localbookmarks.changectx(bm), repo[rnode]
+            lctx, rctx = repo[localbookmarks[bm]], repo[rnode]
            if bookmarks.validdest(repo, rctx, lctx):
                bookmarkedheads.add(lctx.node())
        else:
            if bm in newbookmarks and bm not in remotebookmarks:
                bookmarkedheads.add(localbookmarks[bm])

    return bookmarkedheads

def checkheads(pushop):
    """Check that a push won't add any outgoing head

    Raises an Abort error and displays a ui message as needed.
    """

    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    outgoing = pushop.outgoing
    remoteheads = pushop.remoteheads
    newbranch = pushop.newbranch
    inc = bool(pushop.incoming)

    # Check for each named branch if we're creating new remote heads.
    # To be a remote head after push, node must be either:
    # - unknown locally
    # - a local outgoing head descended from update
    # - a remote head that's known locally and not
    #   ancestral to an outgoing head
    if remoteheads == [nullid]:
        # remote is empty, nothing to check.
        return

    if remote.capable('branchmap'):
        headssum = _headssummary(pushop)
    else:
        headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
    pushop.pushbranchmap = headssum
    newbranches = [branch for branch, heads in headssum.iteritems()
                   if heads[0] is None]
    # 1. Check for new branches on the remote.
    if newbranches and not newbranch: # new branch requires --new-branch
        branchnames = ', '.join(sorted(newbranches))
        # Calculate how many of the new branches are closed branches
        closedbranches = set()
        for tag, heads, tip, isclosed in repo.branchmap().iterbranches():
            if isclosed:
                closedbranches.add(tag)
        closedbranches = (closedbranches & set(newbranches))
        if closedbranches:
            errmsg = (_("push creates new remote branches: %s (%d closed)!")
                      % (branchnames, len(closedbranches)))
        else:
            errmsg = (_("push creates new remote branches: %s!")
                      % branchnames)
        hint = _("use 'hg push --new-branch' to create new remote branches")
        raise error.Abort(errmsg, hint=hint)

    # 2. Find heads that we need not warn about
    nowarnheads = _nowarnheads(pushop)

    # 3. Check for new heads.
    # If there are more heads after the push than before, a suitable
    # error message, depending on unsynced status, is displayed.
    errormsg = None
    for branch, heads in sorted(headssum.iteritems()):
        remoteheads, newheads, unsyncedheads, discardedheads = heads
        # add unsynced data
        if remoteheads is None:
            oldhs = set()
        else:
            oldhs = set(remoteheads)
        oldhs.update(unsyncedheads)
        dhs = None # delta heads, the new heads on branch
        newhs = set(newheads)
        newhs.update(unsyncedheads)
        if unsyncedheads:
            if None in unsyncedheads:
                # old remote, no heads data
                heads = None
            else:
                heads = scmutil.nodesummaries(repo, unsyncedheads)
            if heads is None:
                repo.ui.status(_("remote has heads that are "
                                 "not known locally\n"))
            elif branch is None:
                repo.ui.status(_("remote has heads that are "
                                 "not known locally: %s\n") % heads)
            else:
                repo.ui.status(_("remote has heads on branch '%s' that are "
                                 "not known locally: %s\n") % (branch, heads))
        if remoteheads is None:
            if len(newhs) > 1:
                dhs = list(newhs)
                if errormsg is None:
                    errormsg = (_("push creates new branch '%s' "
                                  "with multiple heads") % (branch))
                    hint = _("merge or"
                             " see 'hg help push' for details about"
                             " pushing new heads")
        elif len(newhs) > len(oldhs):
            # remove bookmarked or existing remote heads from the new heads list
            dhs = sorted(newhs - nowarnheads - oldhs)
        if dhs:
            if errormsg is None:
                if branch not in ('default', None):
                    errormsg = _("push creates new remote head %s "
                                 "on branch '%s'!") % (short(dhs[0]), branch)
                elif repo[dhs[0]].bookmarks():
                    errormsg = _("push creates new remote head %s "
                                 "with bookmark '%s'!") % (
                                 short(dhs[0]), repo[dhs[0]].bookmarks()[0])
                else:
                    errormsg = _("push creates new remote head %s!"
                                 ) % short(dhs[0])
                if unsyncedheads:
                    hint = _("pull and merge or"
                             " see 'hg help push' for details about"
                             " pushing new heads")
                else:
                    hint = _("merge or"
                             " see 'hg help push' for details about"
                             " pushing new heads")
            if branch is None:
                repo.ui.note(_("new remote heads:\n"))
            else:
                repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
            for h in dhs:
                repo.ui.note((" %s\n") % short(h))
    if errormsg:
        raise error.Abort(errormsg, hint=hint)

def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
    """post process the list of new heads with obsolescence information

    Exists as a sub-function to contain the complexity and allow extensions to
    experiment with smarter logic.

    Returns (newheads, discarded_heads) tuple
    """
    # known issues
    #
    # * We "silently" skip processing on all changesets unknown locally
    #
    # * if <nh> is public on the remote, it won't be affected by obsolescence
    #   markers and a new head is created

    # define various utilities and containers
    repo = pushop.repo
    unfi = repo.unfiltered()
    tonode = unfi.changelog.node
    torev = unfi.changelog.nodemap.get
    public = phases.public
    getphase = unfi._phasecache.phase
    ispublic = (lambda r: getphase(unfi, r) == public)
    ispushed = (lambda n: torev(n) in futurecommon)
    hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
    successorsmarkers = unfi.obsstore.successors
    newhs = set() # final set of new heads
    discarded = set() # new head of fully replaced branch

    localcandidate = set() # candidate heads known locally
    unknownheads = set() # candidate heads unknown locally
    for h in candidate_newhs:
        if h in unfi:
            localcandidate.add(h)
        else:
            if successorsmarkers.get(h) is not None:
                msg = ('checkheads: remote head unknown locally has'
                       ' local marker: %s\n')
                repo.ui.debug(msg % hex(h))
            unknownheads.add(h)

    # fast path the simple case
    if len(localcandidate) == 1:
        return unknownheads | set(candidate_newhs), set()

    # actually process branch replacement
    while localcandidate:
        nh = localcandidate.pop()
        # run this check early to skip the evaluation of the whole branch
        if (torev(nh) in futurecommon or ispublic(torev(nh))):
            newhs.add(nh)
            continue

        # Get all revs/nodes on the branch exclusive to this head
        # (already filtered heads are "ignored")
        branchrevs = unfi.revs('only(%n, (%ln+%ln))',
                               nh, localcandidate, newhs)
        branchnodes = [tonode(r) for r in branchrevs]

        # The branch won't be hidden on the remote if
        # * any part of it is public,
        # * any part of it is considered part of the result by previous logic,
        # * if we have no markers to push to obsolete it.
        if (any(ispublic(r) for r in branchrevs)
                or any(torev(n) in futurecommon for n in branchnodes)
                or any(not hasoutmarker(n) for n in branchnodes)):
            newhs.add(nh)
        else:
            # note: there is a corner case if there is a merge in the branch.
            # we might end up with -more- heads. However, these heads are not
            # "added" by the push, but more by the "removal" on the remote, so
            # I think it is okay to ignore them.
            discarded.add(nh)
    newhs |= unknownheads
    return newhs, discarded

def pushingmarkerfor(obsstore, ispushed, node):
    """true if some markers are to be pushed for node

    We cannot just look into the pushed obsmarkers from the pushop because
    discovery might have filtered relevant markers. In addition, listing all
    markers relevant to all changesets in the pushed set would be too expensive
    (O(len(repo)))

    (note: there are caching opportunities in this function, but they would
    require a two-dimensional stack.)
    """
    successorsmarkers = obsstore.successors
    stack = [node]
    seen = set(stack)
    while stack:
        current = stack.pop()
        if ispushed(current):
            return True
        markers = successorsmarkers.get(current, ())
        # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
        for m in markers:
            nexts = m[1] # successors
            if not nexts: # this is a prune marker
                nexts = m[5] or () # parents
            for n in nexts:
                if n not in seen:
                    seen.add(n)
                    stack.append(n)
    return False