##// END OF EJS Templates
hooklib: fix detection of successors for changeset_obsoleted...
Joerg Sonnenberger -
r46020:04ef3810 default
parent child Browse files
Show More
@@ -1,131 +1,139 b''
1 # Copyright 2020 Joerg Sonnenberger <joerg@bec.de>
1 # Copyright 2020 Joerg Sonnenberger <joerg@bec.de>
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5 """changeset_obsoleted is a hook to send a mail when an
5 """changeset_obsoleted is a hook to send a mail when an
6 existing draft changeset is obsoleted by an obsmarker without successor.
6 existing draft changeset is obsoleted by an obsmarker without successor.
7
7
8 Correct message threading requires the same messageidseed to be used for both
8 Correct message threading requires the same messageidseed to be used for both
9 the original notification and the new mail.
9 the original notification and the new mail.
10
10
11 Usage:
11 Usage:
12 [notify]
12 [notify]
13 messageidseed = myseed
13 messageidseed = myseed
14
14
15 [hooks]
15 [hooks]
16 pretxnclose.changeset_obsoleted = \
16 pretxnclose.changeset_obsoleted = \
17 python:hgext.hooklib.changeset_obsoleted.hook
17 python:hgext.hooklib.changeset_obsoleted.hook
18 """
18 """
19
19
20 from __future__ import absolute_import
20 from __future__ import absolute_import
21
21
22 import email.errors as emailerrors
22 import email.errors as emailerrors
23 import email.utils as emailutils
23 import email.utils as emailutils
24
24
25 from mercurial.i18n import _
25 from mercurial.i18n import _
26 from mercurial import (
26 from mercurial import (
27 encoding,
27 encoding,
28 error,
28 error,
29 logcmdutil,
29 logcmdutil,
30 mail,
30 mail,
31 obsutil,
31 obsutil,
32 pycompat,
32 pycompat,
33 registrar,
33 registrar,
34 )
34 )
35 from mercurial.utils import dateutil
35 from mercurial.utils import dateutil
36 from .. import notify
36 from .. import notify
37
37
38 configtable = {}
38 configtable = {}
39 configitem = registrar.configitem(configtable)
39 configitem = registrar.configitem(configtable)
40
40
41 configitem(
41 configitem(
42 b'notify_obsoleted', b'domain', default=None,
42 b'notify_obsoleted', b'domain', default=None,
43 )
43 )
44 configitem(
44 configitem(
45 b'notify_obsoleted', b'messageidseed', default=None,
45 b'notify_obsoleted', b'messageidseed', default=None,
46 )
46 )
47 configitem(
47 configitem(
48 b'notify_obsoleted',
48 b'notify_obsoleted',
49 b'template',
49 b'template',
50 default=b'''Subject: changeset abandoned
50 default=b'''Subject: changeset abandoned
51
51
52 This changeset has been abandoned.
52 This changeset has been abandoned.
53 ''',
53 ''',
54 )
54 )
55
55
56
56
57 def _report_commit(ui, repo, ctx):
57 def _report_commit(ui, repo, ctx):
58 domain = ui.config(b'notify_obsoleted', b'domain') or ui.config(
58 domain = ui.config(b'notify_obsoleted', b'domain') or ui.config(
59 b'notify', b'domain'
59 b'notify', b'domain'
60 )
60 )
61 messageidseed = ui.config(
61 messageidseed = ui.config(
62 b'notify_obsoleted', b'messageidseed'
62 b'notify_obsoleted', b'messageidseed'
63 ) or ui.config(b'notify', b'messageidseed')
63 ) or ui.config(b'notify', b'messageidseed')
64 template = ui.config(b'notify_obsoleted', b'template')
64 template = ui.config(b'notify_obsoleted', b'template')
65 spec = logcmdutil.templatespec(template, None)
65 spec = logcmdutil.templatespec(template, None)
66 templater = logcmdutil.changesettemplater(ui, repo, spec)
66 templater = logcmdutil.changesettemplater(ui, repo, spec)
67 ui.pushbuffer()
67 ui.pushbuffer()
68 n = notify.notifier(ui, repo, b'incoming')
68 n = notify.notifier(ui, repo, b'incoming')
69
69
70 subs = set()
70 subs = set()
71 for sub, spec in n.subs:
71 for sub, spec in n.subs:
72 if spec is None:
72 if spec is None:
73 subs.add(sub)
73 subs.add(sub)
74 continue
74 continue
75 revs = repo.revs(b'%r and %d:', spec, ctx.rev())
75 revs = repo.revs(b'%r and %d:', spec, ctx.rev())
76 if len(revs):
76 if len(revs):
77 subs.add(sub)
77 subs.add(sub)
78 continue
78 continue
79 if len(subs) == 0:
79 if len(subs) == 0:
80 ui.debug(
80 ui.debug(
81 b'notify_obsoleted: no subscribers to selected repo and revset\n'
81 b'notify_obsoleted: no subscribers to selected repo and revset\n'
82 )
82 )
83 return
83 return
84
84
85 templater.show(
85 templater.show(
86 ctx,
86 ctx,
87 changes=ctx.changeset(),
87 changes=ctx.changeset(),
88 baseurl=ui.config(b'web', b'baseurl'),
88 baseurl=ui.config(b'web', b'baseurl'),
89 root=repo.root,
89 root=repo.root,
90 webroot=n.root,
90 webroot=n.root,
91 )
91 )
92 data = ui.popbuffer()
92 data = ui.popbuffer()
93
93
94 try:
94 try:
95 msg = mail.parsebytes(data)
95 msg = mail.parsebytes(data)
96 except emailerrors.MessageParseError as inst:
96 except emailerrors.MessageParseError as inst:
97 raise error.Abort(inst)
97 raise error.Abort(inst)
98
98
99 msg['In-reply-to'] = notify.messageid(ctx, domain, messageidseed)
99 msg['In-reply-to'] = notify.messageid(ctx, domain, messageidseed)
100 msg['Message-Id'] = notify.messageid(
100 msg['Message-Id'] = notify.messageid(
101 ctx, domain, messageidseed + b'-obsoleted'
101 ctx, domain, messageidseed + b'-obsoleted'
102 )
102 )
103 msg['Date'] = encoding.strfromlocal(
103 msg['Date'] = encoding.strfromlocal(
104 dateutil.datestr(format=b"%a, %d %b %Y %H:%M:%S %1%2")
104 dateutil.datestr(format=b"%a, %d %b %Y %H:%M:%S %1%2")
105 )
105 )
106 if not msg['From']:
106 if not msg['From']:
107 sender = ui.config(b'email', b'from') or ui.username()
107 sender = ui.config(b'email', b'from') or ui.username()
108 if b'@' not in sender or b'@localhost' in sender:
108 if b'@' not in sender or b'@localhost' in sender:
109 sender = n.fixmail(sender)
109 sender = n.fixmail(sender)
110 msg['From'] = mail.addressencode(ui, sender, n.charsets, n.test)
110 msg['From'] = mail.addressencode(ui, sender, n.charsets, n.test)
111 msg['To'] = ', '.join(sorted(subs))
111 msg['To'] = ', '.join(sorted(subs))
112
112
113 msgtext = msg.as_bytes() if pycompat.ispy3 else msg.as_string()
113 msgtext = msg.as_bytes() if pycompat.ispy3 else msg.as_string()
114 if ui.configbool(b'notify', b'test'):
114 if ui.configbool(b'notify', b'test'):
115 ui.write(msgtext)
115 ui.write(msgtext)
116 if not msgtext.endswith(b'\n'):
116 if not msgtext.endswith(b'\n'):
117 ui.write(b'\n')
117 ui.write(b'\n')
118 else:
118 else:
119 ui.status(_(b'notify_obsoleted: sending mail for %d\n') % ctx.rev())
119 ui.status(_(b'notify_obsoleted: sending mail for %d\n') % ctx.rev())
120 mail.sendmail(
120 mail.sendmail(
121 ui, emailutils.parseaddr(msg['From'])[1], subs, msgtext, mbox=n.mbox
121 ui, emailutils.parseaddr(msg['From'])[1], subs, msgtext, mbox=n.mbox
122 )
122 )
123
123
124
124
125 def has_successor(repo, rev):
126 return any(
127 r for r in obsutil.allsuccessors(repo.obsstore, [rev]) if r != rev
128 )
129
130
125 def hook(ui, repo, hooktype, node=None, **kwargs):
131 def hook(ui, repo, hooktype, node=None, **kwargs):
126 if hooktype != b"pretxnclose":
132 if hooktype != b"txnclose":
127 raise error.Abort(
133 raise error.Abort(
128 _(b'Unsupported hook type %r') % pycompat.bytestr(hooktype)
134 _(b'Unsupported hook type %r') % pycompat.bytestr(hooktype)
129 )
135 )
130 for rev in obsutil.getobsoleted(repo, repo.currenttransaction()):
136 for rev in obsutil.getobsoleted(repo, changes=kwargs['changes']):
131 _report_commit(ui, repo, repo.unfiltered()[rev])
137 ctx = repo.unfiltered()[rev]
138 if not has_successor(repo, ctx.node()):
139 _report_commit(ui, repo, ctx)
@@ -1,1040 +1,1050 b''
1 # obsutil.py - utility functions for obsolescence
1 # obsutil.py - utility functions for obsolescence
2 #
2 #
3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import re
10 import re
11
11
12 from .i18n import _
12 from .i18n import _
13 from . import (
13 from . import (
14 diffutil,
14 diffutil,
15 encoding,
15 encoding,
16 error,
16 node as nodemod,
17 node as nodemod,
17 phases,
18 phases,
18 pycompat,
19 pycompat,
19 util,
20 util,
20 )
21 )
21 from .utils import dateutil
22 from .utils import dateutil
22
23
23 ### obsolescence marker flag
24 ### obsolescence marker flag
24
25
25 ## bumpedfix flag
26 ## bumpedfix flag
26 #
27 #
27 # When a changeset A' succeed to a changeset A which became public, we call A'
28 # When a changeset A' succeed to a changeset A which became public, we call A'
28 # "bumped" because it's a successors of a public changesets
29 # "bumped" because it's a successors of a public changesets
29 #
30 #
30 # o A' (bumped)
31 # o A' (bumped)
31 # |`:
32 # |`:
32 # | o A
33 # | o A
33 # |/
34 # |/
34 # o Z
35 # o Z
35 #
36 #
36 # The way to solve this situation is to create a new changeset Ad as children
37 # The way to solve this situation is to create a new changeset Ad as children
37 # of A. This changeset have the same content than A'. So the diff from A to A'
38 # of A. This changeset have the same content than A'. So the diff from A to A'
38 # is the same than the diff from A to Ad. Ad is marked as a successors of A'
39 # is the same than the diff from A to Ad. Ad is marked as a successors of A'
39 #
40 #
40 # o Ad
41 # o Ad
41 # |`:
42 # |`:
42 # | x A'
43 # | x A'
43 # |'|
44 # |'|
44 # o | A
45 # o | A
45 # |/
46 # |/
46 # o Z
47 # o Z
47 #
48 #
48 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
49 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
49 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
50 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
50 # This flag mean that the successors express the changes between the public and
51 # This flag mean that the successors express the changes between the public and
51 # bumped version and fix the situation, breaking the transitivity of
52 # bumped version and fix the situation, breaking the transitivity of
52 # "bumped" here.
53 # "bumped" here.
53 bumpedfix = 1
54 bumpedfix = 1
54 usingsha256 = 2
55 usingsha256 = 2
55
56
56
57
57 class marker(object):
58 class marker(object):
58 """Wrap obsolete marker raw data"""
59 """Wrap obsolete marker raw data"""
59
60
60 def __init__(self, repo, data):
61 def __init__(self, repo, data):
61 # the repo argument will be used to create changectx in later version
62 # the repo argument will be used to create changectx in later version
62 self._repo = repo
63 self._repo = repo
63 self._data = data
64 self._data = data
64 self._decodedmeta = None
65 self._decodedmeta = None
65
66
66 def __hash__(self):
67 def __hash__(self):
67 return hash(self._data)
68 return hash(self._data)
68
69
69 def __eq__(self, other):
70 def __eq__(self, other):
70 if type(other) != type(self):
71 if type(other) != type(self):
71 return False
72 return False
72 return self._data == other._data
73 return self._data == other._data
73
74
74 def prednode(self):
75 def prednode(self):
75 """Predecessor changeset node identifier"""
76 """Predecessor changeset node identifier"""
76 return self._data[0]
77 return self._data[0]
77
78
78 def succnodes(self):
79 def succnodes(self):
79 """List of successor changesets node identifiers"""
80 """List of successor changesets node identifiers"""
80 return self._data[1]
81 return self._data[1]
81
82
82 def parentnodes(self):
83 def parentnodes(self):
83 """Parents of the predecessors (None if not recorded)"""
84 """Parents of the predecessors (None if not recorded)"""
84 return self._data[5]
85 return self._data[5]
85
86
86 def metadata(self):
87 def metadata(self):
87 """Decoded metadata dictionary"""
88 """Decoded metadata dictionary"""
88 return dict(self._data[3])
89 return dict(self._data[3])
89
90
90 def date(self):
91 def date(self):
91 """Creation date as (unixtime, offset)"""
92 """Creation date as (unixtime, offset)"""
92 return self._data[4]
93 return self._data[4]
93
94
94 def flags(self):
95 def flags(self):
95 """The flags field of the marker"""
96 """The flags field of the marker"""
96 return self._data[2]
97 return self._data[2]
97
98
98
99
99 def getmarkers(repo, nodes=None, exclusive=False):
100 def getmarkers(repo, nodes=None, exclusive=False):
100 """returns markers known in a repository
101 """returns markers known in a repository
101
102
102 If <nodes> is specified, only markers "relevant" to those nodes are are
103 If <nodes> is specified, only markers "relevant" to those nodes are are
103 returned"""
104 returned"""
104 if nodes is None:
105 if nodes is None:
105 rawmarkers = repo.obsstore
106 rawmarkers = repo.obsstore
106 elif exclusive:
107 elif exclusive:
107 rawmarkers = exclusivemarkers(repo, nodes)
108 rawmarkers = exclusivemarkers(repo, nodes)
108 else:
109 else:
109 rawmarkers = repo.obsstore.relevantmarkers(nodes)
110 rawmarkers = repo.obsstore.relevantmarkers(nodes)
110
111
111 for markerdata in rawmarkers:
112 for markerdata in rawmarkers:
112 yield marker(repo, markerdata)
113 yield marker(repo, markerdata)
113
114
114
115
115 def sortedmarkers(markers):
116 def sortedmarkers(markers):
116 # last item of marker tuple ('parents') may be None or a tuple
117 # last item of marker tuple ('parents') may be None or a tuple
117 return sorted(markers, key=lambda m: m[:-1] + (m[-1] or (),))
118 return sorted(markers, key=lambda m: m[:-1] + (m[-1] or (),))
118
119
119
120
120 def closestpredecessors(repo, nodeid):
121 def closestpredecessors(repo, nodeid):
121 """yield the list of next predecessors pointing on visible changectx nodes
122 """yield the list of next predecessors pointing on visible changectx nodes
122
123
123 This function respect the repoview filtering, filtered revision will be
124 This function respect the repoview filtering, filtered revision will be
124 considered missing.
125 considered missing.
125 """
126 """
126
127
127 precursors = repo.obsstore.predecessors
128 precursors = repo.obsstore.predecessors
128 stack = [nodeid]
129 stack = [nodeid]
129 seen = set(stack)
130 seen = set(stack)
130
131
131 while stack:
132 while stack:
132 current = stack.pop()
133 current = stack.pop()
133 currentpreccs = precursors.get(current, ())
134 currentpreccs = precursors.get(current, ())
134
135
135 for prec in currentpreccs:
136 for prec in currentpreccs:
136 precnodeid = prec[0]
137 precnodeid = prec[0]
137
138
138 # Basic cycle protection
139 # Basic cycle protection
139 if precnodeid in seen:
140 if precnodeid in seen:
140 continue
141 continue
141 seen.add(precnodeid)
142 seen.add(precnodeid)
142
143
143 if precnodeid in repo:
144 if precnodeid in repo:
144 yield precnodeid
145 yield precnodeid
145 else:
146 else:
146 stack.append(precnodeid)
147 stack.append(precnodeid)
147
148
148
149
149 def allpredecessors(obsstore, nodes, ignoreflags=0):
150 def allpredecessors(obsstore, nodes, ignoreflags=0):
150 """Yield node for every precursors of <nodes>.
151 """Yield node for every precursors of <nodes>.
151
152
152 Some precursors may be unknown locally.
153 Some precursors may be unknown locally.
153
154
154 This is a linear yield unsuited to detecting folded changesets. It includes
155 This is a linear yield unsuited to detecting folded changesets. It includes
155 initial nodes too."""
156 initial nodes too."""
156
157
157 remaining = set(nodes)
158 remaining = set(nodes)
158 seen = set(remaining)
159 seen = set(remaining)
159 prec = obsstore.predecessors.get
160 prec = obsstore.predecessors.get
160 while remaining:
161 while remaining:
161 current = remaining.pop()
162 current = remaining.pop()
162 yield current
163 yield current
163 for mark in prec(current, ()):
164 for mark in prec(current, ()):
164 # ignore marker flagged with specified flag
165 # ignore marker flagged with specified flag
165 if mark[2] & ignoreflags:
166 if mark[2] & ignoreflags:
166 continue
167 continue
167 suc = mark[0]
168 suc = mark[0]
168 if suc not in seen:
169 if suc not in seen:
169 seen.add(suc)
170 seen.add(suc)
170 remaining.add(suc)
171 remaining.add(suc)
171
172
172
173
173 def allsuccessors(obsstore, nodes, ignoreflags=0):
174 def allsuccessors(obsstore, nodes, ignoreflags=0):
174 """Yield node for every successor of <nodes>.
175 """Yield node for every successor of <nodes>.
175
176
176 Some successors may be unknown locally.
177 Some successors may be unknown locally.
177
178
178 This is a linear yield unsuited to detecting split changesets. It includes
179 This is a linear yield unsuited to detecting split changesets. It includes
179 initial nodes too."""
180 initial nodes too."""
180 remaining = set(nodes)
181 remaining = set(nodes)
181 seen = set(remaining)
182 seen = set(remaining)
182 while remaining:
183 while remaining:
183 current = remaining.pop()
184 current = remaining.pop()
184 yield current
185 yield current
185 for mark in obsstore.successors.get(current, ()):
186 for mark in obsstore.successors.get(current, ()):
186 # ignore marker flagged with specified flag
187 # ignore marker flagged with specified flag
187 if mark[2] & ignoreflags:
188 if mark[2] & ignoreflags:
188 continue
189 continue
189 for suc in mark[1]:
190 for suc in mark[1]:
190 if suc not in seen:
191 if suc not in seen:
191 seen.add(suc)
192 seen.add(suc)
192 remaining.add(suc)
193 remaining.add(suc)
193
194
194
195
195 def _filterprunes(markers):
196 def _filterprunes(markers):
196 """return a set with no prune markers"""
197 """return a set with no prune markers"""
197 return {m for m in markers if m[1]}
198 return {m for m in markers if m[1]}
198
199
199
200
200 def exclusivemarkers(repo, nodes):
201 def exclusivemarkers(repo, nodes):
201 """set of markers relevant to "nodes" but no other locally-known nodes
202 """set of markers relevant to "nodes" but no other locally-known nodes
202
203
203 This function compute the set of markers "exclusive" to a locally-known
204 This function compute the set of markers "exclusive" to a locally-known
204 node. This means we walk the markers starting from <nodes> until we reach a
205 node. This means we walk the markers starting from <nodes> until we reach a
205 locally-known precursors outside of <nodes>. Element of <nodes> with
206 locally-known precursors outside of <nodes>. Element of <nodes> with
206 locally-known successors outside of <nodes> are ignored (since their
207 locally-known successors outside of <nodes> are ignored (since their
207 precursors markers are also relevant to these successors).
208 precursors markers are also relevant to these successors).
208
209
209 For example:
210 For example:
210
211
211 # (A0 rewritten as A1)
212 # (A0 rewritten as A1)
212 #
213 #
213 # A0 <-1- A1 # Marker "1" is exclusive to A1
214 # A0 <-1- A1 # Marker "1" is exclusive to A1
214
215
215 or
216 or
216
217
217 # (A0 rewritten as AX; AX rewritten as A1; AX is unkown locally)
218 # (A0 rewritten as AX; AX rewritten as A1; AX is unkown locally)
218 #
219 #
219 # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1
220 # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1
220
221
221 or
222 or
222
223
223 # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
224 # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
224 #
225 #
225 # <-2- A1 # Marker "2" is exclusive to A0,A1
226 # <-2- A1 # Marker "2" is exclusive to A0,A1
226 # /
227 # /
227 # <-1- A0
228 # <-1- A0
228 # \
229 # \
229 # <-3- A2 # Marker "3" is exclusive to A0,A2
230 # <-3- A2 # Marker "3" is exclusive to A0,A2
230 #
231 #
231 # in addition:
232 # in addition:
232 #
233 #
233 # Markers "2,3" are exclusive to A1,A2
234 # Markers "2,3" are exclusive to A1,A2
234 # Markers "1,2,3" are exclusive to A0,A1,A2
235 # Markers "1,2,3" are exclusive to A0,A1,A2
235
236
236 See test/test-obsolete-bundle-strip.t for more examples.
237 See test/test-obsolete-bundle-strip.t for more examples.
237
238
238 An example usage is strip. When stripping a changeset, we also want to
239 An example usage is strip. When stripping a changeset, we also want to
239 strip the markers exclusive to this changeset. Otherwise we would have
240 strip the markers exclusive to this changeset. Otherwise we would have
240 "dangling"" obsolescence markers from its precursors: Obsolescence markers
241 "dangling"" obsolescence markers from its precursors: Obsolescence markers
241 marking a node as obsolete without any successors available locally.
242 marking a node as obsolete without any successors available locally.
242
243
243 As for relevant markers, the prune markers for children will be followed.
244 As for relevant markers, the prune markers for children will be followed.
244 Of course, they will only be followed if the pruned children is
245 Of course, they will only be followed if the pruned children is
245 locally-known. Since the prune markers are relevant to the pruned node.
246 locally-known. Since the prune markers are relevant to the pruned node.
246 However, while prune markers are considered relevant to the parent of the
247 However, while prune markers are considered relevant to the parent of the
247 pruned changesets, prune markers for locally-known changeset (with no
248 pruned changesets, prune markers for locally-known changeset (with no
248 successors) are considered exclusive to the pruned nodes. This allows
249 successors) are considered exclusive to the pruned nodes. This allows
249 to strip the prune markers (with the rest of the exclusive chain) alongside
250 to strip the prune markers (with the rest of the exclusive chain) alongside
250 the pruned changesets.
251 the pruned changesets.
251 """
252 """
252 # running on a filtered repository would be dangerous as markers could be
253 # running on a filtered repository would be dangerous as markers could be
253 # reported as exclusive when they are relevant for other filtered nodes.
254 # reported as exclusive when they are relevant for other filtered nodes.
254 unfi = repo.unfiltered()
255 unfi = repo.unfiltered()
255
256
256 # shortcut to various useful item
257 # shortcut to various useful item
257 has_node = unfi.changelog.index.has_node
258 has_node = unfi.changelog.index.has_node
258 precursorsmarkers = unfi.obsstore.predecessors
259 precursorsmarkers = unfi.obsstore.predecessors
259 successormarkers = unfi.obsstore.successors
260 successormarkers = unfi.obsstore.successors
260 childrenmarkers = unfi.obsstore.children
261 childrenmarkers = unfi.obsstore.children
261
262
262 # exclusive markers (return of the function)
263 # exclusive markers (return of the function)
263 exclmarkers = set()
264 exclmarkers = set()
264 # we need fast membership testing
265 # we need fast membership testing
265 nodes = set(nodes)
266 nodes = set(nodes)
266 # looking for head in the obshistory
267 # looking for head in the obshistory
267 #
268 #
268 # XXX we are ignoring all issues in regard with cycle for now.
269 # XXX we are ignoring all issues in regard with cycle for now.
269 stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
270 stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
270 stack.sort()
271 stack.sort()
271 # nodes already stacked
272 # nodes already stacked
272 seennodes = set(stack)
273 seennodes = set(stack)
273 while stack:
274 while stack:
274 current = stack.pop()
275 current = stack.pop()
275 # fetch precursors markers
276 # fetch precursors markers
276 markers = list(precursorsmarkers.get(current, ()))
277 markers = list(precursorsmarkers.get(current, ()))
277 # extend the list with prune markers
278 # extend the list with prune markers
278 for mark in successormarkers.get(current, ()):
279 for mark in successormarkers.get(current, ()):
279 if not mark[1]:
280 if not mark[1]:
280 markers.append(mark)
281 markers.append(mark)
281 # and markers from children (looking for prune)
282 # and markers from children (looking for prune)
282 for mark in childrenmarkers.get(current, ()):
283 for mark in childrenmarkers.get(current, ()):
283 if not mark[1]:
284 if not mark[1]:
284 markers.append(mark)
285 markers.append(mark)
285 # traverse the markers
286 # traverse the markers
286 for mark in markers:
287 for mark in markers:
287 if mark in exclmarkers:
288 if mark in exclmarkers:
288 # markers already selected
289 # markers already selected
289 continue
290 continue
290
291
291 # If the markers is about the current node, select it
292 # If the markers is about the current node, select it
292 #
293 #
293 # (this delay the addition of markers from children)
294 # (this delay the addition of markers from children)
294 if mark[1] or mark[0] == current:
295 if mark[1] or mark[0] == current:
295 exclmarkers.add(mark)
296 exclmarkers.add(mark)
296
297
297 # should we keep traversing through the precursors?
298 # should we keep traversing through the precursors?
298 prec = mark[0]
299 prec = mark[0]
299
300
300 # nodes in the stack or already processed
301 # nodes in the stack or already processed
301 if prec in seennodes:
302 if prec in seennodes:
302 continue
303 continue
303
304
304 # is this a locally known node ?
305 # is this a locally known node ?
305 known = has_node(prec)
306 known = has_node(prec)
306 # if locally-known and not in the <nodes> set the traversal
307 # if locally-known and not in the <nodes> set the traversal
307 # stop here.
308 # stop here.
308 if known and prec not in nodes:
309 if known and prec not in nodes:
309 continue
310 continue
310
311
311 # do not keep going if there are unselected markers pointing to this
312 # do not keep going if there are unselected markers pointing to this
312 # nodes. If we end up traversing these unselected markers later the
313 # nodes. If we end up traversing these unselected markers later the
313 # node will be taken care of at that point.
314 # node will be taken care of at that point.
314 precmarkers = _filterprunes(successormarkers.get(prec))
315 precmarkers = _filterprunes(successormarkers.get(prec))
315 if precmarkers.issubset(exclmarkers):
316 if precmarkers.issubset(exclmarkers):
316 seennodes.add(prec)
317 seennodes.add(prec)
317 stack.append(prec)
318 stack.append(prec)
318
319
319 return exclmarkers
320 return exclmarkers
320
321
321
322
322 def foreground(repo, nodes):
323 def foreground(repo, nodes):
323 """return all nodes in the "foreground" of other node
324 """return all nodes in the "foreground" of other node
324
325
325 The foreground of a revision is anything reachable using parent -> children
326 The foreground of a revision is anything reachable using parent -> children
326 or precursor -> successor relation. It is very similar to "descendant" but
327 or precursor -> successor relation. It is very similar to "descendant" but
327 augmented with obsolescence information.
328 augmented with obsolescence information.
328
329
329 Beware that possible obsolescence cycle may result if complex situation.
330 Beware that possible obsolescence cycle may result if complex situation.
330 """
331 """
331 repo = repo.unfiltered()
332 repo = repo.unfiltered()
332 foreground = set(repo.set(b'%ln::', nodes))
333 foreground = set(repo.set(b'%ln::', nodes))
333 if repo.obsstore:
334 if repo.obsstore:
334 # We only need this complicated logic if there is obsolescence
335 # We only need this complicated logic if there is obsolescence
335 # XXX will probably deserve an optimised revset.
336 # XXX will probably deserve an optimised revset.
336 has_node = repo.changelog.index.has_node
337 has_node = repo.changelog.index.has_node
337 plen = -1
338 plen = -1
338 # compute the whole set of successors or descendants
339 # compute the whole set of successors or descendants
339 while len(foreground) != plen:
340 while len(foreground) != plen:
340 plen = len(foreground)
341 plen = len(foreground)
341 succs = {c.node() for c in foreground}
342 succs = {c.node() for c in foreground}
342 mutable = [c.node() for c in foreground if c.mutable()]
343 mutable = [c.node() for c in foreground if c.mutable()]
343 succs.update(allsuccessors(repo.obsstore, mutable))
344 succs.update(allsuccessors(repo.obsstore, mutable))
344 known = (n for n in succs if has_node(n))
345 known = (n for n in succs if has_node(n))
345 foreground = set(repo.set(b'%ln::', known))
346 foreground = set(repo.set(b'%ln::', known))
346 return {c.node() for c in foreground}
347 return {c.node() for c in foreground}
347
348
348
349
349 # effectflag field
350 # effectflag field
350 #
351 #
351 # Effect-flag is a 1-byte bit field used to store what changed between a
352 # Effect-flag is a 1-byte bit field used to store what changed between a
352 # changeset and its successor(s).
353 # changeset and its successor(s).
353 #
354 #
354 # The effect flag is stored in obs-markers metadata while we iterate on the
355 # The effect flag is stored in obs-markers metadata while we iterate on the
355 # information design. That's why we have the EFFECTFLAGFIELD. If we come up
356 # information design. That's why we have the EFFECTFLAGFIELD. If we come up
356 # with an incompatible design for effect flag, we can store a new design under
357 # with an incompatible design for effect flag, we can store a new design under
357 # another field name so we don't break readers. We plan to extend the existing
358 # another field name so we don't break readers. We plan to extend the existing
358 # obsmarkers bit-field when the effect flag design will be stabilized.
359 # obsmarkers bit-field when the effect flag design will be stabilized.
359 #
360 #
360 # The effect-flag is placed behind an experimental flag
361 # The effect-flag is placed behind an experimental flag
361 # `effect-flags` set to off by default.
362 # `effect-flags` set to off by default.
362 #
363 #
363
364
364 EFFECTFLAGFIELD = b"ef1"
365 EFFECTFLAGFIELD = b"ef1"
365
366
366 DESCCHANGED = 1 << 0 # action changed the description
367 DESCCHANGED = 1 << 0 # action changed the description
367 METACHANGED = 1 << 1 # action change the meta
368 METACHANGED = 1 << 1 # action change the meta
368 DIFFCHANGED = 1 << 3 # action change diff introduced by the changeset
369 DIFFCHANGED = 1 << 3 # action change diff introduced by the changeset
369 PARENTCHANGED = 1 << 2 # action change the parent
370 PARENTCHANGED = 1 << 2 # action change the parent
370 USERCHANGED = 1 << 4 # the user changed
371 USERCHANGED = 1 << 4 # the user changed
371 DATECHANGED = 1 << 5 # the date changed
372 DATECHANGED = 1 << 5 # the date changed
372 BRANCHCHANGED = 1 << 6 # the branch changed
373 BRANCHCHANGED = 1 << 6 # the branch changed
373
374
374 METABLACKLIST = [
375 METABLACKLIST = [
375 re.compile(b'^branch$'),
376 re.compile(b'^branch$'),
376 re.compile(b'^.*-source$'),
377 re.compile(b'^.*-source$'),
377 re.compile(b'^.*_source$'),
378 re.compile(b'^.*_source$'),
378 re.compile(b'^source$'),
379 re.compile(b'^source$'),
379 ]
380 ]
380
381
381
382
382 def metanotblacklisted(metaitem):
383 def metanotblacklisted(metaitem):
383 """ Check that the key of a meta item (extrakey, extravalue) does not
384 """ Check that the key of a meta item (extrakey, extravalue) does not
384 match at least one of the blacklist pattern
385 match at least one of the blacklist pattern
385 """
386 """
386 metakey = metaitem[0]
387 metakey = metaitem[0]
387
388
388 return not any(pattern.match(metakey) for pattern in METABLACKLIST)
389 return not any(pattern.match(metakey) for pattern in METABLACKLIST)
389
390
390
391
391 def _prepare_hunk(hunk):
392 def _prepare_hunk(hunk):
392 """Drop all information but the username and patch"""
393 """Drop all information but the username and patch"""
393 cleanhunk = []
394 cleanhunk = []
394 for line in hunk.splitlines():
395 for line in hunk.splitlines():
395 if line.startswith(b'# User') or not line.startswith(b'#'):
396 if line.startswith(b'# User') or not line.startswith(b'#'):
396 if line.startswith(b'@@'):
397 if line.startswith(b'@@'):
397 line = b'@@\n'
398 line = b'@@\n'
398 cleanhunk.append(line)
399 cleanhunk.append(line)
399 return cleanhunk
400 return cleanhunk
400
401
401
402
def _getdifflines(iterdiff):
    """return a cleaned up lines"""
    # pull the next hunk; None signals exhaustion of the diff stream
    hunk = next(iterdiff, None)
    if hunk is None:
        return None
    return _prepare_hunk(hunk)
410
411
411
412
def _cmpdiff(leftctx, rightctx):
    """return True if both ctx introduce the "same diff"

    This is a first and basic implementation, with many shortcoming.
    """
    diffopts = diffutil.diffallopts(leftctx.repo().ui, {b'git': True})

    # Leftctx or right ctx might be filtered, so we need to use the contexts
    # with an unfiltered repository to safely compute the diff
    #
    # leftctx and rightctx can be from different repository views in case of
    # hgsubversion, do don't try to access them from same repository
    # rightctx.repo() and leftctx.repo() are not always the same
    unfileft = leftctx._repo.unfiltered()[leftctx.rev()]
    unfiright = rightctx._repo.unfiltered()[rightctx.rev()]
    leftdiff = unfileft.diff(opts=diffopts)
    rightdiff = unfiright.diff(opts=diffopts)

    # walk both diff streams in lockstep, comparing cleaned-up hunks
    while True:
        lefthunk = _getdifflines(leftdiff)
        righthunk = _getdifflines(rightdiff)
        if lefthunk != righthunk:
            return False
        if lefthunk is None:
            # both streams exhausted simultaneously: diffs are identical
            return True
438
439
439
440
def geteffectflag(source, successors):
    """ From an obs-marker relation, compute what changed between the
    predecessor and the successor.
    """
    effects = 0

    for succ in successors:
        # simple field-by-field comparisons, each mapped onto its effect bit
        fieldchecks = (
            (DESCCHANGED, source.description(), succ.description()),
            (USERCHANGED, source.user(), succ.user()),
            (DATECHANGED, source.date(), succ.date()),
            (BRANCHCHANGED, source.branch(), succ.branch()),
            (PARENTCHANGED, source.parents(), succ.parents()),
        )
        for flag, before, after in fieldchecks:
            if before != after:
                effects |= flag

        # compare the extra metadata, ignoring blacklisted bookkeeping keys
        succmeta = [
            item for item in succ.extra().items() if metanotblacklisted(item)
        ]
        srcmeta = [
            item for item in source.extra().items() if metanotblacklisted(item)
        ]
        if succmeta != srcmeta:
            effects |= METACHANGED

        # finally, compare the patch content itself
        if not _cmpdiff(source, succ):
            effects |= DIFFCHANGED

    return effects
482
483
483
484
def getobsoleted(repo, tr=None, changes=None):
    """return the set of pre-existing revisions obsoleted by a transaction

    Either the transaction or changes item of the transaction (for hooks)
    must be provided, but not both.
    """
    # exactly one of the two ways of supplying the transaction data is allowed
    if (tr is None) == (changes is None):
        e = b"exactly one of tr and changes must be provided"
        raise error.ProgrammingError(e)
    # bind hot lookups once (unfiltered so filtered revisions still resolve)
    torev = repo.unfiltered().changelog.index.get_rev
    phase = repo._phasecache.phase
    succsmarkers = repo.obsstore.successors.get
    public = phases.public
    if changes is None:
        changes = tr.changes
    addedmarkers = changes[b'obsmarkers']
    origrepolen = changes[b'origrepolen']
    seenrevs = set()
    obsoleted = set()
    for mark in addedmarkers:
        # mark[0] is the precursor node of the obsolescence marker
        node = mark[0]
        rev = torev(node)
        # skip nodes unknown locally, revisions already handled, and
        # revisions created by this very transaction (rev >= origrepolen)
        if rev is None or rev in seenrevs or rev >= origrepolen:
            continue
        seenrevs.add(rev)
        # public revisions cannot become obsolete; skip them
        if phase(repo, rev) == public:
            continue
        # only report the revision when every successor marker for it was
        # added by this transaction — presumably so revisions that were
        # already rewritten before the transaction are not re-announced;
        # NOTE(review): confirm against changeset_obsoleted hook expectations
        if set(succsmarkers(node) or []).issubset(addedmarkers):
            obsoleted.add(rev)
    return obsoleted
505
515
506
516
class _succs(list):
    """small class to represent a successors with some metadata about it"""

    def __init__(self, *args, **kwargs):
        super(_succs, self).__init__(*args, **kwargs)
        # obsolescence markers that contributed to this successors set
        self.markers = set()

    def copy(self):
        """return an independent copy (list content and marker set)"""
        duplicate = _succs(self)
        duplicate.markers = set(self.markers)
        return duplicate

    @util.propertycache
    def _set(self):
        # immutable
        return set(self)

    def canmerge(self, other):
        """True when every node of this set already appears in `other`"""
        return self._set.issubset(other._set)
526
536
527
537
def successorssets(repo, initialnode, closest=False, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A are the group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. By default, the successors set contains non-obsolete
    changesets only, walking the obsolescence graph until reaching a leaf. If
    'closest' is set to True, closest successors-sets are return (the
    obsolescence walk stops on known changesets).

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset A
    (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will contain itself only, i.e. [(A,)].

    Finally, final successors unknown locally are considered to be pruned
    (pruned: obsoleted without any successors). (Final: successors not affected
    by markers).

    The 'closest' mode respect the repoview filtering. For example, without
    filter it will stop at the first locally known changeset, with 'visible'
    filter it will stop on visible changesets).

    The optional `cache` parameter is a dictionary that may contains
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the same
    time. The cache dictionary is updated in place. The caller is responsible
    for its life span. Code that makes multiple calls to `successorssets`
    *should* use this cache mechanism or risk a performance hit.

    Since results are different depending of the 'closest' most, the same cache
    cannot be reused for both mode.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list act as our call stack. Every node we search
    # successors set for are stacked there.
    #
    # The `stackedset` is set version of this stack used to check if a node is
    # already stacked. This check is used to detect cycles and prevent infinite
    # loop.
    #
    # successors set of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        #     1) We already know the successors sets of CURRENT:
        #            -> mission accomplished, pop it from the stack.
        #     2) Stop the walk:
        #            default case: Node is not obsolete
        #            closest case: Node is known at this repo filter level
        #            -> the node is its own successors sets. Add it to the cache.
        #     3) We do not know successors set of direct successors of CURRENT:
        #            -> We add those successors to the stack.
        #     4) We know successors sets of all direct successors of CURRENT:
        #            -> We can compute CURRENT successors set and add it to the
        #               cache.
        #
        current = toproceed[-1]

        # case 2 condition is a bit hairy because of closest,
        # we compute it on its own
        case2condition = (current not in succmarkers) or (
            closest and current != initialnode and current in repo
        )

        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif case2condition:
            # case (2): end of walk.
            if current in repo:
                # We have a valid successors.
                cache[current] = [_succs((current,))]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successors of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrary set the successors sets of
            #     the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entry here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            for mark in sortedmarkers(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    # inner loop ran to completion: check the next marker
                    continue
                # inner loop broke (case 3): abort phase 1 entirely
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" node.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entry and successors set that are strict subset of
                # another one.
                succssets = []
                for mark in sortedmarkers(succmarkers[current]):
                    # successors sets contributed by this marker
                    base = _succs()
                    base.markers.add(mark)
                    markss = [base]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = prefix.copy()
                                newss.markers.update(suffix.markers)
                                for part in suffix:
                                    # do not duplicated entry in successors set
                                    # first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        if productresult:
                            markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidates = sorted(
                    (s for s in succssets if s), key=len, reverse=True
                )
                for cand in candidates:
                    for seensuccs in seen:
                        if cand.canmerge(seensuccs):
                            seensuccs.markers.update(cand.markers)
                            break
                    else:
                        final.append(cand)
                        seen.append(cand)
                final.reverse()  # put small successors set first
                cache[current] = final
    return cache[initialnode]
758
768
759
769
def successorsandmarkers(repo, ctx):
    """compute the raw data needed for computing obsfate
    Returns a list of dict, one dict per successors set
    """
    if not ctx.obsolete():
        return None

    ssets = successorssets(repo, ctx.node(), closest=True)

    # closestsuccessors returns an empty list for pruned revisions, remap it
    # into a list containing an empty list for future processing
    if ssets == []:
        ssets = [[]]

    succsmap = repo.obsstore.successors
    fullsuccessorsets = []  # successor set + markers
    for sset in ssets:
        if sset:
            fullsuccessorsets.append(sset)
            continue
        # successorsset return an empty set() when ctx or one of its
        # successors is pruned.
        # In this case, walk the obs-markers tree again starting with ctx
        # and find the relevant pruning obs-makers, the ones without
        # successors.
        # Having these markers allow us to compute some information about
        # its fate, like who pruned this changeset and when.

        # XXX we do not catch all prune markers (eg rewritten then pruned)
        # (fix me later)
        foundany = False
        for mark in succsmap.get(ctx.node(), ()):
            if not mark[1]:
                # no successors on this marker: a prune marker
                foundany = True
                prunedset = _succs()
                prunedset.markers.add(mark)
                fullsuccessorsets.append(prunedset)
        if not foundany:
            fullsuccessorsets.append(_succs())

    return [
        {b'successors': sset, b'markers': sset.markers}
        for sset in fullsuccessorsets
    ]
806
816
807
817
808 def _getobsfate(successorssets):
818 def _getobsfate(successorssets):
809 """ Compute a changeset obsolescence fate based on its successorssets.
819 """ Compute a changeset obsolescence fate based on its successorssets.
810 Successors can be the tipmost ones or the immediate ones. This function
820 Successors can be the tipmost ones or the immediate ones. This function
811 return values are not meant to be shown directly to users, it is meant to
821 return values are not meant to be shown directly to users, it is meant to
812 be used by internal functions only.
822 be used by internal functions only.
813 Returns one fate from the following values:
823 Returns one fate from the following values:
814 - pruned
824 - pruned
815 - diverged
825 - diverged
816 - superseded
826 - superseded
817 - superseded_split
827 - superseded_split
818 """
828 """
819
829
820 if len(successorssets) == 0:
830 if len(successorssets) == 0:
821 # The commit has been pruned
831 # The commit has been pruned
822 return b'pruned'
832 return b'pruned'
823 elif len(successorssets) > 1:
833 elif len(successorssets) > 1:
824 return b'diverged'
834 return b'diverged'
825 else:
835 else:
826 # No divergence, only one set of successors
836 # No divergence, only one set of successors
827 successors = successorssets[0]
837 successors = successorssets[0]
828
838
829 if len(successors) == 1:
839 if len(successors) == 1:
830 return b'superseded'
840 return b'superseded'
831 else:
841 else:
832 return b'superseded_split'
842 return b'superseded_split'
833
843
834
844
def obsfateverb(successorset, markers):
    """ Return the verb summarizing the successorset and potentially using
    information from the markers
    """
    count = len(successorset)
    if count == 0:
        return b'pruned'
    if count == 1:
        return b'rewritten'
    return b'split'
846
856
847
857
def markersdates(markers):
    """return the date (marker field 4) of each marker, in input order"""
    return [marker[4] for marker in markers]
852
862
853
863
def markersusers(markers):
    """ Returns a sorted list of markers users without duplicates
    """
    found = set()
    for mark in markers:
        # mark[3] holds the marker metadata as key/value pairs
        meta = dict(mark[3])
        user = meta.get(b'user')
        if user:
            found.add(encoding.tolocal(user))

    return sorted(found)
865
875
866
876
867 def markersoperations(markers):
877 def markersoperations(markers):
868 """ Returns a sorted list of markers operations without duplicates
878 """ Returns a sorted list of markers operations without duplicates
869 """
879 """
870 markersmeta = [dict(m[3]) for m in markers]
880 markersmeta = [dict(m[3]) for m in markers]
871 operations = {
881 operations = {
872 meta.get(b'operation') for meta in markersmeta if meta.get(b'operation')
882 meta.get(b'operation') for meta in markersmeta if meta.get(b'operation')
873 }
883 }
874
884
875 return sorted(operations)
885 return sorted(operations)
876
886
877
887
878 def obsfateprinter(ui, repo, successors, markers, formatctx):
888 def obsfateprinter(ui, repo, successors, markers, formatctx):
879 """ Build a obsfate string for a single successorset using all obsfate
889 """ Build a obsfate string for a single successorset using all obsfate
880 related function defined in obsutil
890 related function defined in obsutil
881 """
891 """
882 quiet = ui.quiet
892 quiet = ui.quiet
883 verbose = ui.verbose
893 verbose = ui.verbose
884 normal = not verbose and not quiet
894 normal = not verbose and not quiet
885
895
886 line = []
896 line = []
887
897
888 # Verb
898 # Verb
889 line.append(obsfateverb(successors, markers))
899 line.append(obsfateverb(successors, markers))
890
900
891 # Operations
901 # Operations
892 operations = markersoperations(markers)
902 operations = markersoperations(markers)
893 if operations:
903 if operations:
894 line.append(b" using %s" % b", ".join(operations))
904 line.append(b" using %s" % b", ".join(operations))
895
905
896 # Successors
906 # Successors
897 if successors:
907 if successors:
898 fmtsuccessors = [formatctx(repo[succ]) for succ in successors]
908 fmtsuccessors = [formatctx(repo[succ]) for succ in successors]
899 line.append(b" as %s" % b", ".join(fmtsuccessors))
909 line.append(b" as %s" % b", ".join(fmtsuccessors))
900
910
901 # Users
911 # Users
902 users = markersusers(markers)
912 users = markersusers(markers)
903 # Filter out current user in not verbose mode to reduce amount of
913 # Filter out current user in not verbose mode to reduce amount of
904 # information
914 # information
905 if not verbose:
915 if not verbose:
906 currentuser = ui.username(acceptempty=True)
916 currentuser = ui.username(acceptempty=True)
907 if len(users) == 1 and currentuser in users:
917 if len(users) == 1 and currentuser in users:
908 users = None
918 users = None
909
919
910 if (verbose or normal) and users:
920 if (verbose or normal) and users:
911 line.append(b" by %s" % b", ".join(users))
921 line.append(b" by %s" % b", ".join(users))
912
922
913 # Date
923 # Date
914 dates = markersdates(markers)
924 dates = markersdates(markers)
915
925
916 if dates and verbose:
926 if dates and verbose:
917 min_date = min(dates)
927 min_date = min(dates)
918 max_date = max(dates)
928 max_date = max(dates)
919
929
920 if min_date == max_date:
930 if min_date == max_date:
921 fmtmin_date = dateutil.datestr(min_date, b'%Y-%m-%d %H:%M %1%2')
931 fmtmin_date = dateutil.datestr(min_date, b'%Y-%m-%d %H:%M %1%2')
922 line.append(b" (at %s)" % fmtmin_date)
932 line.append(b" (at %s)" % fmtmin_date)
923 else:
933 else:
924 fmtmin_date = dateutil.datestr(min_date, b'%Y-%m-%d %H:%M %1%2')
934 fmtmin_date = dateutil.datestr(min_date, b'%Y-%m-%d %H:%M %1%2')
925 fmtmax_date = dateutil.datestr(max_date, b'%Y-%m-%d %H:%M %1%2')
935 fmtmax_date = dateutil.datestr(max_date, b'%Y-%m-%d %H:%M %1%2')
926 line.append(b" (between %s and %s)" % (fmtmin_date, fmtmax_date))
936 line.append(b" (between %s and %s)" % (fmtmin_date, fmtmax_date))
927
937
928 return b"".join(line)
938 return b"".join(line)
929
939
930
940
931 filteredmsgtable = {
941 filteredmsgtable = {
932 b"pruned": _(b"hidden revision '%s' is pruned"),
942 b"pruned": _(b"hidden revision '%s' is pruned"),
933 b"diverged": _(b"hidden revision '%s' has diverged"),
943 b"diverged": _(b"hidden revision '%s' has diverged"),
934 b"superseded": _(b"hidden revision '%s' was rewritten as: %s"),
944 b"superseded": _(b"hidden revision '%s' was rewritten as: %s"),
935 b"superseded_split": _(b"hidden revision '%s' was split as: %s"),
945 b"superseded_split": _(b"hidden revision '%s' was split as: %s"),
936 b"superseded_split_several": _(
946 b"superseded_split_several": _(
937 b"hidden revision '%s' was split as: %s and %d more"
947 b"hidden revision '%s' was split as: %s and %d more"
938 ),
948 ),
939 }
949 }
940
950
941
951
942 def _getfilteredreason(repo, changeid, ctx):
952 def _getfilteredreason(repo, changeid, ctx):
943 """return a human-friendly string on why a obsolete changeset is hidden
953 """return a human-friendly string on why a obsolete changeset is hidden
944 """
954 """
945 successors = successorssets(repo, ctx.node())
955 successors = successorssets(repo, ctx.node())
946 fate = _getobsfate(successors)
956 fate = _getobsfate(successors)
947
957
948 # Be more precise in case the revision is superseded
958 # Be more precise in case the revision is superseded
949 if fate == b'pruned':
959 if fate == b'pruned':
950 return filteredmsgtable[b'pruned'] % changeid
960 return filteredmsgtable[b'pruned'] % changeid
951 elif fate == b'diverged':
961 elif fate == b'diverged':
952 return filteredmsgtable[b'diverged'] % changeid
962 return filteredmsgtable[b'diverged'] % changeid
953 elif fate == b'superseded':
963 elif fate == b'superseded':
954 single_successor = nodemod.short(successors[0][0])
964 single_successor = nodemod.short(successors[0][0])
955 return filteredmsgtable[b'superseded'] % (changeid, single_successor)
965 return filteredmsgtable[b'superseded'] % (changeid, single_successor)
956 elif fate == b'superseded_split':
966 elif fate == b'superseded_split':
957
967
958 succs = []
968 succs = []
959 for node_id in successors[0]:
969 for node_id in successors[0]:
960 succs.append(nodemod.short(node_id))
970 succs.append(nodemod.short(node_id))
961
971
962 if len(succs) <= 2:
972 if len(succs) <= 2:
963 fmtsuccs = b', '.join(succs)
973 fmtsuccs = b', '.join(succs)
964 return filteredmsgtable[b'superseded_split'] % (changeid, fmtsuccs)
974 return filteredmsgtable[b'superseded_split'] % (changeid, fmtsuccs)
965 else:
975 else:
966 firstsuccessors = b', '.join(succs[:2])
976 firstsuccessors = b', '.join(succs[:2])
967 remainingnumber = len(succs) - 2
977 remainingnumber = len(succs) - 2
968
978
969 args = (changeid, firstsuccessors, remainingnumber)
979 args = (changeid, firstsuccessors, remainingnumber)
970 return filteredmsgtable[b'superseded_split_several'] % args
980 return filteredmsgtable[b'superseded_split_several'] % args
971
981
972
982
973 def divergentsets(repo, ctx):
983 def divergentsets(repo, ctx):
974 """Compute sets of commits divergent with a given one"""
984 """Compute sets of commits divergent with a given one"""
975 cache = {}
985 cache = {}
976 base = {}
986 base = {}
977 for n in allpredecessors(repo.obsstore, [ctx.node()]):
987 for n in allpredecessors(repo.obsstore, [ctx.node()]):
978 if n == ctx.node():
988 if n == ctx.node():
979 # a node can't be a base for divergence with itself
989 # a node can't be a base for divergence with itself
980 continue
990 continue
981 nsuccsets = successorssets(repo, n, cache)
991 nsuccsets = successorssets(repo, n, cache)
982 for nsuccset in nsuccsets:
992 for nsuccset in nsuccsets:
983 if ctx.node() in nsuccset:
993 if ctx.node() in nsuccset:
984 # we are only interested in *other* successor sets
994 # we are only interested in *other* successor sets
985 continue
995 continue
986 if tuple(nsuccset) in base:
996 if tuple(nsuccset) in base:
987 # we already know the latest base for this divergency
997 # we already know the latest base for this divergency
988 continue
998 continue
989 base[tuple(nsuccset)] = n
999 base[tuple(nsuccset)] = n
990 return [
1000 return [
991 {b'divergentnodes': divset, b'commonpredecessor': b}
1001 {b'divergentnodes': divset, b'commonpredecessor': b}
992 for divset, b in pycompat.iteritems(base)
1002 for divset, b in pycompat.iteritems(base)
993 ]
1003 ]
994
1004
995
1005
996 def whyunstable(repo, ctx):
1006 def whyunstable(repo, ctx):
997 result = []
1007 result = []
998 if ctx.orphan():
1008 if ctx.orphan():
999 for parent in ctx.parents():
1009 for parent in ctx.parents():
1000 kind = None
1010 kind = None
1001 if parent.orphan():
1011 if parent.orphan():
1002 kind = b'orphan'
1012 kind = b'orphan'
1003 elif parent.obsolete():
1013 elif parent.obsolete():
1004 kind = b'obsolete'
1014 kind = b'obsolete'
1005 if kind is not None:
1015 if kind is not None:
1006 result.append(
1016 result.append(
1007 {
1017 {
1008 b'instability': b'orphan',
1018 b'instability': b'orphan',
1009 b'reason': b'%s parent' % kind,
1019 b'reason': b'%s parent' % kind,
1010 b'node': parent.hex(),
1020 b'node': parent.hex(),
1011 }
1021 }
1012 )
1022 )
1013 if ctx.phasedivergent():
1023 if ctx.phasedivergent():
1014 predecessors = allpredecessors(
1024 predecessors = allpredecessors(
1015 repo.obsstore, [ctx.node()], ignoreflags=bumpedfix
1025 repo.obsstore, [ctx.node()], ignoreflags=bumpedfix
1016 )
1026 )
1017 immutable = [
1027 immutable = [
1018 repo[p] for p in predecessors if p in repo and not repo[p].mutable()
1028 repo[p] for p in predecessors if p in repo and not repo[p].mutable()
1019 ]
1029 ]
1020 for predecessor in immutable:
1030 for predecessor in immutable:
1021 result.append(
1031 result.append(
1022 {
1032 {
1023 b'instability': b'phase-divergent',
1033 b'instability': b'phase-divergent',
1024 b'reason': b'immutable predecessor',
1034 b'reason': b'immutable predecessor',
1025 b'node': predecessor.hex(),
1035 b'node': predecessor.hex(),
1026 }
1036 }
1027 )
1037 )
1028 if ctx.contentdivergent():
1038 if ctx.contentdivergent():
1029 dsets = divergentsets(repo, ctx)
1039 dsets = divergentsets(repo, ctx)
1030 for dset in dsets:
1040 for dset in dsets:
1031 divnodes = [repo[n] for n in dset[b'divergentnodes']]
1041 divnodes = [repo[n] for n in dset[b'divergentnodes']]
1032 result.append(
1042 result.append(
1033 {
1043 {
1034 b'instability': b'content-divergent',
1044 b'instability': b'content-divergent',
1035 b'divergentnodes': divnodes,
1045 b'divergentnodes': divnodes,
1036 b'reason': b'predecessor',
1046 b'reason': b'predecessor',
1037 b'node': nodemod.hex(dset[b'commonpredecessor']),
1047 b'node': nodemod.hex(dset[b'commonpredecessor']),
1038 }
1048 }
1039 )
1049 )
1040 return result
1050 return result
@@ -1,84 +1,114 b''
1 $ cat <<EOF >> $HGRCPATH
1 $ cat <<EOF >> $HGRCPATH
2 > [experimental]
2 > [experimental]
3 > evolution = true
3 > evolution = true
4 >
4 >
5 > [extensions]
5 > [extensions]
6 > notify =
6 > notify =
7 > hooklib =
7 > hooklib =
8 >
8 >
9 > [phases]
9 > [phases]
10 > publish = False
10 > publish = False
11 >
11 >
12 > [notify]
12 > [notify]
13 > sources = pull
13 > sources = pull
14 > diffstat = False
14 > diffstat = False
15 > messageidseed = example
15 > messageidseed = example
16 > domain = example.com
16 > domain = example.com
17 >
17 >
18 > [reposubs]
18 > [reposubs]
19 > * = baz
19 > * = baz
20 > EOF
20 > EOF
21 $ hg init a
21 $ hg init a
22 $ hg --cwd a debugbuilddag +2
22 $ hg --cwd a debugbuilddag +2
23 $ hg init b
23 $ hg init b
24 $ cat <<EOF >> b/.hg/hgrc
24 $ cat <<EOF >> b/.hg/hgrc
25 > [hooks]
25 > [hooks]
26 > incoming.notify = python:hgext.notify.hook
26 > incoming.notify = python:hgext.notify.hook
27 > pretxnclose.changeset_obsoleted = python:hgext.hooklib.changeset_obsoleted.hook
27 > txnclose.changeset_obsoleted = python:hgext.hooklib.changeset_obsoleted.hook
28 > EOF
28 > EOF
29 $ hg --cwd b pull ../a | "$PYTHON" $TESTDIR/unwrap-message-id.py
29 $ hg --cwd b pull ../a | "$PYTHON" $TESTDIR/unwrap-message-id.py
30 pulling from ../a
30 pulling from ../a
31 requesting all changes
31 requesting all changes
32 adding changesets
32 adding changesets
33 adding manifests
33 adding manifests
34 adding file changes
34 adding file changes
35 added 2 changesets with 0 changes to 0 files
35 added 2 changesets with 0 changes to 0 files
36 new changesets 1ea73414a91b:66f7d451a68b (2 drafts)
36 new changesets 1ea73414a91b:66f7d451a68b (2 drafts)
37 MIME-Version: 1.0
37 MIME-Version: 1.0
38 Content-Type: text/plain; charset="us-ascii"
38 Content-Type: text/plain; charset="us-ascii"
39 Content-Transfer-Encoding: 7bit
39 Content-Transfer-Encoding: 7bit
40 Date: * (glob)
40 Date: * (glob)
41 Subject: changeset in * (glob)
41 Subject: changeset in * (glob)
42 From: debugbuilddag@example.com
42 From: debugbuilddag@example.com
43 X-Hg-Notification: changeset 1ea73414a91b
43 X-Hg-Notification: changeset 1ea73414a91b
44 Message-Id: <hg.81c297828fd2d5afaadf2775a6a71b74143b6451dfaac09fac939e9107a50d01@example.com>
44 Message-Id: <hg.81c297828fd2d5afaadf2775a6a71b74143b6451dfaac09fac939e9107a50d01@example.com>
45 To: baz@example.com
45 To: baz@example.com
46
46
47 changeset 1ea73414a91b in $TESTTMP/b
47 changeset 1ea73414a91b in $TESTTMP/b
48 details: $TESTTMP/b?cmd=changeset;node=1ea73414a91b
48 details: $TESTTMP/b?cmd=changeset;node=1ea73414a91b
49 description:
49 description:
50 r0
50 r0
51 MIME-Version: 1.0
51 MIME-Version: 1.0
52 Content-Type: text/plain; charset="us-ascii"
52 Content-Type: text/plain; charset="us-ascii"
53 Content-Transfer-Encoding: 7bit
53 Content-Transfer-Encoding: 7bit
54 Date: * (glob)
54 Date: * (glob)
55 Subject: changeset in * (glob)
55 Subject: changeset in * (glob)
56 From: debugbuilddag@example.com
56 From: debugbuilddag@example.com
57 X-Hg-Notification: changeset 66f7d451a68b
57 X-Hg-Notification: changeset 66f7d451a68b
58 Message-Id: <hg.364d03da7dc13829eb779a805be7e37f54f572e9afcea7d2626856a794d3e8f3@example.com>
58 Message-Id: <hg.364d03da7dc13829eb779a805be7e37f54f572e9afcea7d2626856a794d3e8f3@example.com>
59 To: baz@example.com
59 To: baz@example.com
60
60
61 changeset 66f7d451a68b in $TESTTMP/b
61 changeset 66f7d451a68b in $TESTTMP/b
62 details: $TESTTMP/b?cmd=changeset;node=66f7d451a68b
62 details: $TESTTMP/b?cmd=changeset;node=66f7d451a68b
63 description:
63 description:
64 r1
64 r1
65 (run 'hg update' to get a working copy)
65 (run 'hg update' to get a working copy)
66 $ hg --cwd a debugobsolete 1ea73414a91b0920940797d8fc6a11e447f8ea1e
66 $ hg --cwd a debugobsolete 1ea73414a91b0920940797d8fc6a11e447f8ea1e
67 1 new obsolescence markers
67 1 new obsolescence markers
68 obsoleted 1 changesets
68 obsoleted 1 changesets
69 1 new orphan changesets
69 1 new orphan changesets
70 $ hg --cwd a push ../b --hidden | "$PYTHON" $TESTDIR/unwrap-message-id.py
70 $ hg --cwd a push ../b --hidden | "$PYTHON" $TESTDIR/unwrap-message-id.py
71 1 new orphan changesets
71 1 new orphan changesets
72 pushing to ../b
72 pushing to ../b
73 searching for changes
73 searching for changes
74 no changes found
74 no changes found
75 1 new obsolescence markers
76 obsoleted 1 changesets
75 Subject: changeset abandoned
77 Subject: changeset abandoned
76 In-reply-to: <hg.81c297828fd2d5afaadf2775a6a71b74143b6451dfaac09fac939e9107a50d01@example.com>
78 In-reply-to: <hg.81c297828fd2d5afaadf2775a6a71b74143b6451dfaac09fac939e9107a50d01@example.com>
77 Message-Id: <hg.d6329e9481594f0f3c8a84362b3511318bfbce50748ab1123f909eb6fbcab018@example.com>
79 Message-Id: <hg.d6329e9481594f0f3c8a84362b3511318bfbce50748ab1123f909eb6fbcab018@example.com>
78 Date: * (glob)
80 Date: * (glob)
79 From: test@example.com
81 From: test@example.com
80 To: baz@example.com
82 To: baz@example.com
81
83
82 This changeset has been abandoned.
84 This changeset has been abandoned.
85
86 Check that known changesets with known successors do not result in a mail.
87
88 $ hg init c
89 $ hg init d
90 $ cat <<EOF >> d/.hg/hgrc
91 > [hooks]
92 > incoming.notify = python:hgext.notify.hook
93 > txnclose.changeset_obsoleted = python:hgext.hooklib.changeset_obsoleted.hook
94 > EOF
95 $ hg --cwd c debugbuilddag '.:parent.*parent'
96 $ hg --cwd c push ../d -r 1
97 pushing to ../d
98 searching for changes
99 adding changesets
100 adding manifests
101 adding file changes
102 added 2 changesets with 0 changes to 0 files
103 $ hg --cwd c debugobsolete $(hg --cwd c log -T '{node}' -r 1) $(hg --cwd c log -T '{node}' -r 2)
83 1 new obsolescence markers
104 1 new obsolescence markers
84 obsoleted 1 changesets
105 obsoleted 1 changesets
106 $ hg --cwd c push ../d | "$PYTHON" $TESTDIR/unwrap-message-id.py
107 pushing to ../d
108 searching for changes
109 adding changesets
110 adding manifests
111 adding file changes
112 added 1 changesets with 0 changes to 0 files (+1 heads)
113 1 new obsolescence markers
114 obsoleted 1 changesets
General Comments 0
You need to be logged in to leave comments. Login now