obsutil: rename allprecursors into allpredecessors...
Boris Feld
r33701:e6d8ee3c default
phabricator.py
@@ -1,632 +1,632 @@
# phabricator.py - simple Phabricator integration
#
# Copyright 2017 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""simple Phabricator integration

This extension provides a ``phabsend`` command which sends a stack of
changesets to Phabricator without amending commit messages, and a ``phabread``
command which prints a stack of revisions in a format suitable
for :hg:`import`.

By default, Phabricator requires a ``Test Plan``, which might prevent some
changesets from being sent. The requirement could be disabled by changing
``differential.require-test-plan-field`` config server side.

Config::

    [phabricator]
    # Phabricator URL
    url = https://phab.example.com/

    # API token. Get it from https://$HOST/conduit/login/
    token = cli-xxxxxxxxxxxxxxxxxxxxxxxxxxxx

    # Repo callsign. If a repo has a URL https://$HOST/diffusion/FOO, then its
    # callsign is "FOO".
    callsign = FOO

"""

from __future__ import absolute_import

import json
import re

from mercurial.node import bin, nullid
from mercurial.i18n import _
from mercurial import (
    encoding,
    error,
    mdiff,
-    obsolete,
+    obsutil,
    patch,
    registrar,
    scmutil,
    tags,
    url as urlmod,
    util,
)

cmdtable = {}
command = registrar.command(cmdtable)

def urlencodenested(params):
    """like urlencode, but works with nested parameters.

    For example, if params is {'a': ['b', 'c'], 'd': {'e': 'f'}}, it will be
    flattened to {'a[0]': 'b', 'a[1]': 'c', 'd[e]': 'f'} and then passed to
    urlencode. Note: the encoding is consistent with PHP's http_build_query.
    """
    flatparams = util.sortdict()
    def process(prefix, obj):
        items = {list: enumerate, dict: lambda x: x.items()}.get(type(obj))
        if items is None:
            flatparams[prefix] = obj
        else:
            for k, v in items(obj):
                if prefix:
                    process('%s[%s]' % (prefix, k), v)
                else:
                    process(k, v)
    process('', params)
    return util.urlreq.urlencode(flatparams)

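# Editor's note (illustrative sketch, not part of the original change): with
# the docstring's example input, the flattening and PHP-style encoding above
# behave roughly like:
#
#   >>> urlencodenested({'a': ['b', 'c'], 'd': {'e': 'f'}})
#   'a%5B0%5D=b&a%5B1%5D=c&d%5Be%5D=f'
#
# i.e. the flattened keys a[0], a[1] and d[e], percent-encoded by urlencode
# (the exact pair order depends on the input dict's iteration order).
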
def readurltoken(repo):
    """return conduit url, token and make sure they exist

    Currently read from [phabricator] config section. In the future, it might
    make sense to read from .arcconfig and .arcrc as well.
    """
    values = []
    section = 'phabricator'
    for name in ['url', 'token']:
        value = repo.ui.config(section, name)
        if not value:
            raise error.Abort(_('config %s.%s is required') % (section, name))
        values.append(value)
    return values

def callconduit(repo, name, params):
    """call Conduit API, params is a dict. return json.loads result, or None"""
    host, token = readurltoken(repo)
    url, authinfo = util.url('/'.join([host, 'api', name])).authinfo()
    urlopener = urlmod.opener(repo.ui, authinfo)
    repo.ui.debug('Conduit Call: %s %s\n' % (url, params))
    params = params.copy()
    params['api.token'] = token
    request = util.urlreq.request(url, data=urlencodenested(params))
    body = urlopener.open(request).read()
    repo.ui.debug('Conduit Response: %s\n' % body)
    parsed = json.loads(body)
    if parsed.get(r'error_code'):
        msg = (_('Conduit Error (%s): %s')
               % (parsed[r'error_code'], parsed[r'error_info']))
        raise error.Abort(msg)
    return parsed[r'result']

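# Editor's note (illustrative sketch): callconduit wraps one Conduit
# round-trip. For example, userphids() below resolves reviewer names via:
#
#   result = callconduit(repo, 'user.search',
#                        {'constraints': {'usernames': ['alice']}})
#
# which returns the parsed JSON 'result' field, or raises error.Abort when
# Conduit reports an error_code.
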
@command('debugcallconduit', [], _('METHOD'))
def debugcallconduit(ui, repo, name):
    """call Conduit API

    Call parameters are read from stdin as a JSON blob. Result will be written
    to stdout as a JSON blob.
    """
    params = json.loads(ui.fin.read())
    result = callconduit(repo, name, params)
    s = json.dumps(result, sort_keys=True, indent=2, separators=(',', ': '))
    ui.write('%s\n' % s)

def getrepophid(repo):
    """given callsign, return repository PHID or None"""
    # developer config: phabricator.repophid
    repophid = repo.ui.config('phabricator', 'repophid')
    if repophid:
        return repophid
    callsign = repo.ui.config('phabricator', 'callsign')
    if not callsign:
        return None
    query = callconduit(repo, 'diffusion.repository.search',
                        {'constraints': {'callsigns': [callsign]}})
    if len(query[r'data']) == 0:
        return None
    repophid = encoding.strtolocal(query[r'data'][0][r'phid'])
    repo.ui.setconfig('phabricator', 'repophid', repophid)
    return repophid

_differentialrevisiontagre = re.compile('\AD([1-9][0-9]*)\Z')
_differentialrevisiondescre = re.compile(
    '^Differential Revision:\s*(?:.*)D([1-9][0-9]*)$', re.M)

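# Editor's note (illustrative sketch): the two patterns above match inputs
# such as
#
#   _differentialrevisiontagre.match('D123')
#   _differentialrevisiondescre.search(
#       'Differential Revision: https://phab.example.com/D123')
#
# and both capture '123' as group(1).
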
def getoldnodedrevmap(repo, nodelist):
    """find previous nodes that have been sent to Phabricator

    return {node: (oldnode, Differential diff, Differential Revision ID)}
    for node in nodelist with known previously sent versions, or associated
    Differential Revision IDs. ``oldnode`` and ``Differential diff`` could
    be ``None``.

    Examines commit messages like "Differential Revision:" to get the
    association information.

    If no such commit message line is found, examines all precursors and their
    tags. Tags with format like "D1234" are considered a match and the node
    with that tag, and the number after "D" (ex. 1234) will be returned.

    The ``old node``, if not None, is guaranteed to be the last diff of the
    corresponding Differential Revision, and to exist in the repo.
    """
    url, token = readurltoken(repo)
    unfi = repo.unfiltered()
    nodemap = unfi.changelog.nodemap

    result = {} # {node: (oldnode?, lastdiff?, drev)}
    toconfirm = {} # {node: (force, {precnode}, drev)}
    for node in nodelist:
        ctx = unfi[node]
        # For tags like "D123", put them into "toconfirm" to verify later
-        precnodes = list(obsolete.allprecursors(unfi.obsstore, [node]))
+        precnodes = list(obsutil.allpredecessors(unfi.obsstore, [node]))
        for n in precnodes:
            if n in nodemap:
                for tag in unfi.nodetags(n):
                    m = _differentialrevisiontagre.match(tag)
                    if m:
                        toconfirm[node] = (0, set(precnodes), int(m.group(1)))
                        continue

        # Check commit message
        m = _differentialrevisiondescre.search(ctx.description())
        if m:
            toconfirm[node] = (1, set(precnodes), int(m.group(1)))

    # Double check if tags are genuine by collecting all old nodes from
    # Phabricator, expecting the precursors to overlap with it.
    if toconfirm:
        drevs = [drev for force, precs, drev in toconfirm.values()]
        alldiffs = callconduit(unfi, 'differential.querydiffs',
                               {'revisionIDs': drevs})
        getnode = lambda d: bin(encoding.unitolocal(
            getdiffmeta(d).get(r'node', ''))) or None
        for newnode, (force, precset, drev) in toconfirm.items():
            diffs = [d for d in alldiffs.values()
                     if int(d[r'revisionID']) == drev]

            # "precursors" as known by Phabricator
            phprecset = set(getnode(d) for d in diffs)

            # Ignore if precursors (Phabricator and local repo) do not overlap,
            # and force is not set (when commit message says nothing)
            if not force and not bool(phprecset & precset):
                tagname = 'D%d' % drev
                tags.tag(repo, tagname, nullid, message=None, user=None,
                         date=None, local=True)
                unfi.ui.warn(_('D%s: local tag removed - does not match '
                               'Differential history\n') % drev)
                continue

            # Find the last node using Phabricator metadata, and make sure it
            # exists in the repo
            oldnode = lastdiff = None
            if diffs:
                lastdiff = max(diffs, key=lambda d: int(d[r'id']))
                oldnode = getnode(lastdiff)
                if oldnode and oldnode not in nodemap:
                    oldnode = None

            result[newnode] = (oldnode, lastdiff, drev)

    return result

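# Editor's note (illustrative sketch): a successful lookup above produces
# entries shaped like {newnode: (oldnode, lastdiff, 1234)}, where 1234 came
# from a local "D1234" tag or a "Differential Revision: .../D1234" message
# line, and oldnode / lastdiff may each be None, as the docstring notes.
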
def getdiff(ctx, diffopts):
    """plain-text diff without header (user, commit message, etc)"""
    output = util.stringio()
    for chunk, _label in patch.diffui(ctx.repo(), ctx.p1().node(), ctx.node(),
                                      None, opts=diffopts):
        output.write(chunk)
    return output.getvalue()

def creatediff(ctx):
    """create a Differential Diff"""
    repo = ctx.repo()
    repophid = getrepophid(repo)
    # Create a "Differential Diff" via "differential.createrawdiff" API
    params = {'diff': getdiff(ctx, mdiff.diffopts(git=True, context=32767))}
    if repophid:
        params['repositoryPHID'] = repophid
    diff = callconduit(repo, 'differential.createrawdiff', params)
    if not diff:
        raise error.Abort(_('cannot create diff for %s') % ctx)
    return diff

def writediffproperties(ctx, diff):
    """write metadata to diff so patches could be applied losslessly"""
    params = {
        'diff_id': diff[r'id'],
        'name': 'hg:meta',
        'data': json.dumps({
            'user': ctx.user(),
            'date': '%d %d' % ctx.date(),
            'node': ctx.hex(),
            'parent': ctx.p1().hex(),
        }),
    }
    callconduit(ctx.repo(), 'differential.setdiffproperty', params)

def createdifferentialrevision(ctx, revid=None, parentrevid=None, oldnode=None,
                               olddiff=None, actions=None):
    """create or update a Differential Revision

    If revid is None, create a new Differential Revision, otherwise update
    revid. If parentrevid is not None, set it as a dependency.

    If oldnode is not None, check if the patch content (without commit message
    and metadata) has changed before creating another diff.

    If actions is not None, they will be appended to the transaction.
    """
    repo = ctx.repo()
    if oldnode:
        diffopts = mdiff.diffopts(git=True, context=1)
        oldctx = repo.unfiltered()[oldnode]
        neednewdiff = (getdiff(ctx, diffopts) != getdiff(oldctx, diffopts))
    else:
        neednewdiff = True

    transactions = []
    if neednewdiff:
        diff = creatediff(ctx)
        writediffproperties(ctx, diff)
        transactions.append({'type': 'update', 'value': diff[r'phid']})
    else:
        # Even if we don't need to upload a new diff because the patch content
        # does not change, we might still need to update its metadata so
        # pushers could know the correct node metadata.
        assert olddiff
        diff = olddiff
        writediffproperties(ctx, diff)

    # Use a temporary summary to set dependency. There might be better ways but
    # I cannot find them for now. But do not do that if we are updating an
    # existing revision (revid is not None) since that introduces visible
    # churns (someone edited "Summary" twice) on the web page.
    if parentrevid and revid is None:
        summary = 'Depends on D%s' % parentrevid
        transactions += [{'type': 'summary', 'value': summary},
                         {'type': 'summary', 'value': ' '}]

    if actions:
        transactions += actions

    # Parse commit message and update related fields.
    desc = ctx.description()
    info = callconduit(repo, 'differential.parsecommitmessage',
                       {'corpus': desc})
    for k, v in info[r'fields'].items():
        if k in ['title', 'summary', 'testPlan']:
            transactions.append({'type': k, 'value': v})

    params = {'transactions': transactions}
    if revid is not None:
        # Update an existing Differential Revision
        params['objectIdentifier'] = revid

    revision = callconduit(repo, 'differential.revision.edit', params)
    if not revision:
        raise error.Abort(_('cannot create revision for %s') % ctx)

    return revision

def userphids(repo, names):
    """convert user names to PHIDs"""
    query = {'constraints': {'usernames': names}}
    result = callconduit(repo, 'user.search', query)
    # username not found is not an error of the API. So check if we have missed
    # some names here.
    data = result[r'data']
    resolved = set(entry[r'fields'][r'username'] for entry in data)
    unresolved = set(names) - resolved
    if unresolved:
        raise error.Abort(_('unknown username: %s')
                          % ' '.join(sorted(unresolved)))
    return [entry[r'phid'] for entry in data]

@command('phabsend',
         [('r', 'rev', [], _('revisions to send'), _('REV')),
          ('', 'reviewer', [], _('specify reviewers')),
          ('', 'confirm', None, _('ask for confirmation before sending'))],
         _('REV [OPTIONS]'))
def phabsend(ui, repo, *revs, **opts):
    """upload changesets to Phabricator

    If there are multiple revisions specified, they will be sent as a stack
    with a linear dependency relationship, using the order specified by the
    revset.

    For the first time uploading changesets, local tags will be created to
    maintain the association. After the first time, phabsend will check
    obsstore and tags information so it can figure out whether to update an
    existing Differential Revision, or create a new one.

    The --confirm option lets you confirm changesets before sending them. You
    can also add the following to your configuration file to make it the
    default behaviour.

        [phabsend]
        confirm = true
    """
    revs = list(revs) + opts.get('rev', [])
    revs = scmutil.revrange(repo, revs)

    if not revs:
        raise error.Abort(_('phabsend requires at least one changeset'))

    confirm = ui.configbool('phabsend', 'confirm')
    confirm |= bool(opts.get('confirm'))
    if confirm:
        confirmed = _confirmbeforesend(repo, revs)
        if not confirmed:
            raise error.Abort(_('phabsend cancelled'))

    actions = []
    reviewers = opts.get('reviewer', [])
    if reviewers:
        phids = userphids(repo, reviewers)
        actions.append({'type': 'reviewers.add', 'value': phids})

    # {newnode: (oldnode, olddiff, olddrev)}
    oldmap = getoldnodedrevmap(repo, [repo[r].node() for r in revs])

    # Send patches one by one so we know their Differential Revision IDs and
    # can provide dependency relationships
    lastrevid = None
    for rev in revs:
        ui.debug('sending rev %d\n' % rev)
        ctx = repo[rev]

        # Get Differential Revision ID
        oldnode, olddiff, revid = oldmap.get(ctx.node(), (None, None, None))
        if oldnode != ctx.node():
            # Create or update Differential Revision
            revision = createdifferentialrevision(ctx, revid, lastrevid,
                                                  oldnode, olddiff, actions)
            newrevid = int(revision[r'object'][r'id'])
            if revid:
                action = _('updated')
            else:
                action = _('created')

            # Create a local tag to note the association
            tagname = 'D%d' % newrevid
            tags.tag(repo, tagname, ctx.node(), message=None, user=None,
                     date=None, local=True)
        else:
            # Nothing changed. But still set "newrevid" so the next revision
            # could depend on this one.
            newrevid = revid
            action = _('skipped')

        ui.write(_('D%s: %s - %s: %s\n') % (newrevid, action, ctx,
                                            ctx.description().split('\n')[0]))
        lastrevid = newrevid

# Map from "hg:meta" keys to headers understood by "hg import". The order is
# consistent with "hg export" output.
_metanamemap = util.sortdict([(r'user', 'User'), (r'date', 'Date'),
                              (r'node', 'Node ID'), (r'parent', 'Parent ')])

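# Editor's note (illustrative sketch): given hg:meta such as
# {'user': 'Foo Bar <foo@example.com>', 'node': '98c08ac...'}, the mapping
# above lets readpatch() emit import-compatible headers like:
#
#   # User Foo Bar <foo@example.com>
#   # Node ID 98c08acae292b2faf60a279b4189beb6cff1414d
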
def _confirmbeforesend(repo, revs):
    ui = repo.ui
    for rev in revs:
        ctx = repo[rev]
        desc = ctx.description().splitlines()[0]
        ui.write(('%d: ' % rev), label='phabsend.revnumber')
        ui.write(('%s\n' % desc), label='phabsend.desc')

    if ui.promptchoice(_('Phabsend the above changes (yn)?'
                         '$$ &Yes $$ &No')):
        return False

    return True

def querydrev(repo, params, stack=False):
    """return a list of "Differential Revision" dicts

    params is the input of "differential.query" API, and is expected to match
    just a single Differential Revision.

    A "Differential Revision dict" looks like:

        {
            "id": "2",
            "phid": "PHID-DREV-672qvysjcczopag46qty",
            "title": "example",
            "uri": "https://phab.example.com/D2",
            "dateCreated": "1499181406",
            "dateModified": "1499182103",
            "authorPHID": "PHID-USER-tv3ohwc4v4jeu34otlye",
            "status": "0",
            "statusName": "Needs Review",
            "properties": [],
            "branch": null,
            "summary": "",
            "testPlan": "",
            "lineCount": "2",
            "activeDiffPHID": "PHID-DIFF-xoqnjkobbm6k4dk6hi72",
            "diffs": [
              "3",
              "4",
            ],
            "commits": [],
            "reviewers": [],
            "ccs": [],
            "hashes": [],
            "auxiliary": {
              "phabricator:projects": [],
              "phabricator:depends-on": [
                "PHID-DREV-gbapp366kutjebt7agcd"
              ]
            },
            "repositoryPHID": "PHID-REPO-hub2hx62ieuqeheznasv",
            "sourcePath": null
        }

    If stack is True, return a list of "Differential Revision dict"s in an
    order such that later ones depend on earlier ones. Otherwise, return a
    list containing a single "Differential Revision dict".
    """
    prefetched = {} # {id or phid: drev}
    def fetch(params):
        """params -> single drev or None"""
        key = (params.get(r'ids') or params.get(r'phids') or [None])[0]
        if key in prefetched:
            return prefetched[key]
        # Otherwise, send the request. If we're fetching a stack, be smarter
        # and fetch more ids in one batch, even if it could be unnecessary.
        batchparams = params
        if stack and len(params.get(r'ids', [])) == 1:
            i = int(params[r'ids'][0])
            # developer config: phabricator.batchsize
            batchsize = repo.ui.configint('phabricator', 'batchsize', 12)
            batchparams = {'ids': range(max(1, i - batchsize), i + 1)}
        drevs = callconduit(repo, 'differential.query', batchparams)
        # Fill prefetched with the result
        for drev in drevs:
            prefetched[drev[r'phid']] = drev
            prefetched[int(drev[r'id'])] = drev
        if key not in prefetched:
            raise error.Abort(_('cannot get Differential Revision %r') % params)
        return prefetched[key]

    visited = set()
    result = []
    queue = [params]
    while queue:
        params = queue.pop()
        drev = fetch(params)
        if drev[r'id'] in visited:
            continue
        visited.add(drev[r'id'])
        result.append(drev)
        if stack:
            auxiliary = drev.get(r'auxiliary', {})
            depends = auxiliary.get(r'phabricator:depends-on', [])
            for phid in depends:
                queue.append({'phids': [phid]})
    result.reverse()
    return result

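# Editor's note (illustrative sketch): with stack=True, querydrev above walks
# "phabricator:depends-on" edges. If D2 depends on D1, then
#
#   querydrev(repo, {'ids': [2]}, stack=True)
#
# visits D2 then D1 and, after the final reverse(), returns [D1, D2] so that
# dependencies come first.
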
def getdescfromdrev(drev):
    """get description (commit message) from "Differential Revision"

    This is similar to differential.getcommitmessage API. But we only care
    about limited fields: title, summary, test plan, and URL.
    """
    title = drev[r'title']
    summary = drev[r'summary'].rstrip()
    testplan = drev[r'testPlan'].rstrip()
    if testplan:
        testplan = 'Test Plan:\n%s' % testplan
    uri = 'Differential Revision: %s' % drev[r'uri']
    return '\n\n'.join(filter(None, [title, summary, testplan, uri]))

def getdiffmeta(diff):
    """get commit metadata (date, node, user, p1) from a diff object

    The metadata could be "hg:meta", sent by phabsend, like:

        "properties": {
          "hg:meta": {
            "date": "1499571514 25200",
            "node": "98c08acae292b2faf60a279b4189beb6cff1414d",
            "user": "Foo Bar <foo@example.com>",
            "parent": "6d0abad76b30e4724a37ab8721d630394070fe16"
          }
        }

    Or converted from "local:commits", sent by "arc", like:

        "properties": {
          "local:commits": {
            "98c08acae292b2faf60a279b4189beb6cff1414d": {
              "author": "Foo Bar",
              "time": 1499546314,
              "branch": "default",
              "tag": "",
              "commit": "98c08acae292b2faf60a279b4189beb6cff1414d",
              "rev": "98c08acae292b2faf60a279b4189beb6cff1414d",
              "local": "1000",
              "parents": ["6d0abad76b30e4724a37ab8721d630394070fe16"],
              "summary": "...",
              "message": "...",
              "authorEmail": "foo@example.com"
            }
          }
        }

    Note: metadata extracted from "local:commits" will lose time zone
    information.
    """
    props = diff.get(r'properties') or {}
    meta = props.get(r'hg:meta')
    if not meta and props.get(r'local:commits'):
        commit = sorted(props[r'local:commits'].values())[0]
        meta = {
            r'date': r'%d 0' % commit[r'time'],
            r'node': commit[r'rev'],
            r'user': r'%s <%s>' % (commit[r'author'], commit[r'authorEmail']),
        }
        if len(commit.get(r'parents', ())) >= 1:
            meta[r'parent'] = commit[r'parents'][0]
    return meta or {}

def readpatch(repo, params, write, stack=False):
    """generate plain-text patch readable by 'hg import'

    write is usually ui.write. params is passed to "differential.query". If
    stack is True, also write dependent patches.
    """
    # Differential Revisions
    drevs = querydrev(repo, params, stack)

    # Prefetch hg:meta property for all diffs
    diffids = sorted(set(max(int(v) for v in drev[r'diffs']) for drev in drevs))
    diffs = callconduit(repo, 'differential.querydiffs', {'ids': diffids})

    # Generate patch for each drev
    for drev in drevs:
        repo.ui.note(_('reading D%s\n') % drev[r'id'])

        diffid = max(int(v) for v in drev[r'diffs'])
        body = callconduit(repo, 'differential.getrawdiff', {'diffID': diffid})
        desc = getdescfromdrev(drev)
        header = '# HG changeset patch\n'

        # Try to preserve metadata from hg:meta property. Write hg patch
        # headers that can be read by the "import" command. See patchheadermap
        # and extract in mercurial/patch.py for supported headers.
        meta = getdiffmeta(diffs[str(diffid)])
        for k in _metanamemap.keys():
            if k in meta:
                header += '# %s %s\n' % (_metanamemap[k], meta[k])

        content = '%s%s\n%s' % (header, desc, body)
        write(encoding.unitolocal(content))

@command('phabread',
         [('', 'stack', False, _('read dependencies'))],
         _('REVID [OPTIONS]'))
def phabread(ui, repo, revid, **opts):
    """print patches from Phabricator suitable for importing

    REVID could be a Differential Revision identity, like ``D123``, or just the
    number ``123``, or a full URL like ``https://phab.example.com/D123``.

    If --stack is given, follow dependency information and read all patches.
    """
    try:
        revid = int(revid.split('/')[-1].replace('D', ''))
    except ValueError:
        raise error.Abort(_('invalid Revision ID: %s') % revid)
    readpatch(repo, {'ids': [revid]}, ui.write, opts.get('stack'))
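
# Editor's note (illustrative sketch): the REVID parsing above accepts all
# three documented forms, since
#
#   'https://phab.example.com/D123'.split('/')[-1]  # -> 'D123'
#   'D123'.replace('D', '')                         # -> '123'
#   int('123')                                      # -> 123
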
obsolete.py
@@ -1,1050 +1,1050 @@
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "predecessor" and possible
replacements are called "successors". Markers that used changeset X as
a predecessor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "predecessor markers of Y" because they hold
information about the predecessors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

We use a single marker to distinguish the "split" case from the "divergence"
case. If two independent operations rewrite the same changeset A into A' and
A'', we have an error case: divergent rewriting. We can detect it because
two markers will be created independently:

    (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. Marker format depends on the version.
See the comment associated with each format for details.

"""
from __future__ import absolute_import

import errno
import struct

from .i18n import _
from . import (
    error,
    node,
    obsutil,
    phases,
    policy,
    util,
)

parsers = policy.importmod(r'parsers')

_pack = struct.pack
_unpack = struct.unpack
_calcsize = struct.calcsize
propertycache = util.propertycache

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

# Options for obsolescence
createmarkersopt = 'createmarkers'
allowunstableopt = 'allowunstable'
exchangeopt = 'exchange'

def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't been
    # set but _enabled is true.
    if len(result) == 0 and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        not createmarkersopt in result):
        raise error.Abort(_("'createmarkers' obsolete option must be enabled "
                            "if other obsolete options are enabled"))

    return option in result

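# Editor's note (illustrative sketch): the option names above correspond to
# configuration such as
#
#   [experimental]
#   evolution = createmarkers, allowunstable, exchange
#
# with which isenabled(repo, createmarkersopt) returns True.
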
### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it's a successor of a public changeset
#
#  o  A' (bumped)
#  |`:
#  | o  A
#  |/
#  o  Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
#  o  Ad
#  |`:
#  | x  A'
#  |'|
#  o |  A
#  |/
#  o  Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
# This flag means that the successors express the changes between the public
# and bumped version and fix the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = _calcsize(_fm0fixed)
_fm0fnodesize = _calcsize(_fm0node)

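# Editor's note (illustrative check): the fixed-part format above packs the
# uint8/uint32/byte/20-byte-node fields big-endian with no padding, so
# _fm0fsize == struct.calcsize('>BIB20s') == 26 (1 + 4 + 1 + 20 bytes) and
# _fm0fnodesize == struct.calcsize('20s') == 20, matching the layout comment.
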
def _fm0readmarkers(data, off, stop):
    # Loop on markers
    while off < stop:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise error.Abort(_('parsing obsolete marker: metadata is too '
                                'short, %d bytes expected, got %d')
                              % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)

def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise error.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    time, tz = date
    metadata['date'] = '%r %i' % (time, tz)
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents, 1):
            metadata['p%i' % i] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata keys")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata values")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            # values may legally contain ':', so only split on the first one
            key, value = l.split(':', 1)
            d[key] = value
    return d

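# Illustrative sketch (not part of the original module): round-trip a
# metadata dict through the v0 encoder/decoder; the sample keys and values
# here are arbitrary:
#
# >>> meta = {'user': 'alice', 'operation': 'amend'}
# >>> blob = _fm0encodemeta(meta)
# >>> blob
# 'operation:amend\x00user:alice'
# >>> _fm0decodemeta(blob) == meta
# True
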
## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: predecessor changeset identifier.
#
# - N*(20 or 32) bytes: successors changesets identifiers.
#
# - P*(20 or 32) bytes: parents of the predecessors changesets.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1nodesha1size = _calcsize(_fm1nodesha1)
_fm1nodesha256size = _calcsize(_fm1nodesha256)
_fm1fsize = _calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = _calcsize(_fm1metapair)

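# Illustrative sketch (not part of the original module): the v1 fixed part is
# uint32 + float64 + int16 + uint16 + three uint8 + a 20-byte node, i.e.
# 4 + 8 + 2 + 2 + 3 + 20 = 39 bytes for the SHA-1 variant:
#
# >>> import struct
# >>> struct.calcsize('>IdhHBBB20s')
# 39
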
def _fm1purereadmarkers(data, off, stop):
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    ufixed = struct.Struct(_fm1fixed).unpack

    while off < stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)

def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)

def _fm1readmarkers(data, off, stop):
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off, stop)
    return native(data, off, stop)

# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

def _readmarkerversion(data):
    return _unpack('>B', data[0:1])[0]

@util.nogc
def _readmarkers(data, off=None, stop=None):
    """Read and enumerate markers from raw data"""
    diskversion = _readmarkerversion(data)
    if not off:
        off = 1  # skip 1 byte version number
    if stop is None:
        stop = len(data)
    if diskversion not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % diskversion
        raise error.UnknownVersion(msg, version=diskversion)
    return diskversion, formats[diskversion][0](data, off, stop)

def encodeheader(version=_fm0version):
    return _pack('>B', version)

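# Illustrative sketch (not part of the original module): the header is a
# single version byte, so encoding it and reading it back round-trips:
#
# >>> _readmarkerversion(encodeheader(1))
# 1
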
def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for marker in markers:
        yield encodeone(marker)

@util.nogc
def _addsuccessors(successors, markers):
    for mark in markers:
        successors.setdefault(mark[0], set()).add(mark)

def _addprecursors(*args, **kwargs):
    msg = ("'obsolete._addprecursors' is deprecated, "
           "use 'obsolete._addpredecessors'")
    util.nouideprecwarn(msg, '4.4')

    return _addpredecessors(*args, **kwargs)

@util.nogc
def _addpredecessors(predecessors, markers):
    for mark in markers:
        for suc in mark[1]:
            predecessors.setdefault(suc, set()).add(mark)

@util.nogc
def _addchildren(children, markers):
    for mark in markers:
        parents = mark[5]
        if parents is not None:
            for p in parents:
                children.setdefault(p, set()).add(mark)

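# Illustrative sketch (not part of the original module): a single marker
# (prec, (succ,), flags, meta, date, (parent,)) ends up indexed three ways:
#
#   successors[prec]   -> {marker}  # keyed by the node it obsoletes
#   predecessors[succ] -> {marker}  # keyed by each replacement node
#   children[parent]   -> {marker}  # keyed by each recorded parent of prec
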
def _checkinvalidmarkers(markers):
    """search for markers with invalid data and raise an error if needed

    Exists as a separate function to allow the evolve extension a more
    subtle handling.
    """
    for mark in markers:
        if node.nullid in mark[1]:
            raise error.Abort(_('bad obsolescence marker detected: '
                                'invalid successors nullid'))

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with several mappings:
    - predecessors[x] -> set(markers on predecessors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on predecessors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec: nodeid, the predecessor changeset
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of predecessors
    # None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related computations
        self.caches = {}
        self.svfs = svfs
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        if not self._cached('_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
                pass
        return bool(self._all)

    __bool__ = __nonzero__

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker has been added, False if the marker
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = util.makedate()
            else:
                date = util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicates.
        Return the number of new markers."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                data = b''.join(encodemarkers(new, offset == 0, self._version))
                f.write(data)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            addedmarkers = transaction.changes.get('obsmarkers')
            if addedmarkers is not None:
                addedmarkers.update(new)
            self._addmarkers(new, data)
            # new markers *may* have changed several sets. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        return self.svfs.tryread('obsstore')

    @propertycache
    def _version(self):
        if len(self._data) >= 1:
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @property
    def precursors(self):
        msg = ("'obsstore.precursors' is deprecated, "
               "use 'obsstore.predecessors'")
        util.nouideprecwarn(msg, '4.4')

        return self.predecessors

    @propertycache
    def predecessors(self):
        predecessors = {}
        _addpredecessors(predecessors, self._all)
        return predecessors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        return attr in self.__dict__

    def _addmarkers(self, markers, rawdata):
        markers = list(markers)  # to allow repeated iteration
        self._data = self._data + rawdata
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('predecessors'):
            _addpredecessors(self.predecessors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use one of these changesets as a successor
        - prune markers of direct children of these changesets
        - recursive application of the two rules on predecessors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.predecessors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers

def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    defaultformat = ui.configint('format', 'obsstore-version')
    # rely on obsstore class default when possible.
    kwargs = {}
    if defaultformat is not None:
        kwargs['defaultformat'] = defaultformat
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
    return store

def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None

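# Illustrative sketch (not part of the original module): with local formats
# {0, 1}, the highest shared version wins and unknown versions are skipped:
#
# >>> commonversion([2, 1, 0])
# 1
# >>> commonversion([2]) is None
# True
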
# arbitrarily picked to fit into 8K limit from HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys

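# Illustrative sketch (not part of the original module): with no markers
# there is nothing to chunk, so no 'dumpN' key is emitted:
#
# >>> _pushkeyescape([])
# {}
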
def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(sorted(repo.obsstore))

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return False
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return False
    data = util.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            repo.invalidatevolatilesets()
            tr.close()
            return True
        finally:
            tr.release()
    finally:
        lock.release()

# keep compatibility for the 4.3 cycle
def allprecursors(obsstore, nodes, ignoreflags=0):
    movemsg = 'obsolete.allprecursors moved to obsutil.allprecursors'
    util.nouideprecwarn(movemsg, '4.3')
    return obsutil.allprecursors(obsstore, nodes, ignoreflags)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    movemsg = 'obsolete.allsuccessors moved to obsutil.allsuccessors'
    util.nouideprecwarn(movemsg, '4.3')
    return obsutil.allsuccessors(obsstore, nodes, ignoreflags)

def marker(repo, data):
    movemsg = 'obsolete.marker moved to obsutil.marker'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.marker(repo, data)

def getmarkers(repo, nodes=None, exclusive=False):
    movemsg = 'obsolete.getmarkers moved to obsutil.getmarkers'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.getmarkers(repo, nodes=nodes, exclusive=exclusive)

def exclusivemarkers(repo, nodes):
    movemsg = 'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.exclusivemarkers(repo, nodes)

def foreground(repo, nodes):
    movemsg = 'obsolete.foreground moved to obsutil.foreground'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.foreground(repo, nodes)

def successorssets(repo, initialnode, cache=None):
    movemsg = 'obsolete.successorssets moved to obsutil.successorssets'
    repo.ui.deprecwarn(movemsg, '4.3')
    return obsutil.successorssets(repo, initialnode, cache=cache)

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        if name in cachefuncs:
            msg = "duplicated registration for volatileset '%s' (existing: %r)"
            raise error.ProgrammingError(msg % (name, cachefuncs[name]))
        cachefuncs[name] = func
        return func
    return decorator

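# Illustrative sketch (not part of the original module): registering a
# computation under a hypothetical set name wires it into getrevs():
#
# @cachefor('myset')
# def _computemyset(repo):
#     return frozenset()  # revisions belonging to the hypothetical set
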
def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]

# To be simple we need to invalidate obsolescence cache when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists on
    the repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing)"""
    # only clear cache if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

def _mutablerevs(repo):
    """the set of mutable revisions in the repository"""
    return repo._phasecache.getrevset(repo, (phases.draft, phases.secret))

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    getnode = repo.changelog.node
    notpublic = _mutablerevs(repo)
    isobs = repo.obsstore.successors.__contains__
    obs = set(r for r in notpublic if isobs(getnode(r)))
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    pfunc = repo.changelog.parentrevs
    mutable = _mutablerevs(repo)
    obsolete = getrevs(repo, 'obsolete')
    others = mutable - obsolete
    unstable = set()
    for r in sorted(others):
        # A rev is unstable if one of its parents is obsolete or unstable;
        # this works since we traverse following growing rev order
        for p in pfunc(r):
            if p in obsolete or p in unstable:
                unstable.add(r)
                break
    return unstable

@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete revisions with non obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete revisions without non obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')


@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase  # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    for ctx in repo.set('(not public()) and (not obsolete())'):
        rev = ctx.rev()
        # We only evaluate mutable, non-obsolete revisions
        node = ctx.node()
        # (future) A cache of predecessors may be worth it if split is very
        # common
        for pnode in obsutil.allpredecessors(repo.obsstore, [node],
                                             ignoreflags=bumpedfix):
            prev = torev(pnode)  # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public predecessor
                bumped.add(rev)
                break  # Next draft!
    return bumped

@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some revision.
    """
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.predecessors.get(ctx.node(), ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue  # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                obsutil.successorssets(repo, prec, cache=newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.predecessors.get(prec, ()))
    return divergent


def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                  operation=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuples. `old` and `news` are changectx. metadata is an optional dictionary
    containing metadata for this marker only. It is merged with the global
    metadata specified through the `metadata` argument of this function.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    useoperation = repo.ui.configbool('experimental',
                                      'evolution.track-operation')
    if useoperation and operation:
        metadata['operation'] = operation
    tr = repo.transaction('add-obsolescence-marker')
    try:
        markerargs = []
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise error.Abort(_("cannot obsolete public changeset: %s")
                                  % prec,
                                  hint="see 'hg help phases' for details")
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise error.Abort(_("changeset %s cannot obsolete itself")
                                  % prec)

            # Creating the marker causes the hidden cache to become invalid,
            # which causes recomputation when we ask for prec.parents() above.
            # Resulting in n^2 behavior. So let's prepare all of the args
            # first, then create the markers.
            markerargs.append((nprec, nsucs, npare, localmetadata))

        for args in markerargs:
            nprec, nsucs, npare, localmetadata = args
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata,
                                 ui=repo.ui)
        repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()
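
# Illustrative usage sketch (not part of the original module; 'repo', 'old',
# 'new1' and 'new2' are hypothetical repo/changectx objects): record that one
# changeset was rewritten into two, tagging the markers with an operation:
#
# createmarkers(repo, [(old, (new1, new2))], operation='split')
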
@@ -1,544 +1,553 @@
# obsutil.py - utility functions for obsolescence
#
# Copyright 2017 Boris Feld <boris.feld@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from . import (
    phases,
    util
)

15 class marker(object):
15 class marker(object):
16 """Wrap obsolete marker raw data"""
16 """Wrap obsolete marker raw data"""
17
17
18 def __init__(self, repo, data):
18 def __init__(self, repo, data):
19 # the repo argument will be used to create changectx in later version
19 # the repo argument will be used to create changectx in later version
20 self._repo = repo
20 self._repo = repo
21 self._data = data
21 self._data = data
22 self._decodedmeta = None
22 self._decodedmeta = None
23
23
24 def __hash__(self):
24 def __hash__(self):
25 return hash(self._data)
25 return hash(self._data)
26
26
27 def __eq__(self, other):
27 def __eq__(self, other):
28 if type(other) != type(self):
28 if type(other) != type(self):
29 return False
29 return False
30 return self._data == other._data
30 return self._data == other._data
31
31
32 def precnode(self):
32 def precnode(self):
33 msg = ("'marker.precnode' is deprecated, "
33 msg = ("'marker.precnode' is deprecated, "
34 "use 'marker.precnode'")
34 "use 'marker.precnode'")
35 util.nouideprecwarn(msg, '4.4')
35 util.nouideprecwarn(msg, '4.4')
36 return self.prednode()
36 return self.prednode()
37
37
38 def prednode(self):
38 def prednode(self):
39 """Predecessor changeset node identifier"""
39 """Predecessor changeset node identifier"""
40 return self._data[0]
40 return self._data[0]
41
41
42 def succnodes(self):
42 def succnodes(self):
43 """List of successor changesets node identifiers"""
43 """List of successor changesets node identifiers"""
44 return self._data[1]
44 return self._data[1]
45
45
46 def parentnodes(self):
46 def parentnodes(self):
47 """Parents of the predecessors (None if not recorded)"""
47 """Parents of the predecessors (None if not recorded)"""
48 return self._data[5]
48 return self._data[5]
49
49
50 def metadata(self):
50 def metadata(self):
51 """Decoded metadata dictionary"""
51 """Decoded metadata dictionary"""
52 return dict(self._data[3])
52 return dict(self._data[3])
53
53
54 def date(self):
54 def date(self):
55 """Creation date as (unixtime, offset)"""
55 """Creation date as (unixtime, offset)"""
56 return self._data[4]
56 return self._data[4]
57
57
58 def flags(self):
58 def flags(self):
59 """The flags field of the marker"""
59 """The flags field of the marker"""
60 return self._data[2]
60 return self._data[2]
61
61
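# An illustrative sketch (hypothetical data): the raw tuple wrapped by
# `marker` is indexed by the accessors above, i.e.
# (prednode, succnodes, flags, metadata, date, parents).
def _sketch_markerfields():
    raw = ('pred', ('succ1', 'succ2'), 0, (('operation', 'amend'),),
           (1500000000, 0), None)
    m = marker(None, raw)
    return m.prednode(), m.succnodes(), m.metadata(), m.parentnodes()
    # -> ('pred', ('succ1', 'succ2'), {'operation': 'amend'}, None)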
def getmarkers(repo, nodes=None, exclusive=False):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        rawmarkers = repo.obsstore
    elif exclusive:
        rawmarkers = exclusivemarkers(repo, nodes)
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)
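# A hedged usage sketch (assuming `repo` is an open localrepository; output
# format is illustrative): list the rewrite relation recorded by every
# marker relevant to the working directory parent.
def _sketch_printmarkers(repo):
    node = repo['.'].node()
    for m in getmarkers(repo, nodes=[node]):
        repo.ui.write('%r -> %r\n' % (m.prednode(), m.succnodes()))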
def closestpredecessors(repo, nodeid):
    """yield the list of next predecessors pointing on visible changectx nodes

    This function respects the repoview filtering; filtered revisions will be
    considered missing.
    """

    precursors = repo.obsstore.predecessors
    stack = [nodeid]
    seen = set(stack)

    while stack:
        current = stack.pop()
        currentpreccs = precursors.get(current, ())

        for prec in currentpreccs:
            precnodeid = prec[0]

            # Basic cycle protection
            if precnodeid in seen:
                continue
            seen.add(precnodeid)

            if precnodeid in repo:
                yield precnodeid
            else:
                stack.append(precnodeid)
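# Sketch of intended use (hypothetical wrapper): the visible changesets the
# working directory parent was rewritten from, skipping any hidden
# intermediate predecessors.
def _sketch_visibleorigins(repo):
    return list(closestpredecessors(repo, repo['.'].node()))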
-def allprecursors(obsstore, nodes, ignoreflags=0):
+def allprecursors(*args, **kwargs):
+    """ (DEPRECATED)
+    """
+    msg = ("'obsutil.allprecursors' is deprecated, "
+           "use 'obsutil.allpredecessors'")
+    util.nouideprecwarn(msg, '4.4')
+
+    return allpredecessors(*args, **kwargs)
+
+def allpredecessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every predecessor of <nodes>.

    Some predecessors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It
    includes the initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.predecessors.get(current, ()):
            # ignore markers flagged with the specified flag
            if mark[2] & ignoreflags:
                continue
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)
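# A self-contained illustrative sketch (fake data, not a real obsstore):
# the walk only needs an object with a `predecessors` dict mapping a node
# to its markers, where marker[0] is the predecessor node and marker[2]
# holds the flags.
def _sketch_allpredecessors():
    class fakestore(object):
        predecessors = {
            'A1': [('A0', ('A1',), 0, (), (0, 0), None)],
            'A0': [('A', ('A0',), 0, (), (0, 0), None)],
        }
    return sorted(allpredecessors(fakestore(), ['A1']))
    # -> ['A', 'A0', 'A1']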
def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It
    includes the initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore markers flagged with the specified flag
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)
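# A companion sketch (fake data): one marker with two successors (a split)
# is flattened by the linear walk, which is why the docstring warns that it
# cannot reconstruct the split grouping.
def _sketch_allsuccessors():
    class fakestore(object):
        successors = {
            'A': [('A', ('B', 'C'), 0, (), (0, 0), None)],
        }
    return sorted(allsuccessors(fakestore(), ['A']))
    # -> ['A', 'B', 'C']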
def _filterprunes(markers):
    """return a set with no prune markers"""
    return set(m for m in markers if m[1])

def exclusivemarkers(repo, nodes):
    """set of markers relevant to "nodes" but no other locally-known nodes

    This function computes the set of markers "exclusive" to a locally-known
    node. This means we walk the markers starting from <nodes> until we reach
    a locally-known precursor outside of <nodes>. Elements of <nodes> with
    locally-known successors outside of <nodes> are ignored (since their
    precursors markers are also relevant to these successors).

    For example:

        # (A0 rewritten as A1)
        #
        # A0 <-1- A1        # Marker "1" is exclusive to A1

    or

        # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
        #
        # <-1- A0 <-2- AX <-3- A1 # Markers "2,3" are exclusive to A1

    or

        # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
        #
        #          <-2- A1   # Marker "2" is exclusive to A0,A1
        #        /
        # <-1- A0
        #        \
        #          <-3- A2   # Marker "3" is exclusive to A0,A2
        #
        # in addition:
        #
        #  Markers "2,3" are exclusive to A1,A2
        #  Markers "1,2,3" are exclusive to A0,A1,A2

    See test/test-obsolete-bundle-strip.t for more examples.

    An example usage is strip. When stripping a changeset, we also want to
    strip the markers exclusive to this changeset. Otherwise we would have
    "dangling" obsolescence markers from its precursors: obsolescence markers
    marking a node as obsolete without any successors available locally.

    As for relevant markers, the prune markers for children will be followed.
    Of course, they will only be followed if the pruned child is
    locally-known, since the prune markers are relevant to the pruned node.
    However, while prune markers are considered relevant to the parent of the
    pruned changesets, prune markers for locally-known changesets (with no
    successors) are considered exclusive to the pruned nodes. This allows
    stripping the prune markers (with the rest of the exclusive chain)
    alongside the pruned changesets.
    """
    # running on a filtered repository would be dangerous as markers could be
    # reported as exclusive when they are relevant for other filtered nodes.
    unfi = repo.unfiltered()

    # shortcut to various useful items
    nm = unfi.changelog.nodemap
    precursorsmarkers = unfi.obsstore.predecessors
    successormarkers = unfi.obsstore.successors
    childrenmarkers = unfi.obsstore.children

    # exclusive markers (return of the function)
    exclmarkers = set()
    # we need fast membership testing
    nodes = set(nodes)
    # looking for heads in the obshistory
    #
    # XXX we are ignoring all issues related to cycles for now.
    stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
    stack.sort()
    # nodes already stacked
    seennodes = set(stack)
    while stack:
        current = stack.pop()
        # fetch precursors markers
        markers = list(precursorsmarkers.get(current, ()))
        # extend the list with prune markers
        for mark in successormarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # and markers from children (looking for prune)
        for mark in childrenmarkers.get(current, ()):
            if not mark[1]:
                markers.append(mark)
        # traverse the markers
        for mark in markers:
            if mark in exclmarkers:
                # marker already selected
                continue

            # If the marker is about the current node, select it
            #
            # (this delays the addition of markers from children)
            if mark[1] or mark[0] == current:
                exclmarkers.add(mark)

            # should we keep traversing through the precursors?
            prec = mark[0]

            # nodes in the stack or already processed
            if prec in seennodes:
                continue

            # is this a locally-known node?
            known = prec in nm
            # if locally-known and not in the <nodes> set, the traversal
            # stops here.
            if known and prec not in nodes:
                continue

            # do not keep going if there are unselected markers pointing to
            # this node. If we end up traversing these unselected markers
            # later, the node will be taken care of at that point.
            precmarkers = _filterprunes(successormarkers.get(prec))
            if precmarkers.issubset(exclmarkers):
                seennodes.add(prec)
                stack.append(prec)

    return exclmarkers
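# A hedged usage sketch in the spirit of the strip example above (wrapper
# name hypothetical): the markers that can be deleted together with `nodes`.
# Per the docstring, they are a subset of the markers relevant to those
# nodes.
def _sketch_strippablemarkers(repo, nodes):
    exclusive = exclusivemarkers(repo, nodes)
    relevant = set(repo.obsstore.relevantmarkers(list(nodes)))
    assert exclusive.issubset(relevant)
    return exclusive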
def foreground(repo, nodes):
    """return all nodes in the "foreground" of other nodes

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that obsolescence cycles may result in complex situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)
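# The while loop above is a fixed-point iteration: each pass extends the
# set with the descendants of newly found successors and stops once a pass
# adds nothing. A small hedged sketch (helper name hypothetical):
def _sketch_foreground(repo):
    # every node reachable from '.' via children or obsolescence successors
    return foreground(repo, [repo['.'].node()])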
def getobsoleted(repo, tr):
    """return the set of pre-existing revisions obsoleted by a transaction"""
    torev = repo.unfiltered().changelog.nodemap.get
    phase = repo._phasecache.phase
    succsmarkers = repo.obsstore.successors.get
    public = phases.public
    addedmarkers = tr.changes.get('obsmarkers')
    addedrevs = tr.changes.get('revs')
    seenrevs = set(addedrevs)
    obsoleted = set()
    for mark in addedmarkers:
        node = mark[0]
        rev = torev(node)
        if rev is None or rev in seenrevs:
            continue
        seenrevs.add(rev)
        if phase(repo, rev) == public:
            continue
        if set(succsmarkers(node) or []).issubset(addedmarkers):
            obsoleted.add(rev)
    return obsoleted
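# A hedged usage sketch (wiring is illustrative; `tr` must be a live
# transaction carrying the 'obsmarkers' and 'revs' change sets used above):
# report the pre-existing revisions a transaction just made obsolete.
def _sketch_reportobsoleted(repo, tr):
    for rev in sorted(getobsoleted(repo, tr)):
        repo.ui.status('obsoleted %s\n' % repo[rev])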
def successorssets(repo, initialnode, closest=False, cache=None):
    """Return set of all latest successors of initial nodes

    The successors set of a changeset A is the group of revisions that
    succeed A. It succeeds A as a consistent whole, each revision being only
    a partial replacement. By default, the successors set contains
    non-obsolete changesets only, walking the obsolescence graph until
    reaching a leaf. If 'closest' is set to True, the closest
    successors-sets are returned (the obsolescence walk stops on known
    changesets).

    This function returns the full list of successor sets, which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a
    valid successors set. Note that (A,) may be a valid successors set for
    changeset A (see below).

    In most cases, a changeset A will have a single element (e.g. the
    changeset A is replaced by A') in its successors set. However, it is also
    common for a changeset A to have no elements in its successors set (e.g.
    the changeset has been pruned). Therefore, the returned list of
    successors sets will be [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e.
    [(A',), (A'',)].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the
    successor set will contain itself only, i.e. [(A,)].

    Finally, final successors unknown locally are considered to be pruned
    (pruned: obsoleted without any successors; final: successors not
    affected by markers).

    The 'closest' mode respects the repoview filtering. For example, without
    any filter it will stop at the first locally-known changeset; with the
    'visible' filter it will stop on visible changesets.

    The optional `cache` parameter is a dictionary that may contain
    precomputed successors sets. It is meant to reuse the computation of a
    previous call to `successorssets` when multiple calls are made at the
    same time. The cache dictionary is updated in place. The caller is
    responsible for its life span. Code that makes multiple calls to
    `successorssets` *should* use this cache mechanism or risk a performance
    hit.

    Since results differ depending on the 'closest' mode, the same cache
    cannot be reused for both modes.
    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node we search
    # successors sets for is stacked there.
    #
    # The `stackedset` is a set version of this stack used to check if a
    # node is already stacked. This check is used to detect cycles and
    # prevent infinite loops.
    #
    # successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors
    # sets for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Stop the walk:
        #    default case: Node is not obsolete
        #    closest case: Node is known at this repo filter level
        #    -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]

        # case 2 condition is a bit hairy because of closest,
        # we compute it on its own
        case2condition = ((current not in succmarkers)
                          or (closest and current != initialnode
                              and current in repo))

        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif case2condition:
            # case (2): end of walk.
            if current in repo:
                # We have a valid successor.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successor
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successor of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrarily set the successors sets
            #     of the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see
            # details in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means
            #   split, a single successor is a standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3): if we have not computed the
                            # successors sets of one of those successors,
                            # we add it to the `toproceed` stack and stop
                            # all work for this iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" nodes.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in successors
                                    # set; first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse()  # put small successors sets first
                cache[current] = final
    return cache[initialnode]
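# A result-shape sketch (helper name and labels hypothetical): how a caller
# can classify each node from the returned list of successors sets, reusing
# a single cache across calls as the docstring recommends.
def _sketch_classify(repo, nodes):
    cache = {}
    for node in nodes:
        ss = successorssets(repo, node, cache=cache)
        if not ss:
            kind = 'pruned'
        elif ss == [(node,)]:
            kind = 'not obsolete'
        elif len(ss) > 1:
            kind = 'divergent'
        elif len(ss[0]) > 1:
            kind = 'split'
        else:
            kind = 'replaced'
        repo.ui.write('%r: %s\n' % (node, kind))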