merge with stable
Yuya Nishihara
r41109:46e0563c merge default
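The first file added below is a VCR "cassette": a JSON transcript of a single Conduit HTTP exchange that the phabricator extension's tests replay instead of contacting phab.mercurial-scm.org. Commands wrapped by the extension's vcrcommand decorator replay such a file when invoked with the --test-vcr flag (see _VCR_FLAGS further down). As a rough sketch, assuming the vcr (vcrpy) package is installed and using a hypothetical cassette path, the same transcript could be replayed directly:

# Sketch: replay a recorded JSON cassette with vcrpy, mirroring what the
# extension's vcrcommand wrapper does when --test-vcr is passed.
# The cassette path below is illustrative, not taken from this commit.
import vcr

recorder = vcr.VCR(serializer='json')
with recorder.use_cassette('phab-conduit.json'):
    # HTTP requests issued inside this block are answered from the
    # recorded interactions rather than hitting the real server.
    pass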
@@ -0,0 +1,73 @@
1 {
2 "interactions": [
3 {
4 "response": {
5 "status": {
6 "message": "OK",
7 "code": 200
8 },
9 "headers": {
10 "content-type": [
11 "application/json"
12 ],
13 "date": [
14 "Fri, 21 Dec 2018 22:19:11 GMT"
15 ],
16 "x-content-type-options": [
17 "nosniff"
18 ],
19 "cache-control": [
20 "no-store"
21 ],
22 "strict-transport-security": [
23 "max-age=0; includeSubdomains; preload"
24 ],
25 "x-frame-options": [
26 "Deny"
27 ],
28 "set-cookie": [
29 "phsid=A%2Fdv22bpksbdis3vfeksluagfslhfojblbnkro7we4; expires=Wed, 20-Dec-2023 22:19:11 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
30 ],
31 "x-xss-protection": [
32 "1; mode=block"
33 ],
34 "expires": [
35 "Sat, 01 Jan 2000 00:00:00 GMT"
36 ],
37 "transfer-encoding": [
38 "chunked"
39 ],
40 "server": [
41 "Apache/2.4.10 (Debian)"
42 ]
43 },
44 "body": {
45 "string": "{\"result\":{\"data\":[],\"maps\":{},\"query\":{\"queryKey\":null},\"cursor\":{\"limit\":100,\"after\":null,\"before\":null,\"order\":null}},\"error_code\":null,\"error_info\":null}"
46 }
47 },
48 "request": {
49 "method": "POST",
50 "headers": {
51 "accept": [
52 "application/mercurial-0.1"
53 ],
54 "content-type": [
55 "application/x-www-form-urlencoded"
56 ],
57 "content-length": [
58 "70"
59 ],
60 "host": [
61 "phab.mercurial-scm.org"
62 ],
63 "user-agent": [
64 "mercurial/proto-1.0 (Mercurial 4.8.1+564-6f483b107eb5+20181221)"
65 ]
66 },
67 "uri": "https://phab.mercurial-scm.org//api/user.search",
68 "body": "constraints%5BisBot%5D=true&api.token=cli-hahayouwish"
69 }
70 }
71 ],
72 "version": 1
 73 }
\ No newline at end of file
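The request body recorded above, "constraints%5BisBot%5D=true&api.token=...", is a PHP-style form encoding of the nested parameter {'constraints': {'isBot': True}}. Emitting "true" for a Python boolean is exactly what the change to urlencodenested in the hunk below adds. A minimal standalone sketch of that flattening, in plain Python 3 strings rather than the extension's byte strings:

# Sketch of PHP-style flattening of nested Conduit parameters, mirroring
# urlencodenested() in phabricator.py below; booleans become 'true'/'false'.
from urllib.parse import urlencode

def flatten(params):
    flat = {}
    def process(prefix, obj):
        if isinstance(obj, bool):
            obj = 'true' if obj else 'false'   # Python bool -> PHP form
        if isinstance(obj, dict):
            items = obj.items()
        elif isinstance(obj, list):
            items = enumerate(obj)
        else:
            flat[prefix] = obj
            return
        for k, v in items:
            process('%s[%s]' % (prefix, k) if prefix else k, v)
    process('', params)
    return urlencode(flat)

# flatten({'constraints': {'isBot': True}}) == 'constraints%5BisBot%5D=true'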
@@ -1,990 +1,992 @@
1 # phabricator.py - simple Phabricator integration
1 # phabricator.py - simple Phabricator integration
2 #
2 #
3 # Copyright 2017 Facebook, Inc.
3 # Copyright 2017 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 """simple Phabricator integration (EXPERIMENTAL)
7 """simple Phabricator integration (EXPERIMENTAL)
8
8
9 This extension provides a ``phabsend`` command which sends a stack of
9 This extension provides a ``phabsend`` command which sends a stack of
10 changesets to Phabricator, and a ``phabread`` command which prints a stack of
10 changesets to Phabricator, and a ``phabread`` command which prints a stack of
11 revisions in a format suitable for :hg:`import`, and a ``phabupdate`` command
11 revisions in a format suitable for :hg:`import`, and a ``phabupdate`` command
12 to update statuses in batch.
12 to update statuses in batch.
13
13
14 By default, Phabricator requires ``Test Plan`` which might prevent some
14 By default, Phabricator requires ``Test Plan`` which might prevent some
15 changeset from being sent. The requirement could be disabled by changing
15 changeset from being sent. The requirement could be disabled by changing
16 ``differential.require-test-plan-field`` config server side.
16 ``differential.require-test-plan-field`` config server side.
17
17
18 Config::
18 Config::
19
19
20 [phabricator]
20 [phabricator]
21 # Phabricator URL
21 # Phabricator URL
22 url = https://phab.example.com/
22 url = https://phab.example.com/
23
23
24 # Repo callsign. If a repo has a URL https://$HOST/diffusion/FOO, then its
24 # Repo callsign. If a repo has a URL https://$HOST/diffusion/FOO, then its
25 # callsign is "FOO".
25 # callsign is "FOO".
26 callsign = FOO
26 callsign = FOO
27
27
28 # curl command to use. If not set (default), use builtin HTTP library to
28 # curl command to use. If not set (default), use builtin HTTP library to
29 # communicate. If set, use the specified curl command. This could be useful
29 # communicate. If set, use the specified curl command. This could be useful
30 # if you need to specify advanced options that is not easily supported by
30 # if you need to specify advanced options that is not easily supported by
31 # the internal library.
31 # the internal library.
32 curlcmd = curl --connect-timeout 2 --retry 3 --silent
32 curlcmd = curl --connect-timeout 2 --retry 3 --silent
33
33
34 [auth]
34 [auth]
35 example.schemes = https
35 example.schemes = https
36 example.prefix = phab.example.com
36 example.prefix = phab.example.com
37
37
38 # API token. Get it from https://$HOST/conduit/login/
38 # API token. Get it from https://$HOST/conduit/login/
39 example.phabtoken = cli-xxxxxxxxxxxxxxxxxxxxxxxxxxxx
39 example.phabtoken = cli-xxxxxxxxxxxxxxxxxxxxxxxxxxxx
40 """
40 """
41
41
42 from __future__ import absolute_import
42 from __future__ import absolute_import
43
43
44 import itertools
44 import itertools
45 import json
45 import json
46 import operator
46 import operator
47 import re
47 import re
48
48
49 from mercurial.node import bin, nullid
49 from mercurial.node import bin, nullid
50 from mercurial.i18n import _
50 from mercurial.i18n import _
51 from mercurial import (
51 from mercurial import (
52 cmdutil,
52 cmdutil,
53 context,
53 context,
54 encoding,
54 encoding,
55 error,
55 error,
56 httpconnection as httpconnectionmod,
56 httpconnection as httpconnectionmod,
57 mdiff,
57 mdiff,
58 obsutil,
58 obsutil,
59 parser,
59 parser,
60 patch,
60 patch,
61 registrar,
61 registrar,
62 scmutil,
62 scmutil,
63 smartset,
63 smartset,
64 tags,
64 tags,
65 templateutil,
65 templateutil,
66 url as urlmod,
66 url as urlmod,
67 util,
67 util,
68 )
68 )
69 from mercurial.utils import (
69 from mercurial.utils import (
70 procutil,
70 procutil,
71 stringutil,
71 stringutil,
72 )
72 )
73
73
74 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
74 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
75 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
75 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
76 # be specifying the version(s) of Mercurial they are tested with, or
76 # be specifying the version(s) of Mercurial they are tested with, or
77 # leave the attribute unspecified.
77 # leave the attribute unspecified.
78 testedwith = 'ships-with-hg-core'
78 testedwith = 'ships-with-hg-core'
79
79
80 cmdtable = {}
80 cmdtable = {}
81 command = registrar.command(cmdtable)
81 command = registrar.command(cmdtable)
82
82
83 configtable = {}
83 configtable = {}
84 configitem = registrar.configitem(configtable)
84 configitem = registrar.configitem(configtable)
85
85
86 # developer config: phabricator.batchsize
86 # developer config: phabricator.batchsize
87 configitem(b'phabricator', b'batchsize',
87 configitem(b'phabricator', b'batchsize',
88 default=12,
88 default=12,
89 )
89 )
90 configitem(b'phabricator', b'callsign',
90 configitem(b'phabricator', b'callsign',
91 default=None,
91 default=None,
92 )
92 )
93 configitem(b'phabricator', b'curlcmd',
93 configitem(b'phabricator', b'curlcmd',
94 default=None,
94 default=None,
95 )
95 )
96 # developer config: phabricator.repophid
96 # developer config: phabricator.repophid
97 configitem(b'phabricator', b'repophid',
97 configitem(b'phabricator', b'repophid',
98 default=None,
98 default=None,
99 )
99 )
100 configitem(b'phabricator', b'url',
100 configitem(b'phabricator', b'url',
101 default=None,
101 default=None,
102 )
102 )
103 configitem(b'phabsend', b'confirm',
103 configitem(b'phabsend', b'confirm',
104 default=False,
104 default=False,
105 )
105 )
106
106
107 colortable = {
107 colortable = {
108 b'phabricator.action.created': b'green',
108 b'phabricator.action.created': b'green',
109 b'phabricator.action.skipped': b'magenta',
109 b'phabricator.action.skipped': b'magenta',
110 b'phabricator.action.updated': b'magenta',
110 b'phabricator.action.updated': b'magenta',
111 b'phabricator.desc': b'',
111 b'phabricator.desc': b'',
112 b'phabricator.drev': b'bold',
112 b'phabricator.drev': b'bold',
113 b'phabricator.node': b'',
113 b'phabricator.node': b'',
114 }
114 }
115
115
116 _VCR_FLAGS = [
116 _VCR_FLAGS = [
117 (b'', b'test-vcr', b'',
117 (b'', b'test-vcr', b'',
118 _(b'Path to a vcr file. If nonexistent, will record a new vcr transcript'
118 _(b'Path to a vcr file. If nonexistent, will record a new vcr transcript'
119 b', otherwise will mock all http requests using the specified vcr file.'
119 b', otherwise will mock all http requests using the specified vcr file.'
120 b' (ADVANCED)'
120 b' (ADVANCED)'
121 )),
121 )),
122 ]
122 ]
123
123
124 def vcrcommand(name, flags, spec, helpcategory=None):
124 def vcrcommand(name, flags, spec, helpcategory=None):
125 fullflags = flags + _VCR_FLAGS
125 fullflags = flags + _VCR_FLAGS
126 def decorate(fn):
126 def decorate(fn):
127 def inner(*args, **kwargs):
127 def inner(*args, **kwargs):
128 cassette = kwargs.pop(r'test_vcr', None)
128 cassette = kwargs.pop(r'test_vcr', None)
129 if cassette:
129 if cassette:
130 import hgdemandimport
130 import hgdemandimport
131 with hgdemandimport.deactivated():
131 with hgdemandimport.deactivated():
132 import vcr as vcrmod
132 import vcr as vcrmod
133 import vcr.stubs as stubs
133 import vcr.stubs as stubs
134 vcr = vcrmod.VCR(
134 vcr = vcrmod.VCR(
135 serializer=r'json',
135 serializer=r'json',
136 custom_patches=[
136 custom_patches=[
137 (urlmod, 'httpconnection', stubs.VCRHTTPConnection),
137 (urlmod, 'httpconnection', stubs.VCRHTTPConnection),
138 (urlmod, 'httpsconnection',
138 (urlmod, 'httpsconnection',
139 stubs.VCRHTTPSConnection),
139 stubs.VCRHTTPSConnection),
140 ])
140 ])
141 with vcr.use_cassette(cassette):
141 with vcr.use_cassette(cassette):
142 return fn(*args, **kwargs)
142 return fn(*args, **kwargs)
143 return fn(*args, **kwargs)
143 return fn(*args, **kwargs)
144 inner.__name__ = fn.__name__
144 inner.__name__ = fn.__name__
145 inner.__doc__ = fn.__doc__
145 inner.__doc__ = fn.__doc__
146 return command(name, fullflags, spec, helpcategory=helpcategory)(inner)
146 return command(name, fullflags, spec, helpcategory=helpcategory)(inner)
147 return decorate
147 return decorate
148
148
149 def urlencodenested(params):
149 def urlencodenested(params):
150 """like urlencode, but works with nested parameters.
150 """like urlencode, but works with nested parameters.
151
151
152 For example, if params is {'a': ['b', 'c'], 'd': {'e': 'f'}}, it will be
152 For example, if params is {'a': ['b', 'c'], 'd': {'e': 'f'}}, it will be
153 flattened to {'a[0]': 'b', 'a[1]': 'c', 'd[e]': 'f'} and then passed to
153 flattened to {'a[0]': 'b', 'a[1]': 'c', 'd[e]': 'f'} and then passed to
154 urlencode. Note: the encoding is consistent with PHP's http_build_query.
154 urlencode. Note: the encoding is consistent with PHP's http_build_query.
155 """
155 """
156 flatparams = util.sortdict()
156 flatparams = util.sortdict()
157 def process(prefix, obj):
157 def process(prefix, obj):
158 if isinstance(obj, bool):
159 obj = {True: b'true', False: b'false'}[obj] # Python -> PHP form
158 items = {list: enumerate, dict: lambda x: x.items()}.get(type(obj))
160 items = {list: enumerate, dict: lambda x: x.items()}.get(type(obj))
159 if items is None:
161 if items is None:
160 flatparams[prefix] = obj
162 flatparams[prefix] = obj
161 else:
163 else:
162 for k, v in items(obj):
164 for k, v in items(obj):
163 if prefix:
165 if prefix:
164 process(b'%s[%s]' % (prefix, k), v)
166 process(b'%s[%s]' % (prefix, k), v)
165 else:
167 else:
166 process(k, v)
168 process(k, v)
167 process(b'', params)
169 process(b'', params)
168 return util.urlreq.urlencode(flatparams)
170 return util.urlreq.urlencode(flatparams)
169
171
170 def readurltoken(repo):
172 def readurltoken(repo):
171 """return conduit url, token and make sure they exist
173 """return conduit url, token and make sure they exist
172
174
173 Currently read from [auth] config section. In the future, it might
175 Currently read from [auth] config section. In the future, it might
174 make sense to read from .arcconfig and .arcrc as well.
176 make sense to read from .arcconfig and .arcrc as well.
175 """
177 """
176 url = repo.ui.config(b'phabricator', b'url')
178 url = repo.ui.config(b'phabricator', b'url')
177 if not url:
179 if not url:
178 raise error.Abort(_(b'config %s.%s is required')
180 raise error.Abort(_(b'config %s.%s is required')
179 % (b'phabricator', b'url'))
181 % (b'phabricator', b'url'))
180
182
181 res = httpconnectionmod.readauthforuri(repo.ui, url, util.url(url).user)
183 res = httpconnectionmod.readauthforuri(repo.ui, url, util.url(url).user)
182 token = None
184 token = None
183
185
184 if res:
186 if res:
185 group, auth = res
187 group, auth = res
186
188
187 repo.ui.debug(b"using auth.%s.* for authentication\n" % group)
189 repo.ui.debug(b"using auth.%s.* for authentication\n" % group)
188
190
189 token = auth.get(b'phabtoken')
191 token = auth.get(b'phabtoken')
190
192
191 if not token:
193 if not token:
192 raise error.Abort(_(b'Can\'t find conduit token associated to %s')
194 raise error.Abort(_(b'Can\'t find conduit token associated to %s')
193 % (url,))
195 % (url,))
194
196
195 return url, token
197 return url, token
196
198
197 def callconduit(repo, name, params):
199 def callconduit(repo, name, params):
198 """call Conduit API, params is a dict. return json.loads result, or None"""
200 """call Conduit API, params is a dict. return json.loads result, or None"""
199 host, token = readurltoken(repo)
201 host, token = readurltoken(repo)
200 url, authinfo = util.url(b'/'.join([host, b'api', name])).authinfo()
202 url, authinfo = util.url(b'/'.join([host, b'api', name])).authinfo()
201 repo.ui.debug(b'Conduit Call: %s %s\n' % (url, params))
203 repo.ui.debug(b'Conduit Call: %s %s\n' % (url, params))
202 params = params.copy()
204 params = params.copy()
203 params[b'api.token'] = token
205 params[b'api.token'] = token
204 data = urlencodenested(params)
206 data = urlencodenested(params)
205 curlcmd = repo.ui.config(b'phabricator', b'curlcmd')
207 curlcmd = repo.ui.config(b'phabricator', b'curlcmd')
206 if curlcmd:
208 if curlcmd:
207 sin, sout = procutil.popen2(b'%s -d @- %s'
209 sin, sout = procutil.popen2(b'%s -d @- %s'
208 % (curlcmd, procutil.shellquote(url)))
210 % (curlcmd, procutil.shellquote(url)))
209 sin.write(data)
211 sin.write(data)
210 sin.close()
212 sin.close()
211 body = sout.read()
213 body = sout.read()
212 else:
214 else:
213 urlopener = urlmod.opener(repo.ui, authinfo)
215 urlopener = urlmod.opener(repo.ui, authinfo)
214 request = util.urlreq.request(url, data=data)
216 request = util.urlreq.request(url, data=data)
215 body = urlopener.open(request).read()
217 body = urlopener.open(request).read()
216 repo.ui.debug(b'Conduit Response: %s\n' % body)
218 repo.ui.debug(b'Conduit Response: %s\n' % body)
217 parsed = json.loads(body)
219 parsed = json.loads(body)
218 if parsed.get(r'error_code'):
220 if parsed.get(r'error_code'):
219 msg = (_(b'Conduit Error (%s): %s')
221 msg = (_(b'Conduit Error (%s): %s')
220 % (parsed[r'error_code'], parsed[r'error_info']))
222 % (parsed[r'error_code'], parsed[r'error_info']))
221 raise error.Abort(msg)
223 raise error.Abort(msg)
222 return parsed[r'result']
224 return parsed[r'result']
223
225
224 @vcrcommand(b'debugcallconduit', [], _(b'METHOD'))
226 @vcrcommand(b'debugcallconduit', [], _(b'METHOD'))
225 def debugcallconduit(ui, repo, name):
227 def debugcallconduit(ui, repo, name):
226 """call Conduit API
228 """call Conduit API
227
229
228 Call parameters are read from stdin as a JSON blob. Result will be written
230 Call parameters are read from stdin as a JSON blob. Result will be written
229 to stdout as a JSON blob.
231 to stdout as a JSON blob.
230 """
232 """
231 params = json.loads(ui.fin.read())
233 params = json.loads(ui.fin.read())
232 result = callconduit(repo, name, params)
234 result = callconduit(repo, name, params)
233 s = json.dumps(result, sort_keys=True, indent=2, separators=(b',', b': '))
235 s = json.dumps(result, sort_keys=True, indent=2, separators=(b',', b': '))
234 ui.write(b'%s\n' % s)
236 ui.write(b'%s\n' % s)
235
237
236 def getrepophid(repo):
238 def getrepophid(repo):
237 """given callsign, return repository PHID or None"""
239 """given callsign, return repository PHID or None"""
238 # developer config: phabricator.repophid
240 # developer config: phabricator.repophid
239 repophid = repo.ui.config(b'phabricator', b'repophid')
241 repophid = repo.ui.config(b'phabricator', b'repophid')
240 if repophid:
242 if repophid:
241 return repophid
243 return repophid
242 callsign = repo.ui.config(b'phabricator', b'callsign')
244 callsign = repo.ui.config(b'phabricator', b'callsign')
243 if not callsign:
245 if not callsign:
244 return None
246 return None
245 query = callconduit(repo, b'diffusion.repository.search',
247 query = callconduit(repo, b'diffusion.repository.search',
246 {b'constraints': {b'callsigns': [callsign]}})
248 {b'constraints': {b'callsigns': [callsign]}})
247 if len(query[r'data']) == 0:
249 if len(query[r'data']) == 0:
248 return None
250 return None
249 repophid = encoding.strtolocal(query[r'data'][0][r'phid'])
251 repophid = encoding.strtolocal(query[r'data'][0][r'phid'])
250 repo.ui.setconfig(b'phabricator', b'repophid', repophid)
252 repo.ui.setconfig(b'phabricator', b'repophid', repophid)
251 return repophid
253 return repophid
252
254
253 _differentialrevisiontagre = re.compile(b'\AD([1-9][0-9]*)\Z')
255 _differentialrevisiontagre = re.compile(b'\AD([1-9][0-9]*)\Z')
254 _differentialrevisiondescre = re.compile(
256 _differentialrevisiondescre = re.compile(
255 b'^Differential Revision:\s*(?P<url>(?:.*)D(?P<id>[1-9][0-9]*))$', re.M)
257 b'^Differential Revision:\s*(?P<url>(?:.*)D(?P<id>[1-9][0-9]*))$', re.M)
256
258
257 def getoldnodedrevmap(repo, nodelist):
259 def getoldnodedrevmap(repo, nodelist):
258 """find previous nodes that has been sent to Phabricator
260 """find previous nodes that has been sent to Phabricator
259
261
260 return {node: (oldnode, Differential diff, Differential Revision ID)}
262 return {node: (oldnode, Differential diff, Differential Revision ID)}
261 for node in nodelist with known previous sent versions, or associated
263 for node in nodelist with known previous sent versions, or associated
262 Differential Revision IDs. ``oldnode`` and ``Differential diff`` could
264 Differential Revision IDs. ``oldnode`` and ``Differential diff`` could
263 be ``None``.
265 be ``None``.
264
266
265 Examines commit messages like "Differential Revision:" to get the
267 Examines commit messages like "Differential Revision:" to get the
266 association information.
268 association information.
267
269
268 If such commit message line is not found, examines all precursors and their
270 If such commit message line is not found, examines all precursors and their
269 tags. Tags with format like "D1234" are considered a match and the node
271 tags. Tags with format like "D1234" are considered a match and the node
270 with that tag, and the number after "D" (ex. 1234) will be returned.
272 with that tag, and the number after "D" (ex. 1234) will be returned.
271
273
272 The ``old node``, if not None, is guaranteed to be the last diff of
274 The ``old node``, if not None, is guaranteed to be the last diff of
273 corresponding Differential Revision, and exist in the repo.
275 corresponding Differential Revision, and exist in the repo.
274 """
276 """
275 url, token = readurltoken(repo)
277 url, token = readurltoken(repo)
276 unfi = repo.unfiltered()
278 unfi = repo.unfiltered()
277 nodemap = unfi.changelog.nodemap
279 nodemap = unfi.changelog.nodemap
278
280
279 result = {} # {node: (oldnode?, lastdiff?, drev)}
281 result = {} # {node: (oldnode?, lastdiff?, drev)}
280 toconfirm = {} # {node: (force, {precnode}, drev)}
282 toconfirm = {} # {node: (force, {precnode}, drev)}
281 for node in nodelist:
283 for node in nodelist:
282 ctx = unfi[node]
284 ctx = unfi[node]
283 # For tags like "D123", put them into "toconfirm" to verify later
285 # For tags like "D123", put them into "toconfirm" to verify later
284 precnodes = list(obsutil.allpredecessors(unfi.obsstore, [node]))
286 precnodes = list(obsutil.allpredecessors(unfi.obsstore, [node]))
285 for n in precnodes:
287 for n in precnodes:
286 if n in nodemap:
288 if n in nodemap:
287 for tag in unfi.nodetags(n):
289 for tag in unfi.nodetags(n):
288 m = _differentialrevisiontagre.match(tag)
290 m = _differentialrevisiontagre.match(tag)
289 if m:
291 if m:
290 toconfirm[node] = (0, set(precnodes), int(m.group(1)))
292 toconfirm[node] = (0, set(precnodes), int(m.group(1)))
291 continue
293 continue
292
294
293 # Check commit message
295 # Check commit message
294 m = _differentialrevisiondescre.search(ctx.description())
296 m = _differentialrevisiondescre.search(ctx.description())
295 if m:
297 if m:
296 toconfirm[node] = (1, set(precnodes), int(m.group(b'id')))
298 toconfirm[node] = (1, set(precnodes), int(m.group(b'id')))
297
299
298 # Double check if tags are genuine by collecting all old nodes from
300 # Double check if tags are genuine by collecting all old nodes from
299 # Phabricator, and expect precursors overlap with it.
301 # Phabricator, and expect precursors overlap with it.
300 if toconfirm:
302 if toconfirm:
301 drevs = [drev for force, precs, drev in toconfirm.values()]
303 drevs = [drev for force, precs, drev in toconfirm.values()]
302 alldiffs = callconduit(unfi, b'differential.querydiffs',
304 alldiffs = callconduit(unfi, b'differential.querydiffs',
303 {b'revisionIDs': drevs})
305 {b'revisionIDs': drevs})
304 getnode = lambda d: bin(encoding.unitolocal(
306 getnode = lambda d: bin(encoding.unitolocal(
305 getdiffmeta(d).get(r'node', b''))) or None
307 getdiffmeta(d).get(r'node', b''))) or None
306 for newnode, (force, precset, drev) in toconfirm.items():
308 for newnode, (force, precset, drev) in toconfirm.items():
307 diffs = [d for d in alldiffs.values()
309 diffs = [d for d in alldiffs.values()
308 if int(d[r'revisionID']) == drev]
310 if int(d[r'revisionID']) == drev]
309
311
310 # "precursors" as known by Phabricator
312 # "precursors" as known by Phabricator
311 phprecset = set(getnode(d) for d in diffs)
313 phprecset = set(getnode(d) for d in diffs)
312
314
313 # Ignore if precursors (Phabricator and local repo) do not overlap,
315 # Ignore if precursors (Phabricator and local repo) do not overlap,
314 # and force is not set (when commit message says nothing)
316 # and force is not set (when commit message says nothing)
315 if not force and not bool(phprecset & precset):
317 if not force and not bool(phprecset & precset):
316 tagname = b'D%d' % drev
318 tagname = b'D%d' % drev
317 tags.tag(repo, tagname, nullid, message=None, user=None,
319 tags.tag(repo, tagname, nullid, message=None, user=None,
318 date=None, local=True)
320 date=None, local=True)
319 unfi.ui.warn(_(b'D%s: local tag removed - does not match '
321 unfi.ui.warn(_(b'D%s: local tag removed - does not match '
320 b'Differential history\n') % drev)
322 b'Differential history\n') % drev)
321 continue
323 continue
322
324
323 # Find the last node using Phabricator metadata, and make sure it
325 # Find the last node using Phabricator metadata, and make sure it
324 # exists in the repo
326 # exists in the repo
325 oldnode = lastdiff = None
327 oldnode = lastdiff = None
326 if diffs:
328 if diffs:
327 lastdiff = max(diffs, key=lambda d: int(d[r'id']))
329 lastdiff = max(diffs, key=lambda d: int(d[r'id']))
328 oldnode = getnode(lastdiff)
330 oldnode = getnode(lastdiff)
329 if oldnode and oldnode not in nodemap:
331 if oldnode and oldnode not in nodemap:
330 oldnode = None
332 oldnode = None
331
333
332 result[newnode] = (oldnode, lastdiff, drev)
334 result[newnode] = (oldnode, lastdiff, drev)
333
335
334 return result
336 return result
335
337
336 def getdiff(ctx, diffopts):
338 def getdiff(ctx, diffopts):
337 """plain-text diff without header (user, commit message, etc)"""
339 """plain-text diff without header (user, commit message, etc)"""
338 output = util.stringio()
340 output = util.stringio()
339 for chunk, _label in patch.diffui(ctx.repo(), ctx.p1().node(), ctx.node(),
341 for chunk, _label in patch.diffui(ctx.repo(), ctx.p1().node(), ctx.node(),
340 None, opts=diffopts):
342 None, opts=diffopts):
341 output.write(chunk)
343 output.write(chunk)
342 return output.getvalue()
344 return output.getvalue()
343
345
344 def creatediff(ctx):
346 def creatediff(ctx):
345 """create a Differential Diff"""
347 """create a Differential Diff"""
346 repo = ctx.repo()
348 repo = ctx.repo()
347 repophid = getrepophid(repo)
349 repophid = getrepophid(repo)
348 # Create a "Differential Diff" via "differential.createrawdiff" API
350 # Create a "Differential Diff" via "differential.createrawdiff" API
349 params = {b'diff': getdiff(ctx, mdiff.diffopts(git=True, context=32767))}
351 params = {b'diff': getdiff(ctx, mdiff.diffopts(git=True, context=32767))}
350 if repophid:
352 if repophid:
351 params[b'repositoryPHID'] = repophid
353 params[b'repositoryPHID'] = repophid
352 diff = callconduit(repo, b'differential.createrawdiff', params)
354 diff = callconduit(repo, b'differential.createrawdiff', params)
353 if not diff:
355 if not diff:
354 raise error.Abort(_(b'cannot create diff for %s') % ctx)
356 raise error.Abort(_(b'cannot create diff for %s') % ctx)
355 return diff
357 return diff
356
358
357 def writediffproperties(ctx, diff):
359 def writediffproperties(ctx, diff):
358 """write metadata to diff so patches could be applied losslessly"""
360 """write metadata to diff so patches could be applied losslessly"""
359 params = {
361 params = {
360 b'diff_id': diff[r'id'],
362 b'diff_id': diff[r'id'],
361 b'name': b'hg:meta',
363 b'name': b'hg:meta',
362 b'data': json.dumps({
364 b'data': json.dumps({
363 b'user': ctx.user(),
365 b'user': ctx.user(),
364 b'date': b'%d %d' % ctx.date(),
366 b'date': b'%d %d' % ctx.date(),
365 b'node': ctx.hex(),
367 b'node': ctx.hex(),
366 b'parent': ctx.p1().hex(),
368 b'parent': ctx.p1().hex(),
367 }),
369 }),
368 }
370 }
369 callconduit(ctx.repo(), b'differential.setdiffproperty', params)
371 callconduit(ctx.repo(), b'differential.setdiffproperty', params)
370
372
371 params = {
373 params = {
372 b'diff_id': diff[r'id'],
374 b'diff_id': diff[r'id'],
373 b'name': b'local:commits',
375 b'name': b'local:commits',
374 b'data': json.dumps({
376 b'data': json.dumps({
375 ctx.hex(): {
377 ctx.hex(): {
376 b'author': stringutil.person(ctx.user()),
378 b'author': stringutil.person(ctx.user()),
377 b'authorEmail': stringutil.email(ctx.user()),
379 b'authorEmail': stringutil.email(ctx.user()),
378 b'time': ctx.date()[0],
380 b'time': ctx.date()[0],
379 },
381 },
380 }),
382 }),
381 }
383 }
382 callconduit(ctx.repo(), b'differential.setdiffproperty', params)
384 callconduit(ctx.repo(), b'differential.setdiffproperty', params)
383
385
384 def createdifferentialrevision(ctx, revid=None, parentrevid=None, oldnode=None,
386 def createdifferentialrevision(ctx, revid=None, parentrevid=None, oldnode=None,
385 olddiff=None, actions=None):
387 olddiff=None, actions=None):
386 """create or update a Differential Revision
388 """create or update a Differential Revision
387
389
388 If revid is None, create a new Differential Revision, otherwise update
390 If revid is None, create a new Differential Revision, otherwise update
389 revid. If parentrevid is not None, set it as a dependency.
391 revid. If parentrevid is not None, set it as a dependency.
390
392
391 If oldnode is not None, check if the patch content (without commit message
393 If oldnode is not None, check if the patch content (without commit message
392 and metadata) has changed before creating another diff.
394 and metadata) has changed before creating another diff.
393
395
394 If actions is not None, they will be appended to the transaction.
396 If actions is not None, they will be appended to the transaction.
395 """
397 """
396 repo = ctx.repo()
398 repo = ctx.repo()
397 if oldnode:
399 if oldnode:
398 diffopts = mdiff.diffopts(git=True, context=32767)
400 diffopts = mdiff.diffopts(git=True, context=32767)
399 oldctx = repo.unfiltered()[oldnode]
401 oldctx = repo.unfiltered()[oldnode]
400 neednewdiff = (getdiff(ctx, diffopts) != getdiff(oldctx, diffopts))
402 neednewdiff = (getdiff(ctx, diffopts) != getdiff(oldctx, diffopts))
401 else:
403 else:
402 neednewdiff = True
404 neednewdiff = True
403
405
404 transactions = []
406 transactions = []
405 if neednewdiff:
407 if neednewdiff:
406 diff = creatediff(ctx)
408 diff = creatediff(ctx)
407 transactions.append({b'type': b'update', b'value': diff[r'phid']})
409 transactions.append({b'type': b'update', b'value': diff[r'phid']})
408 else:
410 else:
409 # Even if we don't need to upload a new diff because the patch content
411 # Even if we don't need to upload a new diff because the patch content
410 # does not change. We might still need to update its metadata so
412 # does not change. We might still need to update its metadata so
411 # pushers could know the correct node metadata.
413 # pushers could know the correct node metadata.
412 assert olddiff
414 assert olddiff
413 diff = olddiff
415 diff = olddiff
414 writediffproperties(ctx, diff)
416 writediffproperties(ctx, diff)
415
417
416 # Use a temporary summary to set dependency. There might be better ways but
418 # Use a temporary summary to set dependency. There might be better ways but
417 # I cannot find them for now. But do not do that if we are updating an
419 # I cannot find them for now. But do not do that if we are updating an
418 # existing revision (revid is not None) since that introduces visible
420 # existing revision (revid is not None) since that introduces visible
419 # churns (someone edited "Summary" twice) on the web page.
421 # churns (someone edited "Summary" twice) on the web page.
420 if parentrevid and revid is None:
422 if parentrevid and revid is None:
421 summary = b'Depends on D%s' % parentrevid
423 summary = b'Depends on D%s' % parentrevid
422 transactions += [{b'type': b'summary', b'value': summary},
424 transactions += [{b'type': b'summary', b'value': summary},
423 {b'type': b'summary', b'value': b' '}]
425 {b'type': b'summary', b'value': b' '}]
424
426
425 if actions:
427 if actions:
426 transactions += actions
428 transactions += actions
427
429
428 # Parse commit message and update related fields.
430 # Parse commit message and update related fields.
429 desc = ctx.description()
431 desc = ctx.description()
430 info = callconduit(repo, b'differential.parsecommitmessage',
432 info = callconduit(repo, b'differential.parsecommitmessage',
431 {b'corpus': desc})
433 {b'corpus': desc})
432 for k, v in info[r'fields'].items():
434 for k, v in info[r'fields'].items():
433 if k in [b'title', b'summary', b'testPlan']:
435 if k in [b'title', b'summary', b'testPlan']:
434 transactions.append({b'type': k, b'value': v})
436 transactions.append({b'type': k, b'value': v})
435
437
436 params = {b'transactions': transactions}
438 params = {b'transactions': transactions}
437 if revid is not None:
439 if revid is not None:
438 # Update an existing Differential Revision
440 # Update an existing Differential Revision
439 params[b'objectIdentifier'] = revid
441 params[b'objectIdentifier'] = revid
440
442
441 revision = callconduit(repo, b'differential.revision.edit', params)
443 revision = callconduit(repo, b'differential.revision.edit', params)
442 if not revision:
444 if not revision:
443 raise error.Abort(_(b'cannot create revision for %s') % ctx)
445 raise error.Abort(_(b'cannot create revision for %s') % ctx)
444
446
445 return revision, diff
447 return revision, diff
446
448
447 def userphids(repo, names):
449 def userphids(repo, names):
448 """convert user names to PHIDs"""
450 """convert user names to PHIDs"""
449 query = {b'constraints': {b'usernames': names}}
451 query = {b'constraints': {b'usernames': names}}
450 result = callconduit(repo, b'user.search', query)
452 result = callconduit(repo, b'user.search', query)
451 # username not found is not an error of the API. So check if we have missed
453 # username not found is not an error of the API. So check if we have missed
452 # some names here.
454 # some names here.
453 data = result[r'data']
455 data = result[r'data']
454 resolved = set(entry[r'fields'][r'username'] for entry in data)
456 resolved = set(entry[r'fields'][r'username'] for entry in data)
455 unresolved = set(names) - resolved
457 unresolved = set(names) - resolved
456 if unresolved:
458 if unresolved:
457 raise error.Abort(_(b'unknown username: %s')
459 raise error.Abort(_(b'unknown username: %s')
458 % b' '.join(sorted(unresolved)))
460 % b' '.join(sorted(unresolved)))
459 return [entry[r'phid'] for entry in data]
461 return [entry[r'phid'] for entry in data]
460
462
461 @vcrcommand(b'phabsend',
463 @vcrcommand(b'phabsend',
462 [(b'r', b'rev', [], _(b'revisions to send'), _(b'REV')),
464 [(b'r', b'rev', [], _(b'revisions to send'), _(b'REV')),
463 (b'', b'amend', True, _(b'update commit messages')),
465 (b'', b'amend', True, _(b'update commit messages')),
464 (b'', b'reviewer', [], _(b'specify reviewers')),
466 (b'', b'reviewer', [], _(b'specify reviewers')),
465 (b'', b'confirm', None, _(b'ask for confirmation before sending'))],
467 (b'', b'confirm', None, _(b'ask for confirmation before sending'))],
466 _(b'REV [OPTIONS]'),
468 _(b'REV [OPTIONS]'),
467 helpcategory=command.CATEGORY_IMPORT_EXPORT)
469 helpcategory=command.CATEGORY_IMPORT_EXPORT)
468 def phabsend(ui, repo, *revs, **opts):
470 def phabsend(ui, repo, *revs, **opts):
469 """upload changesets to Phabricator
471 """upload changesets to Phabricator
470
472
471 If there are multiple revisions specified, they will be send as a stack
473 If there are multiple revisions specified, they will be send as a stack
472 with a linear dependencies relationship using the order specified by the
474 with a linear dependencies relationship using the order specified by the
473 revset.
475 revset.
474
476
475 For the first time uploading changesets, local tags will be created to
477 For the first time uploading changesets, local tags will be created to
476 maintain the association. After the first time, phabsend will check
478 maintain the association. After the first time, phabsend will check
477 obsstore and tags information so it can figure out whether to update an
479 obsstore and tags information so it can figure out whether to update an
478 existing Differential Revision, or create a new one.
480 existing Differential Revision, or create a new one.
479
481
480 If --amend is set, update commit messages so they have the
482 If --amend is set, update commit messages so they have the
481 ``Differential Revision`` URL, remove related tags. This is similar to what
483 ``Differential Revision`` URL, remove related tags. This is similar to what
482 arcanist will do, and is more desired in author-push workflows. Otherwise,
484 arcanist will do, and is more desired in author-push workflows. Otherwise,
483 use local tags to record the ``Differential Revision`` association.
485 use local tags to record the ``Differential Revision`` association.
484
486
485 The --confirm option lets you confirm changesets before sending them. You
487 The --confirm option lets you confirm changesets before sending them. You
486 can also add following to your configuration file to make it default
488 can also add following to your configuration file to make it default
487 behaviour::
489 behaviour::
488
490
489 [phabsend]
491 [phabsend]
490 confirm = true
492 confirm = true
491
493
492 phabsend will check obsstore and the above association to decide whether to
494 phabsend will check obsstore and the above association to decide whether to
493 update an existing Differential Revision, or create a new one.
495 update an existing Differential Revision, or create a new one.
494 """
496 """
495 revs = list(revs) + opts.get(b'rev', [])
497 revs = list(revs) + opts.get(b'rev', [])
496 revs = scmutil.revrange(repo, revs)
498 revs = scmutil.revrange(repo, revs)
497
499
498 if not revs:
500 if not revs:
499 raise error.Abort(_(b'phabsend requires at least one changeset'))
501 raise error.Abort(_(b'phabsend requires at least one changeset'))
500 if opts.get(b'amend'):
502 if opts.get(b'amend'):
501 cmdutil.checkunfinished(repo)
503 cmdutil.checkunfinished(repo)
502
504
503 # {newnode: (oldnode, olddiff, olddrev}
505 # {newnode: (oldnode, olddiff, olddrev}
504 oldmap = getoldnodedrevmap(repo, [repo[r].node() for r in revs])
506 oldmap = getoldnodedrevmap(repo, [repo[r].node() for r in revs])
505
507
506 confirm = ui.configbool(b'phabsend', b'confirm')
508 confirm = ui.configbool(b'phabsend', b'confirm')
507 confirm |= bool(opts.get(b'confirm'))
509 confirm |= bool(opts.get(b'confirm'))
508 if confirm:
510 if confirm:
509 confirmed = _confirmbeforesend(repo, revs, oldmap)
511 confirmed = _confirmbeforesend(repo, revs, oldmap)
510 if not confirmed:
512 if not confirmed:
511 raise error.Abort(_(b'phabsend cancelled'))
513 raise error.Abort(_(b'phabsend cancelled'))
512
514
513 actions = []
515 actions = []
514 reviewers = opts.get(b'reviewer', [])
516 reviewers = opts.get(b'reviewer', [])
515 if reviewers:
517 if reviewers:
516 phids = userphids(repo, reviewers)
518 phids = userphids(repo, reviewers)
517 actions.append({b'type': b'reviewers.add', b'value': phids})
519 actions.append({b'type': b'reviewers.add', b'value': phids})
518
520
519 drevids = [] # [int]
521 drevids = [] # [int]
520 diffmap = {} # {newnode: diff}
522 diffmap = {} # {newnode: diff}
521
523
522 # Send patches one by one so we know their Differential Revision IDs and
524 # Send patches one by one so we know their Differential Revision IDs and
523 # can provide dependency relationship
525 # can provide dependency relationship
524 lastrevid = None
526 lastrevid = None
525 for rev in revs:
527 for rev in revs:
526 ui.debug(b'sending rev %d\n' % rev)
528 ui.debug(b'sending rev %d\n' % rev)
527 ctx = repo[rev]
529 ctx = repo[rev]
528
530
529 # Get Differential Revision ID
531 # Get Differential Revision ID
530 oldnode, olddiff, revid = oldmap.get(ctx.node(), (None, None, None))
532 oldnode, olddiff, revid = oldmap.get(ctx.node(), (None, None, None))
531 if oldnode != ctx.node() or opts.get(b'amend'):
533 if oldnode != ctx.node() or opts.get(b'amend'):
532 # Create or update Differential Revision
534 # Create or update Differential Revision
533 revision, diff = createdifferentialrevision(
535 revision, diff = createdifferentialrevision(
534 ctx, revid, lastrevid, oldnode, olddiff, actions)
536 ctx, revid, lastrevid, oldnode, olddiff, actions)
535 diffmap[ctx.node()] = diff
537 diffmap[ctx.node()] = diff
536 newrevid = int(revision[r'object'][r'id'])
538 newrevid = int(revision[r'object'][r'id'])
537 if revid:
539 if revid:
538 action = b'updated'
540 action = b'updated'
539 else:
541 else:
540 action = b'created'
542 action = b'created'
541
543
542 # Create a local tag to note the association, if commit message
544 # Create a local tag to note the association, if commit message
543 # does not have it already
545 # does not have it already
544 m = _differentialrevisiondescre.search(ctx.description())
546 m = _differentialrevisiondescre.search(ctx.description())
545 if not m or int(m.group(b'id')) != newrevid:
547 if not m or int(m.group(b'id')) != newrevid:
546 tagname = b'D%d' % newrevid
548 tagname = b'D%d' % newrevid
547 tags.tag(repo, tagname, ctx.node(), message=None, user=None,
549 tags.tag(repo, tagname, ctx.node(), message=None, user=None,
548 date=None, local=True)
550 date=None, local=True)
549 else:
551 else:
550 # Nothing changed. But still set "newrevid" so the next revision
552 # Nothing changed. But still set "newrevid" so the next revision
551 # could depend on this one.
553 # could depend on this one.
552 newrevid = revid
554 newrevid = revid
553 action = b'skipped'
555 action = b'skipped'
554
556
555 actiondesc = ui.label(
557 actiondesc = ui.label(
556 {b'created': _(b'created'),
558 {b'created': _(b'created'),
557 b'skipped': _(b'skipped'),
559 b'skipped': _(b'skipped'),
558 b'updated': _(b'updated')}[action],
560 b'updated': _(b'updated')}[action],
559 b'phabricator.action.%s' % action)
561 b'phabricator.action.%s' % action)
560 drevdesc = ui.label(b'D%s' % newrevid, b'phabricator.drev')
562 drevdesc = ui.label(b'D%s' % newrevid, b'phabricator.drev')
561 nodedesc = ui.label(bytes(ctx), b'phabricator.node')
563 nodedesc = ui.label(bytes(ctx), b'phabricator.node')
562 desc = ui.label(ctx.description().split(b'\n')[0], b'phabricator.desc')
564 desc = ui.label(ctx.description().split(b'\n')[0], b'phabricator.desc')
563 ui.write(_(b'%s - %s - %s: %s\n') % (drevdesc, actiondesc, nodedesc,
565 ui.write(_(b'%s - %s - %s: %s\n') % (drevdesc, actiondesc, nodedesc,
564 desc))
566 desc))
565 drevids.append(newrevid)
567 drevids.append(newrevid)
566 lastrevid = newrevid
568 lastrevid = newrevid
567
569
568 # Update commit messages and remove tags
570 # Update commit messages and remove tags
569 if opts.get(b'amend'):
571 if opts.get(b'amend'):
570 unfi = repo.unfiltered()
572 unfi = repo.unfiltered()
571 drevs = callconduit(repo, b'differential.query', {b'ids': drevids})
573 drevs = callconduit(repo, b'differential.query', {b'ids': drevids})
572 with repo.wlock(), repo.lock(), repo.transaction(b'phabsend'):
574 with repo.wlock(), repo.lock(), repo.transaction(b'phabsend'):
573 wnode = unfi[b'.'].node()
575 wnode = unfi[b'.'].node()
574 mapping = {} # {oldnode: [newnode]}
576 mapping = {} # {oldnode: [newnode]}
575 for i, rev in enumerate(revs):
577 for i, rev in enumerate(revs):
576 old = unfi[rev]
578 old = unfi[rev]
577 drevid = drevids[i]
579 drevid = drevids[i]
578 drev = [d for d in drevs if int(d[r'id']) == drevid][0]
580 drev = [d for d in drevs if int(d[r'id']) == drevid][0]
579 newdesc = getdescfromdrev(drev)
581 newdesc = getdescfromdrev(drev)
580 newdesc = encoding.unitolocal(newdesc)
582 newdesc = encoding.unitolocal(newdesc)
581 # Make sure commit message contain "Differential Revision"
583 # Make sure commit message contain "Differential Revision"
582 if old.description() != newdesc:
584 if old.description() != newdesc:
583 parents = [
585 parents = [
584 mapping.get(old.p1().node(), (old.p1(),))[0],
586 mapping.get(old.p1().node(), (old.p1(),))[0],
585 mapping.get(old.p2().node(), (old.p2(),))[0],
587 mapping.get(old.p2().node(), (old.p2(),))[0],
586 ]
588 ]
587 new = context.metadataonlyctx(
589 new = context.metadataonlyctx(
588 repo, old, parents=parents, text=newdesc,
590 repo, old, parents=parents, text=newdesc,
589 user=old.user(), date=old.date(), extra=old.extra())
591 user=old.user(), date=old.date(), extra=old.extra())
590
592
591 newnode = new.commit()
593 newnode = new.commit()
592
594
593 mapping[old.node()] = [newnode]
595 mapping[old.node()] = [newnode]
594 # Update diff property
596 # Update diff property
595 writediffproperties(unfi[newnode], diffmap[old.node()])
597 writediffproperties(unfi[newnode], diffmap[old.node()])
596 # Remove local tags since it's no longer necessary
598 # Remove local tags since it's no longer necessary
597 tagname = b'D%d' % drevid
599 tagname = b'D%d' % drevid
598 if tagname in repo.tags():
600 if tagname in repo.tags():
599 tags.tag(repo, tagname, nullid, message=None, user=None,
601 tags.tag(repo, tagname, nullid, message=None, user=None,
600 date=None, local=True)
602 date=None, local=True)
601 scmutil.cleanupnodes(repo, mapping, b'phabsend', fixphase=True)
603 scmutil.cleanupnodes(repo, mapping, b'phabsend', fixphase=True)
602 if wnode in mapping:
604 if wnode in mapping:
603 unfi.setparents(mapping[wnode][0])
605 unfi.setparents(mapping[wnode][0])
604
606
605 # Map from "hg:meta" keys to header understood by "hg import". The order is
607 # Map from "hg:meta" keys to header understood by "hg import". The order is
606 # consistent with "hg export" output.
608 # consistent with "hg export" output.
607 _metanamemap = util.sortdict([(r'user', b'User'), (r'date', b'Date'),
609 _metanamemap = util.sortdict([(r'user', b'User'), (r'date', b'Date'),
608 (r'node', b'Node ID'), (r'parent', b'Parent ')])
610 (r'node', b'Node ID'), (r'parent', b'Parent ')])
609
611
610 def _confirmbeforesend(repo, revs, oldmap):
612 def _confirmbeforesend(repo, revs, oldmap):
611 url, token = readurltoken(repo)
613 url, token = readurltoken(repo)
612 ui = repo.ui
614 ui = repo.ui
613 for rev in revs:
615 for rev in revs:
614 ctx = repo[rev]
616 ctx = repo[rev]
615 desc = ctx.description().splitlines()[0]
617 desc = ctx.description().splitlines()[0]
616 oldnode, olddiff, drevid = oldmap.get(ctx.node(), (None, None, None))
618 oldnode, olddiff, drevid = oldmap.get(ctx.node(), (None, None, None))
617 if drevid:
619 if drevid:
618 drevdesc = ui.label(b'D%s' % drevid, b'phabricator.drev')
620 drevdesc = ui.label(b'D%s' % drevid, b'phabricator.drev')
619 else:
621 else:
620 drevdesc = ui.label(_(b'NEW'), b'phabricator.drev')
622 drevdesc = ui.label(_(b'NEW'), b'phabricator.drev')
621
623
622 ui.write(_(b'%s - %s: %s\n')
624 ui.write(_(b'%s - %s: %s\n')
623 % (drevdesc,
625 % (drevdesc,
624 ui.label(bytes(ctx), b'phabricator.node'),
626 ui.label(bytes(ctx), b'phabricator.node'),
625 ui.label(desc, b'phabricator.desc')))
627 ui.label(desc, b'phabricator.desc')))
626
628
627 if ui.promptchoice(_(b'Send the above changes to %s (yn)?'
629 if ui.promptchoice(_(b'Send the above changes to %s (yn)?'
628 b'$$ &Yes $$ &No') % url):
630 b'$$ &Yes $$ &No') % url):
629 return False
631 return False
630
632
631 return True
633 return True
632
634
633 _knownstatusnames = {b'accepted', b'needsreview', b'needsrevision', b'closed',
635 _knownstatusnames = {b'accepted', b'needsreview', b'needsrevision', b'closed',
634 b'abandoned'}
636 b'abandoned'}
635
637
636 def _getstatusname(drev):
638 def _getstatusname(drev):
637 """get normalized status name from a Differential Revision"""
639 """get normalized status name from a Differential Revision"""
638 return drev[r'statusName'].replace(b' ', b'').lower()
640 return drev[r'statusName'].replace(b' ', b'').lower()
639
641
640 # Small language to specify differential revisions. Support symbols: (), :X,
642 # Small language to specify differential revisions. Support symbols: (), :X,
641 # +, and -.
643 # +, and -.
642
644
643 _elements = {
645 _elements = {
644 # token-type: binding-strength, primary, prefix, infix, suffix
646 # token-type: binding-strength, primary, prefix, infix, suffix
645 b'(': (12, None, (b'group', 1, b')'), None, None),
647 b'(': (12, None, (b'group', 1, b')'), None, None),
646 b':': (8, None, (b'ancestors', 8), None, None),
648 b':': (8, None, (b'ancestors', 8), None, None),
647 b'&': (5, None, None, (b'and_', 5), None),
649 b'&': (5, None, None, (b'and_', 5), None),
648 b'+': (4, None, None, (b'add', 4), None),
650 b'+': (4, None, None, (b'add', 4), None),
649 b'-': (4, None, None, (b'sub', 4), None),
651 b'-': (4, None, None, (b'sub', 4), None),
650 b')': (0, None, None, None, None),
652 b')': (0, None, None, None, None),
651 b'symbol': (0, b'symbol', None, None, None),
653 b'symbol': (0, b'symbol', None, None, None),
652 b'end': (0, None, None, None, None),
654 b'end': (0, None, None, None, None),
653 }
655 }
654
656
655 def _tokenize(text):
657 def _tokenize(text):
656 view = memoryview(text) # zero-copy slice
658 view = memoryview(text) # zero-copy slice
657 special = b'():+-& '
659 special = b'():+-& '
658 pos = 0
660 pos = 0
659 length = len(text)
661 length = len(text)
660 while pos < length:
662 while pos < length:
661 symbol = b''.join(itertools.takewhile(lambda ch: ch not in special,
663 symbol = b''.join(itertools.takewhile(lambda ch: ch not in special,
662 view[pos:]))
664 view[pos:]))
663 if symbol:
665 if symbol:
664 yield (b'symbol', symbol, pos)
666 yield (b'symbol', symbol, pos)
665 pos += len(symbol)
667 pos += len(symbol)
666 else: # special char, ignore space
668 else: # special char, ignore space
667 if text[pos] != b' ':
669 if text[pos] != b' ':
668 yield (text[pos], None, pos)
670 yield (text[pos], None, pos)
669 pos += 1
671 pos += 1
670 yield (b'end', None, pos)
672 yield (b'end', None, pos)
671
673
672 def _parse(text):
674 def _parse(text):
673 tree, pos = parser.parser(_elements).parse(_tokenize(text))
675 tree, pos = parser.parser(_elements).parse(_tokenize(text))
674 if pos != len(text):
676 if pos != len(text):
675 raise error.ParseError(b'invalid token', pos)
677 raise error.ParseError(b'invalid token', pos)
676 return tree
678 return tree
677
679
678 def _parsedrev(symbol):
680 def _parsedrev(symbol):
679 """str -> int or None, ex. 'D45' -> 45; '12' -> 12; 'x' -> None"""
681 """str -> int or None, ex. 'D45' -> 45; '12' -> 12; 'x' -> None"""
680 if symbol.startswith(b'D') and symbol[1:].isdigit():
682 if symbol.startswith(b'D') and symbol[1:].isdigit():
681 return int(symbol[1:])
683 return int(symbol[1:])
682 if symbol.isdigit():
684 if symbol.isdigit():
683 return int(symbol)
685 return int(symbol)
684
686
685 def _prefetchdrevs(tree):
687 def _prefetchdrevs(tree):
686 """return ({single-drev-id}, {ancestor-drev-id}) to prefetch"""
688 """return ({single-drev-id}, {ancestor-drev-id}) to prefetch"""
687 drevs = set()
689 drevs = set()
688 ancestordrevs = set()
690 ancestordrevs = set()
689 op = tree[0]
691 op = tree[0]
690 if op == b'symbol':
692 if op == b'symbol':
691 r = _parsedrev(tree[1])
693 r = _parsedrev(tree[1])
692 if r:
694 if r:
693 drevs.add(r)
695 drevs.add(r)
694 elif op == b'ancestors':
696 elif op == b'ancestors':
695 r, a = _prefetchdrevs(tree[1])
697 r, a = _prefetchdrevs(tree[1])
696 drevs.update(r)
698 drevs.update(r)
697 ancestordrevs.update(r)
699 ancestordrevs.update(r)
698 ancestordrevs.update(a)
700 ancestordrevs.update(a)
699 else:
701 else:
700 for t in tree[1:]:
702 for t in tree[1:]:
701 r, a = _prefetchdrevs(t)
703 r, a = _prefetchdrevs(t)
702 drevs.update(r)
704 drevs.update(r)
703 ancestordrevs.update(a)
705 ancestordrevs.update(a)
704 return drevs, ancestordrevs
706 return drevs, ancestordrevs
705
707
706 def querydrev(repo, spec):
708 def querydrev(repo, spec):
707 """return a list of "Differential Revision" dicts
709 """return a list of "Differential Revision" dicts
708
710
709 spec is a string using a simple query language, see docstring in phabread
711 spec is a string using a simple query language, see docstring in phabread
710 for details.
712 for details.
711
713
712 A "Differential Revision dict" looks like:
714 A "Differential Revision dict" looks like:
713
715
714 {
716 {
715 "id": "2",
717 "id": "2",
716 "phid": "PHID-DREV-672qvysjcczopag46qty",
718 "phid": "PHID-DREV-672qvysjcczopag46qty",
717 "title": "example",
719 "title": "example",
718 "uri": "https://phab.example.com/D2",
720 "uri": "https://phab.example.com/D2",
719 "dateCreated": "1499181406",
721 "dateCreated": "1499181406",
720 "dateModified": "1499182103",
722 "dateModified": "1499182103",
721 "authorPHID": "PHID-USER-tv3ohwc4v4jeu34otlye",
723 "authorPHID": "PHID-USER-tv3ohwc4v4jeu34otlye",
722 "status": "0",
724 "status": "0",
723 "statusName": "Needs Review",
725 "statusName": "Needs Review",
724 "properties": [],
726 "properties": [],
725 "branch": null,
727 "branch": null,
726 "summary": "",
728 "summary": "",
727 "testPlan": "",
729 "testPlan": "",
728 "lineCount": "2",
730 "lineCount": "2",
729 "activeDiffPHID": "PHID-DIFF-xoqnjkobbm6k4dk6hi72",
731 "activeDiffPHID": "PHID-DIFF-xoqnjkobbm6k4dk6hi72",
730 "diffs": [
732 "diffs": [
731 "3",
733 "3",
732 "4",
734 "4",
733 ],
735 ],
734 "commits": [],
736 "commits": [],
735 "reviewers": [],
737 "reviewers": [],
736 "ccs": [],
738 "ccs": [],
737 "hashes": [],
739 "hashes": [],
738 "auxiliary": {
740 "auxiliary": {
739 "phabricator:projects": [],
741 "phabricator:projects": [],
740 "phabricator:depends-on": [
742 "phabricator:depends-on": [
741 "PHID-DREV-gbapp366kutjebt7agcd"
743 "PHID-DREV-gbapp366kutjebt7agcd"
742 ]
744 ]
743 },
745 },
744 "repositoryPHID": "PHID-REPO-hub2hx62ieuqeheznasv",
746 "repositoryPHID": "PHID-REPO-hub2hx62ieuqeheznasv",
745 "sourcePath": null
747 "sourcePath": null
746 }
748 }
747 """
749 """
748 def fetch(params):
750 def fetch(params):
749 """params -> single drev or None"""
751 """params -> single drev or None"""
750 key = (params.get(r'ids') or params.get(r'phids') or [None])[0]
752 key = (params.get(r'ids') or params.get(r'phids') or [None])[0]
751 if key in prefetched:
753 if key in prefetched:
752 return prefetched[key]
754 return prefetched[key]
753 drevs = callconduit(repo, b'differential.query', params)
755 drevs = callconduit(repo, b'differential.query', params)
754 # Fill prefetched with the result
756 # Fill prefetched with the result
755 for drev in drevs:
757 for drev in drevs:
756 prefetched[drev[r'phid']] = drev
758 prefetched[drev[r'phid']] = drev
757 prefetched[int(drev[r'id'])] = drev
759 prefetched[int(drev[r'id'])] = drev
758 if key not in prefetched:
760 if key not in prefetched:
759 raise error.Abort(_(b'cannot get Differential Revision %r')
761 raise error.Abort(_(b'cannot get Differential Revision %r')
760 % params)
762 % params)
761 return prefetched[key]
763 return prefetched[key]
762
764
763 def getstack(topdrevids):
765 def getstack(topdrevids):
764 """given a top, get a stack from the bottom, [id] -> [id]"""
766 """given a top, get a stack from the bottom, [id] -> [id]"""
765 visited = set()
767 visited = set()
766 result = []
768 result = []
767 queue = [{r'ids': [i]} for i in topdrevids]
769 queue = [{r'ids': [i]} for i in topdrevids]
768 while queue:
770 while queue:
769 params = queue.pop()
771 params = queue.pop()
770 drev = fetch(params)
772 drev = fetch(params)
771 if drev[r'id'] in visited:
773 if drev[r'id'] in visited:
772 continue
774 continue
773 visited.add(drev[r'id'])
775 visited.add(drev[r'id'])
774 result.append(int(drev[r'id']))
776 result.append(int(drev[r'id']))
775 auxiliary = drev.get(r'auxiliary', {})
777 auxiliary = drev.get(r'auxiliary', {})
776 depends = auxiliary.get(r'phabricator:depends-on', [])
778 depends = auxiliary.get(r'phabricator:depends-on', [])
777 for phid in depends:
779 for phid in depends:
778 queue.append({b'phids': [phid]})
780 queue.append({b'phids': [phid]})
779 result.reverse()
781 result.reverse()
780 return smartset.baseset(result)
782 return smartset.baseset(result)
781
783
782 # Initialize prefetch cache
784 # Initialize prefetch cache
783 prefetched = {} # {id or phid: drev}
785 prefetched = {} # {id or phid: drev}
784
786
785 tree = _parse(spec)
787 tree = _parse(spec)
786 drevs, ancestordrevs = _prefetchdrevs(tree)
788 drevs, ancestordrevs = _prefetchdrevs(tree)
787
789
788 # developer config: phabricator.batchsize
790 # developer config: phabricator.batchsize
789 batchsize = repo.ui.configint(b'phabricator', b'batchsize')
791 batchsize = repo.ui.configint(b'phabricator', b'batchsize')
790
792
791 # Prefetch Differential Revisions in batch
793 # Prefetch Differential Revisions in batch
792 tofetch = set(drevs)
794 tofetch = set(drevs)
793 for r in ancestordrevs:
795 for r in ancestordrevs:
794 tofetch.update(range(max(1, r - batchsize), r + 1))
796 tofetch.update(range(max(1, r - batchsize), r + 1))
795 if drevs:
797 if drevs:
796 fetch({r'ids': list(tofetch)})
798 fetch({r'ids': list(tofetch)})
797 validids = sorted(set(getstack(list(ancestordrevs))) | set(drevs))
799 validids = sorted(set(getstack(list(ancestordrevs))) | set(drevs))
798
800
799 # Walk through the tree, return smartsets
801 # Walk through the tree, return smartsets
800 def walk(tree):
802 def walk(tree):
801 op = tree[0]
803 op = tree[0]
802 if op == b'symbol':
804 if op == b'symbol':
803 drev = _parsedrev(tree[1])
805 drev = _parsedrev(tree[1])
804 if drev:
806 if drev:
805 return smartset.baseset([drev])
807 return smartset.baseset([drev])
806 elif tree[1] in _knownstatusnames:
808 elif tree[1] in _knownstatusnames:
807 drevs = [r for r in validids
809 drevs = [r for r in validids
808 if _getstatusname(prefetched[r]) == tree[1]]
810 if _getstatusname(prefetched[r]) == tree[1]]
809 return smartset.baseset(drevs)
811 return smartset.baseset(drevs)
810 else:
812 else:
811 raise error.Abort(_(b'unknown symbol: %s') % tree[1])
813 raise error.Abort(_(b'unknown symbol: %s') % tree[1])
812 elif op in {b'and_', b'add', b'sub'}:
814 elif op in {b'and_', b'add', b'sub'}:
813 assert len(tree) == 3
815 assert len(tree) == 3
814 return getattr(operator, op)(walk(tree[1]), walk(tree[2]))
816 return getattr(operator, op)(walk(tree[1]), walk(tree[2]))
815 elif op == b'group':
817 elif op == b'group':
816 return walk(tree[1])
818 return walk(tree[1])
817 elif op == b'ancestors':
819 elif op == b'ancestors':
818 return getstack(walk(tree[1]))
820 return getstack(walk(tree[1]))
819 else:
821 else:
820 raise error.ProgrammingError(b'illegal tree: %r' % tree)
822 raise error.ProgrammingError(b'illegal tree: %r' % tree)
821
823
822 return [prefetched[r] for r in walk(tree)]
824 return [prefetched[r] for r in walk(tree)]
823
825
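A minimal, self-contained sketch (not part of this extension; the dependency map is hypothetical) of the bottom-up dependency walk that the getstack() helper above performs, with an in-memory dict standing in for the conduit queries:

# Hypothetical "phabricator:depends-on" data: D3 depends on D2, D2 on D1.
depends = {3: [2], 2: [1], 1: []}

def stack_bottom_up(topids):
    visited, result, queue = set(), [], list(topids)
    while queue:
        drevid = queue.pop()
        if drevid in visited:
            continue
        visited.add(drevid)
        result.append(drevid)
        queue.extend(depends.get(drevid, []))
    result.reverse()  # bottom of the stack first, as getstack() does
    return result

assert stack_bottom_up([3]) == [1, 2, 3]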
824 def getdescfromdrev(drev):
826 def getdescfromdrev(drev):
825 """get description (commit message) from "Differential Revision"
827 """get description (commit message) from "Differential Revision"
826
828
827 This is similar to differential.getcommitmessage API. But we only care
829 This is similar to differential.getcommitmessage API. But we only care
828 about limited fields: title, summary, test plan, and URL.
830 about limited fields: title, summary, test plan, and URL.
829 """
831 """
830 title = drev[r'title']
832 title = drev[r'title']
831 summary = drev[r'summary'].rstrip()
833 summary = drev[r'summary'].rstrip()
832 testplan = drev[r'testPlan'].rstrip()
834 testplan = drev[r'testPlan'].rstrip()
833 if testplan:
835 if testplan:
834 testplan = b'Test Plan:\n%s' % testplan
836 testplan = b'Test Plan:\n%s' % testplan
835 uri = b'Differential Revision: %s' % drev[r'uri']
837 uri = b'Differential Revision: %s' % drev[r'uri']
836 return b'\n\n'.join(filter(None, [title, summary, testplan, uri]))
838 return b'\n\n'.join(filter(None, [title, summary, testplan, uri]))
837
839
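As an illustration (all field values hypothetical), getdescfromdrev() above turns a revision dict like the following into a message whose parts are joined by blank lines:

drev = {
    r'title': b'phabricator: fix a thing',
    r'summary': b'A longer explanation.\n',
    r'testPlan': b'ran the test suite',
    r'uri': b'https://phab.example.com/D123',
}
# getdescfromdrev(drev) would return:
#
#   phabricator: fix a thing
#
#   A longer explanation.
#
#   Test Plan:
#   ran the test suite
#
#   Differential Revision: https://phab.example.com/D123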
838 def getdiffmeta(diff):
840 def getdiffmeta(diff):
839 """get commit metadata (date, node, user, p1) from a diff object
841 """get commit metadata (date, node, user, p1) from a diff object
840
842
841 The metadata could be "hg:meta", sent by phabsend, like:
843 The metadata could be "hg:meta", sent by phabsend, like:
842
844
843 "properties": {
845 "properties": {
844 "hg:meta": {
846 "hg:meta": {
845 "date": "1499571514 25200",
847 "date": "1499571514 25200",
846 "node": "98c08acae292b2faf60a279b4189beb6cff1414d",
848 "node": "98c08acae292b2faf60a279b4189beb6cff1414d",
847 "user": "Foo Bar <foo@example.com>",
849 "user": "Foo Bar <foo@example.com>",
848 "parent": "6d0abad76b30e4724a37ab8721d630394070fe16"
850 "parent": "6d0abad76b30e4724a37ab8721d630394070fe16"
849 }
851 }
850 }
852 }
851
853
852 Or converted from "local:commits", sent by "arc", like:
854 Or converted from "local:commits", sent by "arc", like:
853
855
854 "properties": {
856 "properties": {
855 "local:commits": {
857 "local:commits": {
856 "98c08acae292b2faf60a279b4189beb6cff1414d": {
858 "98c08acae292b2faf60a279b4189beb6cff1414d": {
857 "author": "Foo Bar",
859 "author": "Foo Bar",
858 "time": 1499546314,
860 "time": 1499546314,
859 "branch": "default",
861 "branch": "default",
860 "tag": "",
862 "tag": "",
861 "commit": "98c08acae292b2faf60a279b4189beb6cff1414d",
863 "commit": "98c08acae292b2faf60a279b4189beb6cff1414d",
862 "rev": "98c08acae292b2faf60a279b4189beb6cff1414d",
864 "rev": "98c08acae292b2faf60a279b4189beb6cff1414d",
863 "local": "1000",
865 "local": "1000",
864 "parents": ["6d0abad76b30e4724a37ab8721d630394070fe16"],
866 "parents": ["6d0abad76b30e4724a37ab8721d630394070fe16"],
865 "summary": "...",
867 "summary": "...",
866 "message": "...",
868 "message": "...",
867 "authorEmail": "foo@example.com"
869 "authorEmail": "foo@example.com"
868 }
870 }
869 }
871 }
870 }
872 }
871
873
872 Note: metadata extracted from "local:commits" will lose time zone
874 Note: metadata extracted from "local:commits" will lose time zone
873 information.
875 information.
874 """
876 """
875 props = diff.get(r'properties') or {}
877 props = diff.get(r'properties') or {}
876 meta = props.get(r'hg:meta')
878 meta = props.get(r'hg:meta')
877 if not meta and props.get(r'local:commits'):
879 if not meta and props.get(r'local:commits'):
878 commit = sorted(props[r'local:commits'].values())[0]
880 commit = sorted(props[r'local:commits'].values())[0]
879 meta = {
881 meta = {
880 r'date': r'%d 0' % commit[r'time'],
882 r'date': r'%d 0' % commit[r'time'],
881 r'node': commit[r'rev'],
883 r'node': commit[r'rev'],
882 r'user': r'%s <%s>' % (commit[r'author'], commit[r'authorEmail']),
884 r'user': r'%s <%s>' % (commit[r'author'], commit[r'authorEmail']),
883 }
885 }
884 if len(commit.get(r'parents', ())) >= 1:
886 if len(commit.get(r'parents', ())) >= 1:
885 meta[r'parent'] = commit[r'parents'][0]
887 meta[r'parent'] = commit[r'parents'][0]
886 return meta or {}
888 return meta or {}
887
889
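For the "local:commits" example in the docstring above, the fallback branch of getdiffmeta() would yield roughly this metadata (note the zeroed time zone):

meta = {
    r'date': '1499546314 0',
    r'node': '98c08acae292b2faf60a279b4189beb6cff1414d',
    r'user': 'Foo Bar <foo@example.com>',
    r'parent': '6d0abad76b30e4724a37ab8721d630394070fe16',
}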
888 def readpatch(repo, drevs, write):
890 def readpatch(repo, drevs, write):
889 """generate plain-text patch readable by 'hg import'
891 """generate plain-text patch readable by 'hg import'
890
892
891 write is usually ui.write. drevs is what "querydrev" returns, results of
893 write is usually ui.write. drevs is what "querydrev" returns, results of
892 "differential.query".
894 "differential.query".
893 """
895 """
894 # Prefetch hg:meta property for all diffs
896 # Prefetch hg:meta property for all diffs
895 diffids = sorted(set(max(int(v) for v in drev[r'diffs']) for drev in drevs))
897 diffids = sorted(set(max(int(v) for v in drev[r'diffs']) for drev in drevs))
896 diffs = callconduit(repo, b'differential.querydiffs', {b'ids': diffids})
898 diffs = callconduit(repo, b'differential.querydiffs', {b'ids': diffids})
897
899
898 # Generate patch for each drev
900 # Generate patch for each drev
899 for drev in drevs:
901 for drev in drevs:
900 repo.ui.note(_(b'reading D%s\n') % drev[r'id'])
902 repo.ui.note(_(b'reading D%s\n') % drev[r'id'])
901
903
902 diffid = max(int(v) for v in drev[r'diffs'])
904 diffid = max(int(v) for v in drev[r'diffs'])
903 body = callconduit(repo, b'differential.getrawdiff',
905 body = callconduit(repo, b'differential.getrawdiff',
904 {b'diffID': diffid})
906 {b'diffID': diffid})
905 desc = getdescfromdrev(drev)
907 desc = getdescfromdrev(drev)
906 header = b'# HG changeset patch\n'
908 header = b'# HG changeset patch\n'
907
909
908 # Try to preserve metadata from hg:meta property. Write hg patch
910 # Try to preserve metadata from hg:meta property. Write hg patch
909 # headers that can be read by the "import" command. See patchheadermap
911 # headers that can be read by the "import" command. See patchheadermap
910 # and extract in mercurial/patch.py for supported headers.
912 # and extract in mercurial/patch.py for supported headers.
911 meta = getdiffmeta(diffs[str(diffid)])
913 meta = getdiffmeta(diffs[str(diffid)])
912 for k in _metanamemap.keys():
914 for k in _metanamemap.keys():
913 if k in meta:
915 if k in meta:
914 header += b'# %s %s\n' % (_metanamemap[k], meta[k])
916 header += b'# %s %s\n' % (_metanamemap[k], meta[k])
915
917
916 content = b'%s%s\n%s' % (header, desc, body)
918 content = b'%s%s\n%s' % (header, desc, body)
917 write(encoding.unitolocal(content))
919 write(encoding.unitolocal(content))
918
920
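Putting the pieces together, and assuming _metanamemap (defined earlier in this file) maps date/node/user/parent to the conventional "Date", "Node ID", "User" and "Parent" patch headers, the output for the hg:meta example above would begin roughly like this (header order follows _metanamemap):

# HG changeset patch
# Date 1499571514 25200
# Node ID 98c08acae292b2faf60a279b4189beb6cff1414d
# User Foo Bar <foo@example.com>
# Parent 6d0abad76b30e4724a37ab8721d630394070fe16
<commit message assembled by getdescfromdrev()>
<raw diff returned by differential.getrawdiff>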
919 @vcrcommand(b'phabread',
921 @vcrcommand(b'phabread',
920 [(b'', b'stack', False, _(b'read dependencies'))],
922 [(b'', b'stack', False, _(b'read dependencies'))],
921 _(b'DREVSPEC [OPTIONS]'),
923 _(b'DREVSPEC [OPTIONS]'),
922 helpcategory=command.CATEGORY_IMPORT_EXPORT)
924 helpcategory=command.CATEGORY_IMPORT_EXPORT)
923 def phabread(ui, repo, spec, **opts):
925 def phabread(ui, repo, spec, **opts):
924 """print patches from Phabricator suitable for importing
926 """print patches from Phabricator suitable for importing
925
927
926 DREVSPEC could be a Differential Revision identity, like ``D123``, or just
928 DREVSPEC could be a Differential Revision identity, like ``D123``, or just
927 the number ``123``. It could also have common operators like ``+``, ``-``,
929 the number ``123``. It could also have common operators like ``+``, ``-``,
928 ``&``, ``(``, ``)`` for complex queries. Prefix ``:`` could be used to
930 ``&``, ``(``, ``)`` for complex queries. Prefix ``:`` could be used to
929 select a stack.
931 select a stack.
930
932
931 ``abandoned``, ``accepted``, ``closed``, ``needsreview``, ``needsrevision``
933 ``abandoned``, ``accepted``, ``closed``, ``needsreview``, ``needsrevision``
932 could be used to filter patches by status. For performance reasons, they
934 could be used to filter patches by status. For performance reasons, they
933 only represent a subset of non-status selections and cannot be used alone.
935 only represent a subset of non-status selections and cannot be used alone.
934
936
935 For example, ``:D6+8-(2+D4)`` selects a stack up to D6, plus D8 and excludes
937 For example, ``:D6+8-(2+D4)`` selects a stack up to D6, plus D8 and excludes
936 D2 and D4. ``:D9 & needsreview`` selects "Needs Review" revisions in a
938 D2 and D4. ``:D9 & needsreview`` selects "Needs Review" revisions in a
937 stack up to D9.
939 stack up to D9.
938
940
939 If --stack is given, follow dependency information and read all patches.
941 If --stack is given, follow dependency information and read all patches.
940 It is equivalent to the ``:`` operator.
942 It is equivalent to the ``:`` operator.
941 """
943 """
942 if opts.get(b'stack'):
944 if opts.get(b'stack'):
943 spec = b':(%s)' % spec
945 spec = b':(%s)' % spec
944 drevs = querydrev(repo, spec)
946 drevs = querydrev(repo, spec)
945 readpatch(repo, drevs, ui.write)
947 readpatch(repo, drevs, ui.write)
946
948
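Illustrative invocations (revision numbers hypothetical), matching the spec syntax described above:

$ hg phabread D123                  # one revision, as an importable patch
$ hg phabread ':D6+8-(2+D4)'        # the example spec from the docstring
$ hg phabread D123 | hg import -    # import it into the local repository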
947 @vcrcommand(b'phabupdate',
949 @vcrcommand(b'phabupdate',
948 [(b'', b'accept', False, _(b'accept revisions')),
950 [(b'', b'accept', False, _(b'accept revisions')),
949 (b'', b'reject', False, _(b'reject revisions')),
951 (b'', b'reject', False, _(b'reject revisions')),
950 (b'', b'abandon', False, _(b'abandon revisions')),
952 (b'', b'abandon', False, _(b'abandon revisions')),
951 (b'', b'reclaim', False, _(b'reclaim revisions')),
953 (b'', b'reclaim', False, _(b'reclaim revisions')),
952 (b'm', b'comment', b'', _(b'comment on the last revision')),
954 (b'm', b'comment', b'', _(b'comment on the last revision')),
953 ], _(b'DREVSPEC [OPTIONS]'),
955 ], _(b'DREVSPEC [OPTIONS]'),
954 helpcategory=command.CATEGORY_IMPORT_EXPORT)
956 helpcategory=command.CATEGORY_IMPORT_EXPORT)
955 def phabupdate(ui, repo, spec, **opts):
957 def phabupdate(ui, repo, spec, **opts):
956 """update Differential Revision in batch
958 """update Differential Revision in batch
957
959
958 DREVSPEC selects revisions. See :hg:`help phabread` for its usage.
960 DREVSPEC selects revisions. See :hg:`help phabread` for its usage.
959 """
961 """
960 flags = [n for n in b'accept reject abandon reclaim'.split() if opts.get(n)]
962 flags = [n for n in b'accept reject abandon reclaim'.split() if opts.get(n)]
961 if len(flags) > 1:
963 if len(flags) > 1:
962 raise error.Abort(_(b'%s cannot be used together') % b', '.join(flags))
964 raise error.Abort(_(b'%s cannot be used together') % b', '.join(flags))
963
965
964 actions = []
966 actions = []
965 for f in flags:
967 for f in flags:
966 actions.append({b'type': f, b'value': b'true'})
968 actions.append({b'type': f, b'value': b'true'})
967
969
968 drevs = querydrev(repo, spec)
970 drevs = querydrev(repo, spec)
969 for i, drev in enumerate(drevs):
971 for i, drev in enumerate(drevs):
970 if i + 1 == len(drevs) and opts.get(b'comment'):
972 if i + 1 == len(drevs) and opts.get(b'comment'):
971 actions.append({b'type': b'comment', b'value': opts[b'comment']})
973 actions.append({b'type': b'comment', b'value': opts[b'comment']})
972 if actions:
974 if actions:
973 params = {b'objectIdentifier': drev[r'phid'],
975 params = {b'objectIdentifier': drev[r'phid'],
974 b'transactions': actions}
976 b'transactions': actions}
975 callconduit(repo, b'differential.revision.edit', params)
977 callconduit(repo, b'differential.revision.edit', params)
976
978
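Illustrative invocations (revision numbers hypothetical):

$ hg phabupdate --accept D123 -m 'queued for landing'
$ hg phabupdate --abandon ':D9 & needsreview'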
977 templatekeyword = registrar.templatekeyword()
979 templatekeyword = registrar.templatekeyword()
978
980
979 @templatekeyword(b'phabreview', requires={b'ctx'})
981 @templatekeyword(b'phabreview', requires={b'ctx'})
980 def template_review(context, mapping):
982 def template_review(context, mapping):
981 """:phabreview: Object describing the review for this changeset.
983 """:phabreview: Object describing the review for this changeset.
982 Has attributes `url` and `id`.
984 Has attributes `url` and `id`.
983 """
985 """
984 ctx = context.resource(mapping, b'ctx')
986 ctx = context.resource(mapping, b'ctx')
985 m = _differentialrevisiondescre.search(ctx.description())
987 m = _differentialrevisiondescre.search(ctx.description())
986 if m:
988 if m:
987 return templateutil.hybriddict({
989 return templateutil.hybriddict({
988 b'url': m.group(b'url'),
990 b'url': m.group(b'url'),
989 b'id': b"D{}".format(m.group(b'id')),
991 b'id': b"D{}".format(m.group(b'id')),
990 })
992 })
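For example, the keyword can be used from the command line, assuming the changeset description contains a Differential Revision URL (output hypothetical):

$ hg log -r . -T '{phabreview.url}\n'
https://phab.example.com/D123
$ hg log -r . -T '{phabreview.id}\n'
D123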
@@ -1,2903 +1,2912 b''
1 /*
1 /*
2 parsers.c - efficient content parsing
2 parsers.c - efficient content parsing
3
3
4 Copyright 2008 Matt Mackall <mpm@selenic.com> and others
4 Copyright 2008 Matt Mackall <mpm@selenic.com> and others
5
5
6 This software may be used and distributed according to the terms of
6 This software may be used and distributed according to the terms of
7 the GNU General Public License, incorporated herein by reference.
7 the GNU General Public License, incorporated herein by reference.
8 */
8 */
9
9
10 #include <Python.h>
10 #include <Python.h>
11 #include <assert.h>
11 #include <assert.h>
12 #include <ctype.h>
12 #include <ctype.h>
13 #include <limits.h>
13 #include <limits.h>
14 #include <stddef.h>
14 #include <stddef.h>
15 #include <stdlib.h>
15 #include <stdlib.h>
16 #include <string.h>
16 #include <string.h>
17
17
18 #include "bitmanipulation.h"
18 #include "bitmanipulation.h"
19 #include "charencode.h"
19 #include "charencode.h"
20 #include "revlog.h"
20 #include "revlog.h"
21 #include "util.h"
21 #include "util.h"
22
22
23 #ifdef IS_PY3K
23 #ifdef IS_PY3K
24 /* The mapping of Python types is meant to be temporary to get Python
24 /* The mapping of Python types is meant to be temporary to get Python
25  * 3 to compile. We should remove this once Python 3 is fully
25  * 3 to compile. We should remove this once Python 3 is fully
26  * supported and proper types are used in the extensions themselves. */
26  * supported and proper types are used in the extensions themselves. */
27 #define PyInt_Check PyLong_Check
27 #define PyInt_Check PyLong_Check
28 #define PyInt_FromLong PyLong_FromLong
28 #define PyInt_FromLong PyLong_FromLong
29 #define PyInt_FromSsize_t PyLong_FromSsize_t
29 #define PyInt_FromSsize_t PyLong_FromSsize_t
30 #define PyInt_AsLong PyLong_AsLong
30 #define PyInt_AsLong PyLong_AsLong
31 #endif
31 #endif
32
32
33 typedef struct indexObjectStruct indexObject;
33 typedef struct indexObjectStruct indexObject;
34
34
35 typedef struct {
35 typedef struct {
36 int children[16];
36 int children[16];
37 } nodetreenode;
37 } nodetreenode;
38
38
39 /*
39 /*
40 * A base-16 trie for fast node->rev mapping.
40 * A base-16 trie for fast node->rev mapping.
41 *
41 *
42 * Positive value is index of the next node in the trie
42 * Positive value is index of the next node in the trie
43 * Negative value is a leaf: -(rev + 2)
43 * Negative value is a leaf: -(rev + 2)
44 * Zero is empty
44 * Zero is empty
45 */
45 */
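A tiny illustration of the encoding described above (the *_example helpers are hypothetical and not part of this file): rev 0 encodes to -2, so a leaf can never collide with "empty" (0) or with a node index (positive).

static inline int nt_leaf_encode_example(int rev)
{
	return -(rev + 2);
}
static inline int nt_leaf_decode_example(int v)
{
	return -v - 2;
}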
46 typedef struct {
46 typedef struct {
47 indexObject *index;
47 indexObject *index;
48 nodetreenode *nodes;
48 nodetreenode *nodes;
49 unsigned length; /* # nodes in use */
49 unsigned length; /* # nodes in use */
50 unsigned capacity; /* # nodes allocated */
50 unsigned capacity; /* # nodes allocated */
51 int depth; /* maximum depth of tree */
51 int depth; /* maximum depth of tree */
52 int splits; /* # splits performed */
52 int splits; /* # splits performed */
53 } nodetree;
53 } nodetree;
54
54
55 typedef struct {
55 typedef struct {
56 PyObject_HEAD /* ; */
56 PyObject_HEAD /* ; */
57 nodetree nt;
57 nodetree nt;
58 } nodetreeObject;
58 } nodetreeObject;
59
59
60 /*
60 /*
61 * This class has two behaviors.
61 * This class has two behaviors.
62 *
62 *
63 * When used in a list-like way (with integer keys), we decode an
63 * When used in a list-like way (with integer keys), we decode an
64 * entry in a RevlogNG index file on demand. Our last entry is a
64 * entry in a RevlogNG index file on demand. Our last entry is a
65 * sentinel, always a nullid. We have limited support for
65 * sentinel, always a nullid. We have limited support for
66 * integer-keyed insert and delete, only at elements right before the
66 * integer-keyed insert and delete, only at elements right before the
67 * sentinel.
67 * sentinel.
68 *
68 *
69 * With string keys, we lazily perform a reverse mapping from node to
69 * With string keys, we lazily perform a reverse mapping from node to
70 * rev, using a base-16 trie.
70 * rev, using a base-16 trie.
71 */
71 */
72 struct indexObjectStruct {
72 struct indexObjectStruct {
73 PyObject_HEAD
73 PyObject_HEAD
74 /* Type-specific fields go here. */
74 /* Type-specific fields go here. */
75 PyObject *data; /* raw bytes of index */
75 PyObject *data; /* raw bytes of index */
76 Py_buffer buf; /* buffer of data */
76 Py_buffer buf; /* buffer of data */
77 PyObject **cache; /* cached tuples */
77 PyObject **cache; /* cached tuples */
78 const char **offsets; /* populated on demand */
78 const char **offsets; /* populated on demand */
79 Py_ssize_t raw_length; /* original number of elements */
79 Py_ssize_t raw_length; /* original number of elements */
80 Py_ssize_t length; /* current number of elements */
80 Py_ssize_t length; /* current number of elements */
81 PyObject *added; /* populated on demand */
81 PyObject *added; /* populated on demand */
82 PyObject *headrevs; /* cache, invalidated on changes */
82 PyObject *headrevs; /* cache, invalidated on changes */
83 PyObject *filteredrevs; /* filtered revs set */
83 PyObject *filteredrevs; /* filtered revs set */
84 nodetree nt; /* base-16 trie */
84 nodetree nt; /* base-16 trie */
85 int ntinitialized; /* 0 or 1 */
85 int ntinitialized; /* 0 or 1 */
86 int ntrev; /* last rev scanned */
86 int ntrev; /* last rev scanned */
87 int ntlookups; /* # lookups */
87 int ntlookups; /* # lookups */
88 int ntmisses; /* # lookups that miss the cache */
88 int ntmisses; /* # lookups that miss the cache */
89 int inlined;
89 int inlined;
90 };
90 };
91
91
92 static Py_ssize_t index_length(const indexObject *self)
92 static Py_ssize_t index_length(const indexObject *self)
93 {
93 {
94 if (self->added == NULL)
94 if (self->added == NULL)
95 return self->length;
95 return self->length;
96 return self->length + PyList_GET_SIZE(self->added);
96 return self->length + PyList_GET_SIZE(self->added);
97 }
97 }
98
98
99 static PyObject *nullentry = NULL;
99 static PyObject *nullentry = NULL;
100 static const char nullid[20] = {0};
100 static const char nullid[20] = {0};
101 static const Py_ssize_t nullrev = -1;
101 static const Py_ssize_t nullrev = -1;
102
102
103 static Py_ssize_t inline_scan(indexObject *self, const char **offsets);
103 static Py_ssize_t inline_scan(indexObject *self, const char **offsets);
104
104
105 #if LONG_MAX == 0x7fffffffL
105 #if LONG_MAX == 0x7fffffffL
106 static const char *const tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#");
106 static const char *const tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#");
107 #else
107 #else
108 static const char *const tuple_format = PY23("kiiiiiis#", "kiiiiiiy#");
108 static const char *const tuple_format = PY23("kiiiiiis#", "kiiiiiiy#");
109 #endif
109 #endif
110
110
111 /* A RevlogNG v1 index entry is 64 bytes long. */
111 /* A RevlogNG v1 index entry is 64 bytes long. */
112 static const long v1_hdrsize = 64;
112 static const long v1_hdrsize = 64;
113
113
114 static void raise_revlog_error(void)
114 static void raise_revlog_error(void)
115 {
115 {
116 PyObject *mod = NULL, *dict = NULL, *errclass = NULL;
116 PyObject *mod = NULL, *dict = NULL, *errclass = NULL;
117
117
118 mod = PyImport_ImportModule("mercurial.error");
118 mod = PyImport_ImportModule("mercurial.error");
119 if (mod == NULL) {
119 if (mod == NULL) {
120 goto cleanup;
120 goto cleanup;
121 }
121 }
122
122
123 dict = PyModule_GetDict(mod);
123 dict = PyModule_GetDict(mod);
124 if (dict == NULL) {
124 if (dict == NULL) {
125 goto cleanup;
125 goto cleanup;
126 }
126 }
127 Py_INCREF(dict);
127 Py_INCREF(dict);
128
128
129 errclass = PyDict_GetItemString(dict, "RevlogError");
129 errclass = PyDict_GetItemString(dict, "RevlogError");
130 if (errclass == NULL) {
130 if (errclass == NULL) {
131 PyErr_SetString(PyExc_SystemError,
131 PyErr_SetString(PyExc_SystemError,
132 "could not find RevlogError");
132 "could not find RevlogError");
133 goto cleanup;
133 goto cleanup;
134 }
134 }
135
135
136 /* value of exception is ignored by callers */
136 /* value of exception is ignored by callers */
137 PyErr_SetString(errclass, "RevlogError");
137 PyErr_SetString(errclass, "RevlogError");
138
138
139 cleanup:
139 cleanup:
140 Py_XDECREF(dict);
140 Py_XDECREF(dict);
141 Py_XDECREF(mod);
141 Py_XDECREF(mod);
142 }
142 }
143
143
144 /*
144 /*
145 * Return a pointer to the beginning of a RevlogNG record.
145 * Return a pointer to the beginning of a RevlogNG record.
146 */
146 */
147 static const char *index_deref(indexObject *self, Py_ssize_t pos)
147 static const char *index_deref(indexObject *self, Py_ssize_t pos)
148 {
148 {
149 if (self->inlined && pos > 0) {
149 if (self->inlined && pos > 0) {
150 if (self->offsets == NULL) {
150 if (self->offsets == NULL) {
151 self->offsets = PyMem_Malloc(self->raw_length *
151 self->offsets = PyMem_Malloc(self->raw_length *
152 sizeof(*self->offsets));
152 sizeof(*self->offsets));
153 if (self->offsets == NULL)
153 if (self->offsets == NULL)
154 return (const char *)PyErr_NoMemory();
154 return (const char *)PyErr_NoMemory();
155 inline_scan(self, self->offsets);
155 inline_scan(self, self->offsets);
156 }
156 }
157 return self->offsets[pos];
157 return self->offsets[pos];
158 }
158 }
159
159
160 return (const char *)(self->buf.buf) + pos * v1_hdrsize;
160 return (const char *)(self->buf.buf) + pos * v1_hdrsize;
161 }
161 }
162
162
163 /*
163 /*
164 * Get parents of the given rev.
164 * Get parents of the given rev.
165 *
165 *
166 * The specified rev must be valid and must not be nullrev. A returned
166 * The specified rev must be valid and must not be nullrev. A returned
167 * parent revision may be nullrev, but is guaranteed to be in valid range.
167 * parent revision may be nullrev, but is guaranteed to be in valid range.
168 */
168 */
169 static inline int index_get_parents(indexObject *self, Py_ssize_t rev, int *ps,
169 static inline int index_get_parents(indexObject *self, Py_ssize_t rev, int *ps,
170 int maxrev)
170 int maxrev)
171 {
171 {
172 if (rev >= self->length) {
172 if (rev >= self->length) {
173 long tmp;
173 long tmp;
174 PyObject *tuple =
174 PyObject *tuple =
175 PyList_GET_ITEM(self->added, rev - self->length);
175 PyList_GET_ITEM(self->added, rev - self->length);
176 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 5), &tmp)) {
176 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 5), &tmp)) {
177 return -1;
177 return -1;
178 }
178 }
179 ps[0] = (int)tmp;
179 ps[0] = (int)tmp;
180 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 6), &tmp)) {
180 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 6), &tmp)) {
181 return -1;
181 return -1;
182 }
182 }
183 ps[1] = (int)tmp;
183 ps[1] = (int)tmp;
184 } else {
184 } else {
185 const char *data = index_deref(self, rev);
185 const char *data = index_deref(self, rev);
186 ps[0] = getbe32(data + 24);
186 ps[0] = getbe32(data + 24);
187 ps[1] = getbe32(data + 28);
187 ps[1] = getbe32(data + 28);
188 }
188 }
189 	/* If the index file is corrupted, ps[] may point to invalid revisions, so
189 	/* If the index file is corrupted, ps[] may point to invalid revisions, so
190 	 * trusting them unconditionally risks a buffer overflow. */
190 	 * trusting them unconditionally risks a buffer overflow. */
191 if (ps[0] < -1 || ps[0] > maxrev || ps[1] < -1 || ps[1] > maxrev) {
191 if (ps[0] < -1 || ps[0] > maxrev || ps[1] < -1 || ps[1] > maxrev) {
192 PyErr_SetString(PyExc_ValueError, "parent out of range");
192 PyErr_SetString(PyExc_ValueError, "parent out of range");
193 return -1;
193 return -1;
194 }
194 }
195 return 0;
195 return 0;
196 }
196 }
197
197
198 /*
198 /*
199 * Get parents of the given rev.
199 * Get parents of the given rev.
200 *
200 *
201 * If the specified rev is out of range, IndexError will be raised. If the
201 * If the specified rev is out of range, IndexError will be raised. If the
202 * revlog entry is corrupted, ValueError may be raised.
202 * revlog entry is corrupted, ValueError may be raised.
203 *
203 *
204 * Returns 0 on success or -1 on failure.
204 * Returns 0 on success or -1 on failure.
205 */
205 */
206 int HgRevlogIndex_GetParents(PyObject *op, int rev, int *ps)
206 int HgRevlogIndex_GetParents(PyObject *op, int rev, int *ps)
207 {
207 {
208 int tiprev;
208 int tiprev;
209 if (!op || !HgRevlogIndex_Check(op) || !ps) {
209 if (!op || !HgRevlogIndex_Check(op) || !ps) {
210 PyErr_BadInternalCall();
210 PyErr_BadInternalCall();
211 return -1;
211 return -1;
212 }
212 }
213 tiprev = (int)index_length((indexObject *)op) - 1;
213 tiprev = (int)index_length((indexObject *)op) - 1;
214 if (rev < -1 || rev > tiprev) {
214 if (rev < -1 || rev > tiprev) {
215 PyErr_Format(PyExc_IndexError, "rev out of range: %d", rev);
215 PyErr_Format(PyExc_IndexError, "rev out of range: %d", rev);
216 return -1;
216 return -1;
217 } else if (rev == -1) {
217 } else if (rev == -1) {
218 ps[0] = ps[1] = -1;
218 ps[0] = ps[1] = -1;
219 return 0;
219 return 0;
220 } else {
220 } else {
221 return index_get_parents((indexObject *)op, rev, ps, tiprev);
221 return index_get_parents((indexObject *)op, rev, ps, tiprev);
222 }
222 }
223 }
223 }
224
224
225 static inline int64_t index_get_start(indexObject *self, Py_ssize_t rev)
225 static inline int64_t index_get_start(indexObject *self, Py_ssize_t rev)
226 {
226 {
227 uint64_t offset;
227 uint64_t offset;
228 if (rev == nullrev) {
228 if (rev == nullrev) {
229 return 0;
229 return 0;
230 }
230 }
231 if (rev >= self->length) {
231 if (rev >= self->length) {
232 PyObject *tuple;
232 PyObject *tuple;
233 PyObject *pylong;
233 PyObject *pylong;
234 PY_LONG_LONG tmp;
234 PY_LONG_LONG tmp;
235 tuple = PyList_GET_ITEM(self->added, rev - self->length);
235 tuple = PyList_GET_ITEM(self->added, rev - self->length);
236 pylong = PyTuple_GET_ITEM(tuple, 0);
236 pylong = PyTuple_GET_ITEM(tuple, 0);
237 tmp = PyLong_AsLongLong(pylong);
237 tmp = PyLong_AsLongLong(pylong);
238 if (tmp == -1 && PyErr_Occurred()) {
238 if (tmp == -1 && PyErr_Occurred()) {
239 return -1;
239 return -1;
240 }
240 }
241 if (tmp < 0) {
241 if (tmp < 0) {
242 PyErr_Format(PyExc_OverflowError,
242 PyErr_Format(PyExc_OverflowError,
243 "revlog entry size out of bound (%lld)",
243 "revlog entry size out of bound (%lld)",
244 (long long)tmp);
244 (long long)tmp);
245 return -1;
245 return -1;
246 }
246 }
247 offset = (uint64_t)tmp;
247 offset = (uint64_t)tmp;
248 } else {
248 } else {
249 const char *data = index_deref(self, rev);
249 const char *data = index_deref(self, rev);
250 offset = getbe32(data + 4);
250 offset = getbe32(data + 4);
251 if (rev == 0) {
251 if (rev == 0) {
252 /* mask out version number for the first entry */
252 /* mask out version number for the first entry */
253 offset &= 0xFFFF;
253 offset &= 0xFFFF;
254 } else {
254 } else {
255 uint32_t offset_high = getbe32(data);
255 uint32_t offset_high = getbe32(data);
256 offset |= ((uint64_t)offset_high) << 32;
256 offset |= ((uint64_t)offset_high) << 32;
257 }
257 }
258 }
258 }
259 return (int64_t)(offset >> 16);
259 return (int64_t)(offset >> 16);
260 }
260 }
261
261
262 static inline int index_get_length(indexObject *self, Py_ssize_t rev)
262 static inline int index_get_length(indexObject *self, Py_ssize_t rev)
263 {
263 {
264 if (rev == nullrev) {
264 if (rev == nullrev) {
265 return 0;
265 return 0;
266 }
266 }
267 if (rev >= self->length) {
267 if (rev >= self->length) {
268 PyObject *tuple;
268 PyObject *tuple;
269 PyObject *pylong;
269 PyObject *pylong;
270 long ret;
270 long ret;
271 tuple = PyList_GET_ITEM(self->added, rev - self->length);
271 tuple = PyList_GET_ITEM(self->added, rev - self->length);
272 pylong = PyTuple_GET_ITEM(tuple, 1);
272 pylong = PyTuple_GET_ITEM(tuple, 1);
273 ret = PyInt_AsLong(pylong);
273 ret = PyInt_AsLong(pylong);
274 if (ret == -1 && PyErr_Occurred()) {
274 if (ret == -1 && PyErr_Occurred()) {
275 return -1;
275 return -1;
276 }
276 }
277 if (ret < 0 || ret > (long)INT_MAX) {
277 if (ret < 0 || ret > (long)INT_MAX) {
278 PyErr_Format(PyExc_OverflowError,
278 PyErr_Format(PyExc_OverflowError,
279 "revlog entry size out of bound (%ld)",
279 "revlog entry size out of bound (%ld)",
280 ret);
280 ret);
281 return -1;
281 return -1;
282 }
282 }
283 return (int)ret;
283 return (int)ret;
284 } else {
284 } else {
285 const char *data = index_deref(self, rev);
285 const char *data = index_deref(self, rev);
286 int tmp = (int)getbe32(data + 8);
286 int tmp = (int)getbe32(data + 8);
287 if (tmp < 0) {
287 if (tmp < 0) {
288 PyErr_Format(PyExc_OverflowError,
288 PyErr_Format(PyExc_OverflowError,
289 "revlog entry size out of bound (%d)",
289 "revlog entry size out of bound (%d)",
290 tmp);
290 tmp);
291 return -1;
291 return -1;
292 }
292 }
293 return tmp;
293 return tmp;
294 }
294 }
295 }
295 }
296
296
297 /*
297 /*
298 * RevlogNG format (all in big endian, data may be inlined):
298 * RevlogNG format (all in big endian, data may be inlined):
299 * 6 bytes: offset
299 * 6 bytes: offset
300 * 2 bytes: flags
300 * 2 bytes: flags
301 * 4 bytes: compressed length
301 * 4 bytes: compressed length
302 * 4 bytes: uncompressed length
302 * 4 bytes: uncompressed length
303 * 4 bytes: base revision
303 * 4 bytes: base revision
304 * 4 bytes: link revision
304 * 4 bytes: link revision
305 * 4 bytes: parent 1 revision
305 * 4 bytes: parent 1 revision
306 * 4 bytes: parent 2 revision
306 * 4 bytes: parent 2 revision
307 * 32 bytes: nodeid (only 20 bytes used)
307 * 32 bytes: nodeid (only 20 bytes used)
308 */
308 */
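A self-contained illustration (the packed value and helper name are hypothetical, not part of this file) of how the first 8 bytes combine the 6-byte offset with the 2-byte flags field; index_get() below keeps them packed, and index_get_start() recovers the offset with the same >> 16 shift.

static inline void offset_flags_example(void)
{
	/* hypothetical entry: data offset 0xabcdef, flags 0x0003 */
	uint64_t offset_flags = ((uint64_t)0xabcdef << 16) | 0x0003;
	uint64_t start = offset_flags >> 16;                /* 0xabcdef */
	unsigned flags = (unsigned)(offset_flags & 0xFFFF); /* 0x0003 */
	(void)start;
	(void)flags;
}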
309 static PyObject *index_get(indexObject *self, Py_ssize_t pos)
309 static PyObject *index_get(indexObject *self, Py_ssize_t pos)
310 {
310 {
311 uint64_t offset_flags;
311 uint64_t offset_flags;
312 int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
312 int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
313 const char *c_node_id;
313 const char *c_node_id;
314 const char *data;
314 const char *data;
315 Py_ssize_t length = index_length(self);
315 Py_ssize_t length = index_length(self);
316 PyObject *entry;
316 PyObject *entry;
317
317
318 if (pos == nullrev) {
318 if (pos == nullrev) {
319 Py_INCREF(nullentry);
319 Py_INCREF(nullentry);
320 return nullentry;
320 return nullentry;
321 }
321 }
322
322
323 if (pos < 0 || pos >= length) {
323 if (pos < 0 || pos >= length) {
324 PyErr_SetString(PyExc_IndexError, "revlog index out of range");
324 PyErr_SetString(PyExc_IndexError, "revlog index out of range");
325 return NULL;
325 return NULL;
326 }
326 }
327
327
328 if (pos >= self->length) {
328 if (pos >= self->length) {
329 PyObject *obj;
329 PyObject *obj;
330 obj = PyList_GET_ITEM(self->added, pos - self->length);
330 obj = PyList_GET_ITEM(self->added, pos - self->length);
331 Py_INCREF(obj);
331 Py_INCREF(obj);
332 return obj;
332 return obj;
333 }
333 }
334
334
335 if (self->cache) {
335 if (self->cache) {
336 if (self->cache[pos]) {
336 if (self->cache[pos]) {
337 Py_INCREF(self->cache[pos]);
337 Py_INCREF(self->cache[pos]);
338 return self->cache[pos];
338 return self->cache[pos];
339 }
339 }
340 } else {
340 } else {
341 self->cache = calloc(self->raw_length, sizeof(PyObject *));
341 self->cache = calloc(self->raw_length, sizeof(PyObject *));
342 if (self->cache == NULL)
342 if (self->cache == NULL)
343 return PyErr_NoMemory();
343 return PyErr_NoMemory();
344 }
344 }
345
345
346 data = index_deref(self, pos);
346 data = index_deref(self, pos);
347 if (data == NULL)
347 if (data == NULL)
348 return NULL;
348 return NULL;
349
349
350 offset_flags = getbe32(data + 4);
350 offset_flags = getbe32(data + 4);
351 if (pos == 0) /* mask out version number for the first entry */
351 if (pos == 0) /* mask out version number for the first entry */
352 offset_flags &= 0xFFFF;
352 offset_flags &= 0xFFFF;
353 else {
353 else {
354 uint32_t offset_high = getbe32(data);
354 uint32_t offset_high = getbe32(data);
355 offset_flags |= ((uint64_t)offset_high) << 32;
355 offset_flags |= ((uint64_t)offset_high) << 32;
356 }
356 }
357
357
358 comp_len = getbe32(data + 8);
358 comp_len = getbe32(data + 8);
359 uncomp_len = getbe32(data + 12);
359 uncomp_len = getbe32(data + 12);
360 base_rev = getbe32(data + 16);
360 base_rev = getbe32(data + 16);
361 link_rev = getbe32(data + 20);
361 link_rev = getbe32(data + 20);
362 parent_1 = getbe32(data + 24);
362 parent_1 = getbe32(data + 24);
363 parent_2 = getbe32(data + 28);
363 parent_2 = getbe32(data + 28);
364 c_node_id = data + 32;
364 c_node_id = data + 32;
365
365
366 entry = Py_BuildValue(tuple_format, offset_flags, comp_len, uncomp_len,
366 entry = Py_BuildValue(tuple_format, offset_flags, comp_len, uncomp_len,
367 base_rev, link_rev, parent_1, parent_2, c_node_id,
367 base_rev, link_rev, parent_1, parent_2, c_node_id,
368 20);
368 20);
369
369
370 if (entry) {
370 if (entry) {
371 PyObject_GC_UnTrack(entry);
371 PyObject_GC_UnTrack(entry);
372 Py_INCREF(entry);
372 Py_INCREF(entry);
373 }
373 }
374
374
375 self->cache[pos] = entry;
375 self->cache[pos] = entry;
376
376
377 return entry;
377 return entry;
378 }
378 }
379
379
380 /*
380 /*
381 * Return the 20-byte SHA of the node corresponding to the given rev.
381 * Return the 20-byte SHA of the node corresponding to the given rev.
382 */
382 */
383 static const char *index_node(indexObject *self, Py_ssize_t pos)
383 static const char *index_node(indexObject *self, Py_ssize_t pos)
384 {
384 {
385 Py_ssize_t length = index_length(self);
385 Py_ssize_t length = index_length(self);
386 const char *data;
386 const char *data;
387
387
388 if (pos == nullrev)
388 if (pos == nullrev)
389 return nullid;
389 return nullid;
390
390
391 if (pos >= length)
391 if (pos >= length)
392 return NULL;
392 return NULL;
393
393
394 if (pos >= self->length) {
394 if (pos >= self->length) {
395 PyObject *tuple, *str;
395 PyObject *tuple, *str;
396 tuple = PyList_GET_ITEM(self->added, pos - self->length);
396 tuple = PyList_GET_ITEM(self->added, pos - self->length);
397 str = PyTuple_GetItem(tuple, 7);
397 str = PyTuple_GetItem(tuple, 7);
398 return str ? PyBytes_AS_STRING(str) : NULL;
398 return str ? PyBytes_AS_STRING(str) : NULL;
399 }
399 }
400
400
401 data = index_deref(self, pos);
401 data = index_deref(self, pos);
402 return data ? data + 32 : NULL;
402 return data ? data + 32 : NULL;
403 }
403 }
404
404
405 /*
405 /*
406 * Return the 20-byte SHA of the node corresponding to the given rev. The
406 * Return the 20-byte SHA of the node corresponding to the given rev. The
407  * rev is assumed to exist. If not, an exception is set.
407  * rev is assumed to exist. If not, an exception is set.
408 */
408 */
409 static const char *index_node_existing(indexObject *self, Py_ssize_t pos)
409 static const char *index_node_existing(indexObject *self, Py_ssize_t pos)
410 {
410 {
411 const char *node = index_node(self, pos);
411 const char *node = index_node(self, pos);
412 if (node == NULL) {
412 if (node == NULL) {
413 PyErr_Format(PyExc_IndexError, "could not access rev %d",
413 PyErr_Format(PyExc_IndexError, "could not access rev %d",
414 (int)pos);
414 (int)pos);
415 }
415 }
416 return node;
416 return node;
417 }
417 }
418
418
419 static int nt_insert(nodetree *self, const char *node, int rev);
419 static int nt_insert(nodetree *self, const char *node, int rev);
420
420
421 static int node_check(PyObject *obj, char **node)
421 static int node_check(PyObject *obj, char **node)
422 {
422 {
423 Py_ssize_t nodelen;
423 Py_ssize_t nodelen;
424 if (PyBytes_AsStringAndSize(obj, node, &nodelen) == -1)
424 if (PyBytes_AsStringAndSize(obj, node, &nodelen) == -1)
425 return -1;
425 return -1;
426 if (nodelen == 20)
426 if (nodelen == 20)
427 return 0;
427 return 0;
428 PyErr_SetString(PyExc_ValueError, "20-byte hash required");
428 PyErr_SetString(PyExc_ValueError, "20-byte hash required");
429 return -1;
429 return -1;
430 }
430 }
431
431
432 static PyObject *index_append(indexObject *self, PyObject *obj)
432 static PyObject *index_append(indexObject *self, PyObject *obj)
433 {
433 {
434 char *node;
434 char *node;
435 Py_ssize_t len;
435 Py_ssize_t len;
436
436
437 if (!PyTuple_Check(obj) || PyTuple_GET_SIZE(obj) != 8) {
437 if (!PyTuple_Check(obj) || PyTuple_GET_SIZE(obj) != 8) {
438 PyErr_SetString(PyExc_TypeError, "8-tuple required");
438 PyErr_SetString(PyExc_TypeError, "8-tuple required");
439 return NULL;
439 return NULL;
440 }
440 }
441
441
442 if (node_check(PyTuple_GET_ITEM(obj, 7), &node) == -1)
442 if (node_check(PyTuple_GET_ITEM(obj, 7), &node) == -1)
443 return NULL;
443 return NULL;
444
444
445 len = index_length(self);
445 len = index_length(self);
446
446
447 if (self->added == NULL) {
447 if (self->added == NULL) {
448 self->added = PyList_New(0);
448 self->added = PyList_New(0);
449 if (self->added == NULL)
449 if (self->added == NULL)
450 return NULL;
450 return NULL;
451 }
451 }
452
452
453 if (PyList_Append(self->added, obj) == -1)
453 if (PyList_Append(self->added, obj) == -1)
454 return NULL;
454 return NULL;
455
455
456 if (self->ntinitialized)
456 if (self->ntinitialized)
457 nt_insert(&self->nt, node, (int)len);
457 nt_insert(&self->nt, node, (int)len);
458
458
459 Py_CLEAR(self->headrevs);
459 Py_CLEAR(self->headrevs);
460 Py_RETURN_NONE;
460 Py_RETURN_NONE;
461 }
461 }
462
462
463 static PyObject *index_stats(indexObject *self)
463 static PyObject *index_stats(indexObject *self)
464 {
464 {
465 PyObject *obj = PyDict_New();
465 PyObject *obj = PyDict_New();
466 PyObject *s = NULL;
466 PyObject *s = NULL;
467 PyObject *t = NULL;
467 PyObject *t = NULL;
468
468
469 if (obj == NULL)
469 if (obj == NULL)
470 return NULL;
470 return NULL;
471
471
472 #define istat(__n, __d) \
472 #define istat(__n, __d) \
473 do { \
473 do { \
474 s = PyBytes_FromString(__d); \
474 s = PyBytes_FromString(__d); \
475 t = PyInt_FromSsize_t(self->__n); \
475 t = PyInt_FromSsize_t(self->__n); \
476 if (!s || !t) \
476 if (!s || !t) \
477 goto bail; \
477 goto bail; \
478 if (PyDict_SetItem(obj, s, t) == -1) \
478 if (PyDict_SetItem(obj, s, t) == -1) \
479 goto bail; \
479 goto bail; \
480 Py_CLEAR(s); \
480 Py_CLEAR(s); \
481 Py_CLEAR(t); \
481 Py_CLEAR(t); \
482 } while (0)
482 } while (0)
483
483
484 if (self->added) {
484 if (self->added) {
485 Py_ssize_t len = PyList_GET_SIZE(self->added);
485 Py_ssize_t len = PyList_GET_SIZE(self->added);
486 s = PyBytes_FromString("index entries added");
486 s = PyBytes_FromString("index entries added");
487 t = PyInt_FromSsize_t(len);
487 t = PyInt_FromSsize_t(len);
488 if (!s || !t)
488 if (!s || !t)
489 goto bail;
489 goto bail;
490 if (PyDict_SetItem(obj, s, t) == -1)
490 if (PyDict_SetItem(obj, s, t) == -1)
491 goto bail;
491 goto bail;
492 Py_CLEAR(s);
492 Py_CLEAR(s);
493 Py_CLEAR(t);
493 Py_CLEAR(t);
494 }
494 }
495
495
496 if (self->raw_length != self->length)
496 if (self->raw_length != self->length)
497 istat(raw_length, "revs on disk");
497 istat(raw_length, "revs on disk");
498 istat(length, "revs in memory");
498 istat(length, "revs in memory");
499 istat(ntlookups, "node trie lookups");
499 istat(ntlookups, "node trie lookups");
500 istat(ntmisses, "node trie misses");
500 istat(ntmisses, "node trie misses");
501 istat(ntrev, "node trie last rev scanned");
501 istat(ntrev, "node trie last rev scanned");
502 if (self->ntinitialized) {
502 if (self->ntinitialized) {
503 istat(nt.capacity, "node trie capacity");
503 istat(nt.capacity, "node trie capacity");
504 istat(nt.depth, "node trie depth");
504 istat(nt.depth, "node trie depth");
505 istat(nt.length, "node trie count");
505 istat(nt.length, "node trie count");
506 istat(nt.splits, "node trie splits");
506 istat(nt.splits, "node trie splits");
507 }
507 }
508
508
509 #undef istat
509 #undef istat
510
510
511 return obj;
511 return obj;
512
512
513 bail:
513 bail:
514 Py_XDECREF(obj);
514 Py_XDECREF(obj);
515 Py_XDECREF(s);
515 Py_XDECREF(s);
516 Py_XDECREF(t);
516 Py_XDECREF(t);
517 return NULL;
517 return NULL;
518 }
518 }
519
519
520 /*
520 /*
521 * When we cache a list, we want to be sure the caller can't mutate
521 * When we cache a list, we want to be sure the caller can't mutate
522 * the cached copy.
522 * the cached copy.
523 */
523 */
524 static PyObject *list_copy(PyObject *list)
524 static PyObject *list_copy(PyObject *list)
525 {
525 {
526 Py_ssize_t len = PyList_GET_SIZE(list);
526 Py_ssize_t len = PyList_GET_SIZE(list);
527 PyObject *newlist = PyList_New(len);
527 PyObject *newlist = PyList_New(len);
528 Py_ssize_t i;
528 Py_ssize_t i;
529
529
530 if (newlist == NULL)
530 if (newlist == NULL)
531 return NULL;
531 return NULL;
532
532
533 for (i = 0; i < len; i++) {
533 for (i = 0; i < len; i++) {
534 PyObject *obj = PyList_GET_ITEM(list, i);
534 PyObject *obj = PyList_GET_ITEM(list, i);
535 Py_INCREF(obj);
535 Py_INCREF(obj);
536 PyList_SET_ITEM(newlist, i, obj);
536 PyList_SET_ITEM(newlist, i, obj);
537 }
537 }
538
538
539 return newlist;
539 return newlist;
540 }
540 }
541
541
542 static int check_filter(PyObject *filter, Py_ssize_t arg)
542 static int check_filter(PyObject *filter, Py_ssize_t arg)
543 {
543 {
544 if (filter) {
544 if (filter) {
545 PyObject *arglist, *result;
545 PyObject *arglist, *result;
546 int isfiltered;
546 int isfiltered;
547
547
548 arglist = Py_BuildValue("(n)", arg);
548 arglist = Py_BuildValue("(n)", arg);
549 if (!arglist) {
549 if (!arglist) {
550 return -1;
550 return -1;
551 }
551 }
552
552
553 result = PyEval_CallObject(filter, arglist);
553 result = PyEval_CallObject(filter, arglist);
554 Py_DECREF(arglist);
554 Py_DECREF(arglist);
555 if (!result) {
555 if (!result) {
556 return -1;
556 return -1;
557 }
557 }
558
558
559 /* PyObject_IsTrue returns 1 if true, 0 if false, -1 if error,
559 /* PyObject_IsTrue returns 1 if true, 0 if false, -1 if error,
560 * same as this function, so we can just return it directly.*/
560 * same as this function, so we can just return it directly.*/
561 isfiltered = PyObject_IsTrue(result);
561 isfiltered = PyObject_IsTrue(result);
562 Py_DECREF(result);
562 Py_DECREF(result);
563 return isfiltered;
563 return isfiltered;
564 } else {
564 } else {
565 return 0;
565 return 0;
566 }
566 }
567 }
567 }
568
568
569 static Py_ssize_t add_roots_get_min(indexObject *self, PyObject *list,
569 static Py_ssize_t add_roots_get_min(indexObject *self, PyObject *list,
570 Py_ssize_t marker, char *phases)
570 Py_ssize_t marker, char *phases)
571 {
571 {
572 PyObject *iter = NULL;
572 PyObject *iter = NULL;
573 PyObject *iter_item = NULL;
573 PyObject *iter_item = NULL;
574 Py_ssize_t min_idx = index_length(self) + 2;
574 Py_ssize_t min_idx = index_length(self) + 2;
575 long iter_item_long;
575 long iter_item_long;
576
576
577 if (PyList_GET_SIZE(list) != 0) {
577 if (PyList_GET_SIZE(list) != 0) {
578 iter = PyObject_GetIter(list);
578 iter = PyObject_GetIter(list);
579 if (iter == NULL)
579 if (iter == NULL)
580 return -2;
580 return -2;
581 while ((iter_item = PyIter_Next(iter))) {
581 while ((iter_item = PyIter_Next(iter))) {
582 if (!pylong_to_long(iter_item, &iter_item_long)) {
582 if (!pylong_to_long(iter_item, &iter_item_long)) {
583 Py_DECREF(iter_item);
583 Py_DECREF(iter_item);
584 return -2;
584 return -2;
585 }
585 }
586 Py_DECREF(iter_item);
586 Py_DECREF(iter_item);
587 if (iter_item_long < min_idx)
587 if (iter_item_long < min_idx)
588 min_idx = iter_item_long;
588 min_idx = iter_item_long;
589 phases[iter_item_long] = (char)marker;
589 phases[iter_item_long] = (char)marker;
590 }
590 }
591 Py_DECREF(iter);
591 Py_DECREF(iter);
592 }
592 }
593
593
594 return min_idx;
594 return min_idx;
595 }
595 }
596
596
597 static inline void set_phase_from_parents(char *phases, int parent_1,
597 static inline void set_phase_from_parents(char *phases, int parent_1,
598 int parent_2, Py_ssize_t i)
598 int parent_2, Py_ssize_t i)
599 {
599 {
600 if (parent_1 >= 0 && phases[parent_1] > phases[i])
600 if (parent_1 >= 0 && phases[parent_1] > phases[i])
601 phases[i] = phases[parent_1];
601 phases[i] = phases[parent_1];
602 if (parent_2 >= 0 && phases[parent_2] > phases[i])
602 if (parent_2 >= 0 && phases[parent_2] > phases[i])
603 phases[i] = phases[parent_2];
603 phases[i] = phases[parent_2];
604 }
604 }
605
605
606 static PyObject *reachableroots2(indexObject *self, PyObject *args)
606 static PyObject *reachableroots2(indexObject *self, PyObject *args)
607 {
607 {
608
608
609 /* Input */
609 /* Input */
610 long minroot;
610 long minroot;
611 PyObject *includepatharg = NULL;
611 PyObject *includepatharg = NULL;
612 int includepath = 0;
612 int includepath = 0;
613 /* heads and roots are lists */
613 /* heads and roots are lists */
614 PyObject *heads = NULL;
614 PyObject *heads = NULL;
615 PyObject *roots = NULL;
615 PyObject *roots = NULL;
616 PyObject *reachable = NULL;
616 PyObject *reachable = NULL;
617
617
618 PyObject *val;
618 PyObject *val;
619 Py_ssize_t len = index_length(self);
619 Py_ssize_t len = index_length(self);
620 long revnum;
620 long revnum;
621 Py_ssize_t k;
621 Py_ssize_t k;
622 Py_ssize_t i;
622 Py_ssize_t i;
623 Py_ssize_t l;
623 Py_ssize_t l;
624 int r;
624 int r;
625 int parents[2];
625 int parents[2];
626
626
627 /* Internal data structure:
627 /* Internal data structure:
628 	 * tovisit: array of length len+1 (all revs + nullrev), filled up to
628 	 * tovisit: array of length len+1 (all revs + nullrev), filled up to
629 * lentovisit
629 * lentovisit
630 *
630 *
631 * revstates: array of length len+1 (all revs + nullrev) */
631 * revstates: array of length len+1 (all revs + nullrev) */
632 int *tovisit = NULL;
632 int *tovisit = NULL;
633 long lentovisit = 0;
633 long lentovisit = 0;
634 enum { RS_SEEN = 1, RS_ROOT = 2, RS_REACHABLE = 4 };
634 enum { RS_SEEN = 1, RS_ROOT = 2, RS_REACHABLE = 4 };
635 char *revstates = NULL;
635 char *revstates = NULL;
636
636
637 /* Get arguments */
637 /* Get arguments */
638 if (!PyArg_ParseTuple(args, "lO!O!O!", &minroot, &PyList_Type, &heads,
638 if (!PyArg_ParseTuple(args, "lO!O!O!", &minroot, &PyList_Type, &heads,
639 &PyList_Type, &roots, &PyBool_Type,
639 &PyList_Type, &roots, &PyBool_Type,
640 &includepatharg))
640 &includepatharg))
641 goto bail;
641 goto bail;
642
642
643 if (includepatharg == Py_True)
643 if (includepatharg == Py_True)
644 includepath = 1;
644 includepath = 1;
645
645
646 /* Initialize return set */
646 /* Initialize return set */
647 reachable = PyList_New(0);
647 reachable = PyList_New(0);
648 if (reachable == NULL)
648 if (reachable == NULL)
649 goto bail;
649 goto bail;
650
650
651 /* Initialize internal datastructures */
651 /* Initialize internal datastructures */
652 tovisit = (int *)malloc((len + 1) * sizeof(int));
652 tovisit = (int *)malloc((len + 1) * sizeof(int));
653 if (tovisit == NULL) {
653 if (tovisit == NULL) {
654 PyErr_NoMemory();
654 PyErr_NoMemory();
655 goto bail;
655 goto bail;
656 }
656 }
657
657
658 revstates = (char *)calloc(len + 1, 1);
658 revstates = (char *)calloc(len + 1, 1);
659 if (revstates == NULL) {
659 if (revstates == NULL) {
660 PyErr_NoMemory();
660 PyErr_NoMemory();
661 goto bail;
661 goto bail;
662 }
662 }
663
663
664 l = PyList_GET_SIZE(roots);
664 l = PyList_GET_SIZE(roots);
665 for (i = 0; i < l; i++) {
665 for (i = 0; i < l; i++) {
666 revnum = PyInt_AsLong(PyList_GET_ITEM(roots, i));
666 revnum = PyInt_AsLong(PyList_GET_ITEM(roots, i));
667 if (revnum == -1 && PyErr_Occurred())
667 if (revnum == -1 && PyErr_Occurred())
668 goto bail;
668 goto bail;
669 /* If root is out of range, e.g. wdir(), it must be unreachable
669 /* If root is out of range, e.g. wdir(), it must be unreachable
670 * from heads. So we can just ignore it. */
670 * from heads. So we can just ignore it. */
671 if (revnum + 1 < 0 || revnum + 1 >= len + 1)
671 if (revnum + 1 < 0 || revnum + 1 >= len + 1)
672 continue;
672 continue;
673 revstates[revnum + 1] |= RS_ROOT;
673 revstates[revnum + 1] |= RS_ROOT;
674 }
674 }
675
675
676 /* Populate tovisit with all the heads */
676 /* Populate tovisit with all the heads */
677 l = PyList_GET_SIZE(heads);
677 l = PyList_GET_SIZE(heads);
678 for (i = 0; i < l; i++) {
678 for (i = 0; i < l; i++) {
679 revnum = PyInt_AsLong(PyList_GET_ITEM(heads, i));
679 revnum = PyInt_AsLong(PyList_GET_ITEM(heads, i));
680 if (revnum == -1 && PyErr_Occurred())
680 if (revnum == -1 && PyErr_Occurred())
681 goto bail;
681 goto bail;
682 if (revnum + 1 < 0 || revnum + 1 >= len + 1) {
682 if (revnum + 1 < 0 || revnum + 1 >= len + 1) {
683 PyErr_SetString(PyExc_IndexError, "head out of range");
683 PyErr_SetString(PyExc_IndexError, "head out of range");
684 goto bail;
684 goto bail;
685 }
685 }
686 if (!(revstates[revnum + 1] & RS_SEEN)) {
686 if (!(revstates[revnum + 1] & RS_SEEN)) {
687 tovisit[lentovisit++] = (int)revnum;
687 tovisit[lentovisit++] = (int)revnum;
688 revstates[revnum + 1] |= RS_SEEN;
688 revstates[revnum + 1] |= RS_SEEN;
689 }
689 }
690 }
690 }
691
691
692 /* Visit the tovisit list and find the reachable roots */
692 /* Visit the tovisit list and find the reachable roots */
693 k = 0;
693 k = 0;
694 while (k < lentovisit) {
694 while (k < lentovisit) {
695 		/* Add the node to reachable if it is a root */
695 		/* Add the node to reachable if it is a root */
696 revnum = tovisit[k++];
696 revnum = tovisit[k++];
697 if (revstates[revnum + 1] & RS_ROOT) {
697 if (revstates[revnum + 1] & RS_ROOT) {
698 revstates[revnum + 1] |= RS_REACHABLE;
698 revstates[revnum + 1] |= RS_REACHABLE;
699 val = PyInt_FromLong(revnum);
699 val = PyInt_FromLong(revnum);
700 if (val == NULL)
700 if (val == NULL)
701 goto bail;
701 goto bail;
702 r = PyList_Append(reachable, val);
702 r = PyList_Append(reachable, val);
703 Py_DECREF(val);
703 Py_DECREF(val);
704 if (r < 0)
704 if (r < 0)
705 goto bail;
705 goto bail;
706 if (includepath == 0)
706 if (includepath == 0)
707 continue;
707 continue;
708 }
708 }
709
709
710 /* Add its parents to the list of nodes to visit */
710 /* Add its parents to the list of nodes to visit */
711 if (revnum == nullrev)
711 if (revnum == nullrev)
712 continue;
712 continue;
713 r = index_get_parents(self, revnum, parents, (int)len - 1);
713 r = index_get_parents(self, revnum, parents, (int)len - 1);
714 if (r < 0)
714 if (r < 0)
715 goto bail;
715 goto bail;
716 for (i = 0; i < 2; i++) {
716 for (i = 0; i < 2; i++) {
717 if (!(revstates[parents[i] + 1] & RS_SEEN) &&
717 if (!(revstates[parents[i] + 1] & RS_SEEN) &&
718 parents[i] >= minroot) {
718 parents[i] >= minroot) {
719 tovisit[lentovisit++] = parents[i];
719 tovisit[lentovisit++] = parents[i];
720 revstates[parents[i] + 1] |= RS_SEEN;
720 revstates[parents[i] + 1] |= RS_SEEN;
721 }
721 }
722 }
722 }
723 }
723 }
724
724
725 /* Find all the nodes in between the roots we found and the heads
725 /* Find all the nodes in between the roots we found and the heads
726 * and add them to the reachable set */
726 * and add them to the reachable set */
727 if (includepath == 1) {
727 if (includepath == 1) {
728 long minidx = minroot;
728 long minidx = minroot;
729 if (minidx < 0)
729 if (minidx < 0)
730 minidx = 0;
730 minidx = 0;
731 for (i = minidx; i < len; i++) {
731 for (i = minidx; i < len; i++) {
732 if (!(revstates[i + 1] & RS_SEEN))
732 if (!(revstates[i + 1] & RS_SEEN))
733 continue;
733 continue;
734 r = index_get_parents(self, i, parents, (int)len - 1);
734 r = index_get_parents(self, i, parents, (int)len - 1);
735 /* Corrupted index file, error is set from
735 /* Corrupted index file, error is set from
736 * index_get_parents */
736 * index_get_parents */
737 if (r < 0)
737 if (r < 0)
738 goto bail;
738 goto bail;
739 if (((revstates[parents[0] + 1] |
739 if (((revstates[parents[0] + 1] |
740 revstates[parents[1] + 1]) &
740 revstates[parents[1] + 1]) &
741 RS_REACHABLE) &&
741 RS_REACHABLE) &&
742 !(revstates[i + 1] & RS_REACHABLE)) {
742 !(revstates[i + 1] & RS_REACHABLE)) {
743 revstates[i + 1] |= RS_REACHABLE;
743 revstates[i + 1] |= RS_REACHABLE;
744 val = PyInt_FromSsize_t(i);
744 val = PyInt_FromSsize_t(i);
745 if (val == NULL)
745 if (val == NULL)
746 goto bail;
746 goto bail;
747 r = PyList_Append(reachable, val);
747 r = PyList_Append(reachable, val);
748 Py_DECREF(val);
748 Py_DECREF(val);
749 if (r < 0)
749 if (r < 0)
750 goto bail;
750 goto bail;
751 }
751 }
752 }
752 }
753 }
753 }
754
754
755 free(revstates);
755 free(revstates);
756 free(tovisit);
756 free(tovisit);
757 return reachable;
757 return reachable;
758 bail:
758 bail:
759 Py_XDECREF(reachable);
759 Py_XDECREF(reachable);
760 free(revstates);
760 free(revstates);
761 free(tovisit);
761 free(tovisit);
762 return NULL;
762 return NULL;
763 }
763 }
764
764
765 static PyObject *compute_phases_map_sets(indexObject *self, PyObject *args)
765 static PyObject *compute_phases_map_sets(indexObject *self, PyObject *args)
766 {
766 {
767 PyObject *roots = Py_None;
767 PyObject *roots = Py_None;
768 PyObject *ret = NULL;
768 PyObject *ret = NULL;
769 PyObject *phasessize = NULL;
769 PyObject *phasessize = NULL;
770 PyObject *phaseroots = NULL;
770 PyObject *phaseroots = NULL;
771 PyObject *phaseset = NULL;
771 PyObject *phaseset = NULL;
772 PyObject *phasessetlist = NULL;
772 PyObject *phasessetlist = NULL;
773 PyObject *rev = NULL;
773 PyObject *rev = NULL;
774 Py_ssize_t len = index_length(self);
774 Py_ssize_t len = index_length(self);
775 Py_ssize_t numphase = 0;
775 Py_ssize_t numphase = 0;
776 Py_ssize_t minrevallphases = 0;
776 Py_ssize_t minrevallphases = 0;
777 Py_ssize_t minrevphase = 0;
777 Py_ssize_t minrevphase = 0;
778 Py_ssize_t i = 0;
778 Py_ssize_t i = 0;
779 char *phases = NULL;
779 char *phases = NULL;
780 long phase;
780 long phase;
781
781
782 if (!PyArg_ParseTuple(args, "O", &roots))
782 if (!PyArg_ParseTuple(args, "O", &roots))
783 goto done;
783 goto done;
784 if (roots == NULL || !PyList_Check(roots)) {
784 if (roots == NULL || !PyList_Check(roots)) {
785 PyErr_SetString(PyExc_TypeError, "roots must be a list");
785 PyErr_SetString(PyExc_TypeError, "roots must be a list");
786 goto done;
786 goto done;
787 }
787 }
788
788
789 phases = calloc(
789 phases = calloc(
790 len, 1); /* phase per rev: {0: public, 1: draft, 2: secret} */
790 len, 1); /* phase per rev: {0: public, 1: draft, 2: secret} */
791 if (phases == NULL) {
791 if (phases == NULL) {
792 PyErr_NoMemory();
792 PyErr_NoMemory();
793 goto done;
793 goto done;
794 }
794 }
795 /* Put the phase information of all the roots in phases */
795 /* Put the phase information of all the roots in phases */
796 numphase = PyList_GET_SIZE(roots) + 1;
796 numphase = PyList_GET_SIZE(roots) + 1;
797 minrevallphases = len + 1;
797 minrevallphases = len + 1;
798 phasessetlist = PyList_New(numphase);
798 phasessetlist = PyList_New(numphase);
799 if (phasessetlist == NULL)
799 if (phasessetlist == NULL)
800 goto done;
800 goto done;
801
801
802 PyList_SET_ITEM(phasessetlist, 0, Py_None);
802 PyList_SET_ITEM(phasessetlist, 0, Py_None);
803 Py_INCREF(Py_None);
803 Py_INCREF(Py_None);
804
804
805 for (i = 0; i < numphase - 1; i++) {
805 for (i = 0; i < numphase - 1; i++) {
806 phaseroots = PyList_GET_ITEM(roots, i);
806 phaseroots = PyList_GET_ITEM(roots, i);
807 phaseset = PySet_New(NULL);
807 phaseset = PySet_New(NULL);
808 if (phaseset == NULL)
808 if (phaseset == NULL)
809 goto release;
809 goto release;
810 PyList_SET_ITEM(phasessetlist, i + 1, phaseset);
810 PyList_SET_ITEM(phasessetlist, i + 1, phaseset);
811 if (!PyList_Check(phaseroots)) {
811 if (!PyList_Check(phaseroots)) {
812 PyErr_SetString(PyExc_TypeError,
812 PyErr_SetString(PyExc_TypeError,
813 "roots item must be a list");
813 "roots item must be a list");
814 goto release;
814 goto release;
815 }
815 }
816 minrevphase =
816 minrevphase =
817 add_roots_get_min(self, phaseroots, i + 1, phases);
817 add_roots_get_min(self, phaseroots, i + 1, phases);
818 if (minrevphase == -2) /* Error from add_roots_get_min */
818 if (minrevphase == -2) /* Error from add_roots_get_min */
819 goto release;
819 goto release;
820 minrevallphases = MIN(minrevallphases, minrevphase);
820 minrevallphases = MIN(minrevallphases, minrevphase);
821 }
821 }
822 /* Propagate the phase information from the roots to the revs */
822 /* Propagate the phase information from the roots to the revs */
823 if (minrevallphases != -1) {
823 if (minrevallphases != -1) {
824 int parents[2];
824 int parents[2];
825 for (i = minrevallphases; i < len; i++) {
825 for (i = minrevallphases; i < len; i++) {
826 if (index_get_parents(self, i, parents, (int)len - 1) <
826 if (index_get_parents(self, i, parents, (int)len - 1) <
827 0)
827 0)
828 goto release;
828 goto release;
829 set_phase_from_parents(phases, parents[0], parents[1],
829 set_phase_from_parents(phases, parents[0], parents[1],
830 i);
830 i);
831 }
831 }
832 }
832 }
833 /* Transform phase list to a python list */
833 /* Transform phase list to a python list */
834 phasessize = PyInt_FromSsize_t(len);
834 phasessize = PyInt_FromSsize_t(len);
835 if (phasessize == NULL)
835 if (phasessize == NULL)
836 goto release;
836 goto release;
837 for (i = 0; i < len; i++) {
837 for (i = 0; i < len; i++) {
838 phase = phases[i];
838 phase = phases[i];
839 /* We only store the phase sets for non-public phases; the
839 /* We only store the phase sets for non-public phases; the
840 * public set is computed as a difference */
840 * public set is computed as a difference */
841 if (phase != 0) {
841 if (phase != 0) {
842 phaseset = PyList_GET_ITEM(phasessetlist, phase);
842 phaseset = PyList_GET_ITEM(phasessetlist, phase);
843 rev = PyInt_FromSsize_t(i);
843 rev = PyInt_FromSsize_t(i);
844 if (rev == NULL)
844 if (rev == NULL)
845 goto release;
845 goto release;
846 PySet_Add(phaseset, rev);
846 PySet_Add(phaseset, rev);
847 Py_XDECREF(rev);
847 Py_XDECREF(rev);
848 }
848 }
849 }
849 }
850 ret = PyTuple_Pack(2, phasessize, phasessetlist);
850 ret = PyTuple_Pack(2, phasessize, phasessetlist);
851
851
852 release:
852 release:
853 Py_XDECREF(phasessize);
853 Py_XDECREF(phasessize);
854 Py_XDECREF(phasessetlist);
854 Py_XDECREF(phasessetlist);
855 done:
855 done:
856 free(phases);
856 free(phases);
857 return ret;
857 return ret;
858 }
858 }
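The function above fills one byte of phase information per revision from the given roots and then sweeps forward so each revision picks up its parents' phases (set_phase_from_parents is assumed here to keep the maximum of the child's and its parents' phases). A rough pure-Python sketch of the same idea, using a hypothetical parents(rev) callable rather than the extension's index:

    def compute_phases(parents, roots_by_phase, nrevs):
        """parents: rev -> (p1, p2), -1 for a missing parent.
        roots_by_phase: {phase: set of root revs} for the non-public phases."""
        phases = [0] * nrevs                  # 0: public, 1: draft, 2: secret
        for phase, roots in roots_by_phase.items():
            for root in roots:
                phases[root] = max(phases[root], phase)
        for rev in range(nrevs):              # revisions come after their parents
            for p in parents(rev):
                if p >= 0:
                    phases[rev] = max(phases[rev], phases[p])
        nonpublic = {phase: set() for phase in roots_by_phase}
        for rev, phase in enumerate(phases):
            if phase:
                nonpublic[phase].add(rev)
        return phases, nonpublic

The C version additionally starts the sweep at the lowest root revision, since everything below it is necessarily public.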
859
859
860 static PyObject *index_headrevs(indexObject *self, PyObject *args)
860 static PyObject *index_headrevs(indexObject *self, PyObject *args)
861 {
861 {
862 Py_ssize_t i, j, len;
862 Py_ssize_t i, j, len;
863 char *nothead = NULL;
863 char *nothead = NULL;
864 PyObject *heads = NULL;
864 PyObject *heads = NULL;
865 PyObject *filter = NULL;
865 PyObject *filter = NULL;
866 PyObject *filteredrevs = Py_None;
866 PyObject *filteredrevs = Py_None;
867
867
868 if (!PyArg_ParseTuple(args, "|O", &filteredrevs)) {
868 if (!PyArg_ParseTuple(args, "|O", &filteredrevs)) {
869 return NULL;
869 return NULL;
870 }
870 }
871
871
872 if (self->headrevs && filteredrevs == self->filteredrevs)
872 if (self->headrevs && filteredrevs == self->filteredrevs)
873 return list_copy(self->headrevs);
873 return list_copy(self->headrevs);
874
874
875 Py_DECREF(self->filteredrevs);
875 Py_DECREF(self->filteredrevs);
876 self->filteredrevs = filteredrevs;
876 self->filteredrevs = filteredrevs;
877 Py_INCREF(filteredrevs);
877 Py_INCREF(filteredrevs);
878
878
879 if (filteredrevs != Py_None) {
879 if (filteredrevs != Py_None) {
880 filter = PyObject_GetAttrString(filteredrevs, "__contains__");
880 filter = PyObject_GetAttrString(filteredrevs, "__contains__");
881 if (!filter) {
881 if (!filter) {
882 PyErr_SetString(
882 PyErr_SetString(
883 PyExc_TypeError,
883 PyExc_TypeError,
884 "filteredrevs has no attribute __contains__");
884 "filteredrevs has no attribute __contains__");
885 goto bail;
885 goto bail;
886 }
886 }
887 }
887 }
888
888
889 len = index_length(self);
889 len = index_length(self);
890 heads = PyList_New(0);
890 heads = PyList_New(0);
891 if (heads == NULL)
891 if (heads == NULL)
892 goto bail;
892 goto bail;
893 if (len == 0) {
893 if (len == 0) {
894 PyObject *nullid = PyInt_FromLong(-1);
894 PyObject *nullid = PyInt_FromLong(-1);
895 if (nullid == NULL || PyList_Append(heads, nullid) == -1) {
895 if (nullid == NULL || PyList_Append(heads, nullid) == -1) {
896 Py_XDECREF(nullid);
896 Py_XDECREF(nullid);
897 goto bail;
897 goto bail;
898 }
898 }
899 goto done;
899 goto done;
900 }
900 }
901
901
902 nothead = calloc(len, 1);
902 nothead = calloc(len, 1);
903 if (nothead == NULL) {
903 if (nothead == NULL) {
904 PyErr_NoMemory();
904 PyErr_NoMemory();
905 goto bail;
905 goto bail;
906 }
906 }
907
907
908 for (i = len - 1; i >= 0; i--) {
908 for (i = len - 1; i >= 0; i--) {
909 int isfiltered;
909 int isfiltered;
910 int parents[2];
910 int parents[2];
911
911
912 /* If nothead[i] == 1, it means we've seen an unfiltered child
912 /* If nothead[i] == 1, it means we've seen an unfiltered child
913 * of this node already, and therefore this node is not
913 * of this node already, and therefore this node is not
914 * filtered. So we can skip the expensive check_filter step.
914 * filtered. So we can skip the expensive check_filter step.
915 */
915 */
916 if (nothead[i] != 1) {
916 if (nothead[i] != 1) {
917 isfiltered = check_filter(filter, i);
917 isfiltered = check_filter(filter, i);
918 if (isfiltered == -1) {
918 if (isfiltered == -1) {
919 PyErr_SetString(PyExc_TypeError,
919 PyErr_SetString(PyExc_TypeError,
920 "unable to check filter");
920 "unable to check filter");
921 goto bail;
921 goto bail;
922 }
922 }
923
923
924 if (isfiltered) {
924 if (isfiltered) {
925 nothead[i] = 1;
925 nothead[i] = 1;
926 continue;
926 continue;
927 }
927 }
928 }
928 }
929
929
930 if (index_get_parents(self, i, parents, (int)len - 1) < 0)
930 if (index_get_parents(self, i, parents, (int)len - 1) < 0)
931 goto bail;
931 goto bail;
932 for (j = 0; j < 2; j++) {
932 for (j = 0; j < 2; j++) {
933 if (parents[j] >= 0)
933 if (parents[j] >= 0)
934 nothead[parents[j]] = 1;
934 nothead[parents[j]] = 1;
935 }
935 }
936 }
936 }
937
937
938 for (i = 0; i < len; i++) {
938 for (i = 0; i < len; i++) {
939 PyObject *head;
939 PyObject *head;
940
940
941 if (nothead[i])
941 if (nothead[i])
942 continue;
942 continue;
943 head = PyInt_FromSsize_t(i);
943 head = PyInt_FromSsize_t(i);
944 if (head == NULL || PyList_Append(heads, head) == -1) {
944 if (head == NULL || PyList_Append(heads, head) == -1) {
945 Py_XDECREF(head);
945 Py_XDECREF(head);
946 goto bail;
946 goto bail;
947 }
947 }
948 }
948 }
949
949
950 done:
950 done:
951 self->headrevs = heads;
951 self->headrevs = heads;
952 Py_XDECREF(filter);
952 Py_XDECREF(filter);
953 free(nothead);
953 free(nothead);
954 return list_copy(self->headrevs);
954 return list_copy(self->headrevs);
955 bail:
955 bail:
956 Py_XDECREF(filter);
956 Py_XDECREF(filter);
957 Py_XDECREF(heads);
957 Py_XDECREF(heads);
958 free(nothead);
958 free(nothead);
959 return NULL;
959 return NULL;
960 }
960 }
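index_headrevs computes heads in one reverse pass: every revision marks its parents as non-heads, filtered revisions are excluded, and the check_filter call is skipped once a revision is already known to have an unfiltered child. A rough Python equivalent, with filteredrevs simplified to a plain set and parents(rev) as a hypothetical accessor:

    def headrevs(parents, nrevs, filteredrevs=frozenset()):
        """parents: rev -> (p1, p2), -1 meaning 'no parent'."""
        if nrevs == 0:
            return [-1]                     # only the null revision remains
        nothead = [False] * nrevs
        for rev in range(nrevs - 1, -1, -1):
            if not nothead[rev] and rev in filteredrevs:
                nothead[rev] = True         # filtered revisions are never heads
                continue
            for p in parents(rev):
                if p >= 0:
                    nothead[p] = True       # has a child, so not a head
        return [rev for rev in range(nrevs) if not nothead[rev]]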
961
961
962 /**
962 /**
963 * Obtain the base revision index entry.
963 * Obtain the base revision index entry.
964 *
964 *
965 * Callers must ensure that rev >= 0 or illegal memory access may occur.
965 * Callers must ensure that rev >= 0 or illegal memory access may occur.
966 */
966 */
967 static inline int index_baserev(indexObject *self, int rev)
967 static inline int index_baserev(indexObject *self, int rev)
968 {
968 {
969 const char *data;
969 const char *data;
970 int result;
970
971
971 if (rev >= self->length) {
972 if (rev >= self->length) {
972 PyObject *tuple =
973 PyObject *tuple =
973 PyList_GET_ITEM(self->added, rev - self->length);
974 PyList_GET_ITEM(self->added, rev - self->length);
974 long ret;
975 long ret;
975 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 3), &ret)) {
976 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 3), &ret)) {
976 return -2;
977 return -2;
977 }
978 }
978 return (int)ret;
979 result = (int)ret;
979 } else {
980 } else {
980 data = index_deref(self, rev);
981 data = index_deref(self, rev);
981 if (data == NULL) {
982 if (data == NULL) {
982 return -2;
983 return -2;
983 }
984 }
984
985
985 return getbe32(data + 16);
986 result = getbe32(data + 16);
986 }
987 }
988 if (result > rev) {
989 PyErr_Format(
990 PyExc_ValueError,
991 "corrupted revlog, revision base above revision: %d, %d",
992 rev, result);
993 return -2;
994 }
995 return result;
987 }
996 }
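The change in this hunk routes both branches through one final check: a revision's delta base may never point past the revision itself. A hypothetical standalone restatement of that guard, with entry_base(rev) standing in for either the in-memory tuple field or the on-disk 32-bit field read above:

    def checked_baserev(entry_base, rev):
        result = entry_base(rev)
        if result > rev:
            raise ValueError(
                "corrupted revlog, revision base above revision: %d, %d"
                % (rev, result))
        return result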
988
997
989 static PyObject *index_deltachain(indexObject *self, PyObject *args)
998 static PyObject *index_deltachain(indexObject *self, PyObject *args)
990 {
999 {
991 int rev, generaldelta;
1000 int rev, generaldelta;
992 PyObject *stoparg;
1001 PyObject *stoparg;
993 int stoprev, iterrev, baserev = -1;
1002 int stoprev, iterrev, baserev = -1;
994 int stopped;
1003 int stopped;
995 PyObject *chain = NULL, *result = NULL;
1004 PyObject *chain = NULL, *result = NULL;
996 const Py_ssize_t length = index_length(self);
1005 const Py_ssize_t length = index_length(self);
997
1006
998 if (!PyArg_ParseTuple(args, "iOi", &rev, &stoparg, &generaldelta)) {
1007 if (!PyArg_ParseTuple(args, "iOi", &rev, &stoparg, &generaldelta)) {
999 return NULL;
1008 return NULL;
1000 }
1009 }
1001
1010
1002 if (PyInt_Check(stoparg)) {
1011 if (PyInt_Check(stoparg)) {
1003 stoprev = (int)PyInt_AsLong(stoparg);
1012 stoprev = (int)PyInt_AsLong(stoparg);
1004 if (stoprev == -1 && PyErr_Occurred()) {
1013 if (stoprev == -1 && PyErr_Occurred()) {
1005 return NULL;
1014 return NULL;
1006 }
1015 }
1007 } else if (stoparg == Py_None) {
1016 } else if (stoparg == Py_None) {
1008 stoprev = -2;
1017 stoprev = -2;
1009 } else {
1018 } else {
1010 PyErr_SetString(PyExc_ValueError,
1019 PyErr_SetString(PyExc_ValueError,
1011 "stoprev must be integer or None");
1020 "stoprev must be integer or None");
1012 return NULL;
1021 return NULL;
1013 }
1022 }
1014
1023
1015 if (rev < 0 || rev >= length) {
1024 if (rev < 0 || rev >= length) {
1016 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
1025 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
1017 return NULL;
1026 return NULL;
1018 }
1027 }
1019
1028
1020 chain = PyList_New(0);
1029 chain = PyList_New(0);
1021 if (chain == NULL) {
1030 if (chain == NULL) {
1022 return NULL;
1031 return NULL;
1023 }
1032 }
1024
1033
1025 baserev = index_baserev(self, rev);
1034 baserev = index_baserev(self, rev);
1026
1035
1027 /* This should never happen. */
1036 /* This should never happen. */
1028 if (baserev <= -2) {
1037 if (baserev <= -2) {
1029 /* Error should be set by index_deref() */
1038 /* Error should be set by index_deref() */
1030 assert(PyErr_Occurred());
1039 assert(PyErr_Occurred());
1031 goto bail;
1040 goto bail;
1032 }
1041 }
1033
1042
1034 iterrev = rev;
1043 iterrev = rev;
1035
1044
1036 while (iterrev != baserev && iterrev != stoprev) {
1045 while (iterrev != baserev && iterrev != stoprev) {
1037 PyObject *value = PyInt_FromLong(iterrev);
1046 PyObject *value = PyInt_FromLong(iterrev);
1038 if (value == NULL) {
1047 if (value == NULL) {
1039 goto bail;
1048 goto bail;
1040 }
1049 }
1041 if (PyList_Append(chain, value)) {
1050 if (PyList_Append(chain, value)) {
1042 Py_DECREF(value);
1051 Py_DECREF(value);
1043 goto bail;
1052 goto bail;
1044 }
1053 }
1045 Py_DECREF(value);
1054 Py_DECREF(value);
1046
1055
1047 if (generaldelta) {
1056 if (generaldelta) {
1048 iterrev = baserev;
1057 iterrev = baserev;
1049 } else {
1058 } else {
1050 iterrev--;
1059 iterrev--;
1051 }
1060 }
1052
1061
1053 if (iterrev < 0) {
1062 if (iterrev < 0) {
1054 break;
1063 break;
1055 }
1064 }
1056
1065
1057 if (iterrev >= length) {
1066 if (iterrev >= length) {
1058 PyErr_SetString(PyExc_IndexError,
1067 PyErr_SetString(PyExc_IndexError,
1059 "revision outside index");
1068 "revision outside index");
1060 return NULL;
1069 return NULL;
1061 }
1070 }
1062
1071
1063 baserev = index_baserev(self, iterrev);
1072 baserev = index_baserev(self, iterrev);
1064
1073
1065 /* This should never happen. */
1074 /* This should never happen. */
1066 if (baserev <= -2) {
1075 if (baserev <= -2) {
1067 /* Error should be set by index_deref() */
1076 /* Error should be set by index_deref() */
1068 assert(PyErr_Occurred());
1077 assert(PyErr_Occurred());
1069 goto bail;
1078 goto bail;
1070 }
1079 }
1071 }
1080 }
1072
1081
1073 if (iterrev == stoprev) {
1082 if (iterrev == stoprev) {
1074 stopped = 1;
1083 stopped = 1;
1075 } else {
1084 } else {
1076 PyObject *value = PyInt_FromLong(iterrev);
1085 PyObject *value = PyInt_FromLong(iterrev);
1077 if (value == NULL) {
1086 if (value == NULL) {
1078 goto bail;
1087 goto bail;
1079 }
1088 }
1080 if (PyList_Append(chain, value)) {
1089 if (PyList_Append(chain, value)) {
1081 Py_DECREF(value);
1090 Py_DECREF(value);
1082 goto bail;
1091 goto bail;
1083 }
1092 }
1084 Py_DECREF(value);
1093 Py_DECREF(value);
1085
1094
1086 stopped = 0;
1095 stopped = 0;
1087 }
1096 }
1088
1097
1089 if (PyList_Reverse(chain)) {
1098 if (PyList_Reverse(chain)) {
1090 goto bail;
1099 goto bail;
1091 }
1100 }
1092
1101
1093 result = Py_BuildValue("OO", chain, stopped ? Py_True : Py_False);
1102 result = Py_BuildValue("OO", chain, stopped ? Py_True : Py_False);
1094 Py_DECREF(chain);
1103 Py_DECREF(chain);
1095 return result;
1104 return result;
1096
1105
1097 bail:
1106 bail:
1098 Py_DECREF(chain);
1107 Py_DECREF(chain);
1099 return NULL;
1108 return NULL;
1100 }
1109 }
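index_deltachain walks from rev down to the snapshot its deltas build on: with generaldelta each entry names its base explicitly, otherwise the chain is simply the run of consecutive revisions ending at the stored base. A simplified sketch, where baserev(rev) is a hypothetical accessor returning the base (equal to rev for a full snapshot) and stoprev=None means no stop point:

    def deltachain(baserev, rev, stoprev=None, generaldelta=True):
        chain = []
        iterrev, base = rev, baserev(rev)
        while iterrev != base and iterrev != stoprev:
            chain.append(iterrev)
            iterrev = base if generaldelta else iterrev - 1
            if iterrev < 0:
                break
            base = baserev(iterrev)
        if iterrev == stoprev:
            stopped = True
        else:
            chain.append(iterrev)           # the snapshot at the bottom
            stopped = False
        chain.reverse()
        return chain, stopped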
1101
1110
1102 static inline int64_t
1111 static inline int64_t
1103 index_segment_span(indexObject *self, Py_ssize_t start_rev, Py_ssize_t end_rev)
1112 index_segment_span(indexObject *self, Py_ssize_t start_rev, Py_ssize_t end_rev)
1104 {
1113 {
1105 int64_t start_offset;
1114 int64_t start_offset;
1106 int64_t end_offset;
1115 int64_t end_offset;
1107 int end_size;
1116 int end_size;
1108 start_offset = index_get_start(self, start_rev);
1117 start_offset = index_get_start(self, start_rev);
1109 if (start_offset < 0) {
1118 if (start_offset < 0) {
1110 return -1;
1119 return -1;
1111 }
1120 }
1112 end_offset = index_get_start(self, end_rev);
1121 end_offset = index_get_start(self, end_rev);
1113 if (end_offset < 0) {
1122 if (end_offset < 0) {
1114 return -1;
1123 return -1;
1115 }
1124 }
1116 end_size = index_get_length(self, end_rev);
1125 end_size = index_get_length(self, end_rev);
1117 if (end_size < 0) {
1126 if (end_size < 0) {
1118 return -1;
1127 return -1;
1119 }
1128 }
1120 if (end_offset < start_offset) {
1129 if (end_offset < start_offset) {
1121 PyErr_Format(PyExc_ValueError,
1130 PyErr_Format(PyExc_ValueError,
1122 "corrupted revlog index: inconsistent offset "
1131 "corrupted revlog index: inconsistent offset "
1123 "between revisions (%zd) and (%zd)",
1132 "between revisions (%zd) and (%zd)",
1124 start_rev, end_rev);
1133 start_rev, end_rev);
1125 return -1;
1134 return -1;
1126 }
1135 }
1127 return (end_offset - start_offset) + (int64_t)end_size;
1136 return (end_offset - start_offset) + (int64_t)end_size;
1128 }
1137 }
1129
1138
1130 /* returns endidx so that revs[startidx:endidx] has no empty trailing revs */
1139 /* returns endidx so that revs[startidx:endidx] has no empty trailing revs */
1131 static Py_ssize_t trim_endidx(indexObject *self, const Py_ssize_t *revs,
1140 static Py_ssize_t trim_endidx(indexObject *self, const Py_ssize_t *revs,
1132 Py_ssize_t startidx, Py_ssize_t endidx)
1141 Py_ssize_t startidx, Py_ssize_t endidx)
1133 {
1142 {
1134 int length;
1143 int length;
1135 while (endidx > 1 && endidx > startidx) {
1144 while (endidx > 1 && endidx > startidx) {
1136 length = index_get_length(self, revs[endidx - 1]);
1145 length = index_get_length(self, revs[endidx - 1]);
1137 if (length < 0) {
1146 if (length < 0) {
1138 return -1;
1147 return -1;
1139 }
1148 }
1140 if (length != 0) {
1149 if (length != 0) {
1141 break;
1150 break;
1142 }
1151 }
1143 endidx -= 1;
1152 endidx -= 1;
1144 }
1153 }
1145 return endidx;
1154 return endidx;
1146 }
1155 }
1147
1156
1148 struct Gap {
1157 struct Gap {
1149 int64_t size;
1158 int64_t size;
1150 Py_ssize_t idx;
1159 Py_ssize_t idx;
1151 };
1160 };
1152
1161
1153 static int gap_compare(const void *left, const void *right)
1162 static int gap_compare(const void *left, const void *right)
1154 {
1163 {
1155 const struct Gap *l_left = ((const struct Gap *)left);
1164 const struct Gap *l_left = ((const struct Gap *)left);
1156 const struct Gap *l_right = ((const struct Gap *)right);
1165 const struct Gap *l_right = ((const struct Gap *)right);
1157 if (l_left->size < l_right->size) {
1166 if (l_left->size < l_right->size) {
1158 return -1;
1167 return -1;
1159 } else if (l_left->size > l_right->size) {
1168 } else if (l_left->size > l_right->size) {
1160 return 1;
1169 return 1;
1161 }
1170 }
1162 return 0;
1171 return 0;
1163 }
1172 }
1164 static int Py_ssize_t_compare(const void *left, const void *right)
1173 static int Py_ssize_t_compare(const void *left, const void *right)
1165 {
1174 {
1166 const Py_ssize_t l_left = *(const Py_ssize_t *)left;
1175 const Py_ssize_t l_left = *(const Py_ssize_t *)left;
1167 const Py_ssize_t l_right = *(const Py_ssize_t *)right;
1176 const Py_ssize_t l_right = *(const Py_ssize_t *)right;
1168 if (l_left < l_right) {
1177 if (l_left < l_right) {
1169 return -1;
1178 return -1;
1170 } else if (l_left > l_right) {
1179 } else if (l_left > l_right) {
1171 return 1;
1180 return 1;
1172 }
1181 }
1173 return 0;
1182 return 0;
1174 }
1183 }
1175
1184
1176 static PyObject *index_slicechunktodensity(indexObject *self, PyObject *args)
1185 static PyObject *index_slicechunktodensity(indexObject *self, PyObject *args)
1177 {
1186 {
1178 /* method arguments */
1187 /* method arguments */
1179 PyObject *list_revs = NULL; /* revisions in the chain */
1188 PyObject *list_revs = NULL; /* revisions in the chain */
1180 double targetdensity = 0; /* min density to achieve */
1189 double targetdensity = 0; /* min density to achieve */
1181 Py_ssize_t mingapsize = 0; /* threshold to ignore gaps */
1190 Py_ssize_t mingapsize = 0; /* threshold to ignore gaps */
1182
1191
1183 /* other core variables */
1192 /* other core variables */
1184 Py_ssize_t idxlen = index_length(self);
1193 Py_ssize_t idxlen = index_length(self);
1185 Py_ssize_t i; /* used for various iterations */
1194 Py_ssize_t i; /* used for various iterations */
1186 PyObject *result = NULL; /* the final return value of the function */
1195 PyObject *result = NULL; /* the final return value of the function */
1187
1196
1188 /* generic information about the delta chain being sliced */
1197 /* generic information about the delta chain being sliced */
1189 Py_ssize_t num_revs = 0; /* size of the full delta chain */
1198 Py_ssize_t num_revs = 0; /* size of the full delta chain */
1190 Py_ssize_t *revs = NULL; /* native array of revisions in the chain */
1199 Py_ssize_t *revs = NULL; /* native array of revisions in the chain */
1191 int64_t chainpayload = 0; /* sum of all deltas in the chain */
1200 int64_t chainpayload = 0; /* sum of all deltas in the chain */
1192 int64_t deltachainspan = 0; /* distance from first byte to last byte */
1201 int64_t deltachainspan = 0; /* distance from first byte to last byte */
1193
1202
1194 /* variables used for slicing the delta chain */
1203 /* variables used for slicing the delta chain */
1195 int64_t readdata = 0; /* amount of data currently planned to be read */
1204 int64_t readdata = 0; /* amount of data currently planned to be read */
1196 double density = 0; /* ratio of payload data to data actually read */
1205 double density = 0; /* ratio of payload data to data actually read */
1197 int64_t previous_end;
1206 int64_t previous_end;
1198 struct Gap *gaps = NULL; /* array of notable gaps in the chain */
1207 struct Gap *gaps = NULL; /* array of notable gaps in the chain */
1199 Py_ssize_t num_gaps =
1208 Py_ssize_t num_gaps =
1200 0; /* total number of notable gaps recorded so far */
1209 0; /* total number of notable gaps recorded so far */
1201 Py_ssize_t *selected_indices = NULL; /* indices of gaps skipped over */
1210 Py_ssize_t *selected_indices = NULL; /* indices of gaps skipped over */
1202 Py_ssize_t num_selected = 0; /* number of gaps skipped */
1211 Py_ssize_t num_selected = 0; /* number of gaps skipped */
1203 PyObject *chunk = NULL; /* individual slice */
1212 PyObject *chunk = NULL; /* individual slice */
1204 PyObject *allchunks = NULL; /* all slices */
1213 PyObject *allchunks = NULL; /* all slices */
1205 Py_ssize_t previdx;
1214 Py_ssize_t previdx;
1206
1215
1207 /* parsing argument */
1216 /* parsing argument */
1208 if (!PyArg_ParseTuple(args, "O!dn", &PyList_Type, &list_revs,
1217 if (!PyArg_ParseTuple(args, "O!dn", &PyList_Type, &list_revs,
1209 &targetdensity, &mingapsize)) {
1218 &targetdensity, &mingapsize)) {
1210 goto bail;
1219 goto bail;
1211 }
1220 }
1212
1221
1213 /* If the delta chain contains a single element, we do not need slicing
1222 /* If the delta chain contains a single element, we do not need slicing
1214 */
1223 */
1215 num_revs = PyList_GET_SIZE(list_revs);
1224 num_revs = PyList_GET_SIZE(list_revs);
1216 if (num_revs <= 1) {
1225 if (num_revs <= 1) {
1217 result = PyTuple_Pack(1, list_revs);
1226 result = PyTuple_Pack(1, list_revs);
1218 goto done;
1227 goto done;
1219 }
1228 }
1220
1229
1221 /* Turn the python list into a native integer array (for efficiency) */
1230 /* Turn the python list into a native integer array (for efficiency) */
1222 revs = (Py_ssize_t *)calloc(num_revs, sizeof(Py_ssize_t));
1231 revs = (Py_ssize_t *)calloc(num_revs, sizeof(Py_ssize_t));
1223 if (revs == NULL) {
1232 if (revs == NULL) {
1224 PyErr_NoMemory();
1233 PyErr_NoMemory();
1225 goto bail;
1234 goto bail;
1226 }
1235 }
1227 for (i = 0; i < num_revs; i++) {
1236 for (i = 0; i < num_revs; i++) {
1228 Py_ssize_t revnum = PyInt_AsLong(PyList_GET_ITEM(list_revs, i));
1237 Py_ssize_t revnum = PyInt_AsLong(PyList_GET_ITEM(list_revs, i));
1229 if (revnum == -1 && PyErr_Occurred()) {
1238 if (revnum == -1 && PyErr_Occurred()) {
1230 goto bail;
1239 goto bail;
1231 }
1240 }
1232 if (revnum < nullrev || revnum >= idxlen) {
1241 if (revnum < nullrev || revnum >= idxlen) {
1233 PyErr_Format(PyExc_IndexError,
1242 PyErr_Format(PyExc_IndexError,
1234 "index out of range: %zd", revnum);
1243 "index out of range: %zd", revnum);
1235 goto bail;
1244 goto bail;
1236 }
1245 }
1237 revs[i] = revnum;
1246 revs[i] = revnum;
1238 }
1247 }
1239
1248
1240 /* Compute and check various properties of the unsliced delta chain */
1249 /* Compute and check various properties of the unsliced delta chain */
1241 deltachainspan = index_segment_span(self, revs[0], revs[num_revs - 1]);
1250 deltachainspan = index_segment_span(self, revs[0], revs[num_revs - 1]);
1242 if (deltachainspan < 0) {
1251 if (deltachainspan < 0) {
1243 goto bail;
1252 goto bail;
1244 }
1253 }
1245
1254
1246 if (deltachainspan <= mingapsize) {
1255 if (deltachainspan <= mingapsize) {
1247 result = PyTuple_Pack(1, list_revs);
1256 result = PyTuple_Pack(1, list_revs);
1248 goto done;
1257 goto done;
1249 }
1258 }
1250 chainpayload = 0;
1259 chainpayload = 0;
1251 for (i = 0; i < num_revs; i++) {
1260 for (i = 0; i < num_revs; i++) {
1252 int tmp = index_get_length(self, revs[i]);
1261 int tmp = index_get_length(self, revs[i]);
1253 if (tmp < 0) {
1262 if (tmp < 0) {
1254 goto bail;
1263 goto bail;
1255 }
1264 }
1256 chainpayload += tmp;
1265 chainpayload += tmp;
1257 }
1266 }
1258
1267
1259 readdata = deltachainspan;
1268 readdata = deltachainspan;
1260 density = 1.0;
1269 density = 1.0;
1261
1270
1262 if (0 < deltachainspan) {
1271 if (0 < deltachainspan) {
1263 density = (double)chainpayload / (double)deltachainspan;
1272 density = (double)chainpayload / (double)deltachainspan;
1264 }
1273 }
1265
1274
1266 if (density >= targetdensity) {
1275 if (density >= targetdensity) {
1267 result = PyTuple_Pack(1, list_revs);
1276 result = PyTuple_Pack(1, list_revs);
1268 goto done;
1277 goto done;
1269 }
1278 }
1270
1279
1271 /* if chain is too sparse, look for relevant gaps */
1280 /* if chain is too sparse, look for relevant gaps */
1272 gaps = (struct Gap *)calloc(num_revs, sizeof(struct Gap));
1281 gaps = (struct Gap *)calloc(num_revs, sizeof(struct Gap));
1273 if (gaps == NULL) {
1282 if (gaps == NULL) {
1274 PyErr_NoMemory();
1283 PyErr_NoMemory();
1275 goto bail;
1284 goto bail;
1276 }
1285 }
1277
1286
1278 previous_end = -1;
1287 previous_end = -1;
1279 for (i = 0; i < num_revs; i++) {
1288 for (i = 0; i < num_revs; i++) {
1280 int64_t revstart;
1289 int64_t revstart;
1281 int revsize;
1290 int revsize;
1282 revstart = index_get_start(self, revs[i]);
1291 revstart = index_get_start(self, revs[i]);
1283 if (revstart < 0) {
1292 if (revstart < 0) {
1284 goto bail;
1293 goto bail;
1285 };
1294 };
1286 revsize = index_get_length(self, revs[i]);
1295 revsize = index_get_length(self, revs[i]);
1287 if (revsize < 0) {
1296 if (revsize < 0) {
1288 goto bail;
1297 goto bail;
1289 };
1298 };
1290 if (revsize == 0) {
1299 if (revsize == 0) {
1291 continue;
1300 continue;
1292 }
1301 }
1293 if (previous_end >= 0) {
1302 if (previous_end >= 0) {
1294 int64_t gapsize = revstart - previous_end;
1303 int64_t gapsize = revstart - previous_end;
1295 if (gapsize > mingapsize) {
1304 if (gapsize > mingapsize) {
1296 gaps[num_gaps].size = gapsize;
1305 gaps[num_gaps].size = gapsize;
1297 gaps[num_gaps].idx = i;
1306 gaps[num_gaps].idx = i;
1298 num_gaps += 1;
1307 num_gaps += 1;
1299 }
1308 }
1300 }
1309 }
1301 previous_end = revstart + revsize;
1310 previous_end = revstart + revsize;
1302 }
1311 }
1303 if (num_gaps == 0) {
1312 if (num_gaps == 0) {
1304 result = PyTuple_Pack(1, list_revs);
1313 result = PyTuple_Pack(1, list_revs);
1305 goto done;
1314 goto done;
1306 }
1315 }
1307 qsort(gaps, num_gaps, sizeof(struct Gap), &gap_compare);
1316 qsort(gaps, num_gaps, sizeof(struct Gap), &gap_compare);
1308
1317
1309 /* Slice the largest gaps first; they improve the density the most */
1318 /* Slice the largest gaps first; they improve the density the most */
1310 selected_indices =
1319 selected_indices =
1311 (Py_ssize_t *)malloc((num_gaps + 1) * sizeof(Py_ssize_t));
1320 (Py_ssize_t *)malloc((num_gaps + 1) * sizeof(Py_ssize_t));
1312 if (selected_indices == NULL) {
1321 if (selected_indices == NULL) {
1313 PyErr_NoMemory();
1322 PyErr_NoMemory();
1314 goto bail;
1323 goto bail;
1315 }
1324 }
1316
1325
1317 for (i = num_gaps - 1; i >= 0; i--) {
1326 for (i = num_gaps - 1; i >= 0; i--) {
1318 selected_indices[num_selected] = gaps[i].idx;
1327 selected_indices[num_selected] = gaps[i].idx;
1319 readdata -= gaps[i].size;
1328 readdata -= gaps[i].size;
1320 num_selected += 1;
1329 num_selected += 1;
1321 if (readdata <= 0) {
1330 if (readdata <= 0) {
1322 density = 1.0;
1331 density = 1.0;
1323 } else {
1332 } else {
1324 density = (double)chainpayload / (double)readdata;
1333 density = (double)chainpayload / (double)readdata;
1325 }
1334 }
1326 if (density >= targetdensity) {
1335 if (density >= targetdensity) {
1327 break;
1336 break;
1328 }
1337 }
1329 }
1338 }
1330 qsort(selected_indices, num_selected, sizeof(Py_ssize_t),
1339 qsort(selected_indices, num_selected, sizeof(Py_ssize_t),
1331 &Py_ssize_t_compare);
1340 &Py_ssize_t_compare);
1332
1341
1333 /* create the resulting slice */
1342 /* create the resulting slice */
1334 allchunks = PyList_New(0);
1343 allchunks = PyList_New(0);
1335 if (allchunks == NULL) {
1344 if (allchunks == NULL) {
1336 goto bail;
1345 goto bail;
1337 }
1346 }
1338 previdx = 0;
1347 previdx = 0;
1339 selected_indices[num_selected] = num_revs;
1348 selected_indices[num_selected] = num_revs;
1340 for (i = 0; i <= num_selected; i++) {
1349 for (i = 0; i <= num_selected; i++) {
1341 Py_ssize_t idx = selected_indices[i];
1350 Py_ssize_t idx = selected_indices[i];
1342 Py_ssize_t endidx = trim_endidx(self, revs, previdx, idx);
1351 Py_ssize_t endidx = trim_endidx(self, revs, previdx, idx);
1343 if (endidx < 0) {
1352 if (endidx < 0) {
1344 goto bail;
1353 goto bail;
1345 }
1354 }
1346 if (previdx < endidx) {
1355 if (previdx < endidx) {
1347 chunk = PyList_GetSlice(list_revs, previdx, endidx);
1356 chunk = PyList_GetSlice(list_revs, previdx, endidx);
1348 if (chunk == NULL) {
1357 if (chunk == NULL) {
1349 goto bail;
1358 goto bail;
1350 }
1359 }
1351 if (PyList_Append(allchunks, chunk) == -1) {
1360 if (PyList_Append(allchunks, chunk) == -1) {
1352 goto bail;
1361 goto bail;
1353 }
1362 }
1354 Py_DECREF(chunk);
1363 Py_DECREF(chunk);
1355 chunk = NULL;
1364 chunk = NULL;
1356 }
1365 }
1357 previdx = idx;
1366 previdx = idx;
1358 }
1367 }
1359 result = allchunks;
1368 result = allchunks;
1360 goto done;
1369 goto done;
1361
1370
1362 bail:
1371 bail:
1363 Py_XDECREF(allchunks);
1372 Py_XDECREF(allchunks);
1364 Py_XDECREF(chunk);
1373 Py_XDECREF(chunk);
1365 done:
1374 done:
1366 free(revs);
1375 free(revs);
1367 free(gaps);
1376 free(gaps);
1368 free(selected_indices);
1377 free(selected_indices);
1369 return result;
1378 return result;
1370 }
1379 }
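The slicing above boils down to: if reading the whole chain in one go is dense enough, keep it; otherwise cut it at its largest on-disk gaps, biggest first, until the data that would still be read is dense enough. A condensed Python sketch under the assumption that start(rev) and length(rev) return a revision's data offset and stored size (trailing empty revisions are not trimmed here, unlike trim_endidx above):

    def slice_to_density(revs, start, length, targetdensity, mingapsize=0):
        if len(revs) <= 1:
            return [revs]
        sizes = [length(r) for r in revs]
        span = (start(revs[-1]) + sizes[-1]) - start(revs[0])
        payload = sum(sizes)
        if span <= mingapsize or payload >= targetdensity * span:
            return [revs]
        gaps, prev_end = [], None
        for i, r in enumerate(revs):        # gaps between non-empty revisions
            if sizes[i] == 0:
                continue
            if prev_end is not None and start(r) - prev_end > mingapsize:
                gaps.append((start(r) - prev_end, i))
            prev_end = start(r) + sizes[i]
        if not gaps:
            return [revs]
        readdata, cuts = span, []
        for size, idx in sorted(gaps, reverse=True):
            cuts.append(idx)                # skip the largest gaps first
            readdata -= size
            if readdata <= 0 or payload >= targetdensity * readdata:
                break
        chunks, prev = [], 0
        for idx in sorted(cuts) + [len(revs)]:
            if prev < idx:
                chunks.append(revs[prev:idx])
            prev = idx
        return chunks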
1371
1380
1372 static inline int nt_level(const char *node, Py_ssize_t level)
1381 static inline int nt_level(const char *node, Py_ssize_t level)
1373 {
1382 {
1374 int v = node[level >> 1];
1383 int v = node[level >> 1];
1375 if (!(level & 1))
1384 if (!(level & 1))
1376 v >>= 4;
1385 v >>= 4;
1377 return v & 0xf;
1386 return v & 0xf;
1378 }
1387 }
1379
1388
1380 /*
1389 /*
1381 * Return values:
1390 * Return values:
1382 *
1391 *
1383 * -4: match is ambiguous (multiple candidates)
1392 * -4: match is ambiguous (multiple candidates)
1384 * -2: not found
1393 * -2: not found
1385 * rest: valid rev
1394 * rest: valid rev
1386 */
1395 */
1387 static int nt_find(nodetree *self, const char *node, Py_ssize_t nodelen,
1396 static int nt_find(nodetree *self, const char *node, Py_ssize_t nodelen,
1388 int hex)
1397 int hex)
1389 {
1398 {
1390 int (*getnybble)(const char *, Py_ssize_t) = hex ? hexdigit : nt_level;
1399 int (*getnybble)(const char *, Py_ssize_t) = hex ? hexdigit : nt_level;
1391 int level, maxlevel, off;
1400 int level, maxlevel, off;
1392
1401
1393 if (nodelen == 20 && node[0] == '\0' && memcmp(node, nullid, 20) == 0)
1402 if (nodelen == 20 && node[0] == '\0' && memcmp(node, nullid, 20) == 0)
1394 return -1;
1403 return -1;
1395
1404
1396 if (hex)
1405 if (hex)
1397 maxlevel = nodelen > 40 ? 40 : (int)nodelen;
1406 maxlevel = nodelen > 40 ? 40 : (int)nodelen;
1398 else
1407 else
1399 maxlevel = nodelen > 20 ? 40 : ((int)nodelen * 2);
1408 maxlevel = nodelen > 20 ? 40 : ((int)nodelen * 2);
1400
1409
1401 for (level = off = 0; level < maxlevel; level++) {
1410 for (level = off = 0; level < maxlevel; level++) {
1402 int k = getnybble(node, level);
1411 int k = getnybble(node, level);
1403 nodetreenode *n = &self->nodes[off];
1412 nodetreenode *n = &self->nodes[off];
1404 int v = n->children[k];
1413 int v = n->children[k];
1405
1414
1406 if (v < 0) {
1415 if (v < 0) {
1407 const char *n;
1416 const char *n;
1408 Py_ssize_t i;
1417 Py_ssize_t i;
1409
1418
1410 v = -(v + 2);
1419 v = -(v + 2);
1411 n = index_node(self->index, v);
1420 n = index_node(self->index, v);
1412 if (n == NULL)
1421 if (n == NULL)
1413 return -2;
1422 return -2;
1414 for (i = level; i < maxlevel; i++)
1423 for (i = level; i < maxlevel; i++)
1415 if (getnybble(node, i) != nt_level(n, i))
1424 if (getnybble(node, i) != nt_level(n, i))
1416 return -2;
1425 return -2;
1417 return v;
1426 return v;
1418 }
1427 }
1419 if (v == 0)
1428 if (v == 0)
1420 return -2;
1429 return -2;
1421 off = v;
1430 off = v;
1422 }
1431 }
1423 /* multiple matches against an ambiguous prefix */
1432 /* multiple matches against an ambiguous prefix */
1424 return -4;
1433 return -4;
1425 }
1434 }
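nt_find descends the 16-way radix tree one hex nybble per level: a positive child value is the offset of another node block, a negative value encodes a single stored revision as -(rev + 2), and zero means the slot is empty. A small Python model of the lookup, with index_hex(rev) a hypothetical accessor returning the full hex nodeid of a revision (the special handling of the null node is omitted):

    def nt_find(nodes, index_hex, prefix_hex):
        """nodes: list of 16-entry child arrays, nodes[0] being the root.
        Returns a revision, -2 for 'not found', -4 for an ambiguous prefix."""
        off = 0
        for digit in prefix_hex:
            v = nodes[off][int(digit, 16)]
            if v < 0:                       # leaf: one stored revision
                rev = -(v + 2)
                if index_hex(rev).startswith(prefix_hex):
                    return rev
                return -2
            if v == 0:                      # empty slot
                return -2
            off = v                         # descend into the child block
        return -4                           # several candidates share the prefix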
1426
1435
1427 static int nt_new(nodetree *self)
1436 static int nt_new(nodetree *self)
1428 {
1437 {
1429 if (self->length == self->capacity) {
1438 if (self->length == self->capacity) {
1430 unsigned newcapacity;
1439 unsigned newcapacity;
1431 nodetreenode *newnodes;
1440 nodetreenode *newnodes;
1432 newcapacity = self->capacity * 2;
1441 newcapacity = self->capacity * 2;
1433 if (newcapacity >= INT_MAX / sizeof(nodetreenode)) {
1442 if (newcapacity >= INT_MAX / sizeof(nodetreenode)) {
1434 PyErr_SetString(PyExc_MemoryError,
1443 PyErr_SetString(PyExc_MemoryError,
1435 "overflow in nt_new");
1444 "overflow in nt_new");
1436 return -1;
1445 return -1;
1437 }
1446 }
1438 newnodes =
1447 newnodes =
1439 realloc(self->nodes, newcapacity * sizeof(nodetreenode));
1448 realloc(self->nodes, newcapacity * sizeof(nodetreenode));
1440 if (newnodes == NULL) {
1449 if (newnodes == NULL) {
1441 PyErr_SetString(PyExc_MemoryError, "out of memory");
1450 PyErr_SetString(PyExc_MemoryError, "out of memory");
1442 return -1;
1451 return -1;
1443 }
1452 }
1444 self->capacity = newcapacity;
1453 self->capacity = newcapacity;
1445 self->nodes = newnodes;
1454 self->nodes = newnodes;
1446 memset(&self->nodes[self->length], 0,
1455 memset(&self->nodes[self->length], 0,
1447 sizeof(nodetreenode) * (self->capacity - self->length));
1456 sizeof(nodetreenode) * (self->capacity - self->length));
1448 }
1457 }
1449 return self->length++;
1458 return self->length++;
1450 }
1459 }
1451
1460
1452 static int nt_insert(nodetree *self, const char *node, int rev)
1461 static int nt_insert(nodetree *self, const char *node, int rev)
1453 {
1462 {
1454 int level = 0;
1463 int level = 0;
1455 int off = 0;
1464 int off = 0;
1456
1465
1457 while (level < 40) {
1466 while (level < 40) {
1458 int k = nt_level(node, level);
1467 int k = nt_level(node, level);
1459 nodetreenode *n;
1468 nodetreenode *n;
1460 int v;
1469 int v;
1461
1470
1462 n = &self->nodes[off];
1471 n = &self->nodes[off];
1463 v = n->children[k];
1472 v = n->children[k];
1464
1473
1465 if (v == 0) {
1474 if (v == 0) {
1466 n->children[k] = -rev - 2;
1475 n->children[k] = -rev - 2;
1467 return 0;
1476 return 0;
1468 }
1477 }
1469 if (v < 0) {
1478 if (v < 0) {
1470 const char *oldnode =
1479 const char *oldnode =
1471 index_node_existing(self->index, -(v + 2));
1480 index_node_existing(self->index, -(v + 2));
1472 int noff;
1481 int noff;
1473
1482
1474 if (oldnode == NULL)
1483 if (oldnode == NULL)
1475 return -1;
1484 return -1;
1476 if (!memcmp(oldnode, node, 20)) {
1485 if (!memcmp(oldnode, node, 20)) {
1477 n->children[k] = -rev - 2;
1486 n->children[k] = -rev - 2;
1478 return 0;
1487 return 0;
1479 }
1488 }
1480 noff = nt_new(self);
1489 noff = nt_new(self);
1481 if (noff == -1)
1490 if (noff == -1)
1482 return -1;
1491 return -1;
1483 /* self->nodes may have been changed by realloc */
1492 /* self->nodes may have been changed by realloc */
1484 self->nodes[off].children[k] = noff;
1493 self->nodes[off].children[k] = noff;
1485 off = noff;
1494 off = noff;
1486 n = &self->nodes[off];
1495 n = &self->nodes[off];
1487 n->children[nt_level(oldnode, ++level)] = v;
1496 n->children[nt_level(oldnode, ++level)] = v;
1488 if (level > self->depth)
1497 if (level > self->depth)
1489 self->depth = level;
1498 self->depth = level;
1490 self->splits += 1;
1499 self->splits += 1;
1491 } else {
1500 } else {
1492 level += 1;
1501 level += 1;
1493 off = v;
1502 off = v;
1494 }
1503 }
1495 }
1504 }
1496
1505
1497 return -1;
1506 return -1;
1498 }
1507 }
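Insertion follows the same path: an empty slot simply takes the new leaf, while a collision with an existing leaf allocates a fresh child block and pushes the old leaf one level deeper, repeating until the two nodeids diverge. A sketch in the same toy encoding as the lookup above (index_hex(rev) again hypothetical):

    def nt_insert(nodes, index_hex, node_hex, rev):
        off, level = 0, 0
        while level < len(node_hex):
            k = int(node_hex[level], 16)
            v = nodes[off][k]
            if v == 0:                      # empty slot: store the new leaf
                nodes[off][k] = -rev - 2
                return
            if v < 0:                       # collision with an existing leaf
                old_hex = index_hex(-(v + 2))
                if old_hex == node_hex:     # same node: just overwrite
                    nodes[off][k] = -rev - 2
                    return
                nodes.append([0] * 16)      # split: push the old leaf deeper
                noff = len(nodes) - 1
                nodes[off][k] = noff
                off, level = noff, level + 1
                nodes[off][int(old_hex[level], 16)] = v
            else:                           # internal node: keep descending
                off, level = v, level + 1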
1499
1508
1500 static PyObject *ntobj_insert(nodetreeObject *self, PyObject *args)
1509 static PyObject *ntobj_insert(nodetreeObject *self, PyObject *args)
1501 {
1510 {
1502 Py_ssize_t rev;
1511 Py_ssize_t rev;
1503 const char *node;
1512 const char *node;
1504 Py_ssize_t length;
1513 Py_ssize_t length;
1505 if (!PyArg_ParseTuple(args, "n", &rev))
1514 if (!PyArg_ParseTuple(args, "n", &rev))
1506 return NULL;
1515 return NULL;
1507 length = index_length(self->nt.index);
1516 length = index_length(self->nt.index);
1508 if (rev < 0 || rev >= length) {
1517 if (rev < 0 || rev >= length) {
1509 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
1518 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
1510 return NULL;
1519 return NULL;
1511 }
1520 }
1512 node = index_node_existing(self->nt.index, rev);
1521 node = index_node_existing(self->nt.index, rev);
1513 if (nt_insert(&self->nt, node, (int)rev) == -1)
1522 if (nt_insert(&self->nt, node, (int)rev) == -1)
1514 return NULL;
1523 return NULL;
1515 Py_RETURN_NONE;
1524 Py_RETURN_NONE;
1516 }
1525 }
1517
1526
1518 static int nt_delete_node(nodetree *self, const char *node)
1527 static int nt_delete_node(nodetree *self, const char *node)
1519 {
1528 {
1520 /* rev==-2 happens to get encoded as 0, which is interpreted as not set
1529 /* rev==-2 happens to get encoded as 0, which is interpreted as not set
1521 */
1530 */
1522 return nt_insert(self, node, -2);
1531 return nt_insert(self, node, -2);
1523 }
1532 }
1524
1533
1525 static int nt_init(nodetree *self, indexObject *index, unsigned capacity)
1534 static int nt_init(nodetree *self, indexObject *index, unsigned capacity)
1526 {
1535 {
1527 /* Initialize before overflow-checking to avoid nt_dealloc() crash. */
1536 /* Initialize before overflow-checking to avoid nt_dealloc() crash. */
1528 self->nodes = NULL;
1537 self->nodes = NULL;
1529
1538
1530 self->index = index;
1539 self->index = index;
1531 /* The input capacity is in terms of revisions, while the field is in
1540 /* The input capacity is in terms of revisions, while the field is in
1532 * terms of nodetree nodes. */
1541 * terms of nodetree nodes. */
1533 self->capacity = (capacity < 4 ? 4 : capacity / 2);
1542 self->capacity = (capacity < 4 ? 4 : capacity / 2);
1534 self->depth = 0;
1543 self->depth = 0;
1535 self->splits = 0;
1544 self->splits = 0;
1536 if ((size_t)self->capacity > INT_MAX / sizeof(nodetreenode)) {
1545 if ((size_t)self->capacity > INT_MAX / sizeof(nodetreenode)) {
1537 PyErr_SetString(PyExc_ValueError, "overflow in init_nt");
1546 PyErr_SetString(PyExc_ValueError, "overflow in init_nt");
1538 return -1;
1547 return -1;
1539 }
1548 }
1540 self->nodes = calloc(self->capacity, sizeof(nodetreenode));
1549 self->nodes = calloc(self->capacity, sizeof(nodetreenode));
1541 if (self->nodes == NULL) {
1550 if (self->nodes == NULL) {
1542 PyErr_NoMemory();
1551 PyErr_NoMemory();
1543 return -1;
1552 return -1;
1544 }
1553 }
1545 self->length = 1;
1554 self->length = 1;
1546 return 0;
1555 return 0;
1547 }
1556 }
1548
1557
1549 static int ntobj_init(nodetreeObject *self, PyObject *args)
1558 static int ntobj_init(nodetreeObject *self, PyObject *args)
1550 {
1559 {
1551 PyObject *index;
1560 PyObject *index;
1552 unsigned capacity;
1561 unsigned capacity;
1553 if (!PyArg_ParseTuple(args, "O!I", &HgRevlogIndex_Type, &index,
1562 if (!PyArg_ParseTuple(args, "O!I", &HgRevlogIndex_Type, &index,
1554 &capacity))
1563 &capacity))
1555 return -1;
1564 return -1;
1556 Py_INCREF(index);
1565 Py_INCREF(index);
1557 return nt_init(&self->nt, (indexObject *)index, capacity);
1566 return nt_init(&self->nt, (indexObject *)index, capacity);
1558 }
1567 }
1559
1568
1560 static int nt_partialmatch(nodetree *self, const char *node, Py_ssize_t nodelen)
1569 static int nt_partialmatch(nodetree *self, const char *node, Py_ssize_t nodelen)
1561 {
1570 {
1562 return nt_find(self, node, nodelen, 1);
1571 return nt_find(self, node, nodelen, 1);
1563 }
1572 }
1564
1573
1565 /*
1574 /*
1566 * Find the length of the shortest unique prefix of node.
1575 * Find the length of the shortest unique prefix of node.
1567 *
1576 *
1568 * Return values:
1577 * Return values:
1569 *
1578 *
1570 * -3: error (exception set)
1579 * -3: error (exception set)
1571 * -2: not found (no exception set)
1580 * -2: not found (no exception set)
1572 * rest: length of shortest prefix
1581 * rest: length of shortest prefix
1573 */
1582 */
1574 static int nt_shortest(nodetree *self, const char *node)
1583 static int nt_shortest(nodetree *self, const char *node)
1575 {
1584 {
1576 int level, off;
1585 int level, off;
1577
1586
1578 for (level = off = 0; level < 40; level++) {
1587 for (level = off = 0; level < 40; level++) {
1579 int k, v;
1588 int k, v;
1580 nodetreenode *n = &self->nodes[off];
1589 nodetreenode *n = &self->nodes[off];
1581 k = nt_level(node, level);
1590 k = nt_level(node, level);
1582 v = n->children[k];
1591 v = n->children[k];
1583 if (v < 0) {
1592 if (v < 0) {
1584 const char *n;
1593 const char *n;
1585 v = -(v + 2);
1594 v = -(v + 2);
1586 n = index_node_existing(self->index, v);
1595 n = index_node_existing(self->index, v);
1587 if (n == NULL)
1596 if (n == NULL)
1588 return -3;
1597 return -3;
1589 if (memcmp(node, n, 20) != 0)
1598 if (memcmp(node, n, 20) != 0)
1590 /*
1599 /*
1591 * Found a unique prefix, but it wasn't for the
1600 * Found a unique prefix, but it wasn't for the
1592 * requested node (i.e. the requested node does
1601 * requested node (i.e. the requested node does
1593 * not exist).
1602 * not exist).
1594 */
1603 */
1595 return -2;
1604 return -2;
1596 return level + 1;
1605 return level + 1;
1597 }
1606 }
1598 if (v == 0)
1607 if (v == 0)
1599 return -2;
1608 return -2;
1600 off = v;
1609 off = v;
1601 }
1610 }
1602 /*
1611 /*
1603 * The node was still not unique after 40 hex digits, so this won't
1612 * The node was still not unique after 40 hex digits, so this won't
1604 * happen. Also, if we get here, then there's a programming error in
1613 * happen. Also, if we get here, then there's a programming error in
1605 * this file that made us insert a node longer than 40 hex digits.
1614 * this file that made us insert a node longer than 40 hex digits.
1606 */
1615 */
1607 PyErr_SetString(PyExc_Exception, "broken node tree");
1616 PyErr_SetString(PyExc_Exception, "broken node tree");
1608 return -3;
1617 return -3;
1609 }
1618 }
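nt_shortest reuses the same walk: the depth at which the lookup first reaches a leaf is the number of hex digits needed to name the node unambiguously. A sketch over the toy encoding used above:

    def nt_shortest(nodes, index_hex, node_hex):
        """Length of the shortest unique prefix of node_hex, or -2 if absent."""
        off = 0
        for level, digit in enumerate(node_hex):
            v = nodes[off][int(digit, 16)]
            if v < 0:
                if index_hex(-(v + 2)) != node_hex:
                    return -2               # unique prefix of a different node
                return level + 1
            if v == 0:
                return -2
            off = v
        raise RuntimeError("broken node tree")  # >40 digits stored: cannot happen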
1610
1619
1611 static PyObject *ntobj_shortest(nodetreeObject *self, PyObject *args)
1620 static PyObject *ntobj_shortest(nodetreeObject *self, PyObject *args)
1612 {
1621 {
1613 PyObject *val;
1622 PyObject *val;
1614 char *node;
1623 char *node;
1615 int length;
1624 int length;
1616
1625
1617 if (!PyArg_ParseTuple(args, "O", &val))
1626 if (!PyArg_ParseTuple(args, "O", &val))
1618 return NULL;
1627 return NULL;
1619 if (node_check(val, &node) == -1)
1628 if (node_check(val, &node) == -1)
1620 return NULL;
1629 return NULL;
1621
1630
1622 length = nt_shortest(&self->nt, node);
1631 length = nt_shortest(&self->nt, node);
1623 if (length == -3)
1632 if (length == -3)
1624 return NULL;
1633 return NULL;
1625 if (length == -2) {
1634 if (length == -2) {
1626 raise_revlog_error();
1635 raise_revlog_error();
1627 return NULL;
1636 return NULL;
1628 }
1637 }
1629 return PyInt_FromLong(length);
1638 return PyInt_FromLong(length);
1630 }
1639 }
1631
1640
1632 static void nt_dealloc(nodetree *self)
1641 static void nt_dealloc(nodetree *self)
1633 {
1642 {
1634 free(self->nodes);
1643 free(self->nodes);
1635 self->nodes = NULL;
1644 self->nodes = NULL;
1636 }
1645 }
1637
1646
1638 static void ntobj_dealloc(nodetreeObject *self)
1647 static void ntobj_dealloc(nodetreeObject *self)
1639 {
1648 {
1640 Py_XDECREF(self->nt.index);
1649 Py_XDECREF(self->nt.index);
1641 nt_dealloc(&self->nt);
1650 nt_dealloc(&self->nt);
1642 PyObject_Del(self);
1651 PyObject_Del(self);
1643 }
1652 }
1644
1653
1645 static PyMethodDef ntobj_methods[] = {
1654 static PyMethodDef ntobj_methods[] = {
1646 {"insert", (PyCFunction)ntobj_insert, METH_VARARGS,
1655 {"insert", (PyCFunction)ntobj_insert, METH_VARARGS,
1647 "insert an index entry"},
1656 "insert an index entry"},
1648 {"shortest", (PyCFunction)ntobj_shortest, METH_VARARGS,
1657 {"shortest", (PyCFunction)ntobj_shortest, METH_VARARGS,
1649 "find length of shortest hex nodeid of a binary ID"},
1658 "find length of shortest hex nodeid of a binary ID"},
1650 {NULL} /* Sentinel */
1659 {NULL} /* Sentinel */
1651 };
1660 };
1652
1661
1653 static PyTypeObject nodetreeType = {
1662 static PyTypeObject nodetreeType = {
1654 PyVarObject_HEAD_INIT(NULL, 0) /* header */
1663 PyVarObject_HEAD_INIT(NULL, 0) /* header */
1655 "parsers.nodetree", /* tp_name */
1664 "parsers.nodetree", /* tp_name */
1656 sizeof(nodetreeObject), /* tp_basicsize */
1665 sizeof(nodetreeObject), /* tp_basicsize */
1657 0, /* tp_itemsize */
1666 0, /* tp_itemsize */
1658 (destructor)ntobj_dealloc, /* tp_dealloc */
1667 (destructor)ntobj_dealloc, /* tp_dealloc */
1659 0, /* tp_print */
1668 0, /* tp_print */
1660 0, /* tp_getattr */
1669 0, /* tp_getattr */
1661 0, /* tp_setattr */
1670 0, /* tp_setattr */
1662 0, /* tp_compare */
1671 0, /* tp_compare */
1663 0, /* tp_repr */
1672 0, /* tp_repr */
1664 0, /* tp_as_number */
1673 0, /* tp_as_number */
1665 0, /* tp_as_sequence */
1674 0, /* tp_as_sequence */
1666 0, /* tp_as_mapping */
1675 0, /* tp_as_mapping */
1667 0, /* tp_hash */
1676 0, /* tp_hash */
1668 0, /* tp_call */
1677 0, /* tp_call */
1669 0, /* tp_str */
1678 0, /* tp_str */
1670 0, /* tp_getattro */
1679 0, /* tp_getattro */
1671 0, /* tp_setattro */
1680 0, /* tp_setattro */
1672 0, /* tp_as_buffer */
1681 0, /* tp_as_buffer */
1673 Py_TPFLAGS_DEFAULT, /* tp_flags */
1682 Py_TPFLAGS_DEFAULT, /* tp_flags */
1674 "nodetree", /* tp_doc */
1683 "nodetree", /* tp_doc */
1675 0, /* tp_traverse */
1684 0, /* tp_traverse */
1676 0, /* tp_clear */
1685 0, /* tp_clear */
1677 0, /* tp_richcompare */
1686 0, /* tp_richcompare */
1678 0, /* tp_weaklistoffset */
1687 0, /* tp_weaklistoffset */
1679 0, /* tp_iter */
1688 0, /* tp_iter */
1680 0, /* tp_iternext */
1689 0, /* tp_iternext */
1681 ntobj_methods, /* tp_methods */
1690 ntobj_methods, /* tp_methods */
1682 0, /* tp_members */
1691 0, /* tp_members */
1683 0, /* tp_getset */
1692 0, /* tp_getset */
1684 0, /* tp_base */
1693 0, /* tp_base */
1685 0, /* tp_dict */
1694 0, /* tp_dict */
1686 0, /* tp_descr_get */
1695 0, /* tp_descr_get */
1687 0, /* tp_descr_set */
1696 0, /* tp_descr_set */
1688 0, /* tp_dictoffset */
1697 0, /* tp_dictoffset */
1689 (initproc)ntobj_init, /* tp_init */
1698 (initproc)ntobj_init, /* tp_init */
1690 0, /* tp_alloc */
1699 0, /* tp_alloc */
1691 };
1700 };
1692
1701
1693 static int index_init_nt(indexObject *self)
1702 static int index_init_nt(indexObject *self)
1694 {
1703 {
1695 if (!self->ntinitialized) {
1704 if (!self->ntinitialized) {
1696 if (nt_init(&self->nt, self, (int)self->raw_length) == -1) {
1705 if (nt_init(&self->nt, self, (int)self->raw_length) == -1) {
1697 nt_dealloc(&self->nt);
1706 nt_dealloc(&self->nt);
1698 return -1;
1707 return -1;
1699 }
1708 }
1700 if (nt_insert(&self->nt, nullid, -1) == -1) {
1709 if (nt_insert(&self->nt, nullid, -1) == -1) {
1701 nt_dealloc(&self->nt);
1710 nt_dealloc(&self->nt);
1702 return -1;
1711 return -1;
1703 }
1712 }
1704 self->ntinitialized = 1;
1713 self->ntinitialized = 1;
1705 self->ntrev = (int)index_length(self);
1714 self->ntrev = (int)index_length(self);
1706 self->ntlookups = 1;
1715 self->ntlookups = 1;
1707 self->ntmisses = 0;
1716 self->ntmisses = 0;
1708 }
1717 }
1709 return 0;
1718 return 0;
1710 }
1719 }
1711
1720
1712 /*
1721 /*
1713 * Return values:
1722 * Return values:
1714 *
1723 *
1715 * -3: error (exception set)
1724 * -3: error (exception set)
1716 * -2: not found (no exception set)
1725 * -2: not found (no exception set)
1717 * rest: valid rev
1726 * rest: valid rev
1718 */
1727 */
1719 static int index_find_node(indexObject *self, const char *node,
1728 static int index_find_node(indexObject *self, const char *node,
1720 Py_ssize_t nodelen)
1729 Py_ssize_t nodelen)
1721 {
1730 {
1722 int rev;
1731 int rev;
1723
1732
1724 if (index_init_nt(self) == -1)
1733 if (index_init_nt(self) == -1)
1725 return -3;
1734 return -3;
1726
1735
1727 self->ntlookups++;
1736 self->ntlookups++;
1728 rev = nt_find(&self->nt, node, nodelen, 0);
1737 rev = nt_find(&self->nt, node, nodelen, 0);
1729 if (rev >= -1)
1738 if (rev >= -1)
1730 return rev;
1739 return rev;
1731
1740
1732 /*
1741 /*
1733 * For the first handful of lookups, we scan the entire index,
1742 * For the first handful of lookups, we scan the entire index,
1734 * and cache only the matching nodes. This optimizes for cases
1743 * and cache only the matching nodes. This optimizes for cases
1735 * like "hg tip", where only a few nodes are accessed.
1744 * like "hg tip", where only a few nodes are accessed.
1736 *
1745 *
1737 * After that, we cache every node we visit, using a single
1746 * After that, we cache every node we visit, using a single
1738 * scan amortized over multiple lookups. This gives the best
1747 * scan amortized over multiple lookups. This gives the best
1739 * bulk performance, e.g. for "hg log".
1748 * bulk performance, e.g. for "hg log".
1740 */
1749 */
1741 if (self->ntmisses++ < 4) {
1750 if (self->ntmisses++ < 4) {
1742 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1751 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1743 const char *n = index_node_existing(self, rev);
1752 const char *n = index_node_existing(self, rev);
1744 if (n == NULL)
1753 if (n == NULL)
1745 return -3;
1754 return -3;
1746 if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
1755 if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
1747 if (nt_insert(&self->nt, n, rev) == -1)
1756 if (nt_insert(&self->nt, n, rev) == -1)
1748 return -3;
1757 return -3;
1749 break;
1758 break;
1750 }
1759 }
1751 }
1760 }
1752 } else {
1761 } else {
1753 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1762 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1754 const char *n = index_node_existing(self, rev);
1763 const char *n = index_node_existing(self, rev);
1755 if (n == NULL)
1764 if (n == NULL)
1756 return -3;
1765 return -3;
1757 if (nt_insert(&self->nt, n, rev) == -1) {
1766 if (nt_insert(&self->nt, n, rev) == -1) {
1758 self->ntrev = rev + 1;
1767 self->ntrev = rev + 1;
1759 return -3;
1768 return -3;
1760 }
1769 }
1761 if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
1770 if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
1762 break;
1771 break;
1763 }
1772 }
1764 }
1773 }
1765 self->ntrev = rev;
1774 self->ntrev = rev;
1766 }
1775 }
1767
1776
1768 if (rev >= 0)
1777 if (rev >= 0)
1769 return rev;
1778 return rev;
1770 return -2;
1779 return -2;
1771 }
1780 }
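When the tree misses, index_find_node falls back to a reverse scan of the index, with the two regimes described in the comment above: the first few misses cache only the node that was asked for, and later misses cache every node walked past, so one amortised scan serves all subsequent lookups. Roughly, with nt standing for any prefix tree exposing hypothetical lookup/insert methods and index_hex(rev) as before:

    def find_node(index_hex, nt, state, node_hex):
        """state: {'ntrev': first rev not yet cached, 'ntmisses': counter}."""
        rev = nt.lookup(node_hex)           # assumed to return -2 on a miss
        if rev != -2:
            return rev
        state['ntmisses'] += 1
        if state['ntmisses'] < 4:
            for rev in range(state['ntrev'] - 1, -1, -1):
                if index_hex(rev) == node_hex:   # cache only the hit
                    nt.insert(node_hex, rev)
                    return rev
        else:
            for rev in range(state['ntrev'] - 1, -1, -1):
                n = index_hex(rev)
                nt.insert(n, rev)                # cache everything visited
                state['ntrev'] = rev
                if n == node_hex:
                    return rev
        return -2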
1772
1781
1773 static PyObject *index_getitem(indexObject *self, PyObject *value)
1782 static PyObject *index_getitem(indexObject *self, PyObject *value)
1774 {
1783 {
1775 char *node;
1784 char *node;
1776 int rev;
1785 int rev;
1777
1786
1778 if (PyInt_Check(value)) {
1787 if (PyInt_Check(value)) {
1779 long idx;
1788 long idx;
1780 if (!pylong_to_long(value, &idx)) {
1789 if (!pylong_to_long(value, &idx)) {
1781 return NULL;
1790 return NULL;
1782 }
1791 }
1783 return index_get(self, idx);
1792 return index_get(self, idx);
1784 }
1793 }
1785
1794
1786 if (node_check(value, &node) == -1)
1795 if (node_check(value, &node) == -1)
1787 return NULL;
1796 return NULL;
1788 rev = index_find_node(self, node, 20);
1797 rev = index_find_node(self, node, 20);
1789 if (rev >= -1)
1798 if (rev >= -1)
1790 return PyInt_FromLong(rev);
1799 return PyInt_FromLong(rev);
1791 if (rev == -2)
1800 if (rev == -2)
1792 raise_revlog_error();
1801 raise_revlog_error();
1793 return NULL;
1802 return NULL;
1794 }
1803 }
1795
1804
1796 /*
1805 /*
1797 * Fully populate the radix tree.
1806 * Fully populate the radix tree.
1798 */
1807 */
1799 static int index_populate_nt(indexObject *self)
1808 static int index_populate_nt(indexObject *self)
1800 {
1809 {
1801 int rev;
1810 int rev;
1802 if (self->ntrev > 0) {
1811 if (self->ntrev > 0) {
1803 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1812 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1804 const char *n = index_node_existing(self, rev);
1813 const char *n = index_node_existing(self, rev);
1805 if (n == NULL)
1814 if (n == NULL)
1806 return -1;
1815 return -1;
1807 if (nt_insert(&self->nt, n, rev) == -1)
1816 if (nt_insert(&self->nt, n, rev) == -1)
1808 return -1;
1817 return -1;
1809 }
1818 }
1810 self->ntrev = -1;
1819 self->ntrev = -1;
1811 }
1820 }
1812 return 0;
1821 return 0;
1813 }
1822 }
1814
1823
1815 static PyObject *index_partialmatch(indexObject *self, PyObject *args)
1824 static PyObject *index_partialmatch(indexObject *self, PyObject *args)
1816 {
1825 {
1817 const char *fullnode;
1826 const char *fullnode;
1818 int nodelen;
1827 int nodelen;
1819 char *node;
1828 char *node;
1820 int rev, i;
1829 int rev, i;
1821
1830
1822 if (!PyArg_ParseTuple(args, PY23("s#", "y#"), &node, &nodelen))
1831 if (!PyArg_ParseTuple(args, PY23("s#", "y#"), &node, &nodelen))
1823 return NULL;
1832 return NULL;
1824
1833
1825 if (nodelen < 1) {
1834 if (nodelen < 1) {
1826 PyErr_SetString(PyExc_ValueError, "key too short");
1835 PyErr_SetString(PyExc_ValueError, "key too short");
1827 return NULL;
1836 return NULL;
1828 }
1837 }
1829
1838
1830 if (nodelen > 40) {
1839 if (nodelen > 40) {
1831 PyErr_SetString(PyExc_ValueError, "key too long");
1840 PyErr_SetString(PyExc_ValueError, "key too long");
1832 return NULL;
1841 return NULL;
1833 }
1842 }
1834
1843
1835 for (i = 0; i < nodelen; i++)
1844 for (i = 0; i < nodelen; i++)
1836 hexdigit(node, i);
1845 hexdigit(node, i);
1837 if (PyErr_Occurred()) {
1846 if (PyErr_Occurred()) {
1838 /* input contains non-hex characters */
1847 /* input contains non-hex characters */
1839 PyErr_Clear();
1848 PyErr_Clear();
1840 Py_RETURN_NONE;
1849 Py_RETURN_NONE;
1841 }
1850 }
1842
1851
1843 if (index_init_nt(self) == -1)
1852 if (index_init_nt(self) == -1)
1844 return NULL;
1853 return NULL;
1845 if (index_populate_nt(self) == -1)
1854 if (index_populate_nt(self) == -1)
1846 return NULL;
1855 return NULL;
1847 rev = nt_partialmatch(&self->nt, node, nodelen);
1856 rev = nt_partialmatch(&self->nt, node, nodelen);
1848
1857
1849 switch (rev) {
1858 switch (rev) {
1850 case -4:
1859 case -4:
1851 raise_revlog_error();
1860 raise_revlog_error();
1852 return NULL;
1861 return NULL;
1853 case -2:
1862 case -2:
1854 Py_RETURN_NONE;
1863 Py_RETURN_NONE;
1855 case -1:
1864 case -1:
1856 return PyBytes_FromStringAndSize(nullid, 20);
1865 return PyBytes_FromStringAndSize(nullid, 20);
1857 }
1866 }
1858
1867
1859 fullnode = index_node_existing(self, rev);
1868 fullnode = index_node_existing(self, rev);
1860 if (fullnode == NULL) {
1869 if (fullnode == NULL) {
1861 return NULL;
1870 return NULL;
1862 }
1871 }
1863 return PyBytes_FromStringAndSize(fullnode, 20);
1872 return PyBytes_FromStringAndSize(fullnode, 20);
1864 }
1873 }
1865
1874
1866 static PyObject *index_shortest(indexObject *self, PyObject *args)
1875 static PyObject *index_shortest(indexObject *self, PyObject *args)
1867 {
1876 {
1868 PyObject *val;
1877 PyObject *val;
1869 char *node;
1878 char *node;
1870 int length;
1879 int length;
1871
1880
1872 if (!PyArg_ParseTuple(args, "O", &val))
1881 if (!PyArg_ParseTuple(args, "O", &val))
1873 return NULL;
1882 return NULL;
1874 if (node_check(val, &node) == -1)
1883 if (node_check(val, &node) == -1)
1875 return NULL;
1884 return NULL;
1876
1885
1877 self->ntlookups++;
1886 self->ntlookups++;
1878 if (index_init_nt(self) == -1)
1887 if (index_init_nt(self) == -1)
1879 return NULL;
1888 return NULL;
1880 if (index_populate_nt(self) == -1)
1889 if (index_populate_nt(self) == -1)
1881 return NULL;
1890 return NULL;
1882 length = nt_shortest(&self->nt, node);
1891 length = nt_shortest(&self->nt, node);
1883 if (length == -3)
1892 if (length == -3)
1884 return NULL;
1893 return NULL;
1885 if (length == -2) {
1894 if (length == -2) {
1886 raise_revlog_error();
1895 raise_revlog_error();
1887 return NULL;
1896 return NULL;
1888 }
1897 }
1889 return PyInt_FromLong(length);
1898 return PyInt_FromLong(length);
1890 }
1899 }
1891
1900
1892 static PyObject *index_m_get(indexObject *self, PyObject *args)
1901 static PyObject *index_m_get(indexObject *self, PyObject *args)
1893 {
1902 {
1894 PyObject *val;
1903 PyObject *val;
1895 char *node;
1904 char *node;
1896 int rev;
1905 int rev;
1897
1906
1898 if (!PyArg_ParseTuple(args, "O", &val))
1907 if (!PyArg_ParseTuple(args, "O", &val))
1899 return NULL;
1908 return NULL;
1900 if (node_check(val, &node) == -1)
1909 if (node_check(val, &node) == -1)
1901 return NULL;
1910 return NULL;
1902 rev = index_find_node(self, node, 20);
1911 rev = index_find_node(self, node, 20);
1903 if (rev == -3)
1912 if (rev == -3)
1904 return NULL;
1913 return NULL;
1905 if (rev == -2)
1914 if (rev == -2)
1906 Py_RETURN_NONE;
1915 Py_RETURN_NONE;
1907 return PyInt_FromLong(rev);
1916 return PyInt_FromLong(rev);
1908 }
1917 }
1909
1918
1910 static int index_contains(indexObject *self, PyObject *value)
1919 static int index_contains(indexObject *self, PyObject *value)
1911 {
1920 {
1912 char *node;
1921 char *node;
1913
1922
1914 if (PyInt_Check(value)) {
1923 if (PyInt_Check(value)) {
1915 long rev;
1924 long rev;
1916 if (!pylong_to_long(value, &rev)) {
1925 if (!pylong_to_long(value, &rev)) {
1917 return -1;
1926 return -1;
1918 }
1927 }
1919 return rev >= -1 && rev < index_length(self);
1928 return rev >= -1 && rev < index_length(self);
1920 }
1929 }
1921
1930
1922 if (node_check(value, &node) == -1)
1931 if (node_check(value, &node) == -1)
1923 return -1;
1932 return -1;
1924
1933
1925 switch (index_find_node(self, node, 20)) {
1934 switch (index_find_node(self, node, 20)) {
1926 case -3:
1935 case -3:
1927 return -1;
1936 return -1;
1928 case -2:
1937 case -2:
1929 return 0;
1938 return 0;
1930 default:
1939 default:
1931 return 1;
1940 return 1;
1932 }
1941 }
1933 }
1942 }
1934
1943
1935 typedef uint64_t bitmask;
1944 typedef uint64_t bitmask;
1936
1945
1937 /*
1946 /*
1938 * Given a disjoint set of revs, return all candidates for the
1947 * Given a disjoint set of revs, return all candidates for the
1939 * greatest common ancestor. In revset notation, this is the set
1948 * greatest common ancestor. In revset notation, this is the set
1940 * "heads(::a and ::b and ...)"
1949 * "heads(::a and ::b and ...)"
1941 */
1950 */
1942 static PyObject *find_gca_candidates(indexObject *self, const int *revs,
1951 static PyObject *find_gca_candidates(indexObject *self, const int *revs,
1943 int revcount)
1952 int revcount)
1944 {
1953 {
1945 const bitmask allseen = (1ull << revcount) - 1;
1954 const bitmask allseen = (1ull << revcount) - 1;
1946 const bitmask poison = 1ull << revcount;
1955 const bitmask poison = 1ull << revcount;
1947 PyObject *gca = PyList_New(0);
1956 PyObject *gca = PyList_New(0);
1948 int i, v, interesting;
1957 int i, v, interesting;
1949 int maxrev = -1;
1958 int maxrev = -1;
1950 bitmask sp;
1959 bitmask sp;
1951 bitmask *seen;
1960 bitmask *seen;
1952
1961
1953 if (gca == NULL)
1962 if (gca == NULL)
1954 return PyErr_NoMemory();
1963 return PyErr_NoMemory();
1955
1964
1956 for (i = 0; i < revcount; i++) {
1965 for (i = 0; i < revcount; i++) {
1957 if (revs[i] > maxrev)
1966 if (revs[i] > maxrev)
1958 maxrev = revs[i];
1967 maxrev = revs[i];
1959 }
1968 }
1960
1969
1961 seen = calloc(sizeof(*seen), maxrev + 1);
1970 seen = calloc(sizeof(*seen), maxrev + 1);
1962 if (seen == NULL) {
1971 if (seen == NULL) {
1963 Py_DECREF(gca);
1972 Py_DECREF(gca);
1964 return PyErr_NoMemory();
1973 return PyErr_NoMemory();
1965 }
1974 }
1966
1975
1967 for (i = 0; i < revcount; i++)
1976 for (i = 0; i < revcount; i++)
1968 seen[revs[i]] = 1ull << i;
1977 seen[revs[i]] = 1ull << i;
1969
1978
1970 interesting = revcount;
1979 interesting = revcount;
1971
1980
1972 for (v = maxrev; v >= 0 && interesting; v--) {
1981 for (v = maxrev; v >= 0 && interesting; v--) {
1973 bitmask sv = seen[v];
1982 bitmask sv = seen[v];
1974 int parents[2];
1983 int parents[2];
1975
1984
1976 if (!sv)
1985 if (!sv)
1977 continue;
1986 continue;
1978
1987
1979 if (sv < poison) {
1988 if (sv < poison) {
1980 interesting -= 1;
1989 interesting -= 1;
1981 if (sv == allseen) {
1990 if (sv == allseen) {
1982 PyObject *obj = PyInt_FromLong(v);
1991 PyObject *obj = PyInt_FromLong(v);
1983 if (obj == NULL)
1992 if (obj == NULL)
1984 goto bail;
1993 goto bail;
1985 if (PyList_Append(gca, obj) == -1) {
1994 if (PyList_Append(gca, obj) == -1) {
1986 Py_DECREF(obj);
1995 Py_DECREF(obj);
1987 goto bail;
1996 goto bail;
1988 }
1997 }
1989 sv |= poison;
1998 sv |= poison;
1990 for (i = 0; i < revcount; i++) {
1999 for (i = 0; i < revcount; i++) {
1991 if (revs[i] == v)
2000 if (revs[i] == v)
1992 goto done;
2001 goto done;
1993 }
2002 }
1994 }
2003 }
1995 }
2004 }
1996 if (index_get_parents(self, v, parents, maxrev) < 0)
2005 if (index_get_parents(self, v, parents, maxrev) < 0)
1997 goto bail;
2006 goto bail;
1998
2007
1999 for (i = 0; i < 2; i++) {
2008 for (i = 0; i < 2; i++) {
2000 int p = parents[i];
2009 int p = parents[i];
2001 if (p == -1)
2010 if (p == -1)
2002 continue;
2011 continue;
2003 sp = seen[p];
2012 sp = seen[p];
2004 if (sv < poison) {
2013 if (sv < poison) {
2005 if (sp == 0) {
2014 if (sp == 0) {
2006 seen[p] = sv;
2015 seen[p] = sv;
2007 interesting++;
2016 interesting++;
2008 } else if (sp != sv)
2017 } else if (sp != sv)
2009 seen[p] |= sv;
2018 seen[p] |= sv;
2010 } else {
2019 } else {
2011 if (sp && sp < poison)
2020 if (sp && sp < poison)
2012 interesting--;
2021 interesting--;
2013 seen[p] = sv;
2022 seen[p] = sv;
2014 }
2023 }
2015 }
2024 }
2016 }
2025 }
2017
2026
2018 done:
2027 done:
2019 free(seen);
2028 free(seen);
2020 return gca;
2029 return gca;
2021 bail:
2030 bail:
2022 free(seen);
2031 free(seen);
2023 Py_XDECREF(gca);
2032 Py_XDECREF(gca);
2024 return NULL;
2033 return NULL;
2025 }
2034 }
2026
2035
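The sweep above assigns one bit per input rev, walks from the highest rev toward the root, and reports a rev as a candidate once its mask covers every input; the extra "poison" bit then flows to that rev's ancestors so they cannot also be reported. A deliberately slow Python restatement of the same idea, equivalent in spirit rather than a line-for-line port (``parents(rev)`` is an assumed callback returning the two parent revs, with -1 for a missing parent):

    def gca_candidates(parents, revs):
        allseen = (1 << len(revs)) - 1
        poison = 1 << len(revs)
        seen = {r: 1 << i for i, r in enumerate(revs)}
        gca = []
        for v in range(max(revs), -1, -1):
            sv = seen.get(v, 0)
            if not sv:
                continue
            if sv == allseen:            # reachable from every input rev
                gca.append(v)
                sv |= poison             # keep v's own ancestors out
            for p in parents(v):
                if p != -1:
                    seen[p] = seen.get(p, 0) | sv
        return gca

The C version additionally tracks how many masks are still "interesting" so it can stop early instead of always sweeping down to rev 0.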
2027 /*
2036 /*
2028 * Given a disjoint set of revs, return the subset with the longest
2037 * Given a disjoint set of revs, return the subset with the longest
2029 * path to the root.
2038 * path to the root.
2030 */
2039 */
2031 static PyObject *find_deepest(indexObject *self, PyObject *revs)
2040 static PyObject *find_deepest(indexObject *self, PyObject *revs)
2032 {
2041 {
2033 const Py_ssize_t revcount = PyList_GET_SIZE(revs);
2042 const Py_ssize_t revcount = PyList_GET_SIZE(revs);
2034 static const Py_ssize_t capacity = 24;
2043 static const Py_ssize_t capacity = 24;
2035 int *depth, *interesting = NULL;
2044 int *depth, *interesting = NULL;
2036 int i, j, v, ninteresting;
2045 int i, j, v, ninteresting;
2037 PyObject *dict = NULL, *keys = NULL;
2046 PyObject *dict = NULL, *keys = NULL;
2038 long *seen = NULL;
2047 long *seen = NULL;
2039 int maxrev = -1;
2048 int maxrev = -1;
2040 long final;
2049 long final;
2041
2050
2042 if (revcount > capacity) {
2051 if (revcount > capacity) {
2043 PyErr_Format(PyExc_OverflowError,
2052 PyErr_Format(PyExc_OverflowError,
2044 "bitset size (%ld) > capacity (%ld)",
2053 "bitset size (%ld) > capacity (%ld)",
2045 (long)revcount, (long)capacity);
2054 (long)revcount, (long)capacity);
2046 return NULL;
2055 return NULL;
2047 }
2056 }
2048
2057
2049 for (i = 0; i < revcount; i++) {
2058 for (i = 0; i < revcount; i++) {
2050 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
2059 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
2051 if (n > maxrev)
2060 if (n > maxrev)
2052 maxrev = n;
2061 maxrev = n;
2053 }
2062 }
2054
2063
2055 depth = calloc(sizeof(*depth), maxrev + 1);
2064 depth = calloc(sizeof(*depth), maxrev + 1);
2056 if (depth == NULL)
2065 if (depth == NULL)
2057 return PyErr_NoMemory();
2066 return PyErr_NoMemory();
2058
2067
2059 seen = calloc(sizeof(*seen), maxrev + 1);
2068 seen = calloc(sizeof(*seen), maxrev + 1);
2060 if (seen == NULL) {
2069 if (seen == NULL) {
2061 PyErr_NoMemory();
2070 PyErr_NoMemory();
2062 goto bail;
2071 goto bail;
2063 }
2072 }
2064
2073
2065 interesting = calloc(sizeof(*interesting), ((size_t)1) << revcount);
2074 interesting = calloc(sizeof(*interesting), ((size_t)1) << revcount);
2066 if (interesting == NULL) {
2075 if (interesting == NULL) {
2067 PyErr_NoMemory();
2076 PyErr_NoMemory();
2068 goto bail;
2077 goto bail;
2069 }
2078 }
2070
2079
2071 if (PyList_Sort(revs) == -1)
2080 if (PyList_Sort(revs) == -1)
2072 goto bail;
2081 goto bail;
2073
2082
2074 for (i = 0; i < revcount; i++) {
2083 for (i = 0; i < revcount; i++) {
2075 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
2084 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
2076 long b = 1l << i;
2085 long b = 1l << i;
2077 depth[n] = 1;
2086 depth[n] = 1;
2078 seen[n] = b;
2087 seen[n] = b;
2079 interesting[b] = 1;
2088 interesting[b] = 1;
2080 }
2089 }
2081
2090
2082 /* invariant: ninteresting is the number of non-zero entries in
2091 /* invariant: ninteresting is the number of non-zero entries in
2083 * interesting. */
2092 * interesting. */
2084 ninteresting = (int)revcount;
2093 ninteresting = (int)revcount;
2085
2094
2086 for (v = maxrev; v >= 0 && ninteresting > 1; v--) {
2095 for (v = maxrev; v >= 0 && ninteresting > 1; v--) {
2087 int dv = depth[v];
2096 int dv = depth[v];
2088 int parents[2];
2097 int parents[2];
2089 long sv;
2098 long sv;
2090
2099
2091 if (dv == 0)
2100 if (dv == 0)
2092 continue;
2101 continue;
2093
2102
2094 sv = seen[v];
2103 sv = seen[v];
2095 if (index_get_parents(self, v, parents, maxrev) < 0)
2104 if (index_get_parents(self, v, parents, maxrev) < 0)
2096 goto bail;
2105 goto bail;
2097
2106
2098 for (i = 0; i < 2; i++) {
2107 for (i = 0; i < 2; i++) {
2099 int p = parents[i];
2108 int p = parents[i];
2100 long sp;
2109 long sp;
2101 int dp;
2110 int dp;
2102
2111
2103 if (p == -1)
2112 if (p == -1)
2104 continue;
2113 continue;
2105
2114
2106 dp = depth[p];
2115 dp = depth[p];
2107 sp = seen[p];
2116 sp = seen[p];
2108 if (dp <= dv) {
2117 if (dp <= dv) {
2109 depth[p] = dv + 1;
2118 depth[p] = dv + 1;
2110 if (sp != sv) {
2119 if (sp != sv) {
2111 interesting[sv] += 1;
2120 interesting[sv] += 1;
2112 seen[p] = sv;
2121 seen[p] = sv;
2113 if (sp) {
2122 if (sp) {
2114 interesting[sp] -= 1;
2123 interesting[sp] -= 1;
2115 if (interesting[sp] == 0)
2124 if (interesting[sp] == 0)
2116 ninteresting -= 1;
2125 ninteresting -= 1;
2117 }
2126 }
2118 }
2127 }
2119 } else if (dv == dp - 1) {
2128 } else if (dv == dp - 1) {
2120 long nsp = sp | sv;
2129 long nsp = sp | sv;
2121 if (nsp == sp)
2130 if (nsp == sp)
2122 continue;
2131 continue;
2123 seen[p] = nsp;
2132 seen[p] = nsp;
2124 interesting[sp] -= 1;
2133 interesting[sp] -= 1;
2125 if (interesting[sp] == 0)
2134 if (interesting[sp] == 0)
2126 ninteresting -= 1;
2135 ninteresting -= 1;
2127 if (interesting[nsp] == 0)
2136 if (interesting[nsp] == 0)
2128 ninteresting += 1;
2137 ninteresting += 1;
2129 interesting[nsp] += 1;
2138 interesting[nsp] += 1;
2130 }
2139 }
2131 }
2140 }
2132 interesting[sv] -= 1;
2141 interesting[sv] -= 1;
2133 if (interesting[sv] == 0)
2142 if (interesting[sv] == 0)
2134 ninteresting -= 1;
2143 ninteresting -= 1;
2135 }
2144 }
2136
2145
2137 final = 0;
2146 final = 0;
2138 j = ninteresting;
2147 j = ninteresting;
2139 for (i = 0; i < (int)(2 << revcount) && j > 0; i++) {
2148 for (i = 0; i < (int)(2 << revcount) && j > 0; i++) {
2140 if (interesting[i] == 0)
2149 if (interesting[i] == 0)
2141 continue;
2150 continue;
2142 final |= i;
2151 final |= i;
2143 j -= 1;
2152 j -= 1;
2144 }
2153 }
2145 if (final == 0) {
2154 if (final == 0) {
2146 keys = PyList_New(0);
2155 keys = PyList_New(0);
2147 goto bail;
2156 goto bail;
2148 }
2157 }
2149
2158
2150 dict = PyDict_New();
2159 dict = PyDict_New();
2151 if (dict == NULL)
2160 if (dict == NULL)
2152 goto bail;
2161 goto bail;
2153
2162
2154 for (i = 0; i < revcount; i++) {
2163 for (i = 0; i < revcount; i++) {
2155 PyObject *key;
2164 PyObject *key;
2156
2165
2157 if ((final & (1 << i)) == 0)
2166 if ((final & (1 << i)) == 0)
2158 continue;
2167 continue;
2159
2168
2160 key = PyList_GET_ITEM(revs, i);
2169 key = PyList_GET_ITEM(revs, i);
2161 Py_INCREF(key);
2170 Py_INCREF(key);
2162 Py_INCREF(Py_None);
2171 Py_INCREF(Py_None);
2163 if (PyDict_SetItem(dict, key, Py_None) == -1) {
2172 if (PyDict_SetItem(dict, key, Py_None) == -1) {
2164 Py_DECREF(key);
2173 Py_DECREF(key);
2165 Py_DECREF(Py_None);
2174 Py_DECREF(Py_None);
2166 goto bail;
2175 goto bail;
2167 }
2176 }
2168 }
2177 }
2169
2178
2170 keys = PyDict_Keys(dict);
2179 keys = PyDict_Keys(dict);
2171
2180
2172 bail:
2181 bail:
2173 free(depth);
2182 free(depth);
2174 free(seen);
2183 free(seen);
2175 free(interesting);
2184 free(interesting);
2176 Py_XDECREF(dict);
2185 Py_XDECREF(dict);
2177
2186
2178 return keys;
2187 return keys;
2179 }
2188 }
2180
2189
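Read literally, the goal stated above find_deepest -- keep the subset with the longest path to the root -- can be restated naively as below. This is only a restatement of that stated goal under the same assumed ``parents(rev)`` callback, not a port of the bitset-and-depth sweep the C code actually performs:

    import functools

    def deepest(parents, candidates):
        @functools.lru_cache(maxsize=None)
        def depth(r):
            # length of the longest path from r down to a parentless root
            ps = [p for p in parents(r) if p != -1]
            return 1 + max((depth(p) for p in ps), default=0)
        best = max(depth(r) for r in candidates)
        return [r for r in candidates if depth(r) == best]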
2181 /*
2190 /*
2182 * Given a (possibly overlapping) set of revs, return all the
2191 * Given a (possibly overlapping) set of revs, return all the
2183 * common ancestor heads: heads(::args[0] and ::args[1] and ...)
2192 * common ancestor heads: heads(::args[0] and ::args[1] and ...)
2184 */
2193 */
2185 static PyObject *index_commonancestorsheads(indexObject *self, PyObject *args)
2194 static PyObject *index_commonancestorsheads(indexObject *self, PyObject *args)
2186 {
2195 {
2187 PyObject *ret = NULL;
2196 PyObject *ret = NULL;
2188 Py_ssize_t argcount, i, len;
2197 Py_ssize_t argcount, i, len;
2189 bitmask repeat = 0;
2198 bitmask repeat = 0;
2190 int revcount = 0;
2199 int revcount = 0;
2191 int *revs;
2200 int *revs;
2192
2201
2193 argcount = PySequence_Length(args);
2202 argcount = PySequence_Length(args);
2194 revs = PyMem_Malloc(argcount * sizeof(*revs));
2203 revs = PyMem_Malloc(argcount * sizeof(*revs));
2195 if (argcount > 0 && revs == NULL)
2204 if (argcount > 0 && revs == NULL)
2196 return PyErr_NoMemory();
2205 return PyErr_NoMemory();
2197 len = index_length(self);
2206 len = index_length(self);
2198
2207
2199 for (i = 0; i < argcount; i++) {
2208 for (i = 0; i < argcount; i++) {
2200 static const int capacity = 24;
2209 static const int capacity = 24;
2201 PyObject *obj = PySequence_GetItem(args, i);
2210 PyObject *obj = PySequence_GetItem(args, i);
2202 bitmask x;
2211 bitmask x;
2203 long val;
2212 long val;
2204
2213
2205 if (!PyInt_Check(obj)) {
2214 if (!PyInt_Check(obj)) {
2206 PyErr_SetString(PyExc_TypeError,
2215 PyErr_SetString(PyExc_TypeError,
2207 "arguments must all be ints");
2216 "arguments must all be ints");
2208 Py_DECREF(obj);
2217 Py_DECREF(obj);
2209 goto bail;
2218 goto bail;
2210 }
2219 }
2211 val = PyInt_AsLong(obj);
2220 val = PyInt_AsLong(obj);
2212 Py_DECREF(obj);
2221 Py_DECREF(obj);
2213 if (val == -1) {
2222 if (val == -1) {
2214 ret = PyList_New(0);
2223 ret = PyList_New(0);
2215 goto done;
2224 goto done;
2216 }
2225 }
2217 if (val < 0 || val >= len) {
2226 if (val < 0 || val >= len) {
2218 PyErr_SetString(PyExc_IndexError, "index out of range");
2227 PyErr_SetString(PyExc_IndexError, "index out of range");
2219 goto bail;
2228 goto bail;
2220 }
2229 }
2221 /* this cheesy bloom filter lets us avoid some more
2230 /* this cheesy bloom filter lets us avoid some more
2222 * expensive duplicate checks in the common set-is-disjoint
2231 * expensive duplicate checks in the common set-is-disjoint
2223 * case */
2232 * case */
2224 x = 1ull << (val & 0x3f);
2233 x = 1ull << (val & 0x3f);
2225 if (repeat & x) {
2234 if (repeat & x) {
2226 int k;
2235 int k;
2227 for (k = 0; k < revcount; k++) {
2236 for (k = 0; k < revcount; k++) {
2228 if (val == revs[k])
2237 if (val == revs[k])
2229 goto duplicate;
2238 goto duplicate;
2230 }
2239 }
2231 } else
2240 } else
2232 repeat |= x;
2241 repeat |= x;
2233 if (revcount >= capacity) {
2242 if (revcount >= capacity) {
2234 PyErr_Format(PyExc_OverflowError,
2243 PyErr_Format(PyExc_OverflowError,
2235 "bitset size (%d) > capacity (%d)",
2244 "bitset size (%d) > capacity (%d)",
2236 revcount, capacity);
2245 revcount, capacity);
2237 goto bail;
2246 goto bail;
2238 }
2247 }
2239 revs[revcount++] = (int)val;
2248 revs[revcount++] = (int)val;
2240 duplicate:;
2249 duplicate:;
2241 }
2250 }
2242
2251
2243 if (revcount == 0) {
2252 if (revcount == 0) {
2244 ret = PyList_New(0);
2253 ret = PyList_New(0);
2245 goto done;
2254 goto done;
2246 }
2255 }
2247 if (revcount == 1) {
2256 if (revcount == 1) {
2248 PyObject *obj;
2257 PyObject *obj;
2249 ret = PyList_New(1);
2258 ret = PyList_New(1);
2250 if (ret == NULL)
2259 if (ret == NULL)
2251 goto bail;
2260 goto bail;
2252 obj = PyInt_FromLong(revs[0]);
2261 obj = PyInt_FromLong(revs[0]);
2253 if (obj == NULL)
2262 if (obj == NULL)
2254 goto bail;
2263 goto bail;
2255 PyList_SET_ITEM(ret, 0, obj);
2264 PyList_SET_ITEM(ret, 0, obj);
2256 goto done;
2265 goto done;
2257 }
2266 }
2258
2267
2259 ret = find_gca_candidates(self, revs, revcount);
2268 ret = find_gca_candidates(self, revs, revcount);
2260 if (ret == NULL)
2269 if (ret == NULL)
2261 goto bail;
2270 goto bail;
2262
2271
2263 done:
2272 done:
2264 PyMem_Free(revs);
2273 PyMem_Free(revs);
2265 return ret;
2274 return ret;
2266
2275
2267 bail:
2276 bail:
2268 PyMem_Free(revs);
2277 PyMem_Free(revs);
2269 Py_XDECREF(ret);
2278 Py_XDECREF(ret);
2270 return NULL;
2279 return NULL;
2271 }
2280 }
2272
2281
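One detail worth calling out from the argument loop above: the "cheesy bloom filter" is a single 64-bit word in which bit ``val & 63`` records every value seen, so the exact O(n) duplicate scan only runs when a bit collides. The same trick in isolation, as a small Python sketch:

    def dedup_small_ints(vals):
        seen_mask = 0
        out = []
        for v in vals:
            bit = 1 << (v & 0x3F)
            # a clear bit proves v is new; on a collision, confirm exactly
            if seen_mask & bit and v in out:
                continue
            seen_mask |= bit
            out.append(v)
        return out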
2273 /*
2282 /*
2274 * Given a (possibly overlapping) set of revs, return the greatest
2283 * Given a (possibly overlapping) set of revs, return the greatest
2275 * common ancestors: those with the longest path to the root.
2284 * common ancestors: those with the longest path to the root.
2276 */
2285 */
2277 static PyObject *index_ancestors(indexObject *self, PyObject *args)
2286 static PyObject *index_ancestors(indexObject *self, PyObject *args)
2278 {
2287 {
2279 PyObject *ret;
2288 PyObject *ret;
2280 PyObject *gca = index_commonancestorsheads(self, args);
2289 PyObject *gca = index_commonancestorsheads(self, args);
2281 if (gca == NULL)
2290 if (gca == NULL)
2282 return NULL;
2291 return NULL;
2283
2292
2284 if (PyList_GET_SIZE(gca) <= 1) {
2293 if (PyList_GET_SIZE(gca) <= 1) {
2285 return gca;
2294 return gca;
2286 }
2295 }
2287
2296
2288 ret = find_deepest(self, gca);
2297 ret = find_deepest(self, gca);
2289 Py_DECREF(gca);
2298 Py_DECREF(gca);
2290 return ret;
2299 return ret;
2291 }
2300 }
2292
2301
2293 /*
2302 /*
2294 * Invalidate any trie entries introduced by added revs.
2303 * Invalidate any trie entries introduced by added revs.
2295 */
2304 */
2296 static void index_invalidate_added(indexObject *self, Py_ssize_t start)
2305 static void index_invalidate_added(indexObject *self, Py_ssize_t start)
2297 {
2306 {
2298 Py_ssize_t i, len = PyList_GET_SIZE(self->added);
2307 Py_ssize_t i, len = PyList_GET_SIZE(self->added);
2299
2308
2300 for (i = start; i < len; i++) {
2309 for (i = start; i < len; i++) {
2301 PyObject *tuple = PyList_GET_ITEM(self->added, i);
2310 PyObject *tuple = PyList_GET_ITEM(self->added, i);
2302 PyObject *node = PyTuple_GET_ITEM(tuple, 7);
2311 PyObject *node = PyTuple_GET_ITEM(tuple, 7);
2303
2312
2304 nt_delete_node(&self->nt, PyBytes_AS_STRING(node));
2313 nt_delete_node(&self->nt, PyBytes_AS_STRING(node));
2305 }
2314 }
2306
2315
2307 if (start == 0)
2316 if (start == 0)
2308 Py_CLEAR(self->added);
2317 Py_CLEAR(self->added);
2309 }
2318 }
2310
2319
2311 /*
2320 /*
2312 * Delete a numeric range of revs, which must be at the end of the
2321 * Delete a numeric range of revs, which must be at the end of the
2313 * range, but exclude the sentinel nullid entry.
2322 * range, but exclude the sentinel nullid entry.
2314 */
2323 */
2315 static int index_slice_del(indexObject *self, PyObject *item)
2324 static int index_slice_del(indexObject *self, PyObject *item)
2316 {
2325 {
2317 Py_ssize_t start, stop, step, slicelength;
2326 Py_ssize_t start, stop, step, slicelength;
2318 Py_ssize_t length = index_length(self) + 1;
2327 Py_ssize_t length = index_length(self) + 1;
2319 int ret = 0;
2328 int ret = 0;
2320
2329
2321 /* Argument changed from PySliceObject* to PyObject* in Python 3. */
2330 /* Argument changed from PySliceObject* to PyObject* in Python 3. */
2322 #ifdef IS_PY3K
2331 #ifdef IS_PY3K
2323 if (PySlice_GetIndicesEx(item, length, &start, &stop, &step,
2332 if (PySlice_GetIndicesEx(item, length, &start, &stop, &step,
2324 &slicelength) < 0)
2333 &slicelength) < 0)
2325 #else
2334 #else
2326 if (PySlice_GetIndicesEx((PySliceObject *)item, length, &start, &stop,
2335 if (PySlice_GetIndicesEx((PySliceObject *)item, length, &start, &stop,
2327 &step, &slicelength) < 0)
2336 &step, &slicelength) < 0)
2328 #endif
2337 #endif
2329 return -1;
2338 return -1;
2330
2339
2331 if (slicelength <= 0)
2340 if (slicelength <= 0)
2332 return 0;
2341 return 0;
2333
2342
2334 if ((step < 0 && start < stop) || (step > 0 && start > stop))
2343 if ((step < 0 && start < stop) || (step > 0 && start > stop))
2335 stop = start;
2344 stop = start;
2336
2345
2337 if (step < 0) {
2346 if (step < 0) {
2338 stop = start + 1;
2347 stop = start + 1;
2339 start = stop + step * (slicelength - 1) - 1;
2348 start = stop + step * (slicelength - 1) - 1;
2340 step = -step;
2349 step = -step;
2341 }
2350 }
2342
2351
2343 if (step != 1) {
2352 if (step != 1) {
2344 PyErr_SetString(PyExc_ValueError,
2353 PyErr_SetString(PyExc_ValueError,
2345 "revlog index delete requires step size of 1");
2354 "revlog index delete requires step size of 1");
2346 return -1;
2355 return -1;
2347 }
2356 }
2348
2357
2349 if (stop != length - 1) {
2358 if (stop != length - 1) {
2350 PyErr_SetString(PyExc_IndexError,
2359 PyErr_SetString(PyExc_IndexError,
2351 "revlog index deletion indices are invalid");
2360 "revlog index deletion indices are invalid");
2352 return -1;
2361 return -1;
2353 }
2362 }
2354
2363
2355 if (start < self->length) {
2364 if (start < self->length) {
2356 if (self->ntinitialized) {
2365 if (self->ntinitialized) {
2357 Py_ssize_t i;
2366 Py_ssize_t i;
2358
2367
2359 for (i = start + 1; i < self->length; i++) {
2368 for (i = start + 1; i < self->length; i++) {
2360 const char *node = index_node_existing(self, i);
2369 const char *node = index_node_existing(self, i);
2361 if (node == NULL)
2370 if (node == NULL)
2362 return -1;
2371 return -1;
2363
2372
2364 nt_delete_node(&self->nt, node);
2373 nt_delete_node(&self->nt, node);
2365 }
2374 }
2366 if (self->added)
2375 if (self->added)
2367 index_invalidate_added(self, 0);
2376 index_invalidate_added(self, 0);
2368 if (self->ntrev > start)
2377 if (self->ntrev > start)
2369 self->ntrev = (int)start;
2378 self->ntrev = (int)start;
2370 }
2379 }
2371 self->length = start;
2380 self->length = start;
2372 if (start < self->raw_length) {
2381 if (start < self->raw_length) {
2373 if (self->cache) {
2382 if (self->cache) {
2374 Py_ssize_t i;
2383 Py_ssize_t i;
2375 for (i = start; i < self->raw_length; i++)
2384 for (i = start; i < self->raw_length; i++)
2376 Py_CLEAR(self->cache[i]);
2385 Py_CLEAR(self->cache[i]);
2377 }
2386 }
2378 self->raw_length = start;
2387 self->raw_length = start;
2379 }
2388 }
2380 goto done;
2389 goto done;
2381 }
2390 }
2382
2391
2383 if (self->ntinitialized) {
2392 if (self->ntinitialized) {
2384 index_invalidate_added(self, start - self->length);
2393 index_invalidate_added(self, start - self->length);
2385 if (self->ntrev > start)
2394 if (self->ntrev > start)
2386 self->ntrev = (int)start;
2395 self->ntrev = (int)start;
2387 }
2396 }
2388 if (self->added)
2397 if (self->added)
2389 ret = PyList_SetSlice(self->added, start - self->length,
2398 ret = PyList_SetSlice(self->added, start - self->length,
2390 PyList_GET_SIZE(self->added), NULL);
2399 PyList_GET_SIZE(self->added), NULL);
2391 done:
2400 done:
2392 Py_CLEAR(self->headrevs);
2401 Py_CLEAR(self->headrevs);
2393 return ret;
2402 return ret;
2394 }
2403 }
2395
2404
2396 /*
2405 /*
2397 * Supported ops:
2406 * Supported ops:
2398 *
2407 *
2399 * slice deletion
2408 * slice deletion
2400 * string assignment (extend node->rev mapping)
2409 * string assignment (extend node->rev mapping)
2401 * string deletion (shrink node->rev mapping)
2410 * string deletion (shrink node->rev mapping)
2402 */
2411 */
2403 static int index_assign_subscript(indexObject *self, PyObject *item,
2412 static int index_assign_subscript(indexObject *self, PyObject *item,
2404 PyObject *value)
2413 PyObject *value)
2405 {
2414 {
2406 char *node;
2415 char *node;
2407 long rev;
2416 long rev;
2408
2417
2409 if (PySlice_Check(item) && value == NULL)
2418 if (PySlice_Check(item) && value == NULL)
2410 return index_slice_del(self, item);
2419 return index_slice_del(self, item);
2411
2420
2412 if (node_check(item, &node) == -1)
2421 if (node_check(item, &node) == -1)
2413 return -1;
2422 return -1;
2414
2423
2415 if (value == NULL)
2424 if (value == NULL)
2416 return self->ntinitialized ? nt_delete_node(&self->nt, node)
2425 return self->ntinitialized ? nt_delete_node(&self->nt, node)
2417 : 0;
2426 : 0;
2418 rev = PyInt_AsLong(value);
2427 rev = PyInt_AsLong(value);
2419 if (rev > INT_MAX || rev < 0) {
2428 if (rev > INT_MAX || rev < 0) {
2420 if (!PyErr_Occurred())
2429 if (!PyErr_Occurred())
2421 PyErr_SetString(PyExc_ValueError, "rev out of range");
2430 PyErr_SetString(PyExc_ValueError, "rev out of range");
2422 return -1;
2431 return -1;
2423 }
2432 }
2424
2433
2425 if (index_init_nt(self) == -1)
2434 if (index_init_nt(self) == -1)
2426 return -1;
2435 return -1;
2427 return nt_insert(&self->nt, node, (int)rev);
2436 return nt_insert(&self->nt, node, (int)rev);
2428 }
2437 }
2429
2438
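The three operations listed in the comment above are reached through the ordinary mapping protocol on the index object. Roughly, with ``index`` a parsers.index instance, ``node`` a 20-byte binary id and ``rev`` an integer:

    def strip_tail(index, rev):
        # slice deletion: drop all entries from rev to the end (tail only)
        del index[rev:]

    def remember(index, node, rev):
        # string assignment: extend the node->rev mapping
        index[node] = rev

    def forget(index, node):
        # string deletion: shrink the node->rev mapping
        del index[node]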
2430 /*
2439 /*
2431 * Find all RevlogNG entries in an index that has inline data. Update
2440 * Find all RevlogNG entries in an index that has inline data. Update
2432 * the optional "offsets" table with those entries.
2441 * the optional "offsets" table with those entries.
2433 */
2442 */
2434 static Py_ssize_t inline_scan(indexObject *self, const char **offsets)
2443 static Py_ssize_t inline_scan(indexObject *self, const char **offsets)
2435 {
2444 {
2436 const char *data = (const char *)self->buf.buf;
2445 const char *data = (const char *)self->buf.buf;
2437 Py_ssize_t pos = 0;
2446 Py_ssize_t pos = 0;
2438 Py_ssize_t end = self->buf.len;
2447 Py_ssize_t end = self->buf.len;
2439 long incr = v1_hdrsize;
2448 long incr = v1_hdrsize;
2440 Py_ssize_t len = 0;
2449 Py_ssize_t len = 0;
2441
2450
2442 while (pos + v1_hdrsize <= end && pos >= 0) {
2451 while (pos + v1_hdrsize <= end && pos >= 0) {
2443 uint32_t comp_len;
2452 uint32_t comp_len;
2444 /* 3rd element of header is length of compressed inline data */
2453 /* 3rd element of header is length of compressed inline data */
2445 comp_len = getbe32(data + pos + 8);
2454 comp_len = getbe32(data + pos + 8);
2446 incr = v1_hdrsize + comp_len;
2455 incr = v1_hdrsize + comp_len;
2447 if (offsets)
2456 if (offsets)
2448 offsets[len] = data + pos;
2457 offsets[len] = data + pos;
2449 len++;
2458 len++;
2450 pos += incr;
2459 pos += incr;
2451 }
2460 }
2452
2461
2453 if (pos != end) {
2462 if (pos != end) {
2454 if (!PyErr_Occurred())
2463 if (!PyErr_Occurred())
2455 PyErr_SetString(PyExc_ValueError, "corrupt index file");
2464 PyErr_SetString(PyExc_ValueError, "corrupt index file");
2456 return -1;
2465 return -1;
2457 }
2466 }
2458
2467
2459 return len;
2468 return len;
2460 }
2469 }
2461
2470
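The same scan in Python may make the inline layout easier to see: every entry is a fixed-size v1 header (64 bytes) immediately followed by its compressed data, whose length is the third big-endian 32-bit field of the header (byte offset 8). A hedged sketch:

    import struct

    V1_HDRSIZE = 64  # size of a revlog v1 index entry

    def inline_offsets(data):
        offsets, pos = [], 0
        while pos + V1_HDRSIZE <= len(data):
            comp_len = struct.unpack(">I", data[pos + 8:pos + 12])[0]
            offsets.append(pos)
            pos += V1_HDRSIZE + comp_len
        if pos != len(data):
            raise ValueError("corrupt index file")
        return offsets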
2462 static int index_init(indexObject *self, PyObject *args)
2471 static int index_init(indexObject *self, PyObject *args)
2463 {
2472 {
2464 PyObject *data_obj, *inlined_obj;
2473 PyObject *data_obj, *inlined_obj;
2465 Py_ssize_t size;
2474 Py_ssize_t size;
2466
2475
2467 /* Initialize before argument-checking to avoid index_dealloc() crash.
2476 /* Initialize before argument-checking to avoid index_dealloc() crash.
2468 */
2477 */
2469 self->raw_length = 0;
2478 self->raw_length = 0;
2470 self->added = NULL;
2479 self->added = NULL;
2471 self->cache = NULL;
2480 self->cache = NULL;
2472 self->data = NULL;
2481 self->data = NULL;
2473 memset(&self->buf, 0, sizeof(self->buf));
2482 memset(&self->buf, 0, sizeof(self->buf));
2474 self->headrevs = NULL;
2483 self->headrevs = NULL;
2475 self->filteredrevs = Py_None;
2484 self->filteredrevs = Py_None;
2476 Py_INCREF(Py_None);
2485 Py_INCREF(Py_None);
2477 self->ntinitialized = 0;
2486 self->ntinitialized = 0;
2478 self->offsets = NULL;
2487 self->offsets = NULL;
2479
2488
2480 if (!PyArg_ParseTuple(args, "OO", &data_obj, &inlined_obj))
2489 if (!PyArg_ParseTuple(args, "OO", &data_obj, &inlined_obj))
2481 return -1;
2490 return -1;
2482 if (!PyObject_CheckBuffer(data_obj)) {
2491 if (!PyObject_CheckBuffer(data_obj)) {
2483 PyErr_SetString(PyExc_TypeError,
2492 PyErr_SetString(PyExc_TypeError,
2484 "data does not support buffer interface");
2493 "data does not support buffer interface");
2485 return -1;
2494 return -1;
2486 }
2495 }
2487
2496
2488 if (PyObject_GetBuffer(data_obj, &self->buf, PyBUF_SIMPLE) == -1)
2497 if (PyObject_GetBuffer(data_obj, &self->buf, PyBUF_SIMPLE) == -1)
2489 return -1;
2498 return -1;
2490 size = self->buf.len;
2499 size = self->buf.len;
2491
2500
2492 self->inlined = inlined_obj && PyObject_IsTrue(inlined_obj);
2501 self->inlined = inlined_obj && PyObject_IsTrue(inlined_obj);
2493 self->data = data_obj;
2502 self->data = data_obj;
2494
2503
2495 self->ntlookups = self->ntmisses = 0;
2504 self->ntlookups = self->ntmisses = 0;
2496 self->ntrev = -1;
2505 self->ntrev = -1;
2497 Py_INCREF(self->data);
2506 Py_INCREF(self->data);
2498
2507
2499 if (self->inlined) {
2508 if (self->inlined) {
2500 Py_ssize_t len = inline_scan(self, NULL);
2509 Py_ssize_t len = inline_scan(self, NULL);
2501 if (len == -1)
2510 if (len == -1)
2502 goto bail;
2511 goto bail;
2503 self->raw_length = len;
2512 self->raw_length = len;
2504 self->length = len;
2513 self->length = len;
2505 } else {
2514 } else {
2506 if (size % v1_hdrsize) {
2515 if (size % v1_hdrsize) {
2507 PyErr_SetString(PyExc_ValueError, "corrupt index file");
2516 PyErr_SetString(PyExc_ValueError, "corrupt index file");
2508 goto bail;
2517 goto bail;
2509 }
2518 }
2510 self->raw_length = size / v1_hdrsize;
2519 self->raw_length = size / v1_hdrsize;
2511 self->length = self->raw_length;
2520 self->length = self->raw_length;
2512 }
2521 }
2513
2522
2514 return 0;
2523 return 0;
2515 bail:
2524 bail:
2516 return -1;
2525 return -1;
2517 }
2526 }
2518
2527
2519 static PyObject *index_nodemap(indexObject *self)
2528 static PyObject *index_nodemap(indexObject *self)
2520 {
2529 {
2521 Py_INCREF(self);
2530 Py_INCREF(self);
2522 return (PyObject *)self;
2531 return (PyObject *)self;
2523 }
2532 }
2524
2533
2525 static void _index_clearcaches(indexObject *self)
2534 static void _index_clearcaches(indexObject *self)
2526 {
2535 {
2527 if (self->cache) {
2536 if (self->cache) {
2528 Py_ssize_t i;
2537 Py_ssize_t i;
2529
2538
2530 for (i = 0; i < self->raw_length; i++)
2539 for (i = 0; i < self->raw_length; i++)
2531 Py_CLEAR(self->cache[i]);
2540 Py_CLEAR(self->cache[i]);
2532 free(self->cache);
2541 free(self->cache);
2533 self->cache = NULL;
2542 self->cache = NULL;
2534 }
2543 }
2535 if (self->offsets) {
2544 if (self->offsets) {
2536 PyMem_Free((void *)self->offsets);
2545 PyMem_Free((void *)self->offsets);
2537 self->offsets = NULL;
2546 self->offsets = NULL;
2538 }
2547 }
2539 if (self->ntinitialized) {
2548 if (self->ntinitialized) {
2540 nt_dealloc(&self->nt);
2549 nt_dealloc(&self->nt);
2541 }
2550 }
2542 self->ntinitialized = 0;
2551 self->ntinitialized = 0;
2543 Py_CLEAR(self->headrevs);
2552 Py_CLEAR(self->headrevs);
2544 }
2553 }
2545
2554
2546 static PyObject *index_clearcaches(indexObject *self)
2555 static PyObject *index_clearcaches(indexObject *self)
2547 {
2556 {
2548 _index_clearcaches(self);
2557 _index_clearcaches(self);
2549 self->ntrev = -1;
2558 self->ntrev = -1;
2550 self->ntlookups = self->ntmisses = 0;
2559 self->ntlookups = self->ntmisses = 0;
2551 Py_RETURN_NONE;
2560 Py_RETURN_NONE;
2552 }
2561 }
2553
2562
2554 static void index_dealloc(indexObject *self)
2563 static void index_dealloc(indexObject *self)
2555 {
2564 {
2556 _index_clearcaches(self);
2565 _index_clearcaches(self);
2557 Py_XDECREF(self->filteredrevs);
2566 Py_XDECREF(self->filteredrevs);
2558 if (self->buf.buf) {
2567 if (self->buf.buf) {
2559 PyBuffer_Release(&self->buf);
2568 PyBuffer_Release(&self->buf);
2560 memset(&self->buf, 0, sizeof(self->buf));
2569 memset(&self->buf, 0, sizeof(self->buf));
2561 }
2570 }
2562 Py_XDECREF(self->data);
2571 Py_XDECREF(self->data);
2563 Py_XDECREF(self->added);
2572 Py_XDECREF(self->added);
2564 PyObject_Del(self);
2573 PyObject_Del(self);
2565 }
2574 }
2566
2575
2567 static PySequenceMethods index_sequence_methods = {
2576 static PySequenceMethods index_sequence_methods = {
2568 (lenfunc)index_length, /* sq_length */
2577 (lenfunc)index_length, /* sq_length */
2569 0, /* sq_concat */
2578 0, /* sq_concat */
2570 0, /* sq_repeat */
2579 0, /* sq_repeat */
2571 (ssizeargfunc)index_get, /* sq_item */
2580 (ssizeargfunc)index_get, /* sq_item */
2572 0, /* sq_slice */
2581 0, /* sq_slice */
2573 0, /* sq_ass_item */
2582 0, /* sq_ass_item */
2574 0, /* sq_ass_slice */
2583 0, /* sq_ass_slice */
2575 (objobjproc)index_contains, /* sq_contains */
2584 (objobjproc)index_contains, /* sq_contains */
2576 };
2585 };
2577
2586
2578 static PyMappingMethods index_mapping_methods = {
2587 static PyMappingMethods index_mapping_methods = {
2579 (lenfunc)index_length, /* mp_length */
2588 (lenfunc)index_length, /* mp_length */
2580 (binaryfunc)index_getitem, /* mp_subscript */
2589 (binaryfunc)index_getitem, /* mp_subscript */
2581 (objobjargproc)index_assign_subscript, /* mp_ass_subscript */
2590 (objobjargproc)index_assign_subscript, /* mp_ass_subscript */
2582 };
2591 };
2583
2592
2584 static PyMethodDef index_methods[] = {
2593 static PyMethodDef index_methods[] = {
2585 {"ancestors", (PyCFunction)index_ancestors, METH_VARARGS,
2594 {"ancestors", (PyCFunction)index_ancestors, METH_VARARGS,
2586 "return the gca set of the given revs"},
2595 "return the gca set of the given revs"},
2587 {"commonancestorsheads", (PyCFunction)index_commonancestorsheads,
2596 {"commonancestorsheads", (PyCFunction)index_commonancestorsheads,
2588 METH_VARARGS,
2597 METH_VARARGS,
2589 "return the heads of the common ancestors of the given revs"},
2598 "return the heads of the common ancestors of the given revs"},
2590 {"clearcaches", (PyCFunction)index_clearcaches, METH_NOARGS,
2599 {"clearcaches", (PyCFunction)index_clearcaches, METH_NOARGS,
2591 "clear the index caches"},
2600 "clear the index caches"},
2592 {"get", (PyCFunction)index_m_get, METH_VARARGS, "get an index entry"},
2601 {"get", (PyCFunction)index_m_get, METH_VARARGS, "get an index entry"},
2593 {"computephasesmapsets", (PyCFunction)compute_phases_map_sets, METH_VARARGS,
2602 {"computephasesmapsets", (PyCFunction)compute_phases_map_sets, METH_VARARGS,
2594 "compute phases"},
2603 "compute phases"},
2595 {"reachableroots2", (PyCFunction)reachableroots2, METH_VARARGS,
2604 {"reachableroots2", (PyCFunction)reachableroots2, METH_VARARGS,
2596 "reachableroots"},
2605 "reachableroots"},
2597 {"headrevs", (PyCFunction)index_headrevs, METH_VARARGS,
2606 {"headrevs", (PyCFunction)index_headrevs, METH_VARARGS,
2598 "get head revisions"}, /* Can do filtering since 3.2 */
2607 "get head revisions"}, /* Can do filtering since 3.2 */
2599 {"headrevsfiltered", (PyCFunction)index_headrevs, METH_VARARGS,
2608 {"headrevsfiltered", (PyCFunction)index_headrevs, METH_VARARGS,
2600 "get filtered head revisions"}, /* Can always do filtering */
2609 "get filtered head revisions"}, /* Can always do filtering */
2601 {"deltachain", (PyCFunction)index_deltachain, METH_VARARGS,
2610 {"deltachain", (PyCFunction)index_deltachain, METH_VARARGS,
2602 "determine revisions with deltas to reconstruct fulltext"},
2611 "determine revisions with deltas to reconstruct fulltext"},
2603 {"slicechunktodensity", (PyCFunction)index_slicechunktodensity,
2612 {"slicechunktodensity", (PyCFunction)index_slicechunktodensity,
2604 METH_VARARGS, "slice a chunk of revisions into subchunks respecting a read density target"},
2613 METH_VARARGS, "slice a chunk of revisions into subchunks respecting a read density target"},
2605 {"append", (PyCFunction)index_append, METH_O, "append an index entry"},
2614 {"append", (PyCFunction)index_append, METH_O, "append an index entry"},
2606 {"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS,
2615 {"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS,
2607 "match a potentially ambiguous node ID"},
2616 "match a potentially ambiguous node ID"},
2608 {"shortest", (PyCFunction)index_shortest, METH_VARARGS,
2617 {"shortest", (PyCFunction)index_shortest, METH_VARARGS,
2609 "find length of shortest hex nodeid of a binary ID"},
2618 "find length of shortest hex nodeid of a binary ID"},
2610 {"stats", (PyCFunction)index_stats, METH_NOARGS, "stats for the index"},
2619 {"stats", (PyCFunction)index_stats, METH_NOARGS, "stats for the index"},
2611 {NULL} /* Sentinel */
2620 {NULL} /* Sentinel */
2612 };
2621 };
2613
2622
2614 static PyGetSetDef index_getset[] = {
2623 static PyGetSetDef index_getset[] = {
2615 {"nodemap", (getter)index_nodemap, NULL, "nodemap", NULL},
2624 {"nodemap", (getter)index_nodemap, NULL, "nodemap", NULL},
2616 {NULL} /* Sentinel */
2625 {NULL} /* Sentinel */
2617 };
2626 };
2618
2627
2619 PyTypeObject HgRevlogIndex_Type = {
2628 PyTypeObject HgRevlogIndex_Type = {
2620 PyVarObject_HEAD_INIT(NULL, 0) /* header */
2629 PyVarObject_HEAD_INIT(NULL, 0) /* header */
2621 "parsers.index", /* tp_name */
2630 "parsers.index", /* tp_name */
2622 sizeof(indexObject), /* tp_basicsize */
2631 sizeof(indexObject), /* tp_basicsize */
2623 0, /* tp_itemsize */
2632 0, /* tp_itemsize */
2624 (destructor)index_dealloc, /* tp_dealloc */
2633 (destructor)index_dealloc, /* tp_dealloc */
2625 0, /* tp_print */
2634 0, /* tp_print */
2626 0, /* tp_getattr */
2635 0, /* tp_getattr */
2627 0, /* tp_setattr */
2636 0, /* tp_setattr */
2628 0, /* tp_compare */
2637 0, /* tp_compare */
2629 0, /* tp_repr */
2638 0, /* tp_repr */
2630 0, /* tp_as_number */
2639 0, /* tp_as_number */
2631 &index_sequence_methods, /* tp_as_sequence */
2640 &index_sequence_methods, /* tp_as_sequence */
2632 &index_mapping_methods, /* tp_as_mapping */
2641 &index_mapping_methods, /* tp_as_mapping */
2633 0, /* tp_hash */
2642 0, /* tp_hash */
2634 0, /* tp_call */
2643 0, /* tp_call */
2635 0, /* tp_str */
2644 0, /* tp_str */
2636 0, /* tp_getattro */
2645 0, /* tp_getattro */
2637 0, /* tp_setattro */
2646 0, /* tp_setattro */
2638 0, /* tp_as_buffer */
2647 0, /* tp_as_buffer */
2639 Py_TPFLAGS_DEFAULT, /* tp_flags */
2648 Py_TPFLAGS_DEFAULT, /* tp_flags */
2640 "revlog index", /* tp_doc */
2649 "revlog index", /* tp_doc */
2641 0, /* tp_traverse */
2650 0, /* tp_traverse */
2642 0, /* tp_clear */
2651 0, /* tp_clear */
2643 0, /* tp_richcompare */
2652 0, /* tp_richcompare */
2644 0, /* tp_weaklistoffset */
2653 0, /* tp_weaklistoffset */
2645 0, /* tp_iter */
2654 0, /* tp_iter */
2646 0, /* tp_iternext */
2655 0, /* tp_iternext */
2647 index_methods, /* tp_methods */
2656 index_methods, /* tp_methods */
2648 0, /* tp_members */
2657 0, /* tp_members */
2649 index_getset, /* tp_getset */
2658 index_getset, /* tp_getset */
2650 0, /* tp_base */
2659 0, /* tp_base */
2651 0, /* tp_dict */
2660 0, /* tp_dict */
2652 0, /* tp_descr_get */
2661 0, /* tp_descr_get */
2653 0, /* tp_descr_set */
2662 0, /* tp_descr_set */
2654 0, /* tp_dictoffset */
2663 0, /* tp_dictoffset */
2655 (initproc)index_init, /* tp_init */
2664 (initproc)index_init, /* tp_init */
2656 0, /* tp_alloc */
2665 0, /* tp_alloc */
2657 };
2666 };
2658
2667
2659 /*
2668 /*
2660 * returns a tuple of the form (index, cache) with elements as
2669 * returns a tuple of the form (index, cache) with elements as
2661 * follows:
2670 * follows:
2662 *
2671 *
2663 * index: an index object that lazily parses RevlogNG records
2672 * index: an index object that lazily parses RevlogNG records
2664 * cache: if data is inlined, a tuple (0, index_file_content), else None
2673 * cache: if data is inlined, a tuple (0, index_file_content), else None
2665 * index_file_content could be a string, or a buffer
2674 * index_file_content could be a string, or a buffer
2666 *
2675 *
2667 * added complications are for backwards compatibility
2676 * added complications are for backwards compatibility
2668 */
2677 */
2669 PyObject *parse_index2(PyObject *self, PyObject *args)
2678 PyObject *parse_index2(PyObject *self, PyObject *args)
2670 {
2679 {
2671 PyObject *tuple = NULL, *cache = NULL;
2680 PyObject *tuple = NULL, *cache = NULL;
2672 indexObject *idx;
2681 indexObject *idx;
2673 int ret;
2682 int ret;
2674
2683
2675 idx = PyObject_New(indexObject, &HgRevlogIndex_Type);
2684 idx = PyObject_New(indexObject, &HgRevlogIndex_Type);
2676 if (idx == NULL)
2685 if (idx == NULL)
2677 goto bail;
2686 goto bail;
2678
2687
2679 ret = index_init(idx, args);
2688 ret = index_init(idx, args);
2680 if (ret == -1)
2689 if (ret == -1)
2681 goto bail;
2690 goto bail;
2682
2691
2683 if (idx->inlined) {
2692 if (idx->inlined) {
2684 cache = Py_BuildValue("iO", 0, idx->data);
2693 cache = Py_BuildValue("iO", 0, idx->data);
2685 if (cache == NULL)
2694 if (cache == NULL)
2686 goto bail;
2695 goto bail;
2687 } else {
2696 } else {
2688 cache = Py_None;
2697 cache = Py_None;
2689 Py_INCREF(cache);
2698 Py_INCREF(cache);
2690 }
2699 }
2691
2700
2692 tuple = Py_BuildValue("NN", idx, cache);
2701 tuple = Py_BuildValue("NN", idx, cache);
2693 if (!tuple)
2702 if (!tuple)
2694 goto bail;
2703 goto bail;
2695 return tuple;
2704 return tuple;
2696
2705
2697 bail:
2706 bail:
2698 Py_XDECREF(idx);
2707 Py_XDECREF(idx);
2699 Py_XDECREF(cache);
2708 Py_XDECREF(cache);
2700 Py_XDECREF(tuple);
2709 Py_XDECREF(tuple);
2701 return NULL;
2710 return NULL;
2702 }
2711 }
2703
2712
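For orientation, the parser above is what mercurial.cext.parsers exposes as ``parse_index2``; a hedged usage sketch (error handling elided):

    from mercurial.cext import parsers

    def load_index(data, inline):
        # index lazily parses RevlogNG records; cache is (0, data) for an
        # inline revlog and None otherwise
        index, cache = parsers.parse_index2(data, inline)
        return index, cache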
2704 #ifdef WITH_RUST
2713 #ifdef WITH_RUST
2705
2714
2706 /* rustlazyancestors: iteration over ancestors implemented in Rust
2715 /* rustlazyancestors: iteration over ancestors implemented in Rust
2707 *
2716 *
2708 * This class holds a reference to an index and to the Rust iterator.
2717 * This class holds a reference to an index and to the Rust iterator.
2709 */
2718 */
2710 typedef struct rustlazyancestorsObjectStruct rustlazyancestorsObject;
2719 typedef struct rustlazyancestorsObjectStruct rustlazyancestorsObject;
2711
2720
2712 struct rustlazyancestorsObjectStruct {
2721 struct rustlazyancestorsObjectStruct {
2713 PyObject_HEAD
2722 PyObject_HEAD
2714 /* Type-specific fields go here. */
2723 /* Type-specific fields go here. */
2715 indexObject *index; /* Ref kept to avoid GC'ing the index */
2724 indexObject *index; /* Ref kept to avoid GC'ing the index */
2716 void *iter; /* Rust iterator */
2725 void *iter; /* Rust iterator */
2717 };
2726 };
2718
2727
2719 /* FFI exposed from Rust code */
2728 /* FFI exposed from Rust code */
2720 rustlazyancestorsObject *rustlazyancestors_init(indexObject *index,
2729 rustlazyancestorsObject *rustlazyancestors_init(indexObject *index,
2721 /* initrevs vector */
2730 /* initrevs vector */
2722 Py_ssize_t initrevslen,
2731 Py_ssize_t initrevslen,
2723 long *initrevs, long stoprev,
2732 long *initrevs, long stoprev,
2724 int inclusive);
2733 int inclusive);
2725 void rustlazyancestors_drop(rustlazyancestorsObject *self);
2734 void rustlazyancestors_drop(rustlazyancestorsObject *self);
2726 int rustlazyancestors_next(rustlazyancestorsObject *self);
2735 int rustlazyancestors_next(rustlazyancestorsObject *self);
2727 int rustlazyancestors_contains(rustlazyancestorsObject *self, long rev);
2736 int rustlazyancestors_contains(rustlazyancestorsObject *self, long rev);
2728
2737
2729 /* CPython instance methods */
2738 /* CPython instance methods */
2730 static int rustla_init(rustlazyancestorsObject *self, PyObject *args)
2739 static int rustla_init(rustlazyancestorsObject *self, PyObject *args)
2731 {
2740 {
2732 PyObject *initrevsarg = NULL;
2741 PyObject *initrevsarg = NULL;
2733 PyObject *inclusivearg = NULL;
2742 PyObject *inclusivearg = NULL;
2734 long stoprev = 0;
2743 long stoprev = 0;
2735 long *initrevs = NULL;
2744 long *initrevs = NULL;
2736 int inclusive = 0;
2745 int inclusive = 0;
2737 Py_ssize_t i;
2746 Py_ssize_t i;
2738
2747
2739 indexObject *index;
2748 indexObject *index;
2740 if (!PyArg_ParseTuple(args, "O!O!lO!", &HgRevlogIndex_Type, &index,
2749 if (!PyArg_ParseTuple(args, "O!O!lO!", &HgRevlogIndex_Type, &index,
2741 &PyList_Type, &initrevsarg, &stoprev,
2750 &PyList_Type, &initrevsarg, &stoprev,
2742 &PyBool_Type, &inclusivearg))
2751 &PyBool_Type, &inclusivearg))
2743 return -1;
2752 return -1;
2744
2753
2745 Py_INCREF(index);
2754 Py_INCREF(index);
2746 self->index = index;
2755 self->index = index;
2747
2756
2748 if (inclusivearg == Py_True)
2757 if (inclusivearg == Py_True)
2749 inclusive = 1;
2758 inclusive = 1;
2750
2759
2751 Py_ssize_t linit = PyList_GET_SIZE(initrevsarg);
2760 Py_ssize_t linit = PyList_GET_SIZE(initrevsarg);
2752
2761
2753 initrevs = (long *)calloc(linit, sizeof(long));
2762 initrevs = (long *)calloc(linit, sizeof(long));
2754
2763
2755 if (initrevs == NULL) {
2764 if (initrevs == NULL) {
2756 PyErr_NoMemory();
2765 PyErr_NoMemory();
2757 goto bail;
2766 goto bail;
2758 }
2767 }
2759
2768
2760 for (i = 0; i < linit; i++) {
2769 for (i = 0; i < linit; i++) {
2761 initrevs[i] = PyInt_AsLong(PyList_GET_ITEM(initrevsarg, i));
2770 initrevs[i] = PyInt_AsLong(PyList_GET_ITEM(initrevsarg, i));
2762 }
2771 }
2763 if (PyErr_Occurred())
2772 if (PyErr_Occurred())
2764 goto bail;
2773 goto bail;
2765
2774
2766 self->iter =
2775 self->iter =
2767 rustlazyancestors_init(index, linit, initrevs, stoprev, inclusive);
2776 rustlazyancestors_init(index, linit, initrevs, stoprev, inclusive);
2768 if (self->iter == NULL) {
2777 if (self->iter == NULL) {
2769 /* if this is because of GraphError::ParentOutOfRange
2778 /* if this is because of GraphError::ParentOutOfRange
2770 * HgRevlogIndex_GetParents() has already set the proper
2779 * HgRevlogIndex_GetParents() has already set the proper
2771 * exception */
2780 * exception */
2772 goto bail;
2781 goto bail;
2773 }
2782 }
2774
2783
2775 free(initrevs);
2784 free(initrevs);
2776 return 0;
2785 return 0;
2777
2786
2778 bail:
2787 bail:
2779 free(initrevs);
2788 free(initrevs);
2780 return -1;
2789 return -1;
2781 };
2790 };
2782
2791
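The constructor just parsed takes the index, a list of starting revs, a stop rev and an inclusive flag (the "O!O!lO!" format above); iteration and membership come from the slots defined further down. A hedged sketch of exercising it when the module is built WITH_RUST:

    from mercurial.cext import parsers

    def rust_ancestors(index, startrevs, stoprev=0, inclusive=False):
        it = parsers.rustlazyancestors(index, list(startrevs), stoprev,
                                       inclusive)
        revs = []
        while True:
            try:
                revs.append(next(it))   # tp_iternext ends via StopIteration
            except StopIteration:
                break
        return revs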
2783 static void rustla_dealloc(rustlazyancestorsObject *self)
2792 static void rustla_dealloc(rustlazyancestorsObject *self)
2784 {
2793 {
2785 Py_XDECREF(self->index);
2794 Py_XDECREF(self->index);
2786 if (self->iter != NULL) { /* can happen if rustla_init failed */
2795 if (self->iter != NULL) { /* can happen if rustla_init failed */
2787 rustlazyancestors_drop(self->iter);
2796 rustlazyancestors_drop(self->iter);
2788 }
2797 }
2789 PyObject_Del(self);
2798 PyObject_Del(self);
2790 }
2799 }
2791
2800
2792 static PyObject *rustla_next(rustlazyancestorsObject *self)
2801 static PyObject *rustla_next(rustlazyancestorsObject *self)
2793 {
2802 {
2794 int res = rustlazyancestors_next(self->iter);
2803 int res = rustlazyancestors_next(self->iter);
2795 if (res == -1) {
2804 if (res == -1) {
2796 /* Setting an explicit exception seems unnecessary
2805 /* Setting an explicit exception seems unnecessary
2797 * as examples from Python source code (Objects/rangeobject.c
2806 * as examples from Python source code (Objects/rangeobject.c
2798 * and Modules/_io/stringio.c) seem to demonstrate.
2807 * and Modules/_io/stringio.c) seem to demonstrate.
2799 */
2808 */
2800 return NULL;
2809 return NULL;
2801 }
2810 }
2802 return PyInt_FromLong(res);
2811 return PyInt_FromLong(res);
2803 }
2812 }
2804
2813
2805 static int rustla_contains(rustlazyancestorsObject *self, PyObject *rev)
2814 static int rustla_contains(rustlazyancestorsObject *self, PyObject *rev)
2806 {
2815 {
2807 long lrev;
2816 long lrev;
2808 if (!pylong_to_long(rev, &lrev)) {
2817 if (!pylong_to_long(rev, &lrev)) {
2809 PyErr_Clear();
2818 PyErr_Clear();
2810 return 0;
2819 return 0;
2811 }
2820 }
2812 return rustlazyancestors_contains(self->iter, lrev);
2821 return rustlazyancestors_contains(self->iter, lrev);
2813 }
2822 }
2814
2823
2815 static PySequenceMethods rustla_sequence_methods = {
2824 static PySequenceMethods rustla_sequence_methods = {
2816 0, /* sq_length */
2825 0, /* sq_length */
2817 0, /* sq_concat */
2826 0, /* sq_concat */
2818 0, /* sq_repeat */
2827 0, /* sq_repeat */
2819 0, /* sq_item */
2828 0, /* sq_item */
2820 0, /* sq_slice */
2829 0, /* sq_slice */
2821 0, /* sq_ass_item */
2830 0, /* sq_ass_item */
2822 0, /* sq_ass_slice */
2831 0, /* sq_ass_slice */
2823 (objobjproc)rustla_contains, /* sq_contains */
2832 (objobjproc)rustla_contains, /* sq_contains */
2824 };
2833 };
2825
2834
2826 static PyTypeObject rustlazyancestorsType = {
2835 static PyTypeObject rustlazyancestorsType = {
2827 PyVarObject_HEAD_INIT(NULL, 0) /* header */
2836 PyVarObject_HEAD_INIT(NULL, 0) /* header */
2828 "parsers.rustlazyancestors", /* tp_name */
2837 "parsers.rustlazyancestors", /* tp_name */
2829 sizeof(rustlazyancestorsObject), /* tp_basicsize */
2838 sizeof(rustlazyancestorsObject), /* tp_basicsize */
2830 0, /* tp_itemsize */
2839 0, /* tp_itemsize */
2831 (destructor)rustla_dealloc, /* tp_dealloc */
2840 (destructor)rustla_dealloc, /* tp_dealloc */
2832 0, /* tp_print */
2841 0, /* tp_print */
2833 0, /* tp_getattr */
2842 0, /* tp_getattr */
2834 0, /* tp_setattr */
2843 0, /* tp_setattr */
2835 0, /* tp_compare */
2844 0, /* tp_compare */
2836 0, /* tp_repr */
2845 0, /* tp_repr */
2837 0, /* tp_as_number */
2846 0, /* tp_as_number */
2838 &rustla_sequence_methods, /* tp_as_sequence */
2847 &rustla_sequence_methods, /* tp_as_sequence */
2839 0, /* tp_as_mapping */
2848 0, /* tp_as_mapping */
2840 0, /* tp_hash */
2849 0, /* tp_hash */
2841 0, /* tp_call */
2850 0, /* tp_call */
2842 0, /* tp_str */
2851 0, /* tp_str */
2843 0, /* tp_getattro */
2852 0, /* tp_getattro */
2844 0, /* tp_setattro */
2853 0, /* tp_setattro */
2845 0, /* tp_as_buffer */
2854 0, /* tp_as_buffer */
2846 Py_TPFLAGS_DEFAULT, /* tp_flags */
2855 Py_TPFLAGS_DEFAULT, /* tp_flags */
2847 "Iterator over ancestors, implemented in Rust", /* tp_doc */
2856 "Iterator over ancestors, implemented in Rust", /* tp_doc */
2848 0, /* tp_traverse */
2857 0, /* tp_traverse */
2849 0, /* tp_clear */
2858 0, /* tp_clear */
2850 0, /* tp_richcompare */
2859 0, /* tp_richcompare */
2851 0, /* tp_weaklistoffset */
2860 0, /* tp_weaklistoffset */
2852 0, /* tp_iter */
2861 0, /* tp_iter */
2853 (iternextfunc)rustla_next, /* tp_iternext */
2862 (iternextfunc)rustla_next, /* tp_iternext */
2854 0, /* tp_methods */
2863 0, /* tp_methods */
2855 0, /* tp_members */
2864 0, /* tp_members */
2856 0, /* tp_getset */
2865 0, /* tp_getset */
2857 0, /* tp_base */
2866 0, /* tp_base */
2858 0, /* tp_dict */
2867 0, /* tp_dict */
2859 0, /* tp_descr_get */
2868 0, /* tp_descr_get */
2860 0, /* tp_descr_set */
2869 0, /* tp_descr_set */
2861 0, /* tp_dictoffset */
2870 0, /* tp_dictoffset */
2862 (initproc)rustla_init, /* tp_init */
2871 (initproc)rustla_init, /* tp_init */
2863 0, /* tp_alloc */
2872 0, /* tp_alloc */
2864 };
2873 };
2865 #endif /* WITH_RUST */
2874 #endif /* WITH_RUST */
2866
2875
2867 void revlog_module_init(PyObject *mod)
2876 void revlog_module_init(PyObject *mod)
2868 {
2877 {
2869 PyObject *caps = NULL;
2878 PyObject *caps = NULL;
2870 HgRevlogIndex_Type.tp_new = PyType_GenericNew;
2879 HgRevlogIndex_Type.tp_new = PyType_GenericNew;
2871 if (PyType_Ready(&HgRevlogIndex_Type) < 0)
2880 if (PyType_Ready(&HgRevlogIndex_Type) < 0)
2872 return;
2881 return;
2873 Py_INCREF(&HgRevlogIndex_Type);
2882 Py_INCREF(&HgRevlogIndex_Type);
2874 PyModule_AddObject(mod, "index", (PyObject *)&HgRevlogIndex_Type);
2883 PyModule_AddObject(mod, "index", (PyObject *)&HgRevlogIndex_Type);
2875
2884
2876 nodetreeType.tp_new = PyType_GenericNew;
2885 nodetreeType.tp_new = PyType_GenericNew;
2877 if (PyType_Ready(&nodetreeType) < 0)
2886 if (PyType_Ready(&nodetreeType) < 0)
2878 return;
2887 return;
2879 Py_INCREF(&nodetreeType);
2888 Py_INCREF(&nodetreeType);
2880 PyModule_AddObject(mod, "nodetree", (PyObject *)&nodetreeType);
2889 PyModule_AddObject(mod, "nodetree", (PyObject *)&nodetreeType);
2881
2890
2882 if (!nullentry) {
2891 if (!nullentry) {
2883 nullentry = Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0,
2892 nullentry = Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0,
2884 0, -1, -1, -1, -1, nullid, 20);
2893 0, -1, -1, -1, -1, nullid, 20);
2885 }
2894 }
2886 if (nullentry)
2895 if (nullentry)
2887 PyObject_GC_UnTrack(nullentry);
2896 PyObject_GC_UnTrack(nullentry);
2888
2897
2889 caps = PyCapsule_New(HgRevlogIndex_GetParents,
2898 caps = PyCapsule_New(HgRevlogIndex_GetParents,
2890 "mercurial.cext.parsers.index_get_parents_CAPI",
2899 "mercurial.cext.parsers.index_get_parents_CAPI",
2891 NULL);
2900 NULL);
2892 if (caps != NULL)
2901 if (caps != NULL)
2893 PyModule_AddObject(mod, "index_get_parents_CAPI", caps);
2902 PyModule_AddObject(mod, "index_get_parents_CAPI", caps);
2894
2903
2895 #ifdef WITH_RUST
2904 #ifdef WITH_RUST
2896 rustlazyancestorsType.tp_new = PyType_GenericNew;
2905 rustlazyancestorsType.tp_new = PyType_GenericNew;
2897 if (PyType_Ready(&rustlazyancestorsType) < 0)
2906 if (PyType_Ready(&rustlazyancestorsType) < 0)
2898 return;
2907 return;
2899 Py_INCREF(&rustlazyancestorsType);
2908 Py_INCREF(&rustlazyancestorsType);
2900 PyModule_AddObject(mod, "rustlazyancestors",
2909 PyModule_AddObject(mod, "rustlazyancestors",
2901 (PyObject *)&rustlazyancestorsType);
2910 (PyObject *)&rustlazyancestorsType);
2902 #endif
2911 #endif
2903 }
2912 }
@@ -1,77 +1,98 b''
1 #require vcr
1 #require vcr
2 $ cat >> $HGRCPATH <<EOF
2 $ cat >> $HGRCPATH <<EOF
3 > [extensions]
3 > [extensions]
4 > phabricator =
4 > phabricator =
5 > EOF
5 > EOF
6 $ hg init repo
6 $ hg init repo
7 $ cd repo
7 $ cd repo
8 $ cat >> .hg/hgrc <<EOF
8 $ cat >> .hg/hgrc <<EOF
9 > [phabricator]
9 > [phabricator]
10 > url = https://phab.mercurial-scm.org/
10 > url = https://phab.mercurial-scm.org/
11 > callsign = HG
11 > callsign = HG
12 >
12 >
13 > [auth]
13 > [auth]
14 > hgphab.schemes = https
14 > hgphab.schemes = https
15 > hgphab.prefix = phab.mercurial-scm.org
15 > hgphab.prefix = phab.mercurial-scm.org
16 > # When working on the extension and making phabricator interaction
16 > # When working on the extension and making phabricator interaction
17 > # changes, edit this to be a real phabricator token. When done, edit
17 > # changes, edit this to be a real phabricator token. When done, edit
18 > # it back, and make sure to also edit your VCR transcripts to match
18 > # it back, and make sure to also edit your VCR transcripts to match
19 > # whatever value you put here.
19 > # whatever value you put here.
20 > hgphab.phabtoken = cli-hahayouwish
20 > hgphab.phabtoken = cli-hahayouwish
21 > EOF
21 > EOF
22 $ VCR="$TESTDIR/phabricator"
22 $ VCR="$TESTDIR/phabricator"
23
23
24 Error is handled reasonably. We override the phabtoken here so that
24 Error is handled reasonably. We override the phabtoken here so that
25 when you're developing changes to phabricator.py you can edit the
25 when you're developing changes to phabricator.py you can edit the
26 above config and have a real token in the test but not have to edit
26 above config and have a real token in the test but not have to edit
27 this test.
27 this test.
28 $ hg phabread --config auth.hgphab.phabtoken=cli-notavalidtoken \
28 $ hg phabread --config auth.hgphab.phabtoken=cli-notavalidtoken \
29 > --test-vcr "$VCR/phabread-conduit-error.json" D4480 | head
29 > --test-vcr "$VCR/phabread-conduit-error.json" D4480 | head
30 abort: Conduit Error (ERR-INVALID-AUTH): API token "cli-notavalidtoken" has the wrong length. API tokens should be 32 characters long.
30 abort: Conduit Error (ERR-INVALID-AUTH): API token "cli-notavalidtoken" has the wrong length. API tokens should be 32 characters long.
31
31
32 Basic phabread:
32 Basic phabread:
33 $ hg phabread --test-vcr "$VCR/phabread-4480.json" D4480 | head
33 $ hg phabread --test-vcr "$VCR/phabread-4480.json" D4480 | head
34 # HG changeset patch
34 # HG changeset patch
35 exchangev2: start to implement pull with wire protocol v2
35 exchangev2: start to implement pull with wire protocol v2
36
36
37 Wire protocol version 2 will take a substantially different
37 Wire protocol version 2 will take a substantially different
38 approach to exchange than version 1 (at least as far as pulling
38 approach to exchange than version 1 (at least as far as pulling
39 is concerned).
39 is concerned).
40
40
41 This commit establishes a new exchangev2 module for holding
41 This commit establishes a new exchangev2 module for holding
42 code related to exchange using wire protocol v2. I could have
42 code related to exchange using wire protocol v2. I could have
43 added things to the existing exchange module. But it is already
43 added things to the existing exchange module. But it is already
44
44
45 phabupdate with an accept:
45 phabupdate with an accept:
46 $ hg phabupdate --accept D4564 \
46 $ hg phabupdate --accept D4564 \
47 > -m 'I think I like where this is headed. Will read rest of series later.'\
47 > -m 'I think I like where this is headed. Will read rest of series later.'\
48 > --test-vcr "$VCR/accept-4564.json"
48 > --test-vcr "$VCR/accept-4564.json"
49
49
50 Create a differential diff:
50 Create a differential diff:
51 $ echo alpha > alpha
51 $ echo alpha > alpha
52 $ hg ci --addremove -m 'create alpha for phabricator test'
52 $ hg ci --addremove -m 'create alpha for phabricator test'
53 adding alpha
53 adding alpha
54 $ hg phabsend -r . --test-vcr "$VCR/phabsend-create-alpha.json"
54 $ hg phabsend -r . --test-vcr "$VCR/phabsend-create-alpha.json"
55 D4596 - created - 5206a4fa1e6c: create alpha for phabricator test
55 D4596 - created - 5206a4fa1e6c: create alpha for phabricator test
56 saved backup bundle to $TESTTMP/repo/.hg/strip-backup/5206a4fa1e6c-dec9e777-phabsend.hg
56 saved backup bundle to $TESTTMP/repo/.hg/strip-backup/5206a4fa1e6c-dec9e777-phabsend.hg
57 $ echo more >> alpha
57 $ echo more >> alpha
58 $ HGEDITOR=true hg ci --amend
58 $ HGEDITOR=true hg ci --amend
59 saved backup bundle to $TESTTMP/repo/.hg/strip-backup/d8f232f7d799-c573510a-amend.hg
59 saved backup bundle to $TESTTMP/repo/.hg/strip-backup/d8f232f7d799-c573510a-amend.hg
60 $ echo beta > beta
60 $ echo beta > beta
61 $ hg ci --addremove -m 'create beta for phabricator test'
61 $ hg ci --addremove -m 'create beta for phabricator test'
62 adding beta
62 adding beta
63 $ hg phabsend -r ".^::" --test-vcr "$VCR/phabsend-update-alpha-create-beta.json"
63 $ hg phabsend -r ".^::" --test-vcr "$VCR/phabsend-update-alpha-create-beta.json"
64 D4596 - updated - f70265671c65: create alpha for phabricator test
64 D4596 - updated - f70265671c65: create alpha for phabricator test
65 D4597 - created - 1a5640df7bbf: create beta for phabricator test
65 D4597 - created - 1a5640df7bbf: create beta for phabricator test
66 saved backup bundle to $TESTTMP/repo/.hg/strip-backup/1a5640df7bbf-6daf3e6e-phabsend.hg
66 saved backup bundle to $TESTTMP/repo/.hg/strip-backup/1a5640df7bbf-6daf3e6e-phabsend.hg
67
67
68 $ hg debugcallconduit user.search --test-vcr "$VCR/phab-conduit.json" <<EOF
69 > {
70 > "constraints": {
71 > "isBot": true
72 > }
73 > }
74 > EOF
75 {
76 "cursor": {
77 "after": null,
78 "before": null,
79 "limit": 100,
80 "order": null
81 },
82 "data": [],
83 "maps": {},
84 "query": {
85 "queryKey": null
86 }
87 }
88
68 Template keywords
89 Template keywords
69 $ hg log -T'{rev} {phabreview|json}\n'
90 $ hg log -T'{rev} {phabreview|json}\n'
70 1 {"id": "D4597", "url": "https://phab.mercurial-scm.org/D4597"}
91 1 {"id": "D4597", "url": "https://phab.mercurial-scm.org/D4597"}
71 0 {"id": "D4596", "url": "https://phab.mercurial-scm.org/D4596"}
92 0 {"id": "D4596", "url": "https://phab.mercurial-scm.org/D4596"}
72
93
73 $ hg log -T'{rev} {phabreview.url} {phabreview.id}\n'
94 $ hg log -T'{rev} {phabreview.url} {phabreview.id}\n'
74 1 https://phab.mercurial-scm.org/D4597 D4597
95 1 https://phab.mercurial-scm.org/D4597 D4597
75 0 https://phab.mercurial-scm.org/D4596 D4596
96 0 https://phab.mercurial-scm.org/D4596 D4596
76
97
77 $ cd ..
98 $ cd ..