##// END OF EJS Templates
byteify-strings: fix misalignment with multi-line parenthesis...
Raphaël Gomès -
r42914:26a31c88 default
parent child Browse files
Show More
@@ -1,307 +1,311
1 #!/usr/bin/env python3
1 #!/usr/bin/env python3
2 #
2 #
3 # byteify-strings.py - transform string literals to be Python 3 safe
3 # byteify-strings.py - transform string literals to be Python 3 safe
4 #
4 #
5 # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
5 # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 from __future__ import absolute_import, print_function
10 from __future__ import absolute_import, print_function
11
11
12 import argparse
12 import argparse
13 import contextlib
13 import contextlib
14 import errno
14 import errno
15 import os
15 import os
16 import sys
16 import sys
17 import tempfile
17 import tempfile
18 import token
18 import token
19 import tokenize
19 import tokenize
20
20
def adjusttokenpos(t, ofs):
    """Return a copy of token *t* with both columns shifted by *ofs*.

    Only the column component of the (row, col) start/end pairs moves;
    the row numbers are preserved.
    """
    startrow, startcol = t.start
    endrow, endcol = t.end
    return t._replace(start=(startrow, startcol + ofs),
                      end=(endrow, endcol + ofs))
25
25
def replacetokens(tokens, opts):
    """Transform a stream of tokens from raw to Python 3.

    Returns a generator of possibly rewritten tokens.

    The input token list may be mutated as part of processing. However,
    its changes do not necessarily match the output token stream.
    """
    # Tokens recorded here are emitted WITHOUT the b'' prefix (they must
    # stay native str, e.g. attribute names or __name__ comparisons).
    sysstrtokens = set()

    # The following utility functions access the tokens list and i index of
    # the for i, t enumerate(tokens) loop below
    def _isop(j, *o):
        """Assert that tokens[j] is an OP with one of the given values"""
        try:
            return tokens[j].type == token.OP and tokens[j].string in o
        except IndexError:
            return False

    def _findargnofcall(n):
        """Find arg n of a call expression (start at 0)

        Returns index of the first token of that argument, or None if
        there is not that many arguments.

        Assumes that token[i + 1] is '('.

        """
        nested = 0
        for j in range(i + 2, len(tokens)):
            if _isop(j, ')', ']', '}'):
                # end of call, tuple, subscription or dict / set
                nested -= 1
                if nested < 0:
                    return None
            elif n == 0:
                # this is the starting position of arg
                return j
            elif _isop(j, '(', '[', '{'):
                nested += 1
            elif _isop(j, ',') and nested == 0:
                n -= 1

        return None

    def _ensuresysstr(j):
        """Make sure the token at j is a system string

        Remember the given token so the string transformer won't add
        the byte prefix.

        Ignores tokens that are not strings. Assumes bounds checking has
        already been done.

        """
        k = j
        currtoken = tokens[k]
        # Walk forward over a run of adjacent strings (implicit string
        # concatenation, possibly spread over lines) and mark them all.
        while currtoken.type in (token.STRING, token.NEWLINE, tokenize.NL):
            k += 1
            if (
                currtoken.type == token.STRING
                and currtoken.string.startswith(("'", '"'))
            ):
                sysstrtokens.add(currtoken)
            try:
                currtoken = tokens[k]
            except IndexError:
                break

    def _isitemaccess(j):
        """Assert the next tokens form an item access on `tokens[j]` and that
        `tokens[j]` is a name.
        """
        try:
            return (
                tokens[j].type == token.NAME
                and _isop(j + 1, '[')
                and tokens[j + 2].type == token.STRING
                and _isop(j + 3, ']')
            )
        except IndexError:
            return False

    def _ismethodcall(j, *methodnames):
        """Assert the next tokens form a call to `methodname` with a string
        as first argument on `tokens[j]` and that `tokens[j]` is a name.
        """
        try:
            return (
                tokens[j].type == token.NAME
                and _isop(j + 1, '.')
                and tokens[j + 2].type == token.NAME
                and tokens[j + 2].string in methodnames
                and _isop(j + 3, '(')
                and tokens[j + 4].type == token.STRING
            )
        except IndexError:
            return False

    coldelta = 0  # column increment for new opening parens
    coloffset = -1  # column offset for the current line (-1: TBD)
    parens = [(0, 0, 0, -1)]  # stack of (line, end-column, column-offset, type)
    ignorenextline = False  # don't transform the next line
    insideignoreblock = False  # don't transform until turned off
    for i, t in enumerate(tokens):
        # Compute the column offset for the current line, such that
        # the current line will be aligned to the last opening paren
        # as before.
        if coloffset < 0:
            lastparen = parens[-1]
            if t.start[1] == lastparen[1]:
                coloffset = lastparen[2]
            elif (
                t.start[1] + 1 == lastparen[1]
                # Don't realign if the paren was immediately followed by a
                # newline (a multi-line bracket block): the next line's
                # indent is free-standing, not paren-aligned.
                and lastparen[3] not in (token.NEWLINE, tokenize.NL)
            ):
                # fix misaligned indent of s/util.Abort/error.Abort/
                coloffset = lastparen[2] + (lastparen[1] - t.start[1])
            else:
                coloffset = 0

        # Reset per-line attributes at EOL.
        if t.type in (token.NEWLINE, tokenize.NL):
            yield adjusttokenpos(t, coloffset)
            coldelta = 0
            coloffset = -1
            if not insideignoreblock:
                ignorenextline = (
                    tokens[i - 1].type == token.COMMENT
                    and tokens[i - 1].string == "#no-py3-transform"
                )
            continue

        if t.type == token.COMMENT:
            if t.string == "#py3-transform: off":
                insideignoreblock = True
            if t.string == "#py3-transform: on":
                insideignoreblock = False

        if ignorenextline or insideignoreblock:
            yield adjusttokenpos(t, coloffset)
            continue

        # Remember the last paren position.
        if _isop(i, '(', '[', '{'):
            # Record the type of the token that follows the open bracket so
            # the alignment logic above can tell "call(" from "bracket+NL".
            parens.append(t.end + (coloffset + coldelta, tokens[i + 1].type))
        elif _isop(i, ')', ']', '}'):
            parens.pop()

        # Convert most string literals to byte literals. String literals
        # in Python 2 are bytes. String literals in Python 3 are unicode.
        # Most strings in Mercurial are bytes and unicode strings are rare.
        # Rather than rewrite all string literals to use ``b''`` to indicate
        # byte strings, we apply this token transformer to insert the ``b``
        # prefix nearly everywhere.
        if t.type == token.STRING and t not in sysstrtokens:
            s = t.string

            # Preserve docstrings as string literals. This is inconsistent
            # with regular unprefixed strings. However, the
            # "from __future__" parsing (which allows a module docstring to
            # exist before it) doesn't properly handle the docstring if it
            # is b''' prefixed, leading to a SyntaxError. We leave all
            # docstrings as unprefixed to avoid this. This means Mercurial
            # components touching docstrings need to handle unicode,
            # unfortunately.
            if s[0:3] in ("'''", '"""'):
                # If it's assigned to something, it's not a docstring
                if not _isop(i - 1, '='):
                    yield adjusttokenpos(t, coloffset)
                    continue

            # If the first character isn't a quote, it is likely a string
            # prefixing character (such as 'b', 'u', or 'r'. Ignore.
            if s[0] not in ("'", '"'):
                yield adjusttokenpos(t, coloffset)
                continue

            # String literal. Prefix to make a b'' string.
            yield adjusttokenpos(t._replace(string='b%s' % t.string),
                                 coloffset)
            coldelta += 1
            continue

        # This looks like a function call.
        if t.type == token.NAME and _isop(i + 1, '('):
            fn = t.string

            # *attr() builtins don't accept byte strings to 2nd argument.
            if fn in (
                'getattr', 'setattr', 'hasattr', 'safehasattr', 'wrapfunction',
                'wrapclass', 'addattr'
            ) and (opts['allow-attr-methods'] or not _isop(i - 1, '.')):
                arg1idx = _findargnofcall(1)
                if arg1idx is not None:
                    _ensuresysstr(arg1idx)

            # .encode() and .decode() on str/bytes/unicode don't accept
            # byte strings on Python 3.
            elif fn in ('encode', 'decode') and _isop(i - 1, '.'):
                for argn in range(2):
                    argidx = _findargnofcall(argn)
                    if argidx is not None:
                        _ensuresysstr(argidx)

            # It changes iteritems/values to items/values as they are not
            # present in Python 3 world.
            elif opts['dictiter'] and fn in ('iteritems', 'itervalues'):
                yield adjusttokenpos(t._replace(string=fn[4:]), coloffset)
                continue

        if t.type == token.NAME and t.string in opts['treat-as-kwargs']:
            if _isitemaccess(i):
                _ensuresysstr(i + 2)
            if _ismethodcall(i, 'get', 'pop', 'setdefault', 'popitem'):
                _ensuresysstr(i + 4)

        # Looks like "if __name__ == '__main__'".
        if (t.type == token.NAME and t.string == '__name__'
            and _isop(i + 1, '==')):
            _ensuresysstr(i + 2)

        # Emit unmodified token.
        yield adjusttokenpos(t, coloffset)
246
250
def process(fin, fout, opts):
    """Tokenize *fin*, byteify the token stream, and write it to *fout*.

    *fin* must be a binary file-like with a ``readline`` method; *fout*
    receives the untokenized (bytes) result of the transformation.
    """
    intokens = list(tokenize.tokenize(fin.readline))
    fout.write(tokenize.untokenize(replacetokens(intokens, opts)))
251
255
def tryunlink(fname):
    """Delete *fname*; a file that is already gone is not an error."""
    try:
        os.unlink(fname)
    except OSError as err:
        # ENOENT means the file did not exist, which is the one failure
        # we deliberately tolerate; anything else propagates.
        if err.errno == errno.ENOENT:
            return
        raise
258
262
@contextlib.contextmanager
def editinplace(fname):
    """Yield a temporary file whose contents replace *fname* on success.

    The temporary file is created next to *fname* (same directory) so the
    final ``os.rename`` does not cross filesystems.  If the managed block
    raises, *fname* is left untouched and the temp file is cleaned up.
    """
    dirname, basename = os.path.split(fname)
    fp = tempfile.NamedTemporaryFile(prefix='.%s-' % basename, suffix='~',
                                     dir=dirname, delete=False)
    try:
        yield fp
        fp.close()
        if os.name == 'nt':
            # Windows cannot rename onto an existing file; drop it first.
            tryunlink(fname)
        os.rename(fp.name, fname)
    finally:
        fp.close()
        # No-op after a successful rename; removes the temp file on error.
        tryunlink(fp.name)
274
278
def main():
    """Command-line entry point: byteify each FILE in place or to stdout."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--inplace', action='store_true', default=False,
                        help='edit files in place')
    parser.add_argument('--dictiter', action='store_true', default=False,
                        help='rewrite iteritems() and itervalues()')
    parser.add_argument('--allow-attr-methods', action='store_true',
                        default=False,
                        help='also handle attr*() when they are methods')
    parser.add_argument('--treat-as-kwargs', nargs="+", default=[],
                        help="ignore kwargs-like objects")
    parser.add_argument('files', metavar='FILE', nargs='+', help='source file')
    ns = parser.parse_args()
    # Bundle the transformation switches for replacetokens().
    opts = {
        'dictiter': ns.dictiter,
        'treat-as-kwargs': set(ns.treat_as_kwargs),
        'allow-attr-methods': ns.allow_attr_methods,
    }
    for fname in ns.files:
        if ns.inplace:
            # Rewrite through a temp file that replaces the original.
            with editinplace(fname) as fout, open(fname, 'rb') as fin:
                process(fin, fout, opts)
        else:
            with open(fname, 'rb') as fin:
                process(fin, sys.stdout.buffer, opts)
302
306
if __name__ == '__main__':
    # This script manipulates Python 3 token streams and must not be run
    # under Python 2; bail out with a distinct exit code.
    if sys.version_info.major < 3:
        print('This script must be run under Python 3.')
        sys.exit(3)
    main()
@@ -1,217 +1,261
1 #require py3
1 #require py3
2
2
3 $ byteify_strings () {
3 $ byteify_strings () {
4 > $PYTHON "$TESTDIR/../contrib/byteify-strings.py" "$@"
4 > $PYTHON "$TESTDIR/../contrib/byteify-strings.py" "$@"
5 > }
5 > }
6
6
7 Test in-place
7 Test in-place
8
8
9 $ cat > testfile.py <<EOF
9 $ cat > testfile.py <<EOF
10 > obj['test'] = b"1234"
10 > obj['test'] = b"1234"
11 > mydict.iteritems()
11 > mydict.iteritems()
12 > EOF
12 > EOF
13 $ byteify_strings testfile.py -i
13 $ byteify_strings testfile.py -i
14 $ cat testfile.py
14 $ cat testfile.py
15 obj[b'test'] = b"1234"
15 obj[b'test'] = b"1234"
16 mydict.iteritems()
16 mydict.iteritems()
17
17
18 Test with dictiter
18 Test with dictiter
19
19
20 $ cat > testfile.py <<EOF
20 $ cat > testfile.py <<EOF
21 > obj['test'] = b"1234"
21 > obj['test'] = b"1234"
22 > mydict.iteritems()
22 > mydict.iteritems()
23 > EOF
23 > EOF
24 $ byteify_strings testfile.py --dictiter
24 $ byteify_strings testfile.py --dictiter
25 obj[b'test'] = b"1234"
25 obj[b'test'] = b"1234"
26 mydict.items()
26 mydict.items()
27
27
28 Test kwargs-like objects
28 Test kwargs-like objects
29
29
30 $ cat > testfile.py <<EOF
30 $ cat > testfile.py <<EOF
31 > kwargs['test'] = "123"
31 > kwargs['test'] = "123"
32 > kwargs[test['testing']]
32 > kwargs[test['testing']]
33 > kwargs[test[[['testing']]]]
33 > kwargs[test[[['testing']]]]
34 > kwargs[kwargs['testing']]
34 > kwargs[kwargs['testing']]
35 > kwargs.get('test')
35 > kwargs.get('test')
36 > kwargs.pop('test')
36 > kwargs.pop('test')
37 > kwargs.get('test', 'testing')
37 > kwargs.get('test', 'testing')
38 > kwargs.pop('test', 'testing')
38 > kwargs.pop('test', 'testing')
39 > kwargs.setdefault('test', 'testing')
39 > kwargs.setdefault('test', 'testing')
40 >
40 >
41 > opts['test'] = "123"
41 > opts['test'] = "123"
42 > opts[test['testing']]
42 > opts[test['testing']]
43 > opts[test[[['testing']]]]
43 > opts[test[[['testing']]]]
44 > opts[opts['testing']]
44 > opts[opts['testing']]
45 > opts.get('test')
45 > opts.get('test')
46 > opts.pop('test')
46 > opts.pop('test')
47 > opts.get('test', 'testing')
47 > opts.get('test', 'testing')
48 > opts.pop('test', 'testing')
48 > opts.pop('test', 'testing')
49 > opts.setdefault('test', 'testing')
49 > opts.setdefault('test', 'testing')
50 >
50 >
51 > commitopts['test'] = "123"
51 > commitopts['test'] = "123"
52 > commitopts[test['testing']]
52 > commitopts[test['testing']]
53 > commitopts[test[[['testing']]]]
53 > commitopts[test[[['testing']]]]
54 > commitopts[commitopts['testing']]
54 > commitopts[commitopts['testing']]
55 > commitopts.get('test')
55 > commitopts.get('test')
56 > commitopts.pop('test')
56 > commitopts.pop('test')
57 > commitopts.get('test', 'testing')
57 > commitopts.get('test', 'testing')
58 > commitopts.pop('test', 'testing')
58 > commitopts.pop('test', 'testing')
59 > commitopts.setdefault('test', 'testing')
59 > commitopts.setdefault('test', 'testing')
60 > EOF
60 > EOF
61 $ byteify_strings testfile.py --treat-as-kwargs kwargs opts commitopts
61 $ byteify_strings testfile.py --treat-as-kwargs kwargs opts commitopts
62 kwargs['test'] = b"123"
62 kwargs['test'] = b"123"
63 kwargs[test[b'testing']]
63 kwargs[test[b'testing']]
64 kwargs[test[[[b'testing']]]]
64 kwargs[test[[[b'testing']]]]
65 kwargs[kwargs['testing']]
65 kwargs[kwargs['testing']]
66 kwargs.get('test')
66 kwargs.get('test')
67 kwargs.pop('test')
67 kwargs.pop('test')
68 kwargs.get('test', b'testing')
68 kwargs.get('test', b'testing')
69 kwargs.pop('test', b'testing')
69 kwargs.pop('test', b'testing')
70 kwargs.setdefault('test', b'testing')
70 kwargs.setdefault('test', b'testing')
71
71
72 opts['test'] = b"123"
72 opts['test'] = b"123"
73 opts[test[b'testing']]
73 opts[test[b'testing']]
74 opts[test[[[b'testing']]]]
74 opts[test[[[b'testing']]]]
75 opts[opts['testing']]
75 opts[opts['testing']]
76 opts.get('test')
76 opts.get('test')
77 opts.pop('test')
77 opts.pop('test')
78 opts.get('test', b'testing')
78 opts.get('test', b'testing')
79 opts.pop('test', b'testing')
79 opts.pop('test', b'testing')
80 opts.setdefault('test', b'testing')
80 opts.setdefault('test', b'testing')
81
81
82 commitopts['test'] = b"123"
82 commitopts['test'] = b"123"
83 commitopts[test[b'testing']]
83 commitopts[test[b'testing']]
84 commitopts[test[[[b'testing']]]]
84 commitopts[test[[[b'testing']]]]
85 commitopts[commitopts['testing']]
85 commitopts[commitopts['testing']]
86 commitopts.get('test')
86 commitopts.get('test')
87 commitopts.pop('test')
87 commitopts.pop('test')
88 commitopts.get('test', b'testing')
88 commitopts.get('test', b'testing')
89 commitopts.pop('test', b'testing')
89 commitopts.pop('test', b'testing')
90 commitopts.setdefault('test', b'testing')
90 commitopts.setdefault('test', b'testing')
91
91
92 Test attr*() as methods
92 Test attr*() as methods
93
93
94 $ cat > testfile.py <<EOF
94 $ cat > testfile.py <<EOF
95 > setattr(o, 'a', 1)
95 > setattr(o, 'a', 1)
96 > util.setattr(o, 'ae', 1)
96 > util.setattr(o, 'ae', 1)
97 > util.getattr(o, 'alksjdf', 'default')
97 > util.getattr(o, 'alksjdf', 'default')
98 > util.addattr(o, 'asdf')
98 > util.addattr(o, 'asdf')
99 > util.hasattr(o, 'lksjdf', 'default')
99 > util.hasattr(o, 'lksjdf', 'default')
100 > util.safehasattr(o, 'lksjdf', 'default')
100 > util.safehasattr(o, 'lksjdf', 'default')
101 > @eh.wrapfunction(func, 'lksjdf')
101 > @eh.wrapfunction(func, 'lksjdf')
102 > def f():
102 > def f():
103 > pass
103 > pass
104 > @eh.wrapclass(klass, 'lksjdf')
104 > @eh.wrapclass(klass, 'lksjdf')
105 > def f():
105 > def f():
106 > pass
106 > pass
107 > EOF
107 > EOF
108 $ byteify_strings testfile.py --allow-attr-methods
108 $ byteify_strings testfile.py --allow-attr-methods
109 setattr(o, 'a', 1)
109 setattr(o, 'a', 1)
110 util.setattr(o, 'ae', 1)
110 util.setattr(o, 'ae', 1)
111 util.getattr(o, 'alksjdf', b'default')
111 util.getattr(o, 'alksjdf', b'default')
112 util.addattr(o, 'asdf')
112 util.addattr(o, 'asdf')
113 util.hasattr(o, 'lksjdf', b'default')
113 util.hasattr(o, 'lksjdf', b'default')
114 util.safehasattr(o, 'lksjdf', b'default')
114 util.safehasattr(o, 'lksjdf', b'default')
115 @eh.wrapfunction(func, 'lksjdf')
115 @eh.wrapfunction(func, 'lksjdf')
116 def f():
116 def f():
117 pass
117 pass
118 @eh.wrapclass(klass, 'lksjdf')
118 @eh.wrapclass(klass, 'lksjdf')
119 def f():
119 def f():
120 pass
120 pass
121
121
122 Test without attr*() as methods
122 Test without attr*() as methods
123
123
124 $ cat > testfile.py <<EOF
124 $ cat > testfile.py <<EOF
125 > setattr(o, 'a', 1)
125 > setattr(o, 'a', 1)
126 > util.setattr(o, 'ae', 1)
126 > util.setattr(o, 'ae', 1)
127 > util.getattr(o, 'alksjdf', 'default')
127 > util.getattr(o, 'alksjdf', 'default')
128 > util.addattr(o, 'asdf')
128 > util.addattr(o, 'asdf')
129 > util.hasattr(o, 'lksjdf', 'default')
129 > util.hasattr(o, 'lksjdf', 'default')
130 > util.safehasattr(o, 'lksjdf', 'default')
130 > util.safehasattr(o, 'lksjdf', 'default')
131 > @eh.wrapfunction(func, 'lksjdf')
131 > @eh.wrapfunction(func, 'lksjdf')
132 > def f():
132 > def f():
133 > pass
133 > pass
134 > @eh.wrapclass(klass, 'lksjdf')
134 > @eh.wrapclass(klass, 'lksjdf')
135 > def f():
135 > def f():
136 > pass
136 > pass
137 > EOF
137 > EOF
138 $ byteify_strings testfile.py
138 $ byteify_strings testfile.py
139 setattr(o, 'a', 1)
139 setattr(o, 'a', 1)
140 util.setattr(o, b'ae', 1)
140 util.setattr(o, b'ae', 1)
141 util.getattr(o, b'alksjdf', b'default')
141 util.getattr(o, b'alksjdf', b'default')
142 util.addattr(o, b'asdf')
142 util.addattr(o, b'asdf')
143 util.hasattr(o, b'lksjdf', b'default')
143 util.hasattr(o, b'lksjdf', b'default')
144 util.safehasattr(o, b'lksjdf', b'default')
144 util.safehasattr(o, b'lksjdf', b'default')
145 @eh.wrapfunction(func, b'lksjdf')
145 @eh.wrapfunction(func, b'lksjdf')
146 def f():
146 def f():
147 pass
147 pass
148 @eh.wrapclass(klass, b'lksjdf')
148 @eh.wrapclass(klass, b'lksjdf')
149 def f():
149 def f():
150 pass
150 pass
151
151
152 Test ignore comments
152 Test ignore comments
153
153
154 $ cat > testfile.py <<EOF
154 $ cat > testfile.py <<EOF
155 > #py3-transform: off
155 > #py3-transform: off
156 > "none"
156 > "none"
157 > "of"
157 > "of"
158 > 'these'
158 > 'these'
159 > s = """should"""
159 > s = """should"""
160 > d = '''be'''
160 > d = '''be'''
161 > #py3-transform: on
161 > #py3-transform: on
162 > "this should"
162 > "this should"
163 > 'and this also'
163 > 'and this also'
164 >
164 >
165 > #no-py3-transform
165 > #no-py3-transform
166 > l = "this should be ignored"
166 > l = "this should be ignored"
167 > l2 = "this shouldn't"
167 > l2 = "this shouldn't"
168 >
168 >
169 > EOF
169 > EOF
170 $ byteify_strings testfile.py
170 $ byteify_strings testfile.py
171 #py3-transform: off
171 #py3-transform: off
172 "none"
172 "none"
173 "of"
173 "of"
174 'these'
174 'these'
175 s = """should"""
175 s = """should"""
176 d = '''be'''
176 d = '''be'''
177 #py3-transform: on
177 #py3-transform: on
178 b"this should"
178 b"this should"
179 b'and this also'
179 b'and this also'
180
180
181 #no-py3-transform
181 #no-py3-transform
182 l = "this should be ignored"
182 l = "this should be ignored"
183 l2 = b"this shouldn't"
183 l2 = b"this shouldn't"
184
184
185 Test triple-quoted strings
185 Test triple-quoted strings
186
186
187 $ cat > testfile.py <<EOF
187 $ cat > testfile.py <<EOF
188 > """This is ignored
188 > """This is ignored
189 > """
189 > """
190 >
190 >
191 > line = """
191 > line = """
192 > This should not be
192 > This should not be
193 > """
193 > """
194 > line = '''
194 > line = '''
195 > Neither should this
195 > Neither should this
196 > '''
196 > '''
197 > EOF
197 > EOF
198 $ byteify_strings testfile.py
198 $ byteify_strings testfile.py
199 """This is ignored
199 """This is ignored
200 """
200 """
201
201
202 line = b"""
202 line = b"""
203 This should not be
203 This should not be
204 """
204 """
205 line = b'''
205 line = b'''
206 Neither should this
206 Neither should this
207 '''
207 '''
208
208
209 Test prefixed strings
209 Test prefixed strings
210
210
211 $ cat > testfile.py <<EOF
211 $ cat > testfile.py <<EOF
212 > obj['test'] = b"1234"
212 > obj['test'] = b"1234"
213 > obj[r'test'] = u"1234"
213 > obj[r'test'] = u"1234"
214 > EOF
214 > EOF
215 $ byteify_strings testfile.py
215 $ byteify_strings testfile.py
216 obj[b'test'] = b"1234"
216 obj[b'test'] = b"1234"
217 obj[r'test'] = u"1234"
217 obj[r'test'] = u"1234"
218
219 Test multi-line alignment
220
221 $ cat > testfile.py <<'EOF'
222 > def foo():
223 > error.Abort(_("foo"
224 > "bar"
225 > "%s")
226 > % parameter)
227 > {
228 > 'test': dict,
229 > 'test2': dict,
230 > }
231 > [
232 > "thing",
233 > "thing2"
234 > ]
235 > (
236 > "tuple",
237 > "tuple2",
238 > )
239 > {"thing",
240 > }
241 > EOF
242 $ byteify_strings testfile.py
243 def foo():
244 error.Abort(_(b"foo"
245 b"bar"
246 b"%s")
247 % parameter)
248 {
249 b'test': dict,
250 b'test2': dict,
251 }
252 [
253 b"thing",
254 b"thing2"
255 ]
256 (
257 b"tuple",
258 b"tuple2",
259 )
260 {b"thing",
261 }
General Comments 0
You need to be logged in to leave comments. Login now