Add tests for things in utils
Thomas Kluyver
@@ -0,0 +1,10 b''
1 from IPython.utils import decorators
2
3 def test_flag_calls():
4 @decorators.flag_calls
5 def f():
6 pass
7
8 assert not f.called
9 f()
10 assert f.called No newline at end of file
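
The new test above pins down the contract of `decorators.flag_calls`: the wrapped function carries a `called` attribute that starts False and flips to True on the first invocation. A minimal sketch of a decorator satisfying those assertions (an illustration inferred from the test, not IPython's exact source)::

    from functools import wraps

    def flag_calls(func):
        """Wrap func so that wrapper.called records whether it has run."""
        @wraps(func)
        def wrapper(*args, **kwargs):
            wrapper.called = True
            return func(*args, **kwargs)
        wrapper.called = False
        return wrapper
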
@@ -0,0 +1,59 b''
1 import os
2 from unittest import TestCase
3
4 from IPython.testing.decorators import skip
5 from IPython.utils.tempdir import TemporaryDirectory
6 from IPython.utils.pickleshare import PickleShareDB
7
8
9 class PickleShareDBTestCase(TestCase):
10 def setUp(self):
11 self.tempdir = TemporaryDirectory()
12
13 def tearDown(self):
14 self.tempdir.cleanup()
15
16 def test_picklesharedb(self):
17 db = PickleShareDB(self.tempdir.name)
18 db.clear()
19 print("Should be empty:",db.items())
20 db['hello'] = 15
21 db['aku ankka'] = [1,2,313]
22 db['paths/nest/ok/keyname'] = [1,(5,46)]
23 db.hset('hash', 'aku', 12)
24 db.hset('hash', 'ankka', 313)
25 self.assertEqual(db.hget('hash','aku'), 12)
26 self.assertEqual(db.hget('hash','ankka'), 313)
27 print("all hashed",db.hdict('hash'))
28 print(db.keys())
29 print(db.keys('paths/nest/ok/k*'))
30 print(dict(db)) # snapshot of whole db
31 db.uncache() # frees memory, causes re-reads later
32
33 # shorthand for accessing deeply nested files
34 lnk = db.getlink('myobjects/test')
35 lnk.foo = 2
36 lnk.bar = lnk.foo + 5
37 self.assertEqual(lnk.bar, 7)
38
39 @skip("Too slow for regular running.")
40 def test_stress(self):
41 db = PickleShareDB('~/fsdbtest')
42 import time,sys
43 for i in range(1000):
44 for j in range(1000):
45 if i % 15 == 0 and i < 200:
46 if str(j) in db:
47 del db[str(j)]
48 continue
49
50 if j%33 == 0:
51 time.sleep(0.02)
52
53 db[str(j)] = db.get(str(j), []) + [(i,j,"proc %d" % os.getpid())]
54 db.hset('hash',j, db.hget('hash',j,15) + 1 )
55
56 print(i, end=' ')
57 sys.stdout.flush()
58 if i % 10 == 0:
59 db.uncache() No newline at end of file
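
Besides exercising the API, the test above doubles as a compact usage demo for PickleShareDB. A condensed sketch (this assumes IPython's TemporaryDirectory works as a context manager, like the Python 3.2 class it backports)::

    from IPython.utils.tempdir import TemporaryDirectory
    from IPython.utils.pickleshare import PickleShareDB

    with TemporaryDirectory() as tmpdir:
        db = PickleShareDB(tmpdir)            # the directory *is* the database
        db['paths/nest/ok/keyname'] = 42      # '/' in a key creates nested files
        db.hset('hash', 'aku', 12)            # small values bucketed into one file
        assert db.hget('hash', 'aku') == 12
        db.uncache()                          # drop the cache; next reads hit disk
        assert db['paths/nest/ok/keyname'] == 42
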
@@ -1,370 +1,325 b''
1 1 #!/usr/bin/env python
2 2
3 3 """ PickleShare - a small 'shelve' like datastore with concurrency support
4 4
5 5 Like shelve, a PickleShareDB object acts like a normal dictionary. Unlike
6 6 shelve, many processes can access the database simultaneously. Changing a
7 7 value in database is immediately visible to other processes accessing the
8 8 same database.
9 9
10 10 Concurrency is possible because the values are stored in separate files. Hence
11 11 the "database" is a directory where *all* files are governed by PickleShare.
12 12
13 13 Example usage::
14 14
15 15 from pickleshare import *
16 16 db = PickleShareDB('~/testpickleshare')
17 17 db.clear()
18 18 print("Should be empty:", db.items())
19 19 db['hello'] = 15
20 20 db['aku ankka'] = [1,2,313]
21 21 db['paths/are/ok/key'] = [1,(5,46)]
22 22 print(db.keys())
23 23 del db['aku ankka']
24 24
25 25 This module is certainly not ZODB, but can be used for low-load
26 26 (non-mission-critical) situations where tiny code size trumps the
27 27 advanced features of a "real" object database.
28 28
29 29 Installation guide: easy_install pickleshare
30 30
31 31 Author: Ville Vainio <vivainio@gmail.com>
32 32 License: MIT open source license.
33 33
34 34 """
35 35 from __future__ import print_function
36 36
37 37 from IPython.external.path import path as Path
38 38 import os,stat,time
39 39 import collections
40 40 try:
41 41 import cPickle as pickle
42 42 except ImportError:
43 43 import pickle
44 44 import glob
45 45
46 46 def gethashfile(key):
47 47 return ("%02x" % abs(hash(key) % 256))[-2:]
48 48
49 49 _sentinel = object()
50 50
51 51 class PickleShareDB(collections.MutableMapping):
52 52 """ The main 'connection' object for PickleShare database """
53 53 def __init__(self,root):
54 54 """ Return a db object that will manage the specified directory"""
55 55 self.root = Path(root).expanduser().abspath()
56 56 if not self.root.isdir():
57 57 self.root.makedirs()
58 58 # cache has { 'key' : (obj, orig_mod_time) }
59 59 self.cache = {}
60 60
61 61
62 62 def __getitem__(self,key):
63 63 """ db['key'] reading """
64 64 fil = self.root / key
65 65 try:
66 66 mtime = (fil.stat()[stat.ST_MTIME])
67 67 except OSError:
68 68 raise KeyError(key)
69 69
70 70 if fil in self.cache and mtime == self.cache[fil][1]:
71 71 return self.cache[fil][0]
72 72 try:
73 73 # The cached item has expired, need to read
74 74 with fil.open("rb") as f:
75 75 obj = pickle.loads(f.read())
76 76 except:
77 77 raise KeyError(key)
78 78
79 79 self.cache[fil] = (obj,mtime)
80 80 return obj
81 81
82 82 def __setitem__(self,key,value):
83 83 """ db['key'] = 5 """
84 84 fil = self.root / key
85 85 parent = fil.parent
86 86 if parent and not parent.isdir():
87 87 parent.makedirs()
88 88 # We specify protocol 2, so that we can mostly go between Python 2
89 89 # and Python 3. We can upgrade to protocol 3 when Python 2 is obsolete.
90 90 with fil.open('wb') as f:
91 91 pickle.dump(value, f, protocol=2)
92 92 try:
93 93 self.cache[fil] = (value,fil.mtime)
94 94 except OSError as e:
95 95 if e.errno != 2:
96 96 raise
97 97
98 98 def hset(self, hashroot, key, value):
99 99 """ hashed set """
100 100 hroot = self.root / hashroot
101 101 if not hroot.isdir():
102 102 hroot.makedirs()
103 103 hfile = hroot / gethashfile(key)
104 104 d = self.get(hfile, {})
105 105 d.update( {key : value})
106 106 self[hfile] = d
107 107
108 108
109 109
110 110 def hget(self, hashroot, key, default = _sentinel, fast_only = True):
111 111 """ hashed get """
112 112 hroot = self.root / hashroot
113 113 hfile = hroot / gethashfile(key)
114 114
115 115 d = self.get(hfile, _sentinel )
116 116 #print "got dict",d,"from",hfile
117 117 if d is _sentinel:
118 118 if fast_only:
119 119 if default is _sentinel:
120 120 raise KeyError(key)
121 121
122 122 return default
123 123
124 124 # slow mode ok, works even after hcompress()
125 125 d = self.hdict(hashroot)
126 126
127 127 return d.get(key, default)
128 128
129 129 def hdict(self, hashroot):
130 130 """ Get all data contained in hashed category 'hashroot' as dict """
131 131 hfiles = self.keys(hashroot + "/*")
132 132 hfiles.sort()
133 133 last = len(hfiles) and hfiles[-1] or ''
134 134 if last.endswith('xx'):
135 135 # print "using xx"
136 136 hfiles = [last] + hfiles[:-1]
137 137
138 138 all = {}
139 139
140 140 for f in hfiles:
141 141 # print "using",f
142 142 try:
143 143 all.update(self[f])
144 144 except KeyError:
145 145 print("Corrupt",f,"deleted - hset is not threadsafe!")
146 146 del self[f]
147 147
148 148 self.uncache(f)
149 149
150 150 return all
151 151
152 152 def hcompress(self, hashroot):
153 153 """ Compress category 'hashroot', so hset is fast again
154 154
155 155 hget will fail if fast_only is True for compressed items (that were
156 156 hset before hcompress).
157 157
158 158 """
159 159 hfiles = self.keys(hashroot + "/*")
160 160 all = {}
161 161 for f in hfiles:
162 162 # print "using",f
163 163 all.update(self[f])
164 164 self.uncache(f)
165 165
166 166 self[hashroot + '/xx'] = all
167 167 for f in hfiles:
168 168 p = self.root / f
169 169 if p.basename() == 'xx':
170 170 continue
171 171 p.remove()
172 172
173 173
174 174
175 175 def __delitem__(self,key):
176 176 """ del db["key"] """
177 177 fil = self.root / key
178 178 self.cache.pop(fil,None)
179 179 try:
180 180 fil.remove()
181 181 except OSError:
182 182 # notfound and permission denied are ok - we
183 183 # lost, the other process wins the conflict
184 184 pass
185 185
186 186 def _normalized(self, p):
187 187 """ Make a key suitable for user's eyes """
188 188 return str(self.root.relpathto(p)).replace('\\','/')
189 189
190 190 def keys(self, globpat = None):
191 191 """ All keys in DB, or all keys matching a glob"""
192 192
193 193 if globpat is None:
194 194 files = self.root.walkfiles()
195 195 else:
196 196 files = [Path(p) for p in glob.glob(self.root/globpat)]
197 197 return [self._normalized(p) for p in files if p.isfile()]
198 198
199 199 def __iter__(self):
200 200 return iter(self.keys())
201 201
202 202 def __len__(self):
203 203 return len(self.keys())
204 204
205 205 def uncache(self,*items):
206 206 """ Removes all, or specified items from cache
207 207
208 208 Use this after reading a large amount of large objects
209 209 to free up memory, when you won't be needing the objects
210 210 for a while.
211 211
212 212 """
213 213 if not items:
214 214 self.cache = {}
215 215 for it in items:
216 216 self.cache.pop(it,None)
217 217
218 218 def waitget(self,key, maxwaittime = 60 ):
219 219 """ Wait (poll) for a key to get a value
220 220
221 221 Will wait for `maxwaittime` seconds before raising a KeyError.
222 222 The call exits normally if the `key` field in db gets a value
223 223 within the timeout period.
224 224
225 225 Use this for synchronizing different processes or for ensuring
226 226 that an unfortunately timed "db['key'] = newvalue" operation
227 227 in another process (which causes all 'get' operations to raise a
228 228 KeyError for the duration of pickling) won't screw up your program
229 229 logic.
230 230 """
231 231
232 232 wtimes = [0.2] * 3 + [0.5] * 2 + [1]
233 233 tries = 0
234 234 waited = 0
235 235 while 1:
236 236 try:
237 237 val = self[key]
238 238 return val
239 239 except KeyError:
240 240 pass
241 241
242 242 if waited > maxwaittime:
243 243 raise KeyError(key)
244 244
245 245 time.sleep(wtimes[tries])
246 246 waited+=wtimes[tries]
247 247 if tries < len(wtimes) -1:
248 248 tries+=1
249 249
250 250 def getlink(self,folder):
251 251 """ Get a convenient link for accessing items """
252 252 return PickleShareLink(self, folder)
253 253
254 254 def __repr__(self):
255 255 return "PickleShareDB('%s')" % self.root
256 256
257 257
258 258
259 259 class PickleShareLink:
260 260 """ A shorthand for accessing nested PickleShare data conveniently.
261 261
262 262 Created through PickleShareDB.getlink(), example::
263 263
264 264 lnk = db.getlink('myobjects/test')
265 265 lnk.foo = 2
266 266 lnk.bar = lnk.foo + 5
267 267
268 268 """
269 269 def __init__(self, db, keydir ):
270 270 self.__dict__.update(locals())
271 271
272 272 def __getattr__(self,key):
273 273 return self.__dict__['db'][self.__dict__['keydir']+'/' + key]
274 274 def __setattr__(self,key,val):
275 275 self.db[self.keydir+'/' + key] = val
276 276 def __repr__(self):
277 277 db = self.__dict__['db']
278 278 keys = db.keys( self.__dict__['keydir'] +"/*")
279 279 return "<PickleShareLink '%s': %s>" % (
280 280 self.__dict__['keydir'],
281 281 ";".join([Path(k).basename() for k in keys]))
282 282
283
284 def test():
285 db = PickleShareDB('~/testpickleshare')
286 db.clear()
287 print("Should be empty:",db.items())
288 db['hello'] = 15
289 db['aku ankka'] = [1,2,313]
290 db['paths/nest/ok/keyname'] = [1,(5,46)]
291 db.hset('hash', 'aku', 12)
292 db.hset('hash', 'ankka', 313)
293 print("12 =",db.hget('hash','aku'))
294 print("313 =",db.hget('hash','ankka'))
295 print("all hashed",db.hdict('hash'))
296 print(db.keys())
297 print(db.keys('paths/nest/ok/k*'))
298 print(dict(db)) # snapshot of whole db
299 db.uncache() # frees memory, causes re-reads later
300
301 # shorthand for accessing deeply nested files
302 lnk = db.getlink('myobjects/test')
303 lnk.foo = 2
304 lnk.bar = lnk.foo + 5
305 print(lnk.bar) # 7
306
307 def stress():
308 db = PickleShareDB('~/fsdbtest')
309 import time,sys
310 for i in range(1000):
311 for j in range(1000):
312 if i % 15 == 0 and i < 200:
313 if str(j) in db:
314 del db[str(j)]
315 continue
316
317 if j%33 == 0:
318 time.sleep(0.02)
319
320 db[str(j)] = db.get(str(j), []) + [(i,j,"proc %d" % os.getpid())]
321 db.hset('hash',j, db.hget('hash',j,15) + 1 )
322
323 print(i, end=' ')
324 sys.stdout.flush()
325 if i % 10 == 0:
326 db.uncache()
327
328 283 def main():
329 284 import textwrap
330 285 usage = textwrap.dedent("""\
331 286 pickleshare - manage PickleShare databases
332 287
333 288 Usage:
334 289
335 290 pickleshare dump /path/to/db > dump.txt
336 291 pickleshare load /path/to/db < dump.txt
337 292 pickleshare test /path/to/db
338 293 """)
339 294 DB = PickleShareDB
340 295 import sys
341 296 if len(sys.argv) < 2:
342 297 print(usage)
343 298 return
344 299
345 300 cmd = sys.argv[1]
346 301 args = sys.argv[2:]
347 302 if cmd == 'dump':
348 303 if not args: args= ['.']
349 304 db = DB(args[0])
350 305 import pprint
351 306 pprint.pprint(db.items())
352 307 elif cmd == 'load':
353 308 cont = sys.stdin.read()
354 309 db = DB(args[0])
355 310 data = eval(cont)
356 311 db.clear()
357 312 for k,v in data.items():
358 313 db[k] = v
359 314 elif cmd == 'testwait':
360 315 db = DB(args[0])
361 316 db.clear()
362 317 print(db.waitget('250'))
363 318 elif cmd == 'test':
364 319 test()    # NOTE: test() and stress() were moved to
365 320 stress()  # IPython.utils.tests.test_pickleshare; this branch now raises NameError
366 321
367 322 if __name__ == "__main__":
368 323 main()
369 324
370 325
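
One part of the module the CLI usage text above does not demonstrate is `waitget`, the polling primitive kept for handing values between processes. A hedged single-process sketch (the path and key are illustrative)::

    from IPython.utils.pickleshare import PickleShareDB

    db = PickleShareDB('~/testpickleshare')
    db['result'] = 42                  # what a producer process would write
    # A consumer polls until the key exists, raising KeyError after 60 seconds:
    print(db.waitget('result', maxwaittime=60))   # -> 42 immediately here
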
@@ -1,23 +1,39 b''
1 1 import io
2 2 import os.path
3 3 import nose.tools as nt
4 4
5 5 from IPython.utils import openpy
6 6
7 7 mydir = os.path.dirname(__file__)
8 8 nonascii_path = os.path.join(mydir, '../../core/tests/nonascii.py')
9 9
10 10 def test_detect_encoding():
11 11 f = open(nonascii_path, 'rb')
12 12 enc, lines = openpy.detect_encoding(f.readline)
13 13 nt.assert_equal(enc, 'iso-8859-5')
14 14
15 15 def test_read_file():
16 16 read_specified_enc = io.open(nonascii_path, encoding='iso-8859-5').read()
17 17 read_detected_enc = openpy.read_py_file(nonascii_path, skip_encoding_cookie=False)
18 18 nt.assert_equal(read_detected_enc, read_specified_enc)
19 19 assert u'coding: iso-8859-5' in read_detected_enc
20 20
21 21 read_strip_enc_cookie = openpy.read_py_file(nonascii_path, skip_encoding_cookie=True)
22 22 assert u'coding: iso-8859-5' not in read_strip_enc_cookie
23
23
24 def test_source_to_unicode():
25 with io.open(nonascii_path, 'rb') as f:
26 source_bytes = f.read()
27 nt.assert_equal(openpy.source_to_unicode(source_bytes, skip_encoding_cookie=False),
28 source_bytes.decode('iso-8859-5'))
29
30 source_no_cookie = openpy.source_to_unicode(source_bytes, skip_encoding_cookie=True)
31 nt.assert_not_in(u'coding: iso-8859-5', source_no_cookie)
32
33 def test_list_readline():
34 l = ['a', 'b']
35 readline = openpy._list_readline(l)
36 nt.assert_equal(readline(), 'a')
37 nt.assert_equal(readline(), 'b')
38 with nt.assert_raises(StopIteration):
39 readline() No newline at end of file
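
The `detect_encoding` test above exercises PEP 263 coding-cookie handling. Python 3's standard library exposes the same primitive, which makes a handy reference point for what the test expects (the file path is illustrative)::

    import tokenize

    # For a file whose first line is '# coding: iso-8859-5':
    with open('nonascii.py', 'rb') as f:
        encoding, consumed_lines = tokenize.detect_encoding(f.readline)
    print(encoding)   # -> 'iso-8859-5'
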
@@ -1,177 +1,190 b''
1 1 # encoding: utf-8
2 2 """Tests for IPython.utils.text"""
3 3 from __future__ import print_function
4 4
5 5 #-----------------------------------------------------------------------------
6 6 # Copyright (C) 2011 The IPython Development Team
7 7 #
8 8 # Distributed under the terms of the BSD License. The full license is in
9 9 # the file COPYING, distributed as part of this software.
10 10 #-----------------------------------------------------------------------------
11 11
12 12 #-----------------------------------------------------------------------------
13 13 # Imports
14 14 #-----------------------------------------------------------------------------
15 15
16 16 import os
17 17 import math
18 18 import random
19 19 import sys
20 20
21 21 import nose.tools as nt
22 22
23 23 from IPython.utils import text
24 24
25 25 #-----------------------------------------------------------------------------
26 26 # Globals
27 27 #-----------------------------------------------------------------------------
28 28
29 29 def test_columnize():
30 30 """Basic columnize tests."""
31 31 size = 5
32 32 items = [l*size for l in 'abc']
33 33 out = text.columnize(items, displaywidth=80)
34 34 nt.assert_equal(out, 'aaaaa bbbbb ccccc\n')
35 35 out = text.columnize(items, displaywidth=12)
36 36 nt.assert_equal(out, 'aaaaa ccccc\nbbbbb\n')
37 37 out = text.columnize(items, displaywidth=10)
38 38 nt.assert_equal(out, 'aaaaa\nbbbbb\nccccc\n')
39 39
40 40 def test_columnize_random():
41 41 """Test with random input to hopefully catch edge cases."""
42 42 for nitems in [random.randint(2,70) for i in range(2,20)]:
43 43 displaywidth = random.randint(20,200)
44 44 rand_len = [random.randint(2,displaywidth) for i in range(nitems)]
45 45 items = ['x'*l for l in rand_len]
46 46 out = text.columnize(items, displaywidth=displaywidth)
47 47 longer_line = max([len(x) for x in out.split('\n')])
48 48 longer_element = max(rand_len)
49 49 if longer_line > displaywidth:
50 50 print("Columnize displayed something larger than displaywidth : %s " % longer_line)
51 51 print("longer element : %s " % longer_element)
52 52 print("displaywidth : %s " % displaywidth)
53 53 print("number of elements : %s " % nitems)
54 54 print("size of each element :\n %s" % rand_len)
55 55 assert False
56 56
57 57 def test_columnize_medium():
58 58 """Test with inputs that shouldn't be wider than 80."""
59 59 size = 40
60 60 items = [l*size for l in 'abc']
61 61 out = text.columnize(items, displaywidth=80)
62 62 nt.assert_equal(out, '\n'.join(items+['']))
63 63
64 64 def test_columnize_long():
65 65 """Test columnize with inputs longer than the display window"""
66 66 size = 11
67 67 items = [l*size for l in 'abc']
68 68 out = text.columnize(items, displaywidth=size-1)
69 69 nt.assert_equal(out, '\n'.join(items+['']))
70 70
71 71 def eval_formatter_check(f):
72 72 ns = dict(n=12, pi=math.pi, stuff='hello there', os=os, u=u"café", b="café")
73 73 s = f.format("{n} {n//4} {stuff.split()[0]}", **ns)
74 74 nt.assert_equal(s, "12 3 hello")
75 75 s = f.format(' '.join(['{n//%i}'%i for i in range(1,8)]), **ns)
76 76 nt.assert_equal(s, "12 6 4 3 2 2 1")
77 77 s = f.format('{[n//i for i in range(1,8)]}', **ns)
78 78 nt.assert_equal(s, "[12, 6, 4, 3, 2, 2, 1]")
79 79 s = f.format("{stuff!s}", **ns)
80 80 nt.assert_equal(s, ns['stuff'])
81 81 s = f.format("{stuff!r}", **ns)
82 82 nt.assert_equal(s, repr(ns['stuff']))
83 83
84 84 # Check with unicode:
85 85 s = f.format("{u}", **ns)
86 86 nt.assert_equal(s, ns['u'])
87 87 # This decodes in a platform dependent manner, but it shouldn't error out
88 88 s = f.format("{b}", **ns)
89 89
90 90 nt.assert_raises(NameError, f.format, '{dne}', **ns)
91 91
92 92 def eval_formatter_slicing_check(f):
93 93 ns = dict(n=12, pi=math.pi, stuff='hello there', os=os)
94 94 s = f.format(" {stuff.split()[:]} ", **ns)
95 95 nt.assert_equal(s, " ['hello', 'there'] ")
96 96 s = f.format(" {stuff.split()[::-1]} ", **ns)
97 97 nt.assert_equal(s, " ['there', 'hello'] ")
98 98 s = f.format("{stuff[::2]}", **ns)
99 99 nt.assert_equal(s, ns['stuff'][::2])
100 100
101 101 nt.assert_raises(SyntaxError, f.format, "{n:x}", **ns)
102 102
103 103 def eval_formatter_no_slicing_check(f):
104 104 ns = dict(n=12, pi=math.pi, stuff='hello there', os=os)
105 105
106 106 s = f.format('{n:x} {pi**2:+f}', **ns)
107 107 nt.assert_equal(s, "c +9.869604")
108 108
109 109 s = f.format('{stuff[slice(1,4)]}', **ns)
110 110 nt.assert_equal(s, 'ell')
111 111
112 112 if sys.version_info >= (3, 4):
113 113 # String formatting has changed in Python 3.4, so this now works.
114 114 s = f.format("{a[:]}", a=[1, 2])
115 115 nt.assert_equal(s, "[1, 2]")
116 116 else:
117 117 nt.assert_raises(SyntaxError, f.format, "{a[:]}")
118 118
119 119 def test_eval_formatter():
120 120 f = text.EvalFormatter()
121 121 eval_formatter_check(f)
122 122 eval_formatter_no_slicing_check(f)
123 123
124 124 def test_full_eval_formatter():
125 125 f = text.FullEvalFormatter()
126 126 eval_formatter_check(f)
127 127 eval_formatter_slicing_check(f)
128 128
129 129 def test_dollar_formatter():
130 130 f = text.DollarFormatter()
131 131 eval_formatter_check(f)
132 132 eval_formatter_slicing_check(f)
133 133
134 134 ns = dict(n=12, pi=math.pi, stuff='hello there', os=os)
135 135 s = f.format("$n", **ns)
136 136 nt.assert_equal(s, "12")
137 137 s = f.format("$n.real", **ns)
138 138 nt.assert_equal(s, "12")
139 139 s = f.format("$n/{stuff[:5]}", **ns)
140 140 nt.assert_equal(s, "12/hello")
141 141 s = f.format("$n $$HOME", **ns)
142 142 nt.assert_equal(s, "12 $HOME")
143 143 s = f.format("${foo}", foo="HOME")
144 144 nt.assert_equal(s, "$HOME")
145 145
146 146
147 147 def test_long_substr():
148 148 data = ['hi']
149 149 nt.assert_equal(text.long_substr(data), 'hi')
150 150
151 151
152 152 def test_long_substr2():
153 153 data = ['abc', 'abd', 'abf', 'ab']
154 154 nt.assert_equal(text.long_substr(data), 'ab')
155 155
156 156 def test_long_substr_empty():
157 157 data = []
158 158 nt.assert_equal(text.long_substr(data), '')
159 159
160 160 def test_strip_email():
161 161 src = """\
162 162 >> >>> def f(x):
163 163 >> ... return x+1
164 164 >> ...
165 165 >> >>> zz = f(2.5)"""
166 166 cln = """\
167 167 >>> def f(x):
168 168 ... return x+1
169 169 ...
170 170 >>> zz = f(2.5)"""
171 171 nt.assert_equal(text.strip_email_quotes(src), cln)
172 172
173 173
174 174 def test_strip_email2():
175 175 src = '> > > list()'
176 176 cln = 'list()'
177 177 nt.assert_equal(text.strip_email_quotes(src), cln)
178
179 def test_LSString():
180 lss = text.LSString("abc\ndef")
181 nt.assert_equal(lss.l, ['abc', 'def'])
182 nt.assert_equal(lss.s, 'abc def')
183
184 def test_SList():
185 sl = text.SList(['a 11', 'b 1', 'a 2'])
186 nt.assert_equal(sl.n, 'a 11\nb 1\na 2')
187 nt.assert_equal(sl.s, 'a 11 b 1 a 2')
188 nt.assert_equal(sl.grep(lambda x: x.startswith('a')), text.SList(['a 11', 'a 2']))
189 nt.assert_equal(sl.fields(0), text.SList(['a', 'b', 'a']))
190 nt.assert_equal(sl.sort(field=1, nums=True), text.SList(['b 1', 'a 2', 'a 11'])) No newline at end of file
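
The formatter tests above all rest on one technique: subclassing `string.Formatter` and overriding `get_field` so each replacement field is evaluated as a Python expression rather than looked up by name. A minimal sketch of that idea (a simplification for illustration, not IPython's actual EvalFormatter)::

    import string

    class MiniEvalFormatter(string.Formatter):
        """Evaluate each replacement field as an expression over the kwargs."""
        def get_field(self, name, args, kwargs):
            # eval() the raw field text in the keyword-argument namespace
            return eval(name, {}, kwargs), name

    f = MiniEvalFormatter()
    print(f.format("{n//4} {stuff.split()[0]}", n=12, stuff='hello there'))
    # -> '3 hello'
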