##// END OF EJS Templates
Merge pull request #6380 from ellisonbg/latex-complete...
Thomas Kluyver -
r17812:3b47a9b4 merge
parent child Browse files
Show More
This diff has been collapsed as it changes many lines, (1297 lines changed) Show them Hide them
@@ -0,0 +1,1297 b''
1 # encoding: utf-8
2
3 # DO NOT EDIT THIS FILE BY HAND.
4
5 # To update this file, run the script /tools/gen_latex_symbols.py using Python 3
6
7 # This file is autogenerated from the file:
8 # https://raw.githubusercontent.com/JuliaLang/julia/master/base/latex_symbols.jl
9 # This original list is filtered to remove any unicode characters that are not valid
10 # Python identifiers.
11
12 latex_symbols = {
13
14 "\\^a" : "ᵃ",
15 "\\^b" : "ᵇ",
16 "\\^c" : "ᶜ",
17 "\\^d" : "ᵈ",
18 "\\^e" : "ᵉ",
19 "\\^f" : "ᶠ",
20 "\\^g" : "ᵍ",
21 "\\^h" : "ʰ",
22 "\\^i" : "ⁱ",
23 "\\^j" : "ʲ",
24 "\\^k" : "ᵏ",
25 "\\^l" : "ˡ",
26 "\\^m" : "ᵐ",
27 "\\^n" : "ⁿ",
28 "\\^o" : "ᵒ",
29 "\\^p" : "ᵖ",
30 "\\^r" : "ʳ",
31 "\\^s" : "ˢ",
32 "\\^t" : "ᵗ",
33 "\\^u" : "ᵘ",
34 "\\^v" : "ᵛ",
35 "\\^w" : "ʷ",
36 "\\^x" : "ˣ",
37 "\\^y" : "ʸ",
38 "\\^z" : "ᶻ",
39 "\\^A" : "ᴬ",
40 "\\^B" : "ᴮ",
41 "\\^D" : "ᴰ",
42 "\\^E" : "ᴱ",
43 "\\^G" : "ᴳ",
44 "\\^H" : "ᴴ",
45 "\\^I" : "ᴵ",
46 "\\^J" : "ᴶ",
47 "\\^K" : "ᴷ",
48 "\\^L" : "ᴸ",
49 "\\^M" : "ᴹ",
50 "\\^N" : "ᴺ",
51 "\\^O" : "ᴼ",
52 "\\^P" : "ᴾ",
53 "\\^R" : "ᴿ",
54 "\\^T" : "ᵀ",
55 "\\^U" : "ᵁ",
56 "\\^V" : "ⱽ",
57 "\\^W" : "ᵂ",
58 "\\^alpha" : "ᵅ",
59 "\\^beta" : "ᵝ",
60 "\\^gamma" : "ᵞ",
61 "\\^delta" : "ᵟ",
62 "\\^epsilon" : "ᵋ",
63 "\\^theta" : "ᶿ",
64 "\\^iota" : "ᶥ",
65 "\\^phi" : "ᵠ",
66 "\\^chi" : "ᵡ",
67 "\\^Phi" : "ᶲ",
68 "\\_a" : "ₐ",
69 "\\_e" : "ₑ",
70 "\\_h" : "ₕ",
71 "\\_i" : "ᵢ",
72 "\\_j" : "ⱼ",
73 "\\_k" : "ₖ",
74 "\\_l" : "ₗ",
75 "\\_m" : "ₘ",
76 "\\_n" : "ₙ",
77 "\\_o" : "ₒ",
78 "\\_p" : "ₚ",
79 "\\_r" : "ᵣ",
80 "\\_s" : "ₛ",
81 "\\_t" : "ₜ",
82 "\\_u" : "ᵤ",
83 "\\_v" : "ᵥ",
84 "\\_x" : "ₓ",
85 "\\_schwa" : "ₔ",
86 "\\_beta" : "ᵦ",
87 "\\_gamma" : "ᵧ",
88 "\\_rho" : "ᵨ",
89 "\\_phi" : "ᵩ",
90 "\\_chi" : "ᵪ",
91 "\\hbar" : "ħ",
92 "\\sout" : "̶",
93 "\\textordfeminine" : "ª",
94 "\\cdotp" : "·",
95 "\\textordmasculine" : "º",
96 "\\AA" : "Å",
97 "\\AE" : "Æ",
98 "\\DH" : "Ð",
99 "\\O" : "Ø",
100 "\\TH" : "Þ",
101 "\\ss" : "ß",
102 "\\aa" : "å",
103 "\\ae" : "æ",
104 "\\eth" : "ð",
105 "\\o" : "ø",
106 "\\th" : "þ",
107 "\\DJ" : "Đ",
108 "\\dj" : "đ",
109 "\\Elzxh" : "ħ",
110 "\\imath" : "ı",
111 "\\L" : "Ł",
112 "\\l" : "ł",
113 "\\NG" : "Ŋ",
114 "\\ng" : "ŋ",
115 "\\OE" : "Œ",
116 "\\oe" : "œ",
117 "\\texthvlig" : "ƕ",
118 "\\textnrleg" : "ƞ",
119 "\\textdoublepipe" : "ǂ",
120 "\\Elztrna" : "ɐ",
121 "\\Elztrnsa" : "ɒ",
122 "\\Elzopeno" : "ɔ",
123 "\\Elzrtld" : "ɖ",
124 "\\Elzschwa" : "ə",
125 "\\varepsilon" : "ɛ",
126 "\\Elzpgamma" : "ɣ",
127 "\\Elzpbgam" : "ɤ",
128 "\\Elztrnh" : "ɥ",
129 "\\Elzbtdl" : "ɬ",
130 "\\Elzrtll" : "ɭ",
131 "\\Elztrnm" : "ɯ",
132 "\\Elztrnmlr" : "ɰ",
133 "\\Elzltlmr" : "ɱ",
134 "\\Elzltln" : "ɲ",
135 "\\Elzrtln" : "ɳ",
136 "\\Elzclomeg" : "ɷ",
137 "\\textphi" : "ɸ",
138 "\\Elztrnr" : "ɹ",
139 "\\Elztrnrl" : "ɺ",
140 "\\Elzrttrnr" : "ɻ",
141 "\\Elzrl" : "ɼ",
142 "\\Elzrtlr" : "ɽ",
143 "\\Elzfhr" : "ɾ",
144 "\\Elzrtls" : "ʂ",
145 "\\Elzesh" : "ʃ",
146 "\\Elztrnt" : "ʇ",
147 "\\Elzrtlt" : "ʈ",
148 "\\Elzpupsil" : "ʊ",
149 "\\Elzpscrv" : "ʋ",
150 "\\Elzinvv" : "ʌ",
151 "\\Elzinvw" : "ʍ",
152 "\\Elztrny" : "ʎ",
153 "\\Elzrtlz" : "ʐ",
154 "\\Elzyogh" : "ʒ",
155 "\\Elzglst" : "ʔ",
156 "\\Elzreglst" : "ʕ",
157 "\\Elzinglst" : "ʖ",
158 "\\textturnk" : "ʞ",
159 "\\Elzdyogh" : "ʤ",
160 "\\Elztesh" : "ʧ",
161 "\\rasp" : "ʼ",
162 "\\textasciicaron" : "ˇ",
163 "\\Elzverts" : "ˈ",
164 "\\Elzverti" : "ˌ",
165 "\\Elzlmrk" : "ː",
166 "\\Elzhlmrk" : "ˑ",
167 "\\grave" : "̀",
168 "\\acute" : "́",
169 "\\hat" : "̂",
170 "\\tilde" : "̃",
171 "\\bar" : "̄",
172 "\\breve" : "̆",
173 "\\dot" : "̇",
174 "\\ddot" : "̈",
175 "\\ocirc" : "̊",
176 "\\H" : "̋",
177 "\\check" : "̌",
178 "\\Elzpalh" : "̡",
179 "\\Elzrh" : "̢",
180 "\\c" : "̧",
181 "\\k" : "̨",
182 "\\Elzsbbrg" : "̪",
183 "\\Elzxl" : "̵",
184 "\\Elzbar" : "̶",
185 "\\Alpha" : "Α",
186 "\\Beta" : "Β",
187 "\\Gamma" : "Γ",
188 "\\Delta" : "Δ",
189 "\\Epsilon" : "Ε",
190 "\\Zeta" : "Ζ",
191 "\\Eta" : "Η",
192 "\\Theta" : "Θ",
193 "\\Iota" : "Ι",
194 "\\Kappa" : "Κ",
195 "\\Lambda" : "Λ",
196 "\\Xi" : "Ξ",
197 "\\Pi" : "Π",
198 "\\Rho" : "Ρ",
199 "\\Sigma" : "Σ",
200 "\\Tau" : "Τ",
201 "\\Upsilon" : "Υ",
202 "\\Phi" : "Φ",
203 "\\Chi" : "Χ",
204 "\\Psi" : "Ψ",
205 "\\Omega" : "Ω",
206 "\\alpha" : "α",
207 "\\beta" : "β",
208 "\\gamma" : "γ",
209 "\\delta" : "δ",
210 "\\zeta" : "ζ",
211 "\\eta" : "η",
212 "\\theta" : "θ",
213 "\\iota" : "ι",
214 "\\kappa" : "κ",
215 "\\lambda" : "λ",
216 "\\mu" : "μ",
217 "\\nu" : "ν",
218 "\\xi" : "ξ",
219 "\\pi" : "π",
220 "\\rho" : "ρ",
221 "\\varsigma" : "ς",
222 "\\sigma" : "σ",
223 "\\tau" : "τ",
224 "\\upsilon" : "υ",
225 "\\varphi" : "φ",
226 "\\chi" : "χ",
227 "\\psi" : "ψ",
228 "\\omega" : "ω",
229 "\\vartheta" : "ϑ",
230 "\\phi" : "ϕ",
231 "\\varpi" : "ϖ",
232 "\\Stigma" : "Ϛ",
233 "\\Digamma" : "Ϝ",
234 "\\digamma" : "ϝ",
235 "\\Koppa" : "Ϟ",
236 "\\Sampi" : "Ϡ",
237 "\\varkappa" : "ϰ",
238 "\\varrho" : "ϱ",
239 "\\textTheta" : "ϴ",
240 "\\epsilon" : "ϵ",
241 "\\dddot" : "⃛",
242 "\\ddddot" : "⃜",
243 "\\hslash" : "ℏ",
244 "\\Im" : "ℑ",
245 "\\ell" : "ℓ",
246 "\\wp" : "℘",
247 "\\Re" : "ℜ",
248 "\\aleph" : "ℵ",
249 "\\beth" : "ℶ",
250 "\\gimel" : "ℷ",
251 "\\daleth" : "ℸ",
252 "\\BbbPi" : "ℿ",
253 "\\Zbar" : "Ƶ",
254 "\\overbar" : "̅",
255 "\\ovhook" : "̉",
256 "\\candra" : "̐",
257 "\\oturnedcomma" : "̒",
258 "\\ocommatopright" : "̕",
259 "\\droang" : "̚",
260 "\\wideutilde" : "̰",
261 "\\underbar" : "̱",
262 "\\not" : "̸",
263 "\\upMu" : "Μ",
264 "\\upNu" : "Ν",
265 "\\upOmicron" : "Ο",
266 "\\upepsilon" : "ε",
267 "\\upomicron" : "ο",
268 "\\upvarbeta" : "ϐ",
269 "\\upoldKoppa" : "Ϙ",
270 "\\upoldkoppa" : "ϙ",
271 "\\upstigma" : "ϛ",
272 "\\upkoppa" : "ϟ",
273 "\\upsampi" : "ϡ",
274 "\\tieconcat" : "⁀",
275 "\\leftharpoonaccent" : "⃐",
276 "\\rightharpoonaccent" : "⃑",
277 "\\vertoverlay" : "⃒",
278 "\\overleftarrow" : "⃖",
279 "\\vec" : "⃗",
280 "\\overleftrightarrow" : "⃡",
281 "\\annuity" : "⃧",
282 "\\threeunderdot" : "⃨",
283 "\\widebridgeabove" : "⃩",
284 "\\BbbC" : "ℂ",
285 "\\Eulerconst" : "ℇ",
286 "\\mscrg" : "ℊ",
287 "\\mscrH" : "ℋ",
288 "\\mfrakH" : "ℌ",
289 "\\BbbH" : "ℍ",
290 "\\Planckconst" : "ℎ",
291 "\\mscrI" : "ℐ",
292 "\\mscrL" : "ℒ",
293 "\\BbbN" : "ℕ",
294 "\\BbbP" : "ℙ",
295 "\\BbbQ" : "ℚ",
296 "\\mscrR" : "ℛ",
297 "\\BbbR" : "ℝ",
298 "\\BbbZ" : "ℤ",
299 "\\mfrakZ" : "ℨ",
300 "\\Angstrom" : "Å",
301 "\\mscrB" : "ℬ",
302 "\\mfrakC" : "ℭ",
303 "\\mscre" : "ℯ",
304 "\\mscrE" : "ℰ",
305 "\\mscrF" : "ℱ",
306 "\\Finv" : "Ⅎ",
307 "\\mscrM" : "ℳ",
308 "\\mscro" : "ℴ",
309 "\\Bbbgamma" : "ℽ",
310 "\\BbbGamma" : "ℾ",
311 "\\mitBbbD" : "ⅅ",
312 "\\mitBbbd" : "ⅆ",
313 "\\mitBbbe" : "ⅇ",
314 "\\mitBbbi" : "ⅈ",
315 "\\mitBbbj" : "ⅉ",
316 "\\mbfA" : "𝐀",
317 "\\mbfB" : "𝐁",
318 "\\mbfC" : "𝐂",
319 "\\mbfD" : "𝐃",
320 "\\mbfE" : "𝐄",
321 "\\mbfF" : "𝐅",
322 "\\mbfG" : "𝐆",
323 "\\mbfH" : "𝐇",
324 "\\mbfI" : "𝐈",
325 "\\mbfJ" : "𝐉",
326 "\\mbfK" : "𝐊",
327 "\\mbfL" : "𝐋",
328 "\\mbfM" : "𝐌",
329 "\\mbfN" : "𝐍",
330 "\\mbfO" : "𝐎",
331 "\\mbfP" : "𝐏",
332 "\\mbfQ" : "𝐐",
333 "\\mbfR" : "𝐑",
334 "\\mbfS" : "𝐒",
335 "\\mbfT" : "𝐓",
336 "\\mbfU" : "𝐔",
337 "\\mbfV" : "𝐕",
338 "\\mbfW" : "𝐖",
339 "\\mbfX" : "𝐗",
340 "\\mbfY" : "𝐘",
341 "\\mbfZ" : "𝐙",
342 "\\mbfa" : "𝐚",
343 "\\mbfb" : "𝐛",
344 "\\mbfc" : "𝐜",
345 "\\mbfd" : "𝐝",
346 "\\mbfe" : "𝐞",
347 "\\mbff" : "𝐟",
348 "\\mbfg" : "𝐠",
349 "\\mbfh" : "𝐡",
350 "\\mbfi" : "𝐢",
351 "\\mbfj" : "𝐣",
352 "\\mbfk" : "𝐤",
353 "\\mbfl" : "𝐥",
354 "\\mbfm" : "𝐦",
355 "\\mbfn" : "𝐧",
356 "\\mbfo" : "𝐨",
357 "\\mbfp" : "𝐩",
358 "\\mbfq" : "𝐪",
359 "\\mbfr" : "𝐫",
360 "\\mbfs" : "𝐬",
361 "\\mbft" : "𝐭",
362 "\\mbfu" : "𝐮",
363 "\\mbfv" : "𝐯",
364 "\\mbfw" : "𝐰",
365 "\\mbfx" : "𝐱",
366 "\\mbfy" : "𝐲",
367 "\\mbfz" : "𝐳",
368 "\\mitA" : "𝐴",
369 "\\mitB" : "𝐵",
370 "\\mitC" : "𝐶",
371 "\\mitD" : "𝐷",
372 "\\mitE" : "𝐸",
373 "\\mitF" : "𝐹",
374 "\\mitG" : "𝐺",
375 "\\mitH" : "𝐻",
376 "\\mitI" : "𝐼",
377 "\\mitJ" : "𝐽",
378 "\\mitK" : "𝐾",
379 "\\mitL" : "𝐿",
380 "\\mitM" : "𝑀",
381 "\\mitN" : "𝑁",
382 "\\mitO" : "𝑂",
383 "\\mitP" : "𝑃",
384 "\\mitQ" : "𝑄",
385 "\\mitR" : "𝑅",
386 "\\mitS" : "𝑆",
387 "\\mitT" : "𝑇",
388 "\\mitU" : "𝑈",
389 "\\mitV" : "𝑉",
390 "\\mitW" : "𝑊",
391 "\\mitX" : "𝑋",
392 "\\mitY" : "𝑌",
393 "\\mitZ" : "𝑍",
394 "\\mita" : "𝑎",
395 "\\mitb" : "𝑏",
396 "\\mitc" : "𝑐",
397 "\\mitd" : "𝑑",
398 "\\mite" : "𝑒",
399 "\\mitf" : "𝑓",
400 "\\mitg" : "𝑔",
401 "\\miti" : "𝑖",
402 "\\mitj" : "𝑗",
403 "\\mitk" : "𝑘",
404 "\\mitl" : "𝑙",
405 "\\mitm" : "𝑚",
406 "\\mitn" : "𝑛",
407 "\\mito" : "𝑜",
408 "\\mitp" : "𝑝",
409 "\\mitq" : "𝑞",
410 "\\mitr" : "𝑟",
411 "\\mits" : "𝑠",
412 "\\mitt" : "𝑡",
413 "\\mitu" : "𝑢",
414 "\\mitv" : "𝑣",
415 "\\mitw" : "𝑤",
416 "\\mitx" : "𝑥",
417 "\\mity" : "𝑦",
418 "\\mitz" : "𝑧",
419 "\\mbfitA" : "𝑨",
420 "\\mbfitB" : "𝑩",
421 "\\mbfitC" : "𝑪",
422 "\\mbfitD" : "𝑫",
423 "\\mbfitE" : "𝑬",
424 "\\mbfitF" : "𝑭",
425 "\\mbfitG" : "𝑮",
426 "\\mbfitH" : "𝑯",
427 "\\mbfitI" : "𝑰",
428 "\\mbfitJ" : "𝑱",
429 "\\mbfitK" : "𝑲",
430 "\\mbfitL" : "𝑳",
431 "\\mbfitM" : "𝑴",
432 "\\mbfitN" : "𝑵",
433 "\\mbfitO" : "𝑶",
434 "\\mbfitP" : "𝑷",
435 "\\mbfitQ" : "𝑸",
436 "\\mbfitR" : "𝑹",
437 "\\mbfitS" : "𝑺",
438 "\\mbfitT" : "𝑻",
439 "\\mbfitU" : "𝑼",
440 "\\mbfitV" : "𝑽",
441 "\\mbfitW" : "𝑾",
442 "\\mbfitX" : "𝑿",
443 "\\mbfitY" : "𝒀",
444 "\\mbfitZ" : "𝒁",
445 "\\mbfita" : "𝒂",
446 "\\mbfitb" : "𝒃",
447 "\\mbfitc" : "𝒄",
448 "\\mbfitd" : "𝒅",
449 "\\mbfite" : "𝒆",
450 "\\mbfitf" : "𝒇",
451 "\\mbfitg" : "𝒈",
452 "\\mbfith" : "𝒉",
453 "\\mbfiti" : "𝒊",
454 "\\mbfitj" : "𝒋",
455 "\\mbfitk" : "𝒌",
456 "\\mbfitl" : "𝒍",
457 "\\mbfitm" : "𝒎",
458 "\\mbfitn" : "𝒏",
459 "\\mbfito" : "𝒐",
460 "\\mbfitp" : "𝒑",
461 "\\mbfitq" : "𝒒",
462 "\\mbfitr" : "𝒓",
463 "\\mbfits" : "𝒔",
464 "\\mbfitt" : "𝒕",
465 "\\mbfitu" : "𝒖",
466 "\\mbfitv" : "𝒗",
467 "\\mbfitw" : "𝒘",
468 "\\mbfitx" : "𝒙",
469 "\\mbfity" : "𝒚",
470 "\\mbfitz" : "𝒛",
471 "\\mscrA" : "𝒜",
472 "\\mscrC" : "𝒞",
473 "\\mscrD" : "𝒟",
474 "\\mscrG" : "𝒢",
475 "\\mscrJ" : "𝒥",
476 "\\mscrK" : "𝒦",
477 "\\mscrN" : "𝒩",
478 "\\mscrO" : "𝒪",
479 "\\mscrP" : "𝒫",
480 "\\mscrQ" : "𝒬",
481 "\\mscrS" : "𝒮",
482 "\\mscrT" : "𝒯",
483 "\\mscrU" : "𝒰",
484 "\\mscrV" : "𝒱",
485 "\\mscrW" : "𝒲",
486 "\\mscrX" : "𝒳",
487 "\\mscrY" : "𝒴",
488 "\\mscrZ" : "𝒵",
489 "\\mscra" : "𝒶",
490 "\\mscrb" : "𝒷",
491 "\\mscrc" : "𝒸",
492 "\\mscrd" : "𝒹",
493 "\\mscrf" : "𝒻",
494 "\\mscrh" : "𝒽",
495 "\\mscri" : "𝒾",
496 "\\mscrj" : "𝒿",
497 "\\mscrk" : "𝓀",
498 "\\mscrm" : "𝓂",
499 "\\mscrn" : "𝓃",
500 "\\mscrp" : "𝓅",
501 "\\mscrq" : "𝓆",
502 "\\mscrr" : "𝓇",
503 "\\mscrs" : "𝓈",
504 "\\mscrt" : "𝓉",
505 "\\mscru" : "𝓊",
506 "\\mscrv" : "𝓋",
507 "\\mscrw" : "𝓌",
508 "\\mscrx" : "𝓍",
509 "\\mscry" : "𝓎",
510 "\\mscrz" : "𝓏",
511 "\\mbfscrA" : "𝓐",
512 "\\mbfscrB" : "𝓑",
513 "\\mbfscrC" : "𝓒",
514 "\\mbfscrD" : "𝓓",
515 "\\mbfscrE" : "𝓔",
516 "\\mbfscrF" : "𝓕",
517 "\\mbfscrG" : "𝓖",
518 "\\mbfscrH" : "𝓗",
519 "\\mbfscrI" : "𝓘",
520 "\\mbfscrJ" : "𝓙",
521 "\\mbfscrK" : "𝓚",
522 "\\mbfscrL" : "𝓛",
523 "\\mbfscrM" : "𝓜",
524 "\\mbfscrN" : "𝓝",
525 "\\mbfscrO" : "𝓞",
526 "\\mbfscrP" : "𝓟",
527 "\\mbfscrQ" : "𝓠",
528 "\\mbfscrR" : "𝓡",
529 "\\mbfscrS" : "𝓢",
530 "\\mbfscrT" : "𝓣",
531 "\\mbfscrU" : "𝓤",
532 "\\mbfscrV" : "𝓥",
533 "\\mbfscrW" : "𝓦",
534 "\\mbfscrX" : "𝓧",
535 "\\mbfscrY" : "𝓨",
536 "\\mbfscrZ" : "𝓩",
537 "\\mbfscra" : "𝓪",
538 "\\mbfscrb" : "𝓫",
539 "\\mbfscrc" : "𝓬",
540 "\\mbfscrd" : "𝓭",
541 "\\mbfscre" : "𝓮",
542 "\\mbfscrf" : "𝓯",
543 "\\mbfscrg" : "𝓰",
544 "\\mbfscrh" : "𝓱",
545 "\\mbfscri" : "𝓲",
546 "\\mbfscrj" : "𝓳",
547 "\\mbfscrk" : "𝓴",
548 "\\mbfscrl" : "𝓵",
549 "\\mbfscrm" : "𝓶",
550 "\\mbfscrn" : "𝓷",
551 "\\mbfscro" : "𝓸",
552 "\\mbfscrp" : "𝓹",
553 "\\mbfscrq" : "𝓺",
554 "\\mbfscrr" : "𝓻",
555 "\\mbfscrs" : "𝓼",
556 "\\mbfscrt" : "𝓽",
557 "\\mbfscru" : "𝓾",
558 "\\mbfscrv" : "𝓿",
559 "\\mbfscrw" : "𝔀",
560 "\\mbfscrx" : "𝔁",
561 "\\mbfscry" : "𝔂",
562 "\\mbfscrz" : "𝔃",
563 "\\mfrakA" : "𝔄",
564 "\\mfrakB" : "𝔅",
565 "\\mfrakD" : "𝔇",
566 "\\mfrakE" : "𝔈",
567 "\\mfrakF" : "𝔉",
568 "\\mfrakG" : "𝔊",
569 "\\mfrakJ" : "𝔍",
570 "\\mfrakK" : "𝔎",
571 "\\mfrakL" : "𝔏",
572 "\\mfrakM" : "𝔐",
573 "\\mfrakN" : "𝔑",
574 "\\mfrakO" : "𝔒",
575 "\\mfrakP" : "𝔓",
576 "\\mfrakQ" : "𝔔",
577 "\\mfrakS" : "𝔖",
578 "\\mfrakT" : "𝔗",
579 "\\mfrakU" : "𝔘",
580 "\\mfrakV" : "𝔙",
581 "\\mfrakW" : "𝔚",
582 "\\mfrakX" : "𝔛",
583 "\\mfrakY" : "𝔜",
584 "\\mfraka" : "𝔞",
585 "\\mfrakb" : "𝔟",
586 "\\mfrakc" : "𝔠",
587 "\\mfrakd" : "𝔡",
588 "\\mfrake" : "𝔢",
589 "\\mfrakf" : "𝔣",
590 "\\mfrakg" : "𝔤",
591 "\\mfrakh" : "𝔥",
592 "\\mfraki" : "𝔦",
593 "\\mfrakj" : "𝔧",
594 "\\mfrakk" : "𝔨",
595 "\\mfrakl" : "𝔩",
596 "\\mfrakm" : "𝔪",
597 "\\mfrakn" : "𝔫",
598 "\\mfrako" : "𝔬",
599 "\\mfrakp" : "𝔭",
600 "\\mfrakq" : "𝔮",
601 "\\mfrakr" : "𝔯",
602 "\\mfraks" : "𝔰",
603 "\\mfrakt" : "𝔱",
604 "\\mfraku" : "𝔲",
605 "\\mfrakv" : "𝔳",
606 "\\mfrakw" : "𝔴",
607 "\\mfrakx" : "𝔵",
608 "\\mfraky" : "𝔶",
609 "\\mfrakz" : "𝔷",
610 "\\BbbA" : "𝔸",
611 "\\BbbB" : "𝔹",
612 "\\BbbD" : "𝔻",
613 "\\BbbE" : "𝔼",
614 "\\BbbF" : "𝔽",
615 "\\BbbG" : "𝔾",
616 "\\BbbI" : "𝕀",
617 "\\BbbJ" : "𝕁",
618 "\\BbbK" : "𝕂",
619 "\\BbbL" : "𝕃",
620 "\\BbbM" : "𝕄",
621 "\\BbbO" : "𝕆",
622 "\\BbbS" : "𝕊",
623 "\\BbbT" : "𝕋",
624 "\\BbbU" : "𝕌",
625 "\\BbbV" : "𝕍",
626 "\\BbbW" : "𝕎",
627 "\\BbbX" : "𝕏",
628 "\\BbbY" : "𝕐",
629 "\\Bbba" : "𝕒",
630 "\\Bbbb" : "𝕓",
631 "\\Bbbc" : "𝕔",
632 "\\Bbbd" : "𝕕",
633 "\\Bbbe" : "𝕖",
634 "\\Bbbf" : "𝕗",
635 "\\Bbbg" : "𝕘",
636 "\\Bbbh" : "𝕙",
637 "\\Bbbi" : "𝕚",
638 "\\Bbbj" : "𝕛",
639 "\\Bbbk" : "𝕜",
640 "\\Bbbl" : "𝕝",
641 "\\Bbbm" : "𝕞",
642 "\\Bbbn" : "𝕟",
643 "\\Bbbo" : "𝕠",
644 "\\Bbbp" : "𝕡",
645 "\\Bbbq" : "𝕢",
646 "\\Bbbr" : "𝕣",
647 "\\Bbbs" : "𝕤",
648 "\\Bbbt" : "𝕥",
649 "\\Bbbu" : "𝕦",
650 "\\Bbbv" : "𝕧",
651 "\\Bbbw" : "𝕨",
652 "\\Bbbx" : "𝕩",
653 "\\Bbby" : "𝕪",
654 "\\Bbbz" : "𝕫",
655 "\\mbffrakA" : "𝕬",
656 "\\mbffrakB" : "𝕭",
657 "\\mbffrakC" : "𝕮",
658 "\\mbffrakD" : "𝕯",
659 "\\mbffrakE" : "𝕰",
660 "\\mbffrakF" : "𝕱",
661 "\\mbffrakG" : "𝕲",
662 "\\mbffrakH" : "𝕳",
663 "\\mbffrakI" : "𝕴",
664 "\\mbffrakJ" : "𝕵",
665 "\\mbffrakK" : "𝕶",
666 "\\mbffrakL" : "𝕷",
667 "\\mbffrakM" : "𝕸",
668 "\\mbffrakN" : "𝕹",
669 "\\mbffrakO" : "𝕺",
670 "\\mbffrakP" : "𝕻",
671 "\\mbffrakQ" : "𝕼",
672 "\\mbffrakR" : "𝕽",
673 "\\mbffrakS" : "𝕾",
674 "\\mbffrakT" : "𝕿",
675 "\\mbffrakU" : "𝖀",
676 "\\mbffrakV" : "𝖁",
677 "\\mbffrakW" : "𝖂",
678 "\\mbffrakX" : "𝖃",
679 "\\mbffrakY" : "𝖄",
680 "\\mbffrakZ" : "𝖅",
681 "\\mbffraka" : "𝖆",
682 "\\mbffrakb" : "𝖇",
683 "\\mbffrakc" : "𝖈",
684 "\\mbffrakd" : "𝖉",
685 "\\mbffrake" : "𝖊",
686 "\\mbffrakf" : "𝖋",
687 "\\mbffrakg" : "𝖌",
688 "\\mbffrakh" : "𝖍",
689 "\\mbffraki" : "𝖎",
690 "\\mbffrakj" : "𝖏",
691 "\\mbffrakk" : "𝖐",
692 "\\mbffrakl" : "𝖑",
693 "\\mbffrakm" : "𝖒",
694 "\\mbffrakn" : "𝖓",
695 "\\mbffrako" : "𝖔",
696 "\\mbffrakp" : "𝖕",
697 "\\mbffrakq" : "𝖖",
698 "\\mbffrakr" : "𝖗",
699 "\\mbffraks" : "𝖘",
700 "\\mbffrakt" : "𝖙",
701 "\\mbffraku" : "𝖚",
702 "\\mbffrakv" : "𝖛",
703 "\\mbffrakw" : "𝖜",
704 "\\mbffrakx" : "𝖝",
705 "\\mbffraky" : "𝖞",
706 "\\mbffrakz" : "𝖟",
707 "\\msansA" : "𝖠",
708 "\\msansB" : "𝖡",
709 "\\msansC" : "𝖢",
710 "\\msansD" : "𝖣",
711 "\\msansE" : "𝖤",
712 "\\msansF" : "𝖥",
713 "\\msansG" : "𝖦",
714 "\\msansH" : "𝖧",
715 "\\msansI" : "𝖨",
716 "\\msansJ" : "𝖩",
717 "\\msansK" : "𝖪",
718 "\\msansL" : "𝖫",
719 "\\msansM" : "𝖬",
720 "\\msansN" : "𝖭",
721 "\\msansO" : "𝖮",
722 "\\msansP" : "𝖯",
723 "\\msansQ" : "𝖰",
724 "\\msansR" : "𝖱",
725 "\\msansS" : "𝖲",
726 "\\msansT" : "𝖳",
727 "\\msansU" : "𝖴",
728 "\\msansV" : "𝖵",
729 "\\msansW" : "𝖶",
730 "\\msansX" : "𝖷",
731 "\\msansY" : "𝖸",
732 "\\msansZ" : "𝖹",
733 "\\msansa" : "𝖺",
734 "\\msansb" : "𝖻",
735 "\\msansc" : "𝖼",
736 "\\msansd" : "𝖽",
737 "\\msanse" : "𝖾",
738 "\\msansf" : "𝖿",
739 "\\msansg" : "𝗀",
740 "\\msansh" : "𝗁",
741 "\\msansi" : "𝗂",
742 "\\msansj" : "𝗃",
743 "\\msansk" : "𝗄",
744 "\\msansl" : "𝗅",
745 "\\msansm" : "𝗆",
746 "\\msansn" : "𝗇",
747 "\\msanso" : "𝗈",
748 "\\msansp" : "𝗉",
749 "\\msansq" : "𝗊",
750 "\\msansr" : "𝗋",
751 "\\msanss" : "𝗌",
752 "\\msanst" : "𝗍",
753 "\\msansu" : "𝗎",
754 "\\msansv" : "𝗏",
755 "\\msansw" : "𝗐",
756 "\\msansx" : "𝗑",
757 "\\msansy" : "𝗒",
758 "\\msansz" : "𝗓",
759 "\\mbfsansA" : "𝗔",
760 "\\mbfsansB" : "𝗕",
761 "\\mbfsansC" : "𝗖",
762 "\\mbfsansD" : "𝗗",
763 "\\mbfsansE" : "𝗘",
764 "\\mbfsansF" : "𝗙",
765 "\\mbfsansG" : "𝗚",
766 "\\mbfsansH" : "𝗛",
767 "\\mbfsansI" : "𝗜",
768 "\\mbfsansJ" : "𝗝",
769 "\\mbfsansK" : "𝗞",
770 "\\mbfsansL" : "𝗟",
771 "\\mbfsansM" : "𝗠",
772 "\\mbfsansN" : "𝗡",
773 "\\mbfsansO" : "𝗢",
774 "\\mbfsansP" : "𝗣",
775 "\\mbfsansQ" : "𝗤",
776 "\\mbfsansR" : "𝗥",
777 "\\mbfsansS" : "𝗦",
778 "\\mbfsansT" : "𝗧",
779 "\\mbfsansU" : "𝗨",
780 "\\mbfsansV" : "𝗩",
781 "\\mbfsansW" : "𝗪",
782 "\\mbfsansX" : "𝗫",
783 "\\mbfsansY" : "𝗬",
784 "\\mbfsansZ" : "𝗭",
785 "\\mbfsansa" : "𝗮",
786 "\\mbfsansb" : "𝗯",
787 "\\mbfsansc" : "𝗰",
788 "\\mbfsansd" : "𝗱",
789 "\\mbfsanse" : "𝗲",
790 "\\mbfsansf" : "𝗳",
791 "\\mbfsansg" : "𝗴",
792 "\\mbfsansh" : "𝗵",
793 "\\mbfsansi" : "𝗶",
794 "\\mbfsansj" : "𝗷",
795 "\\mbfsansk" : "𝗸",
796 "\\mbfsansl" : "𝗹",
797 "\\mbfsansm" : "𝗺",
798 "\\mbfsansn" : "𝗻",
799 "\\mbfsanso" : "𝗼",
800 "\\mbfsansp" : "𝗽",
801 "\\mbfsansq" : "𝗾",
802 "\\mbfsansr" : "𝗿",
803 "\\mbfsanss" : "𝘀",
804 "\\mbfsanst" : "𝘁",
805 "\\mbfsansu" : "𝘂",
806 "\\mbfsansv" : "𝘃",
807 "\\mbfsansw" : "𝘄",
808 "\\mbfsansx" : "𝘅",
809 "\\mbfsansy" : "𝘆",
810 "\\mbfsansz" : "𝘇",
811 "\\mitsansA" : "𝘈",
812 "\\mitsansB" : "𝘉",
813 "\\mitsansC" : "𝘊",
814 "\\mitsansD" : "𝘋",
815 "\\mitsansE" : "𝘌",
816 "\\mitsansF" : "𝘍",
817 "\\mitsansG" : "𝘎",
818 "\\mitsansH" : "𝘏",
819 "\\mitsansI" : "𝘐",
820 "\\mitsansJ" : "𝘑",
821 "\\mitsansK" : "𝘒",
822 "\\mitsansL" : "𝘓",
823 "\\mitsansM" : "𝘔",
824 "\\mitsansN" : "𝘕",
825 "\\mitsansO" : "𝘖",
826 "\\mitsansP" : "𝘗",
827 "\\mitsansQ" : "𝘘",
828 "\\mitsansR" : "𝘙",
829 "\\mitsansS" : "𝘚",
830 "\\mitsansT" : "𝘛",
831 "\\mitsansU" : "𝘜",
832 "\\mitsansV" : "𝘝",
833 "\\mitsansW" : "𝘞",
834 "\\mitsansX" : "𝘟",
835 "\\mitsansY" : "𝘠",
836 "\\mitsansZ" : "𝘡",
837 "\\mitsansa" : "𝘢",
838 "\\mitsansb" : "𝘣",
839 "\\mitsansc" : "𝘤",
840 "\\mitsansd" : "𝘥",
841 "\\mitsanse" : "𝘦",
842 "\\mitsansf" : "𝘧",
843 "\\mitsansg" : "𝘨",
844 "\\mitsansh" : "𝘩",
845 "\\mitsansi" : "𝘪",
846 "\\mitsansj" : "𝘫",
847 "\\mitsansk" : "𝘬",
848 "\\mitsansl" : "𝘭",
849 "\\mitsansm" : "𝘮",
850 "\\mitsansn" : "𝘯",
851 "\\mitsanso" : "𝘰",
852 "\\mitsansp" : "𝘱",
853 "\\mitsansq" : "𝘲",
854 "\\mitsansr" : "𝘳",
855 "\\mitsanss" : "𝘴",
856 "\\mitsanst" : "𝘵",
857 "\\mitsansu" : "𝘶",
858 "\\mitsansv" : "𝘷",
859 "\\mitsansw" : "𝘸",
860 "\\mitsansx" : "𝘹",
861 "\\mitsansy" : "𝘺",
862 "\\mitsansz" : "𝘻",
863 "\\mbfitsansA" : "𝘼",
864 "\\mbfitsansB" : "𝘽",
865 "\\mbfitsansC" : "𝘾",
866 "\\mbfitsansD" : "𝘿",
867 "\\mbfitsansE" : "𝙀",
868 "\\mbfitsansF" : "𝙁",
869 "\\mbfitsansG" : "𝙂",
870 "\\mbfitsansH" : "𝙃",
871 "\\mbfitsansI" : "𝙄",
872 "\\mbfitsansJ" : "𝙅",
873 "\\mbfitsansK" : "𝙆",
874 "\\mbfitsansL" : "𝙇",
875 "\\mbfitsansM" : "𝙈",
876 "\\mbfitsansN" : "𝙉",
877 "\\mbfitsansO" : "𝙊",
878 "\\mbfitsansP" : "𝙋",
879 "\\mbfitsansQ" : "𝙌",
880 "\\mbfitsansR" : "𝙍",
881 "\\mbfitsansS" : "𝙎",
882 "\\mbfitsansT" : "𝙏",
883 "\\mbfitsansU" : "𝙐",
884 "\\mbfitsansV" : "𝙑",
885 "\\mbfitsansW" : "𝙒",
886 "\\mbfitsansX" : "𝙓",
887 "\\mbfitsansY" : "𝙔",
888 "\\mbfitsansZ" : "𝙕",
889 "\\mbfitsansa" : "𝙖",
890 "\\mbfitsansb" : "𝙗",
891 "\\mbfitsansc" : "𝙘",
892 "\\mbfitsansd" : "𝙙",
893 "\\mbfitsanse" : "𝙚",
894 "\\mbfitsansf" : "𝙛",
895 "\\mbfitsansg" : "𝙜",
896 "\\mbfitsansh" : "𝙝",
897 "\\mbfitsansi" : "𝙞",
898 "\\mbfitsansj" : "𝙟",
899 "\\mbfitsansk" : "𝙠",
900 "\\mbfitsansl" : "𝙡",
901 "\\mbfitsansm" : "𝙢",
902 "\\mbfitsansn" : "𝙣",
903 "\\mbfitsanso" : "𝙤",
904 "\\mbfitsansp" : "𝙥",
905 "\\mbfitsansq" : "𝙦",
906 "\\mbfitsansr" : "𝙧",
907 "\\mbfitsanss" : "𝙨",
908 "\\mbfitsanst" : "𝙩",
909 "\\mbfitsansu" : "𝙪",
910 "\\mbfitsansv" : "𝙫",
911 "\\mbfitsansw" : "𝙬",
912 "\\mbfitsansx" : "𝙭",
913 "\\mbfitsansy" : "𝙮",
914 "\\mbfitsansz" : "𝙯",
915 "\\mttA" : "𝙰",
916 "\\mttB" : "𝙱",
917 "\\mttC" : "𝙲",
918 "\\mttD" : "𝙳",
919 "\\mttE" : "𝙴",
920 "\\mttF" : "𝙵",
921 "\\mttG" : "𝙶",
922 "\\mttH" : "𝙷",
923 "\\mttI" : "𝙸",
924 "\\mttJ" : "𝙹",
925 "\\mttK" : "𝙺",
926 "\\mttL" : "𝙻",
927 "\\mttM" : "𝙼",
928 "\\mttN" : "𝙽",
929 "\\mttO" : "𝙾",
930 "\\mttP" : "𝙿",
931 "\\mttQ" : "𝚀",
932 "\\mttR" : "𝚁",
933 "\\mttS" : "𝚂",
934 "\\mttT" : "𝚃",
935 "\\mttU" : "𝚄",
936 "\\mttV" : "𝚅",
937 "\\mttW" : "𝚆",
938 "\\mttX" : "𝚇",
939 "\\mttY" : "𝚈",
940 "\\mttZ" : "𝚉",
941 "\\mtta" : "𝚊",
942 "\\mttb" : "𝚋",
943 "\\mttc" : "𝚌",
944 "\\mttd" : "𝚍",
945 "\\mtte" : "𝚎",
946 "\\mttf" : "𝚏",
947 "\\mttg" : "𝚐",
948 "\\mtth" : "𝚑",
949 "\\mtti" : "𝚒",
950 "\\mttj" : "𝚓",
951 "\\mttk" : "𝚔",
952 "\\mttl" : "𝚕",
953 "\\mttm" : "𝚖",
954 "\\mttn" : "𝚗",
955 "\\mtto" : "𝚘",
956 "\\mttp" : "𝚙",
957 "\\mttq" : "𝚚",
958 "\\mttr" : "𝚛",
959 "\\mtts" : "𝚜",
960 "\\mttt" : "𝚝",
961 "\\mttu" : "𝚞",
962 "\\mttv" : "𝚟",
963 "\\mttw" : "𝚠",
964 "\\mttx" : "𝚡",
965 "\\mtty" : "𝚢",
966 "\\mttz" : "𝚣",
967 "\\mbfAlpha" : "𝚨",
968 "\\mbfBeta" : "𝚩",
969 "\\mbfGamma" : "𝚪",
970 "\\mbfDelta" : "𝚫",
971 "\\mbfEpsilon" : "𝚬",
972 "\\mbfZeta" : "𝚭",
973 "\\mbfEta" : "𝚮",
974 "\\mbfTheta" : "𝚯",
975 "\\mbfIota" : "𝚰",
976 "\\mbfKappa" : "𝚱",
977 "\\mbfLambda" : "𝚲",
978 "\\mbfMu" : "𝚳",
979 "\\mbfNu" : "𝚴",
980 "\\mbfXi" : "𝚵",
981 "\\mbfOmicron" : "𝚶",
982 "\\mbfPi" : "𝚷",
983 "\\mbfRho" : "𝚸",
984 "\\mbfvarTheta" : "𝚹",
985 "\\mbfSigma" : "𝚺",
986 "\\mbfTau" : "𝚻",
987 "\\mbfUpsilon" : "𝚼",
988 "\\mbfPhi" : "𝚽",
989 "\\mbfChi" : "𝚾",
990 "\\mbfPsi" : "𝚿",
991 "\\mbfOmega" : "𝛀",
992 "\\mbfalpha" : "𝛂",
993 "\\mbfbeta" : "𝛃",
994 "\\mbfgamma" : "𝛄",
995 "\\mbfdelta" : "𝛅",
996 "\\mbfepsilon" : "𝛆",
997 "\\mbfzeta" : "𝛇",
998 "\\mbfeta" : "𝛈",
999 "\\mbftheta" : "𝛉",
1000 "\\mbfiota" : "𝛊",
1001 "\\mbfkappa" : "𝛋",
1002 "\\mbflambda" : "𝛌",
1003 "\\mbfmu" : "𝛍",
1004 "\\mbfnu" : "𝛎",
1005 "\\mbfxi" : "𝛏",
1006 "\\mbfomicron" : "𝛐",
1007 "\\mbfpi" : "𝛑",
1008 "\\mbfrho" : "𝛒",
1009 "\\mbfvarsigma" : "𝛓",
1010 "\\mbfsigma" : "𝛔",
1011 "\\mbftau" : "𝛕",
1012 "\\mbfupsilon" : "𝛖",
1013 "\\mbfvarphi" : "𝛗",
1014 "\\mbfchi" : "𝛘",
1015 "\\mbfpsi" : "𝛙",
1016 "\\mbfomega" : "𝛚",
1017 "\\mbfvarepsilon" : "𝛜",
1018 "\\mbfvartheta" : "𝛝",
1019 "\\mbfvarkappa" : "𝛞",
1020 "\\mbfphi" : "𝛟",
1021 "\\mbfvarrho" : "𝛠",
1022 "\\mbfvarpi" : "𝛡",
1023 "\\mitAlpha" : "𝛢",
1024 "\\mitBeta" : "𝛣",
1025 "\\mitGamma" : "𝛤",
1026 "\\mitDelta" : "𝛥",
1027 "\\mitEpsilon" : "𝛦",
1028 "\\mitZeta" : "𝛧",
1029 "\\mitEta" : "𝛨",
1030 "\\mitTheta" : "𝛩",
1031 "\\mitIota" : "𝛪",
1032 "\\mitKappa" : "𝛫",
1033 "\\mitLambda" : "𝛬",
1034 "\\mitMu" : "𝛭",
1035 "\\mitNu" : "𝛮",
1036 "\\mitXi" : "𝛯",
1037 "\\mitOmicron" : "𝛰",
1038 "\\mitPi" : "𝛱",
1039 "\\mitRho" : "𝛲",
1040 "\\mitvarTheta" : "𝛳",
1041 "\\mitSigma" : "𝛴",
1042 "\\mitTau" : "𝛵",
1043 "\\mitUpsilon" : "𝛶",
1044 "\\mitPhi" : "𝛷",
1045 "\\mitChi" : "𝛸",
1046 "\\mitPsi" : "𝛹",
1047 "\\mitOmega" : "𝛺",
1048 "\\mitalpha" : "𝛼",
1049 "\\mitbeta" : "𝛽",
1050 "\\mitgamma" : "𝛾",
1051 "\\mitdelta" : "𝛿",
1052 "\\mitepsilon" : "𝜀",
1053 "\\mitzeta" : "𝜁",
1054 "\\miteta" : "𝜂",
1055 "\\mittheta" : "𝜃",
1056 "\\mitiota" : "𝜄",
1057 "\\mitkappa" : "𝜅",
1058 "\\mitlambda" : "𝜆",
1059 "\\mitmu" : "𝜇",
1060 "\\mitnu" : "𝜈",
1061 "\\mitxi" : "𝜉",
1062 "\\mitomicron" : "𝜊",
1063 "\\mitpi" : "𝜋",
1064 "\\mitrho" : "𝜌",
1065 "\\mitvarsigma" : "𝜍",
1066 "\\mitsigma" : "𝜎",
1067 "\\mittau" : "𝜏",
1068 "\\mitupsilon" : "𝜐",
1069 "\\mitphi" : "𝜑",
1070 "\\mitchi" : "𝜒",
1071 "\\mitpsi" : "𝜓",
1072 "\\mitomega" : "𝜔",
1073 "\\mitvarepsilon" : "𝜖",
1074 "\\mitvartheta" : "𝜗",
1075 "\\mitvarkappa" : "𝜘",
1076 "\\mitvarphi" : "𝜙",
1077 "\\mitvarrho" : "𝜚",
1078 "\\mitvarpi" : "𝜛",
1079 "\\mbfitAlpha" : "𝜜",
1080 "\\mbfitBeta" : "𝜝",
1081 "\\mbfitGamma" : "𝜞",
1082 "\\mbfitDelta" : "𝜟",
1083 "\\mbfitEpsilon" : "𝜠",
1084 "\\mbfitZeta" : "𝜡",
1085 "\\mbfitEta" : "𝜢",
1086 "\\mbfitTheta" : "𝜣",
1087 "\\mbfitIota" : "𝜤",
1088 "\\mbfitKappa" : "𝜥",
1089 "\\mbfitLambda" : "𝜦",
1090 "\\mbfitMu" : "𝜧",
1091 "\\mbfitNu" : "𝜨",
1092 "\\mbfitXi" : "𝜩",
1093 "\\mbfitOmicron" : "𝜪",
1094 "\\mbfitPi" : "𝜫",
1095 "\\mbfitRho" : "𝜬",
1096 "\\mbfitvarTheta" : "𝜭",
1097 "\\mbfitSigma" : "𝜮",
1098 "\\mbfitTau" : "𝜯",
1099 "\\mbfitUpsilon" : "𝜰",
1100 "\\mbfitPhi" : "𝜱",
1101 "\\mbfitChi" : "𝜲",
1102 "\\mbfitPsi" : "𝜳",
1103 "\\mbfitOmega" : "𝜴",
1104 "\\mbfitalpha" : "𝜶",
1105 "\\mbfitbeta" : "𝜷",
1106 "\\mbfitgamma" : "𝜸",
1107 "\\mbfitdelta" : "𝜹",
1108 "\\mbfitepsilon" : "𝜺",
1109 "\\mbfitzeta" : "𝜻",
1110 "\\mbfiteta" : "𝜼",
1111 "\\mbfittheta" : "𝜽",
1112 "\\mbfitiota" : "𝜾",
1113 "\\mbfitkappa" : "𝜿",
1114 "\\mbfitlambda" : "𝝀",
1115 "\\mbfitmu" : "𝝁",
1116 "\\mbfitnu" : "𝝂",
1117 "\\mbfitxi" : "𝝃",
1118 "\\mbfitomicron" : "𝝄",
1119 "\\mbfitpi" : "𝝅",
1120 "\\mbfitrho" : "𝝆",
1121 "\\mbfitvarsigma" : "𝝇",
1122 "\\mbfitsigma" : "𝝈",
1123 "\\mbfittau" : "𝝉",
1124 "\\mbfitupsilon" : "𝝊",
1125 "\\mbfitphi" : "𝝋",
1126 "\\mbfitchi" : "𝝌",
1127 "\\mbfitpsi" : "𝝍",
1128 "\\mbfitomega" : "𝝎",
1129 "\\mbfitvarepsilon" : "𝝐",
1130 "\\mbfitvartheta" : "𝝑",
1131 "\\mbfitvarkappa" : "𝝒",
1132 "\\mbfitvarphi" : "𝝓",
1133 "\\mbfitvarrho" : "𝝔",
1134 "\\mbfitvarpi" : "𝝕",
1135 "\\mbfsansAlpha" : "𝝖",
1136 "\\mbfsansBeta" : "𝝗",
1137 "\\mbfsansGamma" : "𝝘",
1138 "\\mbfsansDelta" : "𝝙",
1139 "\\mbfsansEpsilon" : "𝝚",
1140 "\\mbfsansZeta" : "𝝛",
1141 "\\mbfsansEta" : "𝝜",
1142 "\\mbfsansTheta" : "𝝝",
1143 "\\mbfsansIota" : "𝝞",
1144 "\\mbfsansKappa" : "𝝟",
1145 "\\mbfsansLambda" : "𝝠",
1146 "\\mbfsansMu" : "𝝡",
1147 "\\mbfsansNu" : "𝝢",
1148 "\\mbfsansXi" : "𝝣",
1149 "\\mbfsansOmicron" : "𝝤",
1150 "\\mbfsansPi" : "𝝥",
1151 "\\mbfsansRho" : "𝝦",
1152 "\\mbfsansvarTheta" : "𝝧",
1153 "\\mbfsansSigma" : "𝝨",
1154 "\\mbfsansTau" : "𝝩",
1155 "\\mbfsansUpsilon" : "𝝪",
1156 "\\mbfsansPhi" : "𝝫",
1157 "\\mbfsansChi" : "𝝬",
1158 "\\mbfsansPsi" : "𝝭",
1159 "\\mbfsansOmega" : "𝝮",
1160 "\\mbfsansalpha" : "𝝰",
1161 "\\mbfsansbeta" : "𝝱",
1162 "\\mbfsansgamma" : "𝝲",
1163 "\\mbfsansdelta" : "𝝳",
1164 "\\mbfsansepsilon" : "𝝴",
1165 "\\mbfsanszeta" : "𝝵",
1166 "\\mbfsanseta" : "𝝶",
1167 "\\mbfsanstheta" : "𝝷",
1168 "\\mbfsansiota" : "𝝸",
1169 "\\mbfsanskappa" : "𝝹",
1170 "\\mbfsanslambda" : "𝝺",
1171 "\\mbfsansmu" : "𝝻",
1172 "\\mbfsansnu" : "𝝼",
1173 "\\mbfsansxi" : "𝝽",
1174 "\\mbfsansomicron" : "𝝾",
1175 "\\mbfsanspi" : "𝝿",
1176 "\\mbfsansrho" : "𝞀",
1177 "\\mbfsansvarsigma" : "𝞁",
1178 "\\mbfsanssigma" : "𝞂",
1179 "\\mbfsanstau" : "𝞃",
1180 "\\mbfsansupsilon" : "𝞄",
1181 "\\mbfsansphi" : "𝞅",
1182 "\\mbfsanschi" : "𝞆",
1183 "\\mbfsanspsi" : "𝞇",
1184 "\\mbfsansomega" : "𝞈",
1185 "\\mbfsansvarepsilon" : "𝞊",
1186 "\\mbfsansvartheta" : "𝞋",
1187 "\\mbfsansvarkappa" : "𝞌",
1188 "\\mbfsansvarphi" : "𝞍",
1189 "\\mbfsansvarrho" : "𝞎",
1190 "\\mbfsansvarpi" : "𝞏",
1191 "\\mbfitsansAlpha" : "𝞐",
1192 "\\mbfitsansBeta" : "𝞑",
1193 "\\mbfitsansGamma" : "𝞒",
1194 "\\mbfitsansDelta" : "𝞓",
1195 "\\mbfitsansEpsilon" : "𝞔",
1196 "\\mbfitsansZeta" : "𝞕",
1197 "\\mbfitsansEta" : "𝞖",
1198 "\\mbfitsansTheta" : "𝞗",
1199 "\\mbfitsansIota" : "𝞘",
1200 "\\mbfitsansKappa" : "𝞙",
1201 "\\mbfitsansLambda" : "𝞚",
1202 "\\mbfitsansMu" : "𝞛",
1203 "\\mbfitsansNu" : "𝞜",
1204 "\\mbfitsansXi" : "𝞝",
1205 "\\mbfitsansOmicron" : "𝞞",
1206 "\\mbfitsansPi" : "𝞟",
1207 "\\mbfitsansRho" : "𝞠",
1208 "\\mbfitsansvarTheta" : "𝞡",
1209 "\\mbfitsansSigma" : "𝞢",
1210 "\\mbfitsansTau" : "𝞣",
1211 "\\mbfitsansUpsilon" : "𝞤",
1212 "\\mbfitsansPhi" : "𝞥",
1213 "\\mbfitsansChi" : "𝞦",
1214 "\\mbfitsansPsi" : "𝞧",
1215 "\\mbfitsansOmega" : "𝞨",
1216 "\\mbfitsansalpha" : "𝞪",
1217 "\\mbfitsansbeta" : "𝞫",
1218 "\\mbfitsansgamma" : "𝞬",
1219 "\\mbfitsansdelta" : "𝞭",
1220 "\\mbfitsansepsilon" : "𝞮",
1221 "\\mbfitsanszeta" : "𝞯",
1222 "\\mbfitsanseta" : "𝞰",
1223 "\\mbfitsanstheta" : "𝞱",
1224 "\\mbfitsansiota" : "𝞲",
1225 "\\mbfitsanskappa" : "𝞳",
1226 "\\mbfitsanslambda" : "𝞴",
1227 "\\mbfitsansmu" : "𝞵",
1228 "\\mbfitsansnu" : "𝞶",
1229 "\\mbfitsansxi" : "𝞷",
1230 "\\mbfitsansomicron" : "𝞸",
1231 "\\mbfitsanspi" : "𝞹",
1232 "\\mbfitsansrho" : "𝞺",
1233 "\\mbfitsansvarsigma" : "𝞻",
1234 "\\mbfitsanssigma" : "𝞼",
1235 "\\mbfitsanstau" : "𝞽",
1236 "\\mbfitsansupsilon" : "𝞾",
1237 "\\mbfitsansphi" : "𝞿",
1238 "\\mbfitsanschi" : "𝟀",
1239 "\\mbfitsanspsi" : "𝟁",
1240 "\\mbfitsansomega" : "𝟂",
1241 "\\mbfitsansvarepsilon" : "𝟄",
1242 "\\mbfitsansvartheta" : "𝟅",
1243 "\\mbfitsansvarkappa" : "𝟆",
1244 "\\mbfitsansvarphi" : "𝟇",
1245 "\\mbfitsansvarrho" : "𝟈",
1246 "\\mbfitsansvarpi" : "𝟉",
1247 "\\mbfzero" : "𝟎",
1248 "\\mbfone" : "𝟏",
1249 "\\mbftwo" : "𝟐",
1250 "\\mbfthree" : "𝟑",
1251 "\\mbffour" : "𝟒",
1252 "\\mbffive" : "𝟓",
1253 "\\mbfsix" : "𝟔",
1254 "\\mbfseven" : "𝟕",
1255 "\\mbfeight" : "𝟖",
1256 "\\mbfnine" : "𝟗",
1257 "\\Bbbzero" : "𝟘",
1258 "\\Bbbone" : "𝟙",
1259 "\\Bbbtwo" : "𝟚",
1260 "\\Bbbthree" : "𝟛",
1261 "\\Bbbfour" : "𝟜",
1262 "\\Bbbfive" : "𝟝",
1263 "\\Bbbsix" : "𝟞",
1264 "\\Bbbseven" : "𝟟",
1265 "\\Bbbeight" : "𝟠",
1266 "\\Bbbnine" : "𝟡",
1267 "\\msanszero" : "𝟢",
1268 "\\msansone" : "𝟣",
1269 "\\msanstwo" : "𝟤",
1270 "\\msansthree" : "𝟥",
1271 "\\msansfour" : "𝟦",
1272 "\\msansfive" : "𝟧",
1273 "\\msanssix" : "𝟨",
1274 "\\msansseven" : "𝟩",
1275 "\\msanseight" : "𝟪",
1276 "\\msansnine" : "𝟫",
1277 "\\mbfsanszero" : "𝟬",
1278 "\\mbfsansone" : "𝟭",
1279 "\\mbfsanstwo" : "𝟮",
1280 "\\mbfsansthree" : "𝟯",
1281 "\\mbfsansfour" : "𝟰",
1282 "\\mbfsansfive" : "𝟱",
1283 "\\mbfsanssix" : "𝟲",
1284 "\\mbfsansseven" : "𝟳",
1285 "\\mbfsanseight" : "𝟴",
1286 "\\mbfsansnine" : "𝟵",
1287 "\\mttzero" : "𝟶",
1288 "\\mttone" : "𝟷",
1289 "\\mtttwo" : "𝟸",
1290 "\\mttthree" : "𝟹",
1291 "\\mttfour" : "𝟺",
1292 "\\mttfive" : "𝟻",
1293 "\\mttsix" : "𝟼",
1294 "\\mttseven" : "𝟽",
1295 "\\mtteight" : "𝟾",
1296 "\\mttnine" : "𝟿",
1297 }
@@ -0,0 +1,84 b''
1 # coding: utf-8
2
3 # This script autogenerates `IPython.core.latex_symbols.py`, which contains a
4 # single dict, named `latex_symbols`. The keys in this dict are latex symbols,
5 # such as `\\alpha` and the values in the dict are the unicode equivalents for
6 # those. Most importantly, only unicode symbols that are valid identifiers in
7 # Python 3 are included.
8
9 #
10 # The original mapping of latex symbols to unicode comes from the `latex_symbols.jl` files from Julia.
11
12 from __future__ import print_function
13 import os, sys
14
15 if not sys.version_info[0] == 3:
16 print("This script must be run with Python 3, exiting...")
17 sys.exit(1)
18
19 # Import the Julia LaTeX symbols
20 print('Importing latex_symbols.jl from Julia...')
21 import requests
22 url = 'https://raw.githubusercontent.com/JuliaLang/julia/master/base/latex_symbols.jl'
23 r = requests.get(url)
24
25
26 # Build a list of key, value pairs
27 print('Building a list of (latex, unicode) key-value pairs...')
28 lines = r.text.splitlines()[60:]
29 lines = [line for line in lines if '=>' in line]
30 lines = [line.replace('=>',':') for line in lines]
31
32 def line_to_tuple(line):
33 """Convert a single line of the .jl file to a 2-tuple of strings like ("\\alpha", "α")"""
34 kv = line.split(',')[0].split(':')
35 # kv = tuple(line.strip(', ').split(':'))
36 k, v = kv[0].strip(' "'), kv[1].strip(' "')
37 # if not test_ident(v):
38 # print(line)
39 return k, v
40
41 assert line_to_tuple(' "\\sqrt" : "\u221A",') == ('\\sqrt', '\u221A')
42 lines = [line_to_tuple(line) for line in lines]
43
44
45 # Filter out non-valid identifiers
46 print('Filtering out characters that are not valid Python 3 identifiers')
47
48 def test_ident(i):
49 """Is the unicode string valid in a Python 3 identifer."""
50 # Some characters are not valid at the start of a name, but we still want to
51 # include them. So prefix with 'a', which is valid at the start.
52 return ('a' + i).isidentifier()
53
54 assert test_ident("α")
55 assert not test_ident('‴')
56
57 valid_idents = [line for line in lines if test_ident(line[1])]
58
59
60 # Write the `latex_symbols.py` module in the cwd
61
62 s = """# encoding: utf-8
63
64 # DO NOT EDIT THIS FILE BY HAND.
65
66 # To update this file, run the script /tools/gen_latex_symbols.py using Python 3
67
68 # This file is autogenerated from the file:
69 # https://raw.githubusercontent.com/JuliaLang/julia/master/base/latex_symbols.jl
70 # This original list is filtered to remove any unicode characters that are not valid
71 # Python identifiers.
72
73 latex_symbols = {\n
74 """
75 for line in valid_idents:
76 s += ' "%s" : "%s",\n' % (line[0], line[1])
77 s += "}\n"
78
79 fn = os.path.join('..','IPython','core','latex_symbols.py')
80 print("Writing the file: %s" % fn)
81 with open(fn, 'w', encoding='utf-8') as f:
82 f.write(s)
83
84
@@ -1,1142 +1,1171 b''
1 # encoding: utf-8
1 """Word completion for IPython.
2 """Word completion for IPython.
2
3
3 This module is a fork of the rlcompleter module in the Python standard
4 This module is a fork of the rlcompleter module in the Python standard
4 library. The original enhancements made to rlcompleter have been sent
5 library. The original enhancements made to rlcompleter have been sent
5 upstream and were accepted as of Python 2.3, but we need a lot more
6 upstream and were accepted as of Python 2.3, but we need a lot more
6 functionality specific to IPython, so this module will continue to live as an
7 functionality specific to IPython, so this module will continue to live as an
7 IPython-specific utility.
8 IPython-specific utility.
8
9
9 Original rlcompleter documentation:
10 Original rlcompleter documentation:
10
11
11 This requires the latest extension to the readline module (the
12 This requires the latest extension to the readline module (the
12 completes keywords, built-ins and globals in __main__; when completing
13 completes keywords, built-ins and globals in __main__; when completing
13 NAME.NAME..., it evaluates (!) the expression up to the last dot and
14 NAME.NAME..., it evaluates (!) the expression up to the last dot and
14 completes its attributes.
15 completes its attributes.
15
16
16 It's very cool to do "import string" type "string.", hit the
17 It's very cool to do "import string" type "string.", hit the
17 completion key (twice), and see the list of names defined by the
18 completion key (twice), and see the list of names defined by the
18 string module!
19 string module!
19
20
20 Tip: to use the tab key as the completion key, call
21 Tip: to use the tab key as the completion key, call
21
22
22 readline.parse_and_bind("tab: complete")
23 readline.parse_and_bind("tab: complete")
23
24
24 Notes:
25 Notes:
25
26
26 - Exceptions raised by the completer function are *ignored* (and
27 - Exceptions raised by the completer function are *ignored* (and
27 generally cause the completion to fail). This is a feature -- since
28 generally cause the completion to fail). This is a feature -- since
28 readline sets the tty device in raw (or cbreak) mode, printing a
29 readline sets the tty device in raw (or cbreak) mode, printing a
29 traceback wouldn't work well without some complicated hoopla to save,
30 traceback wouldn't work well without some complicated hoopla to save,
30 reset and restore the tty state.
31 reset and restore the tty state.
31
32
32 - The evaluation of the NAME.NAME... form may cause arbitrary
33 - The evaluation of the NAME.NAME... form may cause arbitrary
33 application defined code to be executed if an object with a
34 application defined code to be executed if an object with a
34 ``__getattr__`` hook is found. Since it is the responsibility of the
35 ``__getattr__`` hook is found. Since it is the responsibility of the
35 application (or the user) to enable this feature, I consider this an
36 application (or the user) to enable this feature, I consider this an
36 acceptable risk. More complicated expressions (e.g. function calls or
37 acceptable risk. More complicated expressions (e.g. function calls or
37 indexing operations) are *not* evaluated.
38 indexing operations) are *not* evaluated.
38
39
39 - GNU readline is also used by the built-in functions input() and
40 - GNU readline is also used by the built-in functions input() and
40 raw_input(), and thus these also benefit/suffer from the completer
41 raw_input(), and thus these also benefit/suffer from the completer
41 features. Clearly an interactive application can benefit by
42 features. Clearly an interactive application can benefit by
42 specifying its own completer function and using raw_input() for all
43 specifying its own completer function and using raw_input() for all
43 its input.
44 its input.
44
45
45 - When the original stdin is not a tty device, GNU readline is never
46 - When the original stdin is not a tty device, GNU readline is never
46 used, and this module (and the readline module) are silently inactive.
47 used, and this module (and the readline module) are silently inactive.
47 """
48 """
48
49
49 # Copyright (c) IPython Development Team.
50 # Copyright (c) IPython Development Team.
50 # Distributed under the terms of the Modified BSD License.
51 # Distributed under the terms of the Modified BSD License.
51 #
52 #
52 # Some of this code originated from rlcompleter in the Python standard library
53 # Some of this code originated from rlcompleter in the Python standard library
53 # Copyright (C) 2001 Python Software Foundation, www.python.org
54 # Copyright (C) 2001 Python Software Foundation, www.python.org
54
55
55 import __main__
56 import __main__
56 import glob
57 import glob
57 import inspect
58 import inspect
58 import itertools
59 import itertools
59 import keyword
60 import keyword
60 import os
61 import os
61 import re
62 import re
62 import sys
63 import sys
63
64
64 from IPython.config.configurable import Configurable
65 from IPython.config.configurable import Configurable
65 from IPython.core.error import TryNext
66 from IPython.core.error import TryNext
66 from IPython.core.inputsplitter import ESC_MAGIC
67 from IPython.core.inputsplitter import ESC_MAGIC
68 from IPython.core.latex_symbols import latex_symbols
67 from IPython.utils import generics
69 from IPython.utils import generics
68 from IPython.utils import io
70 from IPython.utils import io
69 from IPython.utils.decorators import undoc
71 from IPython.utils.decorators import undoc
70 from IPython.utils.dir2 import dir2
72 from IPython.utils.dir2 import dir2
71 from IPython.utils.process import arg_split
73 from IPython.utils.process import arg_split
72 from IPython.utils.py3compat import builtin_mod, string_types
74 from IPython.utils.py3compat import builtin_mod, string_types, PY3
73 from IPython.utils.traitlets import CBool, Enum
75 from IPython.utils.traitlets import CBool, Enum
74
76
75 #-----------------------------------------------------------------------------
77 #-----------------------------------------------------------------------------
76 # Globals
78 # Globals
77 #-----------------------------------------------------------------------------
79 #-----------------------------------------------------------------------------
78
80
79 # Public API
81 # Public API
80 __all__ = ['Completer','IPCompleter']
82 __all__ = ['Completer','IPCompleter']
81
83
82 if sys.platform == 'win32':
84 if sys.platform == 'win32':
83 PROTECTABLES = ' '
85 PROTECTABLES = ' '
84 else:
86 else:
85 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
87 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
86
88
87
89
88 #-----------------------------------------------------------------------------
90 #-----------------------------------------------------------------------------
89 # Main functions and classes
91 # Main functions and classes
90 #-----------------------------------------------------------------------------
92 #-----------------------------------------------------------------------------
91
93
def has_open_quotes(s):
    """Return whether a string has open quotes.

    This simply counts whether the number of quote characters of either type
    in the string is odd.

    Returns
    -------
    If there is an open quote, the quote character is returned.  Else, return
    False.
    """
    # '"' is checked before "'" so complex cases with nested quotes resolve
    # to the double quote.
    for quote_char in ('"', "'"):
        if s.count(quote_char) % 2:
            return quote_char
    return False
111
113
112
114
def protect_filename(s):
    """Escape a string to protect certain characters.

    Each character found in the module-level PROTECTABLES string is prefixed
    with a backslash; all other characters pass through unchanged.
    """
    return "".join('\\' + ch if ch in PROTECTABLES else ch
                   for ch in s)
118
120
def expand_user(path):
    """Expand '~'-style usernames in strings.

    This is similar to :func:`os.path.expanduser`, but it computes and returns
    extra information that will be useful if the input was being used in
    computing completions, and you wish to return the completions with the
    original '~' instead of its expanded value.

    Parameters
    ----------
    path : str
        String to be expanded. If no ~ is present, the output is the same as
        the input.

    Returns
    -------
    newpath : str
        Result of ~ expansion in the input path.
    tilde_expand : bool
        Whether any expansion was performed or not.
    tilde_val : str
        The value that ~ was replaced with.
    """
    # Fast path: nothing to expand.
    if not path.startswith('~'):
        return path, False, ''

    expanded = os.path.expanduser(path)
    # Number of characters that followed the leading '~'; they survive
    # expansion unchanged at the end of `expanded`.
    trailing = len(path) - 1
    tilde_val = expanded[:-trailing] if trailing else expanded
    return expanded, True, tilde_val
157
159
158
160
def compress_user(path, tilde_expand, tilde_val):
    """Does the opposite of expand_user, with its outputs.

    If `tilde_expand` is false the path is returned untouched; otherwise every
    occurrence of `tilde_val` in `path` is replaced by '~'.
    """
    if not tilde_expand:
        return path
    return path.replace(tilde_val, '~')
166
168
167
169
168
170
def penalize_magics_key(word):
    """Sort key that makes magic commands sort after ordinary names.

    Normal words are returned unchanged.  A leading '%' (line magic) or '%%'
    (cell magic) is moved to the end of the word, e.g.::

        %matplotlib -> matplotlib%
        %%timeit    -> timeit%%

    [The choice of the final % is arbitrary.]

    Since "matplotlib" < "matplotlib%" as strings, "timeit" appears before the
    magic "%timeit" in the ordering, and line magics appear before cell magics
    of the same name.  If any *other* '%' occurs in the word, it is not a
    magic command and is left unchanged.
    """
    # Move leading % signs to the end of the key, provided there are no
    # other % characters elsewhere in the string.
    if word.startswith("%%") and "%" not in word[2:]:
        return word[2:] + "%%"

    if word.startswith("%") and "%" not in word[1:]:
        return word[1:] + "%"

    return word
204
206
205
207
206 @undoc
208 @undoc
207 class Bunch(object): pass
209 class Bunch(object): pass
208
210
209
211
210 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
212 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
211 GREEDY_DELIMS = ' =\r\n'
213 GREEDY_DELIMS = ' =\r\n'
212
214
213
215
class CompletionSplitter(object):
    """An object to split an input line in a manner similar to readline.

    By having our own implementation, we can expose readline-like completion
    in a uniform manner to all frontends.  Given the line of text to be split
    and the cursor position on said line, `split_line` returns the 'word' to
    be completed on at the cursor after splitting the entire line.

    What characters are used as splitting delimiters can be controlled by
    setting the `delims` attribute (this is a property that internally
    automatically builds the necessary regular expression)."""

    # Private interface

    # A string of delimiter characters.  The default value makes sense for
    # IPython's most typical usage patterns.
    _delims = DELIMS

    # The expression (a normal string) to be compiled into a regular
    # expression for actual splitting.  Stored mostly for ease of debugging,
    # since this type of code can be so tricky to debug.
    _delim_expr = None

    # The compiled regular expression that does the actual splitting.
    _delim_re = None

    def __init__(self, delims=None):
        # Assigning through the property compiles the splitting regex.
        self.delims = CompletionSplitter._delims if delims is None else delims

    @property
    def delims(self):
        """Return the string of delimiter characters."""
        return self._delims

    @delims.setter
    def delims(self, delims):
        """Set the delimiters for line splitting."""
        # Escape every delimiter so it is treated literally in the class.
        pattern = '[' + ''.join('\\' + ch for ch in delims) + ']'
        self._delim_re = re.compile(pattern)
        self._delims = delims
        self._delim_expr = pattern

    def split_line(self, line, cursor_pos=None):
        """Split a line of text with a cursor at the given position.
        """
        head = line if cursor_pos is None else line[:cursor_pos]
        return self._delim_re.split(head)[-1]
263
265
264
266
265 class Completer(Configurable):
267 class Completer(Configurable):
266
268
267 greedy = CBool(False, config=True,
269 greedy = CBool(False, config=True,
268 help="""Activate greedy completion
270 help="""Activate greedy completion
269
271
270 This will enable completion on elements of lists, results of function calls, etc.,
272 This will enable completion on elements of lists, results of function calls, etc.,
271 but can be unsafe because the code is actually evaluated on TAB.
273 but can be unsafe because the code is actually evaluated on TAB.
272 """
274 """
273 )
275 )
274
276
275
277
276 def __init__(self, namespace=None, global_namespace=None, **kwargs):
278 def __init__(self, namespace=None, global_namespace=None, **kwargs):
277 """Create a new completer for the command line.
279 """Create a new completer for the command line.
278
280
279 Completer(namespace=ns,global_namespace=ns2) -> completer instance.
281 Completer(namespace=ns,global_namespace=ns2) -> completer instance.
280
282
281 If unspecified, the default namespace where completions are performed
283 If unspecified, the default namespace where completions are performed
282 is __main__ (technically, __main__.__dict__). Namespaces should be
284 is __main__ (technically, __main__.__dict__). Namespaces should be
283 given as dictionaries.
285 given as dictionaries.
284
286
285 An optional second namespace can be given. This allows the completer
287 An optional second namespace can be given. This allows the completer
286 to handle cases where both the local and global scopes need to be
288 to handle cases where both the local and global scopes need to be
287 distinguished.
289 distinguished.
288
290
289 Completer instances should be used as the completion mechanism of
291 Completer instances should be used as the completion mechanism of
290 readline via the set_completer() call:
292 readline via the set_completer() call:
291
293
292 readline.set_completer(Completer(my_namespace).complete)
294 readline.set_completer(Completer(my_namespace).complete)
293 """
295 """
294
296
295 # Don't bind to namespace quite yet, but flag whether the user wants a
297 # Don't bind to namespace quite yet, but flag whether the user wants a
296 # specific namespace or to use __main__.__dict__. This will allow us
298 # specific namespace or to use __main__.__dict__. This will allow us
297 # to bind to __main__.__dict__ at completion time, not now.
299 # to bind to __main__.__dict__ at completion time, not now.
298 if namespace is None:
300 if namespace is None:
299 self.use_main_ns = 1
301 self.use_main_ns = 1
300 else:
302 else:
301 self.use_main_ns = 0
303 self.use_main_ns = 0
302 self.namespace = namespace
304 self.namespace = namespace
303
305
304 # The global namespace, if given, can be bound directly
306 # The global namespace, if given, can be bound directly
305 if global_namespace is None:
307 if global_namespace is None:
306 self.global_namespace = {}
308 self.global_namespace = {}
307 else:
309 else:
308 self.global_namespace = global_namespace
310 self.global_namespace = global_namespace
309
311
310 super(Completer, self).__init__(**kwargs)
312 super(Completer, self).__init__(**kwargs)
311
313
312 def complete(self, text, state):
314 def complete(self, text, state):
313 """Return the next possible completion for 'text'.
315 """Return the next possible completion for 'text'.
314
316
315 This is called successively with state == 0, 1, 2, ... until it
317 This is called successively with state == 0, 1, 2, ... until it
316 returns None. The completion should begin with 'text'.
318 returns None. The completion should begin with 'text'.
317
319
318 """
320 """
319 if self.use_main_ns:
321 if self.use_main_ns:
320 self.namespace = __main__.__dict__
322 self.namespace = __main__.__dict__
321
323
322 if state == 0:
324 if state == 0:
323 if "." in text:
325 if "." in text:
324 self.matches = self.attr_matches(text)
326 self.matches = self.attr_matches(text)
325 else:
327 else:
326 self.matches = self.global_matches(text)
328 self.matches = self.global_matches(text)
327 try:
329 try:
328 return self.matches[state]
330 return self.matches[state]
329 except IndexError:
331 except IndexError:
330 return None
332 return None
331
333
332 def global_matches(self, text):
334 def global_matches(self, text):
333 """Compute matches when text is a simple name.
335 """Compute matches when text is a simple name.
334
336
335 Return a list of all keywords, built-in functions and names currently
337 Return a list of all keywords, built-in functions and names currently
336 defined in self.namespace or self.global_namespace that match.
338 defined in self.namespace or self.global_namespace that match.
337
339
338 """
340 """
339 #print 'Completer->global_matches, txt=%r' % text # dbg
341 #print 'Completer->global_matches, txt=%r' % text # dbg
340 matches = []
342 matches = []
341 match_append = matches.append
343 match_append = matches.append
342 n = len(text)
344 n = len(text)
343 for lst in [keyword.kwlist,
345 for lst in [keyword.kwlist,
344 builtin_mod.__dict__.keys(),
346 builtin_mod.__dict__.keys(),
345 self.namespace.keys(),
347 self.namespace.keys(),
346 self.global_namespace.keys()]:
348 self.global_namespace.keys()]:
347 for word in lst:
349 for word in lst:
348 if word[:n] == text and word != "__builtins__":
350 if word[:n] == text and word != "__builtins__":
349 match_append(word)
351 match_append(word)
350 return matches
352 return matches
351
353
352 def attr_matches(self, text):
354 def attr_matches(self, text):
353 """Compute matches when text contains a dot.
355 """Compute matches when text contains a dot.
354
356
355 Assuming the text is of the form NAME.NAME....[NAME], and is
357 Assuming the text is of the form NAME.NAME....[NAME], and is
356 evaluatable in self.namespace or self.global_namespace, it will be
358 evaluatable in self.namespace or self.global_namespace, it will be
357 evaluated and its attributes (as revealed by dir()) are used as
359 evaluated and its attributes (as revealed by dir()) are used as
358 possible completions. (For class instances, class members are are
360 possible completions. (For class instances, class members are are
359 also considered.)
361 also considered.)
360
362
361 WARNING: this can still invoke arbitrary C code, if an object
363 WARNING: this can still invoke arbitrary C code, if an object
362 with a __getattr__ hook is evaluated.
364 with a __getattr__ hook is evaluated.
363
365
364 """
366 """
365
367
366 #io.rprint('Completer->attr_matches, txt=%r' % text) # dbg
368 #io.rprint('Completer->attr_matches, txt=%r' % text) # dbg
367 # Another option, seems to work great. Catches things like ''.<tab>
369 # Another option, seems to work great. Catches things like ''.<tab>
368 m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
370 m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
369
371
370 if m:
372 if m:
371 expr, attr = m.group(1, 3)
373 expr, attr = m.group(1, 3)
372 elif self.greedy:
374 elif self.greedy:
373 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
375 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
374 if not m2:
376 if not m2:
375 return []
377 return []
376 expr, attr = m2.group(1,2)
378 expr, attr = m2.group(1,2)
377 else:
379 else:
378 return []
380 return []
379
381
380 try:
382 try:
381 obj = eval(expr, self.namespace)
383 obj = eval(expr, self.namespace)
382 except:
384 except:
383 try:
385 try:
384 obj = eval(expr, self.global_namespace)
386 obj = eval(expr, self.global_namespace)
385 except:
387 except:
386 return []
388 return []
387
389
388 if self.limit_to__all__ and hasattr(obj, '__all__'):
390 if self.limit_to__all__ and hasattr(obj, '__all__'):
389 words = get__all__entries(obj)
391 words = get__all__entries(obj)
390 else:
392 else:
391 words = dir2(obj)
393 words = dir2(obj)
392
394
393 try:
395 try:
394 words = generics.complete_object(obj, words)
396 words = generics.complete_object(obj, words)
395 except TryNext:
397 except TryNext:
396 pass
398 pass
397 except Exception:
399 except Exception:
398 # Silence errors from completion function
400 # Silence errors from completion function
399 #raise # dbg
401 #raise # dbg
400 pass
402 pass
401 # Build match list to return
403 # Build match list to return
402 n = len(attr)
404 n = len(attr)
403 res = ["%s.%s" % (expr, w) for w in words if w[:n] == attr ]
405 res = ["%s.%s" % (expr, w) for w in words if w[:n] == attr ]
404 return res
406 return res
405
407
406
408
def get__all__entries(obj):
    """Return the strings listed in the ``__all__`` attribute of `obj`.

    If ``__all__`` is absent (or accessing it raises anything), an empty list
    is returned; non-string entries are silently dropped.
    """
    try:
        words = getattr(obj, '__all__')
    except:
        return []

    return [entry for entry in words if isinstance(entry, string_types)]
415
417
416
418
def match_dict_keys(keys, prefix):
    """Used by dict_key_matches, matching the prefix to a list of keys.

    Returns a ``(quote, token_start, matched)`` triple: the quote character
    used by the typed prefix (or None), the offset of the completion token
    within the prefix, and the list of matching completion texts.
    """
    if not prefix:
        # Nothing typed yet: offer every str/bytes key, repr-quoted.
        return None, 0, [repr(k) for k in keys
                         if isinstance(k, (string_types, bytes))]
    # The quote character the user opened the key literal with.
    quote = re.search('["\']', prefix).group()
    try:
        # Close the literal and evaluate it to recover the raw prefix string.
        prefix_str = eval(prefix + quote, {})
    except Exception:
        return None, 0, []

    token_match = re.search(r'\w*$', prefix, re.UNICODE)
    token_start = token_match.start()
    token_prefix = token_match.group()

    # TODO: support bytes in Py3k
    matched = []
    for key in keys:
        try:
            if not key.startswith(prefix_str):
                continue
        except (AttributeError, TypeError, UnicodeError):
            # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
            continue

        # reformat remainder of key to begin with prefix
        remainder = key[len(prefix_str):]
        # force repr wrapped in '
        remainder_repr = repr(remainder + '"')
        if remainder_repr.startswith('u') and prefix[0] not in 'uU':
            # Found key is unicode, but prefix is Py2 string.
            # Therefore attempt to interpret key as string.
            try:
                remainder_repr = repr(remainder.encode('ascii') + '"')
            except UnicodeEncodeError:
                continue

        # Strip the repr's own quoting and the '"' sentinel appended above.
        remainder_repr = remainder_repr[1 + remainder_repr.index("'"):-2]
        if quote == '"':
            # The entered prefix is quoted with ",
            # but the match is quoted with '.
            # A contained " hence needs escaping for comparison:
            remainder_repr = remainder_repr.replace('"', '\\"')

        # then reinsert prefix from start of token
        matched.append('%s%s' % (token_prefix, remainder_repr))
    return quote, token_start, matched
465
467
466
468
467 def _safe_isinstance(obj, module, class_name):
469 def _safe_isinstance(obj, module, class_name):
468 """Checks if obj is an instance of module.class_name if loaded
470 """Checks if obj is an instance of module.class_name if loaded
469 """
471 """
470 return (module in sys.modules and
472 return (module in sys.modules and
471 isinstance(obj, getattr(__import__(module), class_name)))
473 isinstance(obj, getattr(__import__(module), class_name)))
472
474
473
475
474
476
475 class IPCompleter(Completer):
477 class IPCompleter(Completer):
476 """Extension of the completer class with IPython-specific features"""
478 """Extension of the completer class with IPython-specific features"""
477
479
478 def _greedy_changed(self, name, old, new):
480 def _greedy_changed(self, name, old, new):
479 """update the splitter and readline delims when greedy is changed"""
481 """update the splitter and readline delims when greedy is changed"""
480 if new:
482 if new:
481 self.splitter.delims = GREEDY_DELIMS
483 self.splitter.delims = GREEDY_DELIMS
482 else:
484 else:
483 self.splitter.delims = DELIMS
485 self.splitter.delims = DELIMS
484
486
485 if self.readline:
487 if self.readline:
486 self.readline.set_completer_delims(self.splitter.delims)
488 self.readline.set_completer_delims(self.splitter.delims)
487
489
488 merge_completions = CBool(True, config=True,
490 merge_completions = CBool(True, config=True,
489 help="""Whether to merge completion results into a single list
491 help="""Whether to merge completion results into a single list
490
492
491 If False, only the completion results from the first non-empty
493 If False, only the completion results from the first non-empty
492 completer will be returned.
494 completer will be returned.
493 """
495 """
494 )
496 )
495 omit__names = Enum((0,1,2), default_value=2, config=True,
497 omit__names = Enum((0,1,2), default_value=2, config=True,
496 help="""Instruct the completer to omit private method names
498 help="""Instruct the completer to omit private method names
497
499
498 Specifically, when completing on ``object.<tab>``.
500 Specifically, when completing on ``object.<tab>``.
499
501
500 When 2 [default]: all names that start with '_' will be excluded.
502 When 2 [default]: all names that start with '_' will be excluded.
501
503
502 When 1: all 'magic' names (``__foo__``) will be excluded.
504 When 1: all 'magic' names (``__foo__``) will be excluded.
503
505
504 When 0: nothing will be excluded.
506 When 0: nothing will be excluded.
505 """
507 """
506 )
508 )
507 limit_to__all__ = CBool(default_value=False, config=True,
509 limit_to__all__ = CBool(default_value=False, config=True,
508 help="""Instruct the completer to use __all__ for the completion
510 help="""Instruct the completer to use __all__ for the completion
509
511
510 Specifically, when completing on ``object.<tab>``.
512 Specifically, when completing on ``object.<tab>``.
511
513
512 When True: only those names in obj.__all__ will be included.
514 When True: only those names in obj.__all__ will be included.
513
515
514 When False [default]: the __all__ attribute is ignored
516 When False [default]: the __all__ attribute is ignored
515 """
517 """
516 )
518 )
517
519
518 def __init__(self, shell=None, namespace=None, global_namespace=None,
520 def __init__(self, shell=None, namespace=None, global_namespace=None,
519 use_readline=True, config=None, **kwargs):
521 use_readline=True, config=None, **kwargs):
520 """IPCompleter() -> completer
522 """IPCompleter() -> completer
521
523
522 Return a completer object suitable for use by the readline library
524 Return a completer object suitable for use by the readline library
523 via readline.set_completer().
525 via readline.set_completer().
524
526
525 Inputs:
527 Inputs:
526
528
527 - shell: a pointer to the ipython shell itself. This is needed
529 - shell: a pointer to the ipython shell itself. This is needed
528 because this completer knows about magic functions, and those can
530 because this completer knows about magic functions, and those can
529 only be accessed via the ipython instance.
531 only be accessed via the ipython instance.
530
532
531 - namespace: an optional dict where completions are performed.
533 - namespace: an optional dict where completions are performed.
532
534
533 - global_namespace: secondary optional dict for completions, to
535 - global_namespace: secondary optional dict for completions, to
534 handle cases (such as IPython embedded inside functions) where
536 handle cases (such as IPython embedded inside functions) where
535 both Python scopes are visible.
537 both Python scopes are visible.
536
538
537 use_readline : bool, optional
539 use_readline : bool, optional
538 If true, use the readline library. This completer can still function
540 If true, use the readline library. This completer can still function
539 without readline, though in that case callers must provide some extra
541 without readline, though in that case callers must provide some extra
540 information on each call about the current line."""
542 information on each call about the current line."""
541
543
542 self.magic_escape = ESC_MAGIC
544 self.magic_escape = ESC_MAGIC
543 self.splitter = CompletionSplitter()
545 self.splitter = CompletionSplitter()
544
546
545 # Readline configuration, only used by the rlcompleter method.
547 # Readline configuration, only used by the rlcompleter method.
546 if use_readline:
548 if use_readline:
547 # We store the right version of readline so that later code
549 # We store the right version of readline so that later code
548 import IPython.utils.rlineimpl as readline
550 import IPython.utils.rlineimpl as readline
549 self.readline = readline
551 self.readline = readline
550 else:
552 else:
551 self.readline = None
553 self.readline = None
552
554
553 # _greedy_changed() depends on splitter and readline being defined:
555 # _greedy_changed() depends on splitter and readline being defined:
554 Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
556 Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
555 config=config, **kwargs)
557 config=config, **kwargs)
556
558
557 # List where completion matches will be stored
559 # List where completion matches will be stored
558 self.matches = []
560 self.matches = []
559 self.shell = shell
561 self.shell = shell
560 # Regexp to split filenames with spaces in them
562 # Regexp to split filenames with spaces in them
561 self.space_name_re = re.compile(r'([^\\] )')
563 self.space_name_re = re.compile(r'([^\\] )')
562 # Hold a local ref. to glob.glob for speed
564 # Hold a local ref. to glob.glob for speed
563 self.glob = glob.glob
565 self.glob = glob.glob
564
566
565 # Determine if we are running on 'dumb' terminals, like (X)Emacs
567 # Determine if we are running on 'dumb' terminals, like (X)Emacs
566 # buffers, to avoid completion problems.
568 # buffers, to avoid completion problems.
567 term = os.environ.get('TERM','xterm')
569 term = os.environ.get('TERM','xterm')
568 self.dumb_terminal = term in ['dumb','emacs']
570 self.dumb_terminal = term in ['dumb','emacs']
569
571
570 # Special handling of backslashes needed in win32 platforms
572 # Special handling of backslashes needed in win32 platforms
571 if sys.platform == "win32":
573 if sys.platform == "win32":
572 self.clean_glob = self._clean_glob_win32
574 self.clean_glob = self._clean_glob_win32
573 else:
575 else:
574 self.clean_glob = self._clean_glob
576 self.clean_glob = self._clean_glob
575
577
576 #regexp to parse docstring for function signature
578 #regexp to parse docstring for function signature
577 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
579 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
578 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
580 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
579 #use this if positional argument name is also needed
581 #use this if positional argument name is also needed
580 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
582 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
581
583
582 # All active matcher routines for completion
584 # All active matcher routines for completion
583 self.matchers = [self.python_matches,
585 self.matchers = [self.python_matches,
584 self.file_matches,
586 self.file_matches,
585 self.magic_matches,
587 self.magic_matches,
586 self.python_func_kw_matches,
588 self.python_func_kw_matches,
587 self.dict_key_matches,
589 self.dict_key_matches,
588 ]
590 ]
589
591
590 def all_completions(self, text):
592 def all_completions(self, text):
591 """
593 """
592 Wrapper around the complete method for the benefit of emacs
594 Wrapper around the complete method for the benefit of emacs
593 and pydb.
595 and pydb.
594 """
596 """
595 return self.complete(text)[1]
597 return self.complete(text)[1]
596
598
597 def _clean_glob(self,text):
599 def _clean_glob(self,text):
598 return self.glob("%s*" % text)
600 return self.glob("%s*" % text)
599
601
600 def _clean_glob_win32(self,text):
602 def _clean_glob_win32(self,text):
601 return [f.replace("\\","/")
603 return [f.replace("\\","/")
602 for f in self.glob("%s*" % text)]
604 for f in self.glob("%s*" % text)]
603
605
604 def file_matches(self, text):
606 def file_matches(self, text):
605 """Match filenames, expanding ~USER type strings.
607 """Match filenames, expanding ~USER type strings.
606
608
607 Most of the seemingly convoluted logic in this completer is an
609 Most of the seemingly convoluted logic in this completer is an
608 attempt to handle filenames with spaces in them. And yet it's not
610 attempt to handle filenames with spaces in them. And yet it's not
609 quite perfect, because Python's readline doesn't expose all of the
611 quite perfect, because Python's readline doesn't expose all of the
610 GNU readline details needed for this to be done correctly.
612 GNU readline details needed for this to be done correctly.
611
613
612 For a filename with a space in it, the printed completions will be
614 For a filename with a space in it, the printed completions will be
613 only the parts after what's already been typed (instead of the
615 only the parts after what's already been typed (instead of the
614 full completions, as is normally done). I don't think with the
616 full completions, as is normally done). I don't think with the
615 current (as of Python 2.3) Python readline it's possible to do
617 current (as of Python 2.3) Python readline it's possible to do
616 better."""
618 better."""
617
619
618 #io.rprint('Completer->file_matches: <%r>' % text) # dbg
620 #io.rprint('Completer->file_matches: <%r>' % text) # dbg
619
621
620 # chars that require escaping with backslash - i.e. chars
622 # chars that require escaping with backslash - i.e. chars
621 # that readline treats incorrectly as delimiters, but we
623 # that readline treats incorrectly as delimiters, but we
622 # don't want to treat as delimiters in filename matching
624 # don't want to treat as delimiters in filename matching
623 # when escaped with backslash
625 # when escaped with backslash
624 if text.startswith('!'):
626 if text.startswith('!'):
625 text = text[1:]
627 text = text[1:]
626 text_prefix = '!'
628 text_prefix = '!'
627 else:
629 else:
628 text_prefix = ''
630 text_prefix = ''
629
631
630 text_until_cursor = self.text_until_cursor
632 text_until_cursor = self.text_until_cursor
631 # track strings with open quotes
633 # track strings with open quotes
632 open_quotes = has_open_quotes(text_until_cursor)
634 open_quotes = has_open_quotes(text_until_cursor)
633
635
634 if '(' in text_until_cursor or '[' in text_until_cursor:
636 if '(' in text_until_cursor or '[' in text_until_cursor:
635 lsplit = text
637 lsplit = text
636 else:
638 else:
637 try:
639 try:
638 # arg_split ~ shlex.split, but with unicode bugs fixed by us
640 # arg_split ~ shlex.split, but with unicode bugs fixed by us
639 lsplit = arg_split(text_until_cursor)[-1]
641 lsplit = arg_split(text_until_cursor)[-1]
640 except ValueError:
642 except ValueError:
641 # typically an unmatched ", or backslash without escaped char.
643 # typically an unmatched ", or backslash without escaped char.
642 if open_quotes:
644 if open_quotes:
643 lsplit = text_until_cursor.split(open_quotes)[-1]
645 lsplit = text_until_cursor.split(open_quotes)[-1]
644 else:
646 else:
645 return []
647 return []
646 except IndexError:
648 except IndexError:
647 # tab pressed on empty line
649 # tab pressed on empty line
648 lsplit = ""
650 lsplit = ""
649
651
650 if not open_quotes and lsplit != protect_filename(lsplit):
652 if not open_quotes and lsplit != protect_filename(lsplit):
651 # if protectables are found, do matching on the whole escaped name
653 # if protectables are found, do matching on the whole escaped name
652 has_protectables = True
654 has_protectables = True
653 text0,text = text,lsplit
655 text0,text = text,lsplit
654 else:
656 else:
655 has_protectables = False
657 has_protectables = False
656 text = os.path.expanduser(text)
658 text = os.path.expanduser(text)
657
659
658 if text == "":
660 if text == "":
659 return [text_prefix + protect_filename(f) for f in self.glob("*")]
661 return [text_prefix + protect_filename(f) for f in self.glob("*")]
660
662
661 # Compute the matches from the filesystem
663 # Compute the matches from the filesystem
662 m0 = self.clean_glob(text.replace('\\',''))
664 m0 = self.clean_glob(text.replace('\\',''))
663
665
664 if has_protectables:
666 if has_protectables:
665 # If we had protectables, we need to revert our changes to the
667 # If we had protectables, we need to revert our changes to the
666 # beginning of filename so that we don't double-write the part
668 # beginning of filename so that we don't double-write the part
667 # of the filename we have so far
669 # of the filename we have so far
668 len_lsplit = len(lsplit)
670 len_lsplit = len(lsplit)
669 matches = [text_prefix + text0 +
671 matches = [text_prefix + text0 +
670 protect_filename(f[len_lsplit:]) for f in m0]
672 protect_filename(f[len_lsplit:]) for f in m0]
671 else:
673 else:
672 if open_quotes:
674 if open_quotes:
673 # if we have a string with an open quote, we don't need to
675 # if we have a string with an open quote, we don't need to
674 # protect the names at all (and we _shouldn't_, as it
676 # protect the names at all (and we _shouldn't_, as it
675 # would cause bugs when the filesystem call is made).
677 # would cause bugs when the filesystem call is made).
676 matches = m0
678 matches = m0
677 else:
679 else:
678 matches = [text_prefix +
680 matches = [text_prefix +
679 protect_filename(f) for f in m0]
681 protect_filename(f) for f in m0]
680
682
681 #io.rprint('mm', matches) # dbg
683 #io.rprint('mm', matches) # dbg
682
684
683 # Mark directories in input list by appending '/' to their names.
685 # Mark directories in input list by appending '/' to their names.
684 matches = [x+'/' if os.path.isdir(x) else x for x in matches]
686 matches = [x+'/' if os.path.isdir(x) else x for x in matches]
685 return matches
687 return matches
686
688
687 def magic_matches(self, text):
689 def magic_matches(self, text):
688 """Match magics"""
690 """Match magics"""
689 #print 'Completer->magic_matches:',text,'lb',self.text_until_cursor # dbg
691 #print 'Completer->magic_matches:',text,'lb',self.text_until_cursor # dbg
690 # Get all shell magics now rather than statically, so magics loaded at
692 # Get all shell magics now rather than statically, so magics loaded at
691 # runtime show up too.
693 # runtime show up too.
692 lsm = self.shell.magics_manager.lsmagic()
694 lsm = self.shell.magics_manager.lsmagic()
693 line_magics = lsm['line']
695 line_magics = lsm['line']
694 cell_magics = lsm['cell']
696 cell_magics = lsm['cell']
695 pre = self.magic_escape
697 pre = self.magic_escape
696 pre2 = pre+pre
698 pre2 = pre+pre
697
699
698 # Completion logic:
700 # Completion logic:
699 # - user gives %%: only do cell magics
701 # - user gives %%: only do cell magics
700 # - user gives %: do both line and cell magics
702 # - user gives %: do both line and cell magics
701 # - no prefix: do both
703 # - no prefix: do both
702 # In other words, line magics are skipped if the user gives %% explicitly
704 # In other words, line magics are skipped if the user gives %% explicitly
703 bare_text = text.lstrip(pre)
705 bare_text = text.lstrip(pre)
704 comp = [ pre2+m for m in cell_magics if m.startswith(bare_text)]
706 comp = [ pre2+m for m in cell_magics if m.startswith(bare_text)]
705 if not text.startswith(pre2):
707 if not text.startswith(pre2):
706 comp += [ pre+m for m in line_magics if m.startswith(bare_text)]
708 comp += [ pre+m for m in line_magics if m.startswith(bare_text)]
707 return comp
709 return comp
708
710
709 def python_matches(self,text):
711 def python_matches(self,text):
710 """Match attributes or global python names"""
712 """Match attributes or global python names"""
711
713
712 #io.rprint('Completer->python_matches, txt=%r' % text) # dbg
714 #io.rprint('Completer->python_matches, txt=%r' % text) # dbg
713 if "." in text:
715 if "." in text:
714 try:
716 try:
715 matches = self.attr_matches(text)
717 matches = self.attr_matches(text)
716 if text.endswith('.') and self.omit__names:
718 if text.endswith('.') and self.omit__names:
717 if self.omit__names == 1:
719 if self.omit__names == 1:
718 # true if txt is _not_ a __ name, false otherwise:
720 # true if txt is _not_ a __ name, false otherwise:
719 no__name = (lambda txt:
721 no__name = (lambda txt:
720 re.match(r'.*\.__.*?__',txt) is None)
722 re.match(r'.*\.__.*?__',txt) is None)
721 else:
723 else:
722 # true if txt is _not_ a _ name, false otherwise:
724 # true if txt is _not_ a _ name, false otherwise:
723 no__name = (lambda txt:
725 no__name = (lambda txt:
724 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
726 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
725 matches = filter(no__name, matches)
727 matches = filter(no__name, matches)
726 except NameError:
728 except NameError:
727 # catches <undefined attributes>.<tab>
729 # catches <undefined attributes>.<tab>
728 matches = []
730 matches = []
729 else:
731 else:
730 matches = self.global_matches(text)
732 matches = self.global_matches(text)
731
733
732 return matches
734 return matches
733
735
734 def _default_arguments_from_docstring(self, doc):
736 def _default_arguments_from_docstring(self, doc):
735 """Parse the first line of docstring for call signature.
737 """Parse the first line of docstring for call signature.
736
738
737 Docstring should be of the form 'min(iterable[, key=func])\n'.
739 Docstring should be of the form 'min(iterable[, key=func])\n'.
738 It can also parse cython docstring of the form
740 It can also parse cython docstring of the form
739 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
741 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
740 """
742 """
741 if doc is None:
743 if doc is None:
742 return []
744 return []
743
745
744 #care only the firstline
746 #care only the firstline
745 line = doc.lstrip().splitlines()[0]
747 line = doc.lstrip().splitlines()[0]
746
748
747 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
749 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
748 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
750 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
749 sig = self.docstring_sig_re.search(line)
751 sig = self.docstring_sig_re.search(line)
750 if sig is None:
752 if sig is None:
751 return []
753 return []
752 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
754 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
753 sig = sig.groups()[0].split(',')
755 sig = sig.groups()[0].split(',')
754 ret = []
756 ret = []
755 for s in sig:
757 for s in sig:
756 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
758 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
757 ret += self.docstring_kwd_re.findall(s)
759 ret += self.docstring_kwd_re.findall(s)
758 return ret
760 return ret
759
761
760 def _default_arguments(self, obj):
762 def _default_arguments(self, obj):
761 """Return the list of default arguments of obj if it is callable,
763 """Return the list of default arguments of obj if it is callable,
762 or empty list otherwise."""
764 or empty list otherwise."""
763 call_obj = obj
765 call_obj = obj
764 ret = []
766 ret = []
765 if inspect.isbuiltin(obj):
767 if inspect.isbuiltin(obj):
766 pass
768 pass
767 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
769 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
768 if inspect.isclass(obj):
770 if inspect.isclass(obj):
769 #for cython embededsignature=True the constructor docstring
771 #for cython embededsignature=True the constructor docstring
770 #belongs to the object itself not __init__
772 #belongs to the object itself not __init__
771 ret += self._default_arguments_from_docstring(
773 ret += self._default_arguments_from_docstring(
772 getattr(obj, '__doc__', ''))
774 getattr(obj, '__doc__', ''))
773 # for classes, check for __init__,__new__
775 # for classes, check for __init__,__new__
774 call_obj = (getattr(obj, '__init__', None) or
776 call_obj = (getattr(obj, '__init__', None) or
775 getattr(obj, '__new__', None))
777 getattr(obj, '__new__', None))
776 # for all others, check if they are __call__able
778 # for all others, check if they are __call__able
777 elif hasattr(obj, '__call__'):
779 elif hasattr(obj, '__call__'):
778 call_obj = obj.__call__
780 call_obj = obj.__call__
779
781
780 ret += self._default_arguments_from_docstring(
782 ret += self._default_arguments_from_docstring(
781 getattr(call_obj, '__doc__', ''))
783 getattr(call_obj, '__doc__', ''))
782
784
783 try:
785 try:
784 args,_,_1,defaults = inspect.getargspec(call_obj)
786 args,_,_1,defaults = inspect.getargspec(call_obj)
785 if defaults:
787 if defaults:
786 ret+=args[-len(defaults):]
788 ret+=args[-len(defaults):]
787 except TypeError:
789 except TypeError:
788 pass
790 pass
789
791
790 return list(set(ret))
792 return list(set(ret))
791
793
792 def python_func_kw_matches(self,text):
794 def python_func_kw_matches(self,text):
793 """Match named parameters (kwargs) of the last open function"""
795 """Match named parameters (kwargs) of the last open function"""
794
796
795 if "." in text: # a parameter cannot be dotted
797 if "." in text: # a parameter cannot be dotted
796 return []
798 return []
797 try: regexp = self.__funcParamsRegex
799 try: regexp = self.__funcParamsRegex
798 except AttributeError:
800 except AttributeError:
799 regexp = self.__funcParamsRegex = re.compile(r'''
801 regexp = self.__funcParamsRegex = re.compile(r'''
800 '.*?(?<!\\)' | # single quoted strings or
802 '.*?(?<!\\)' | # single quoted strings or
801 ".*?(?<!\\)" | # double quoted strings or
803 ".*?(?<!\\)" | # double quoted strings or
802 \w+ | # identifier
804 \w+ | # identifier
803 \S # other characters
805 \S # other characters
804 ''', re.VERBOSE | re.DOTALL)
806 ''', re.VERBOSE | re.DOTALL)
805 # 1. find the nearest identifier that comes before an unclosed
807 # 1. find the nearest identifier that comes before an unclosed
806 # parenthesis before the cursor
808 # parenthesis before the cursor
807 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
809 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
808 tokens = regexp.findall(self.text_until_cursor)
810 tokens = regexp.findall(self.text_until_cursor)
809 tokens.reverse()
811 tokens.reverse()
810 iterTokens = iter(tokens); openPar = 0
812 iterTokens = iter(tokens); openPar = 0
811
813
812 for token in iterTokens:
814 for token in iterTokens:
813 if token == ')':
815 if token == ')':
814 openPar -= 1
816 openPar -= 1
815 elif token == '(':
817 elif token == '(':
816 openPar += 1
818 openPar += 1
817 if openPar > 0:
819 if openPar > 0:
818 # found the last unclosed parenthesis
820 # found the last unclosed parenthesis
819 break
821 break
820 else:
822 else:
821 return []
823 return []
822 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
824 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
823 ids = []
825 ids = []
824 isId = re.compile(r'\w+$').match
826 isId = re.compile(r'\w+$').match
825
827
826 while True:
828 while True:
827 try:
829 try:
828 ids.append(next(iterTokens))
830 ids.append(next(iterTokens))
829 if not isId(ids[-1]):
831 if not isId(ids[-1]):
830 ids.pop(); break
832 ids.pop(); break
831 if not next(iterTokens) == '.':
833 if not next(iterTokens) == '.':
832 break
834 break
833 except StopIteration:
835 except StopIteration:
834 break
836 break
835 # lookup the candidate callable matches either using global_matches
837 # lookup the candidate callable matches either using global_matches
836 # or attr_matches for dotted names
838 # or attr_matches for dotted names
837 if len(ids) == 1:
839 if len(ids) == 1:
838 callableMatches = self.global_matches(ids[0])
840 callableMatches = self.global_matches(ids[0])
839 else:
841 else:
840 callableMatches = self.attr_matches('.'.join(ids[::-1]))
842 callableMatches = self.attr_matches('.'.join(ids[::-1]))
841 argMatches = []
843 argMatches = []
842 for callableMatch in callableMatches:
844 for callableMatch in callableMatches:
843 try:
845 try:
844 namedArgs = self._default_arguments(eval(callableMatch,
846 namedArgs = self._default_arguments(eval(callableMatch,
845 self.namespace))
847 self.namespace))
846 except:
848 except:
847 continue
849 continue
848
850
849 for namedArg in namedArgs:
851 for namedArg in namedArgs:
850 if namedArg.startswith(text):
852 if namedArg.startswith(text):
851 argMatches.append("%s=" %namedArg)
853 argMatches.append("%s=" %namedArg)
852 return argMatches
854 return argMatches
853
855
854 def dict_key_matches(self, text):
856 def dict_key_matches(self, text):
855 "Match string keys in a dictionary, after e.g. 'foo[' "
857 "Match string keys in a dictionary, after e.g. 'foo[' "
856 def get_keys(obj):
858 def get_keys(obj):
857 # Only allow completion for known in-memory dict-like types
859 # Only allow completion for known in-memory dict-like types
858 if isinstance(obj, dict) or\
860 if isinstance(obj, dict) or\
859 _safe_isinstance(obj, 'pandas', 'DataFrame'):
861 _safe_isinstance(obj, 'pandas', 'DataFrame'):
860 try:
862 try:
861 return list(obj.keys())
863 return list(obj.keys())
862 except Exception:
864 except Exception:
863 return []
865 return []
864 elif _safe_isinstance(obj, 'numpy', 'ndarray'):
866 elif _safe_isinstance(obj, 'numpy', 'ndarray'):
865 return obj.dtype.names or []
867 return obj.dtype.names or []
866 return []
868 return []
867
869
868 try:
870 try:
869 regexps = self.__dict_key_regexps
871 regexps = self.__dict_key_regexps
870 except AttributeError:
872 except AttributeError:
871 dict_key_re_fmt = r'''(?x)
873 dict_key_re_fmt = r'''(?x)
872 ( # match dict-referring expression wrt greedy setting
874 ( # match dict-referring expression wrt greedy setting
873 %s
875 %s
874 )
876 )
875 \[ # open bracket
877 \[ # open bracket
876 \s* # and optional whitespace
878 \s* # and optional whitespace
877 ([uUbB]? # string prefix (r not handled)
879 ([uUbB]? # string prefix (r not handled)
878 (?: # unclosed string
880 (?: # unclosed string
879 '(?:[^']|(?<!\\)\\')*
881 '(?:[^']|(?<!\\)\\')*
880 |
882 |
881 "(?:[^"]|(?<!\\)\\")*
883 "(?:[^"]|(?<!\\)\\")*
882 )
884 )
883 )?
885 )?
884 $
886 $
885 '''
887 '''
886 regexps = self.__dict_key_regexps = {
888 regexps = self.__dict_key_regexps = {
887 False: re.compile(dict_key_re_fmt % '''
889 False: re.compile(dict_key_re_fmt % '''
888 # identifiers separated by .
890 # identifiers separated by .
889 (?!\d)\w+
891 (?!\d)\w+
890 (?:\.(?!\d)\w+)*
892 (?:\.(?!\d)\w+)*
891 '''),
893 '''),
892 True: re.compile(dict_key_re_fmt % '''
894 True: re.compile(dict_key_re_fmt % '''
893 .+
895 .+
894 ''')
896 ''')
895 }
897 }
896
898
897 match = regexps[self.greedy].search(self.text_until_cursor)
899 match = regexps[self.greedy].search(self.text_until_cursor)
898 if match is None:
900 if match is None:
899 return []
901 return []
900
902
901 expr, prefix = match.groups()
903 expr, prefix = match.groups()
902 try:
904 try:
903 obj = eval(expr, self.namespace)
905 obj = eval(expr, self.namespace)
904 except Exception:
906 except Exception:
905 try:
907 try:
906 obj = eval(expr, self.global_namespace)
908 obj = eval(expr, self.global_namespace)
907 except Exception:
909 except Exception:
908 return []
910 return []
909
911
910 keys = get_keys(obj)
912 keys = get_keys(obj)
911 if not keys:
913 if not keys:
912 return keys
914 return keys
913 closing_quote, token_offset, matches = match_dict_keys(keys, prefix)
915 closing_quote, token_offset, matches = match_dict_keys(keys, prefix)
914 if not matches:
916 if not matches:
915 return matches
917 return matches
916
918
917 # get the cursor position of
919 # get the cursor position of
918 # - the text being completed
920 # - the text being completed
919 # - the start of the key text
921 # - the start of the key text
920 # - the start of the completion
922 # - the start of the completion
921 text_start = len(self.text_until_cursor) - len(text)
923 text_start = len(self.text_until_cursor) - len(text)
922 if prefix:
924 if prefix:
923 key_start = match.start(2)
925 key_start = match.start(2)
924 completion_start = key_start + token_offset
926 completion_start = key_start + token_offset
925 else:
927 else:
926 key_start = completion_start = match.end()
928 key_start = completion_start = match.end()
927
929
928 # grab the leading prefix, to make sure all completions start with `text`
930 # grab the leading prefix, to make sure all completions start with `text`
929 if text_start > key_start:
931 if text_start > key_start:
930 leading = ''
932 leading = ''
931 else:
933 else:
932 leading = text[text_start:completion_start]
934 leading = text[text_start:completion_start]
933
935
934 # the index of the `[` character
936 # the index of the `[` character
935 bracket_idx = match.end(1)
937 bracket_idx = match.end(1)
936
938
937 # append closing quote and bracket as appropriate
939 # append closing quote and bracket as appropriate
938 # this is *not* appropriate if the opening quote or bracket is outside
940 # this is *not* appropriate if the opening quote or bracket is outside
939 # the text given to this method
941 # the text given to this method
940 suf = ''
942 suf = ''
941 continuation = self.line_buffer[len(self.text_until_cursor):]
943 continuation = self.line_buffer[len(self.text_until_cursor):]
942 if key_start > text_start and closing_quote:
944 if key_start > text_start and closing_quote:
943 # quotes were opened inside text, maybe close them
945 # quotes were opened inside text, maybe close them
944 if continuation.startswith(closing_quote):
946 if continuation.startswith(closing_quote):
945 continuation = continuation[len(closing_quote):]
947 continuation = continuation[len(closing_quote):]
946 else:
948 else:
947 suf += closing_quote
949 suf += closing_quote
948 if bracket_idx > text_start:
950 if bracket_idx > text_start:
949 # brackets were opened inside text, maybe close them
951 # brackets were opened inside text, maybe close them
950 if not continuation.startswith(']'):
952 if not continuation.startswith(']'):
951 suf += ']'
953 suf += ']'
952
954
953 return [leading + k + suf for k in matches]
955 return [leading + k + suf for k in matches]
954
956
957 def latex_matches(self, text):
958 u"""Match Latex syntax for unicode characters.
959
960 This does both \\alp -> \\alpha and \\alpha -> α
961
962 Used on Python 3 only.
963 """
964 slashpos = text.rfind('\\')
965 if slashpos > -1:
966 s = text[slashpos:]
967 if s in latex_symbols:
968 # Try to complete a full latex symbol to unicode
969 # \\alpha -> α
970 return s, [latex_symbols[s]]
971 else:
972 # If a user has partially typed a latex symbol, give them
973 # a full list of options \al -> [\aleph, \alpha]
974 matches = [k for k in latex_symbols if k.startswith(s)]
975 return s, matches
976 return u'', []
977
955 def dispatch_custom_completer(self, text):
978 def dispatch_custom_completer(self, text):
956 #io.rprint("Custom! '%s' %s" % (text, self.custom_completers)) # dbg
979 #io.rprint("Custom! '%s' %s" % (text, self.custom_completers)) # dbg
957 line = self.line_buffer
980 line = self.line_buffer
958 if not line.strip():
981 if not line.strip():
959 return None
982 return None
960
983
961 # Create a little structure to pass all the relevant information about
984 # Create a little structure to pass all the relevant information about
962 # the current completion to any custom completer.
985 # the current completion to any custom completer.
963 event = Bunch()
986 event = Bunch()
964 event.line = line
987 event.line = line
965 event.symbol = text
988 event.symbol = text
966 cmd = line.split(None,1)[0]
989 cmd = line.split(None,1)[0]
967 event.command = cmd
990 event.command = cmd
968 event.text_until_cursor = self.text_until_cursor
991 event.text_until_cursor = self.text_until_cursor
969
992
970 #print "\ncustom:{%s]\n" % event # dbg
993 #print "\ncustom:{%s]\n" % event # dbg
971
994
972 # for foo etc, try also to find completer for %foo
995 # for foo etc, try also to find completer for %foo
973 if not cmd.startswith(self.magic_escape):
996 if not cmd.startswith(self.magic_escape):
974 try_magic = self.custom_completers.s_matches(
997 try_magic = self.custom_completers.s_matches(
975 self.magic_escape + cmd)
998 self.magic_escape + cmd)
976 else:
999 else:
977 try_magic = []
1000 try_magic = []
978
1001
979 for c in itertools.chain(self.custom_completers.s_matches(cmd),
1002 for c in itertools.chain(self.custom_completers.s_matches(cmd),
980 try_magic,
1003 try_magic,
981 self.custom_completers.flat_matches(self.text_until_cursor)):
1004 self.custom_completers.flat_matches(self.text_until_cursor)):
982 #print "try",c # dbg
1005 #print "try",c # dbg
983 try:
1006 try:
984 res = c(event)
1007 res = c(event)
985 if res:
1008 if res:
986 # first, try case sensitive match
1009 # first, try case sensitive match
987 withcase = [r for r in res if r.startswith(text)]
1010 withcase = [r for r in res if r.startswith(text)]
988 if withcase:
1011 if withcase:
989 return withcase
1012 return withcase
990 # if none, then case insensitive ones are ok too
1013 # if none, then case insensitive ones are ok too
991 text_low = text.lower()
1014 text_low = text.lower()
992 return [r for r in res if r.lower().startswith(text_low)]
1015 return [r for r in res if r.lower().startswith(text_low)]
993 except TryNext:
1016 except TryNext:
994 pass
1017 pass
995
1018
996 return None
1019 return None
997
1020
998 def complete(self, text=None, line_buffer=None, cursor_pos=None):
1021 def complete(self, text=None, line_buffer=None, cursor_pos=None):
999 """Find completions for the given text and line context.
1022 """Find completions for the given text and line context.
1000
1023
1001 Note that both the text and the line_buffer are optional, but at least
1024 Note that both the text and the line_buffer are optional, but at least
1002 one of them must be given.
1025 one of them must be given.
1003
1026
1004 Parameters
1027 Parameters
1005 ----------
1028 ----------
1006 text : string, optional
1029 text : string, optional
1007 Text to perform the completion on. If not given, the line buffer
1030 Text to perform the completion on. If not given, the line buffer
1008 is split using the instance's CompletionSplitter object.
1031 is split using the instance's CompletionSplitter object.
1009
1032
1010 line_buffer : string, optional
1033 line_buffer : string, optional
1011 If not given, the completer attempts to obtain the current line
1034 If not given, the completer attempts to obtain the current line
1012 buffer via readline. This keyword allows clients which are
1035 buffer via readline. This keyword allows clients which are
1013 requesting for text completions in non-readline contexts to inform
1036 requesting for text completions in non-readline contexts to inform
1014 the completer of the entire text.
1037 the completer of the entire text.
1015
1038
1016 cursor_pos : int, optional
1039 cursor_pos : int, optional
1017 Index of the cursor in the full line buffer. Should be provided by
1040 Index of the cursor in the full line buffer. Should be provided by
1018 remote frontends where kernel has no access to frontend state.
1041 remote frontends where kernel has no access to frontend state.
1019
1042
1020 Returns
1043 Returns
1021 -------
1044 -------
1022 text : str
1045 text : str
1023 Text that was actually used in the completion.
1046 Text that was actually used in the completion.
1024
1047
1025 matches : list
1048 matches : list
1026 A list of completion matches.
1049 A list of completion matches.
1027 """
1050 """
1028 #io.rprint('\nCOMP1 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
1051 # io.rprint('\nCOMP1 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
1029
1052
1030 # if the cursor position isn't given, the only sane assumption we can
1053 # if the cursor position isn't given, the only sane assumption we can
1031 # make is that it's at the end of the line (the common case)
1054 # make is that it's at the end of the line (the common case)
1032 if cursor_pos is None:
1055 if cursor_pos is None:
1033 cursor_pos = len(line_buffer) if text is None else len(text)
1056 cursor_pos = len(line_buffer) if text is None else len(text)
1034
1057
1058 if PY3:
1059 latex_text = text if not line_buffer else line_buffer[:cursor_pos]
1060 latex_text, latex_matches = self.latex_matches(latex_text)
1061 if latex_matches:
1062 return latex_text, latex_matches
1063
1035 # if text is either None or an empty string, rely on the line buffer
1064 # if text is either None or an empty string, rely on the line buffer
1036 if not text:
1065 if not text:
1037 text = self.splitter.split_line(line_buffer, cursor_pos)
1066 text = self.splitter.split_line(line_buffer, cursor_pos)
1038
1067
1039 # If no line buffer is given, assume the input text is all there was
1068 # If no line buffer is given, assume the input text is all there was
1040 if line_buffer is None:
1069 if line_buffer is None:
1041 line_buffer = text
1070 line_buffer = text
1042
1071
1043 self.line_buffer = line_buffer
1072 self.line_buffer = line_buffer
1044 self.text_until_cursor = self.line_buffer[:cursor_pos]
1073 self.text_until_cursor = self.line_buffer[:cursor_pos]
1045 #io.rprint('COMP2 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
1074 # io.rprint('COMP2 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
1046
1075
1047 # Start with a clean slate of completions
1076 # Start with a clean slate of completions
1048 self.matches[:] = []
1077 self.matches[:] = []
1049 custom_res = self.dispatch_custom_completer(text)
1078 custom_res = self.dispatch_custom_completer(text)
1050 if custom_res is not None:
1079 if custom_res is not None:
1051 # did custom completers produce something?
1080 # did custom completers produce something?
1052 self.matches = custom_res
1081 self.matches = custom_res
1053 else:
1082 else:
1054 # Extend the list of completions with the results of each
1083 # Extend the list of completions with the results of each
1055 # matcher, so we return results to the user from all
1084 # matcher, so we return results to the user from all
1056 # namespaces.
1085 # namespaces.
1057 if self.merge_completions:
1086 if self.merge_completions:
1058 self.matches = []
1087 self.matches = []
1059 for matcher in self.matchers:
1088 for matcher in self.matchers:
1060 try:
1089 try:
1061 self.matches.extend(matcher(text))
1090 self.matches.extend(matcher(text))
1062 except:
1091 except:
1063 # Show the ugly traceback if the matcher causes an
1092 # Show the ugly traceback if the matcher causes an
1064 # exception, but do NOT crash the kernel!
1093 # exception, but do NOT crash the kernel!
1065 sys.excepthook(*sys.exc_info())
1094 sys.excepthook(*sys.exc_info())
1066 else:
1095 else:
1067 for matcher in self.matchers:
1096 for matcher in self.matchers:
1068 self.matches = matcher(text)
1097 self.matches = matcher(text)
1069 if self.matches:
1098 if self.matches:
1070 break
1099 break
1071 # FIXME: we should extend our api to return a dict with completions for
1100 # FIXME: we should extend our api to return a dict with completions for
1072 # different types of objects. The rlcomplete() method could then
1101 # different types of objects. The rlcomplete() method could then
1073 # simply collapse the dict into a list for readline, but we'd have
1102 # simply collapse the dict into a list for readline, but we'd have
1074 # richer completion semantics in other evironments.
1103 # richer completion semantics in other evironments.
1075
1104
1076 # use penalize_magics_key to put magics after variables with same name
1105 # use penalize_magics_key to put magics after variables with same name
1077 self.matches = sorted(set(self.matches), key=penalize_magics_key)
1106 self.matches = sorted(set(self.matches), key=penalize_magics_key)
1078
1107
1079 #io.rprint('COMP TEXT, MATCHES: %r, %r' % (text, self.matches)) # dbg
1108 #io.rprint('COMP TEXT, MATCHES: %r, %r' % (text, self.matches)) # dbg
1080 return text, self.matches
1109 return text, self.matches
1081
1110
1082 def rlcomplete(self, text, state):
1111 def rlcomplete(self, text, state):
1083 """Return the state-th possible completion for 'text'.
1112 """Return the state-th possible completion for 'text'.
1084
1113
1085 This is called successively with state == 0, 1, 2, ... until it
1114 This is called successively with state == 0, 1, 2, ... until it
1086 returns None. The completion should begin with 'text'.
1115 returns None. The completion should begin with 'text'.
1087
1116
1088 Parameters
1117 Parameters
1089 ----------
1118 ----------
1090 text : string
1119 text : string
1091 Text to perform the completion on.
1120 Text to perform the completion on.
1092
1121
1093 state : int
1122 state : int
1094 Counter used by readline.
1123 Counter used by readline.
1095 """
1124 """
1096 if state==0:
1125 if state==0:
1097
1126
1098 self.line_buffer = line_buffer = self.readline.get_line_buffer()
1127 self.line_buffer = line_buffer = self.readline.get_line_buffer()
1099 cursor_pos = self.readline.get_endidx()
1128 cursor_pos = self.readline.get_endidx()
1100
1129
1101 #io.rprint("\nRLCOMPLETE: %r %r %r" %
1130 #io.rprint("\nRLCOMPLETE: %r %r %r" %
1102 # (text, line_buffer, cursor_pos) ) # dbg
1131 # (text, line_buffer, cursor_pos) ) # dbg
1103
1132
1104 # if there is only a tab on a line with only whitespace, instead of
1133 # if there is only a tab on a line with only whitespace, instead of
1105 # the mostly useless 'do you want to see all million completions'
1134 # the mostly useless 'do you want to see all million completions'
1106 # message, just do the right thing and give the user his tab!
1135 # message, just do the right thing and give the user his tab!
1107 # Incidentally, this enables pasting of tabbed text from an editor
1136 # Incidentally, this enables pasting of tabbed text from an editor
1108 # (as long as autoindent is off).
1137 # (as long as autoindent is off).
1109
1138
1110 # It should be noted that at least pyreadline still shows file
1139 # It should be noted that at least pyreadline still shows file
1111 # completions - is there a way around it?
1140 # completions - is there a way around it?
1112
1141
1113 # don't apply this on 'dumb' terminals, such as emacs buffers, so
1142 # don't apply this on 'dumb' terminals, such as emacs buffers, so
1114 # we don't interfere with their own tab-completion mechanism.
1143 # we don't interfere with their own tab-completion mechanism.
1115 if not (self.dumb_terminal or line_buffer.strip()):
1144 if not (self.dumb_terminal or line_buffer.strip()):
1116 self.readline.insert_text('\t')
1145 self.readline.insert_text('\t')
1117 sys.stdout.flush()
1146 sys.stdout.flush()
1118 return None
1147 return None
1119
1148
1120 # Note: debugging exceptions that may occur in completion is very
1149 # Note: debugging exceptions that may occur in completion is very
1121 # tricky, because readline unconditionally silences them. So if
1150 # tricky, because readline unconditionally silences them. So if
1122 # during development you suspect a bug in the completion code, turn
1151 # during development you suspect a bug in the completion code, turn
1123 # this flag on temporarily by uncommenting the second form (don't
1152 # this flag on temporarily by uncommenting the second form (don't
1124 # flip the value in the first line, as the '# dbg' marker can be
1153 # flip the value in the first line, as the '# dbg' marker can be
1125 # automatically detected and is used elsewhere).
1154 # automatically detected and is used elsewhere).
1126 DEBUG = False
1155 DEBUG = False
1127 #DEBUG = True # dbg
1156 #DEBUG = True # dbg
1128 if DEBUG:
1157 if DEBUG:
1129 try:
1158 try:
1130 self.complete(text, line_buffer, cursor_pos)
1159 self.complete(text, line_buffer, cursor_pos)
1131 except:
1160 except:
1132 import traceback; traceback.print_exc()
1161 import traceback; traceback.print_exc()
1133 else:
1162 else:
1134 # The normal production version is here
1163 # The normal production version is here
1135
1164
1136 # This method computes the self.matches array
1165 # This method computes the self.matches array
1137 self.complete(text, line_buffer, cursor_pos)
1166 self.complete(text, line_buffer, cursor_pos)
1138
1167
1139 try:
1168 try:
1140 return self.matches[state]
1169 return self.matches[state]
1141 except IndexError:
1170 except IndexError:
1142 return None
1171 return None
@@ -1,352 +1,353 b''
1 # encoding: utf-8
1 """Implementations for various useful completers.
2 """Implementations for various useful completers.
2
3
3 These are all loaded by default by IPython.
4 These are all loaded by default by IPython.
4 """
5 """
5 #-----------------------------------------------------------------------------
6 #-----------------------------------------------------------------------------
6 # Copyright (C) 2010-2011 The IPython Development Team.
7 # Copyright (C) 2010-2011 The IPython Development Team.
7 #
8 #
8 # Distributed under the terms of the BSD License.
9 # Distributed under the terms of the BSD License.
9 #
10 #
10 # The full license is in the file COPYING.txt, distributed with this software.
11 # The full license is in the file COPYING.txt, distributed with this software.
11 #-----------------------------------------------------------------------------
12 #-----------------------------------------------------------------------------
12
13
13 #-----------------------------------------------------------------------------
14 #-----------------------------------------------------------------------------
14 # Imports
15 # Imports
15 #-----------------------------------------------------------------------------
16 #-----------------------------------------------------------------------------
16 from __future__ import print_function
17 from __future__ import print_function
17
18
18 # Stdlib imports
19 # Stdlib imports
19 import glob
20 import glob
20 import inspect
21 import inspect
21 import os
22 import os
22 import re
23 import re
23 import sys
24 import sys
24
25
25 try:
26 try:
26 # Python >= 3.3
27 # Python >= 3.3
27 from importlib.machinery import all_suffixes
28 from importlib.machinery import all_suffixes
28 _suffixes = all_suffixes()
29 _suffixes = all_suffixes()
29 except ImportError:
30 except ImportError:
30 from imp import get_suffixes
31 from imp import get_suffixes
31 _suffixes = [ s[0] for s in get_suffixes() ]
32 _suffixes = [ s[0] for s in get_suffixes() ]
32
33
33 # Third-party imports
34 # Third-party imports
34 from time import time
35 from time import time
35 from zipimport import zipimporter
36 from zipimport import zipimporter
36
37
37 # Our own imports
38 # Our own imports
38 from IPython.core.completer import expand_user, compress_user
39 from IPython.core.completer import expand_user, compress_user
39 from IPython.core.error import TryNext
40 from IPython.core.error import TryNext
40 from IPython.utils._process_common import arg_split
41 from IPython.utils._process_common import arg_split
41 from IPython.utils.py3compat import string_types
42 from IPython.utils.py3compat import string_types
42
43
43 # FIXME: this should be pulled in with the right call via the component system
44 # FIXME: this should be pulled in with the right call via the component system
44 from IPython import get_ipython
45 from IPython import get_ipython
45
46
46 #-----------------------------------------------------------------------------
47 #-----------------------------------------------------------------------------
47 # Globals and constants
48 # Globals and constants
48 #-----------------------------------------------------------------------------
49 #-----------------------------------------------------------------------------
49
50
50 # Time in seconds after which the rootmodules will be stored permanently in the
51 # Time in seconds after which the rootmodules will be stored permanently in the
51 # ipython ip.db database (kept in the user's .ipython dir).
52 # ipython ip.db database (kept in the user's .ipython dir).
52 TIMEOUT_STORAGE = 2
53 TIMEOUT_STORAGE = 2
53
54
54 # Time in seconds after which we give up
55 # Time in seconds after which we give up
55 TIMEOUT_GIVEUP = 20
56 TIMEOUT_GIVEUP = 20
56
57
57 # Regular expression for the python import statement
58 # Regular expression for the python import statement
58 import_re = re.compile(r'(?P<name>[a-zA-Z_][a-zA-Z0-9_]*?)'
59 import_re = re.compile(r'(?P<name>[a-zA-Z_][a-zA-Z0-9_]*?)'
59 r'(?P<package>[/\\]__init__)?'
60 r'(?P<package>[/\\]__init__)?'
60 r'(?P<suffix>%s)$' %
61 r'(?P<suffix>%s)$' %
61 r'|'.join(re.escape(s) for s in _suffixes))
62 r'|'.join(re.escape(s) for s in _suffixes))
62
63
63 # RE for the ipython %run command (python + ipython scripts)
64 # RE for the ipython %run command (python + ipython scripts)
64 magic_run_re = re.compile(r'.*(\.ipy|\.ipynb|\.py[w]?)$')
65 magic_run_re = re.compile(r'.*(\.ipy|\.ipynb|\.py[w]?)$')
65
66
66 #-----------------------------------------------------------------------------
67 #-----------------------------------------------------------------------------
67 # Local utilities
68 # Local utilities
68 #-----------------------------------------------------------------------------
69 #-----------------------------------------------------------------------------
69
70
70 def module_list(path):
71 def module_list(path):
71 """
72 """
72 Return the list containing the names of the modules available in the given
73 Return the list containing the names of the modules available in the given
73 folder.
74 folder.
74 """
75 """
75 # sys.path has the cwd as an empty string, but isdir/listdir need it as '.'
76 # sys.path has the cwd as an empty string, but isdir/listdir need it as '.'
76 if path == '':
77 if path == '':
77 path = '.'
78 path = '.'
78
79
79 # A few local constants to be used in loops below
80 # A few local constants to be used in loops below
80 pjoin = os.path.join
81 pjoin = os.path.join
81
82
82 if os.path.isdir(path):
83 if os.path.isdir(path):
83 # Build a list of all files in the directory and all files
84 # Build a list of all files in the directory and all files
84 # in its subdirectories. For performance reasons, do not
85 # in its subdirectories. For performance reasons, do not
85 # recurse more than one level into subdirectories.
86 # recurse more than one level into subdirectories.
86 files = []
87 files = []
87 for root, dirs, nondirs in os.walk(path, followlinks=True):
88 for root, dirs, nondirs in os.walk(path, followlinks=True):
88 subdir = root[len(path)+1:]
89 subdir = root[len(path)+1:]
89 if subdir:
90 if subdir:
90 files.extend(pjoin(subdir, f) for f in nondirs)
91 files.extend(pjoin(subdir, f) for f in nondirs)
91 dirs[:] = [] # Do not recurse into additional subdirectories.
92 dirs[:] = [] # Do not recurse into additional subdirectories.
92 else:
93 else:
93 files.extend(nondirs)
94 files.extend(nondirs)
94
95
95 else:
96 else:
96 try:
97 try:
97 files = list(zipimporter(path)._files.keys())
98 files = list(zipimporter(path)._files.keys())
98 except:
99 except:
99 files = []
100 files = []
100
101
101 # Build a list of modules which match the import_re regex.
102 # Build a list of modules which match the import_re regex.
102 modules = []
103 modules = []
103 for f in files:
104 for f in files:
104 m = import_re.match(f)
105 m = import_re.match(f)
105 if m:
106 if m:
106 modules.append(m.group('name'))
107 modules.append(m.group('name'))
107 return list(set(modules))
108 return list(set(modules))
108
109
109
110
110 def get_root_modules():
111 def get_root_modules():
111 """
112 """
112 Returns a list containing the names of all the modules available in the
113 Returns a list containing the names of all the modules available in the
113 folders of the pythonpath.
114 folders of the pythonpath.
114
115
115 ip.db['rootmodules_cache'] maps sys.path entries to list of modules.
116 ip.db['rootmodules_cache'] maps sys.path entries to list of modules.
116 """
117 """
117 ip = get_ipython()
118 ip = get_ipython()
118 rootmodules_cache = ip.db.get('rootmodules_cache', {})
119 rootmodules_cache = ip.db.get('rootmodules_cache', {})
119 rootmodules = list(sys.builtin_module_names)
120 rootmodules = list(sys.builtin_module_names)
120 start_time = time()
121 start_time = time()
121 store = False
122 store = False
122 for path in sys.path:
123 for path in sys.path:
123 try:
124 try:
124 modules = rootmodules_cache[path]
125 modules = rootmodules_cache[path]
125 except KeyError:
126 except KeyError:
126 modules = module_list(path)
127 modules = module_list(path)
127 try:
128 try:
128 modules.remove('__init__')
129 modules.remove('__init__')
129 except ValueError:
130 except ValueError:
130 pass
131 pass
131 if path not in ('', '.'): # cwd modules should not be cached
132 if path not in ('', '.'): # cwd modules should not be cached
132 rootmodules_cache[path] = modules
133 rootmodules_cache[path] = modules
133 if time() - start_time > TIMEOUT_STORAGE and not store:
134 if time() - start_time > TIMEOUT_STORAGE and not store:
134 store = True
135 store = True
135 print("\nCaching the list of root modules, please wait!")
136 print("\nCaching the list of root modules, please wait!")
136 print("(This will only be done once - type '%rehashx' to "
137 print("(This will only be done once - type '%rehashx' to "
137 "reset cache!)\n")
138 "reset cache!)\n")
138 sys.stdout.flush()
139 sys.stdout.flush()
139 if time() - start_time > TIMEOUT_GIVEUP:
140 if time() - start_time > TIMEOUT_GIVEUP:
140 print("This is taking too long, we give up.\n")
141 print("This is taking too long, we give up.\n")
141 return []
142 return []
142 rootmodules.extend(modules)
143 rootmodules.extend(modules)
143 if store:
144 if store:
144 ip.db['rootmodules_cache'] = rootmodules_cache
145 ip.db['rootmodules_cache'] = rootmodules_cache
145 rootmodules = list(set(rootmodules))
146 rootmodules = list(set(rootmodules))
146 return rootmodules
147 return rootmodules
147
148
148
149
149 def is_importable(module, attr, only_modules):
150 def is_importable(module, attr, only_modules):
150 if only_modules:
151 if only_modules:
151 return inspect.ismodule(getattr(module, attr))
152 return inspect.ismodule(getattr(module, attr))
152 else:
153 else:
153 return not(attr[:2] == '__' and attr[-2:] == '__')
154 return not(attr[:2] == '__' and attr[-2:] == '__')
154
155
155
156
156 def try_import(mod, only_modules=False):
157 def try_import(mod, only_modules=False):
157 try:
158 try:
158 m = __import__(mod)
159 m = __import__(mod)
159 except:
160 except:
160 return []
161 return []
161 mods = mod.split('.')
162 mods = mod.split('.')
162 for module in mods[1:]:
163 for module in mods[1:]:
163 m = getattr(m, module)
164 m = getattr(m, module)
164
165
165 m_is_init = hasattr(m, '__file__') and '__init__' in m.__file__
166 m_is_init = hasattr(m, '__file__') and '__init__' in m.__file__
166
167
167 completions = []
168 completions = []
168 if (not hasattr(m, '__file__')) or (not only_modules) or m_is_init:
169 if (not hasattr(m, '__file__')) or (not only_modules) or m_is_init:
169 completions.extend( [attr for attr in dir(m) if
170 completions.extend( [attr for attr in dir(m) if
170 is_importable(m, attr, only_modules)])
171 is_importable(m, attr, only_modules)])
171
172
172 completions.extend(getattr(m, '__all__', []))
173 completions.extend(getattr(m, '__all__', []))
173 if m_is_init:
174 if m_is_init:
174 completions.extend(module_list(os.path.dirname(m.__file__)))
175 completions.extend(module_list(os.path.dirname(m.__file__)))
175 completions = set(completions)
176 completions = set(completions)
176 if '__init__' in completions:
177 if '__init__' in completions:
177 completions.remove('__init__')
178 completions.remove('__init__')
178 return list(completions)
179 return list(completions)
179
180
180
181
181 #-----------------------------------------------------------------------------
182 #-----------------------------------------------------------------------------
182 # Completion-related functions.
183 # Completion-related functions.
183 #-----------------------------------------------------------------------------
184 #-----------------------------------------------------------------------------
184
185
185 def quick_completer(cmd, completions):
186 def quick_completer(cmd, completions):
186 """ Easily create a trivial completer for a command.
187 """ Easily create a trivial completer for a command.
187
188
188 Takes either a list of completions, or all completions in string (that will
189 Takes either a list of completions, or all completions in string (that will
189 be split on whitespace).
190 be split on whitespace).
190
191
191 Example::
192 Example::
192
193
193 [d:\ipython]|1> import ipy_completers
194 [d:\ipython]|1> import ipy_completers
194 [d:\ipython]|2> ipy_completers.quick_completer('foo', ['bar','baz'])
195 [d:\ipython]|2> ipy_completers.quick_completer('foo', ['bar','baz'])
195 [d:\ipython]|3> foo b<TAB>
196 [d:\ipython]|3> foo b<TAB>
196 bar baz
197 bar baz
197 [d:\ipython]|3> foo ba
198 [d:\ipython]|3> foo ba
198 """
199 """
199
200
200 if isinstance(completions, string_types):
201 if isinstance(completions, string_types):
201 completions = completions.split()
202 completions = completions.split()
202
203
203 def do_complete(self, event):
204 def do_complete(self, event):
204 return completions
205 return completions
205
206
206 get_ipython().set_hook('complete_command',do_complete, str_key = cmd)
207 get_ipython().set_hook('complete_command',do_complete, str_key = cmd)
207
208
208 def module_completion(line):
209 def module_completion(line):
209 """
210 """
210 Returns a list containing the completion possibilities for an import line.
211 Returns a list containing the completion possibilities for an import line.
211
212
212 The line looks like this :
213 The line looks like this :
213 'import xml.d'
214 'import xml.d'
214 'from xml.dom import'
215 'from xml.dom import'
215 """
216 """
216
217
217 words = line.split(' ')
218 words = line.split(' ')
218 nwords = len(words)
219 nwords = len(words)
219
220
220 # from whatever <tab> -> 'import '
221 # from whatever <tab> -> 'import '
221 if nwords == 3 and words[0] == 'from':
222 if nwords == 3 and words[0] == 'from':
222 return ['import ']
223 return ['import ']
223
224
224 # 'from xy<tab>' or 'import xy<tab>'
225 # 'from xy<tab>' or 'import xy<tab>'
225 if nwords < 3 and (words[0] in ['import','from']) :
226 if nwords < 3 and (words[0] in ['import','from']) :
226 if nwords == 1:
227 if nwords == 1:
227 return get_root_modules()
228 return get_root_modules()
228 mod = words[1].split('.')
229 mod = words[1].split('.')
229 if len(mod) < 2:
230 if len(mod) < 2:
230 return get_root_modules()
231 return get_root_modules()
231 completion_list = try_import('.'.join(mod[:-1]), True)
232 completion_list = try_import('.'.join(mod[:-1]), True)
232 return ['.'.join(mod[:-1] + [el]) for el in completion_list]
233 return ['.'.join(mod[:-1] + [el]) for el in completion_list]
233
234
234 # 'from xyz import abc<tab>'
235 # 'from xyz import abc<tab>'
235 if nwords >= 3 and words[0] == 'from':
236 if nwords >= 3 and words[0] == 'from':
236 mod = words[1]
237 mod = words[1]
237 return try_import(mod)
238 return try_import(mod)
238
239
239 #-----------------------------------------------------------------------------
240 #-----------------------------------------------------------------------------
240 # Completers
241 # Completers
241 #-----------------------------------------------------------------------------
242 #-----------------------------------------------------------------------------
242 # These all have the func(self, event) signature to be used as custom
243 # These all have the func(self, event) signature to be used as custom
243 # completers
244 # completers
244
245
245 def module_completer(self,event):
246 def module_completer(self,event):
246 """Give completions after user has typed 'import ...' or 'from ...'"""
247 """Give completions after user has typed 'import ...' or 'from ...'"""
247
248
248 # This works in all versions of python. While 2.5 has
249 # This works in all versions of python. While 2.5 has
249 # pkgutil.walk_packages(), that particular routine is fairly dangerous,
250 # pkgutil.walk_packages(), that particular routine is fairly dangerous,
250 # since it imports *EVERYTHING* on sys.path. That is: a) very slow b) full
251 # since it imports *EVERYTHING* on sys.path. That is: a) very slow b) full
251 # of possibly problematic side effects.
252 # of possibly problematic side effects.
252 # This search the folders in the sys.path for available modules.
253 # This search the folders in the sys.path for available modules.
253
254
254 return module_completion(event.line)
255 return module_completion(event.line)
255
256
256 # FIXME: there's a lot of logic common to the run, cd and builtin file
257 # FIXME: there's a lot of logic common to the run, cd and builtin file
257 # completers, that is currently reimplemented in each.
258 # completers, that is currently reimplemented in each.
258
259
259 def magic_run_completer(self, event):
260 def magic_run_completer(self, event):
260 """Complete files that end in .py or .ipy or .ipynb for the %run command.
261 """Complete files that end in .py or .ipy or .ipynb for the %run command.
261 """
262 """
262 comps = arg_split(event.line, strict=False)
263 comps = arg_split(event.line, strict=False)
263 # relpath should be the current token that we need to complete.
264 # relpath should be the current token that we need to complete.
264 if (len(comps) > 1) and (not event.line.endswith(' ')):
265 if (len(comps) > 1) and (not event.line.endswith(' ')):
265 relpath = comps[-1].strip("'\"")
266 relpath = comps[-1].strip("'\"")
266 else:
267 else:
267 relpath = ''
268 relpath = ''
268
269
269 #print("\nev=", event) # dbg
270 #print("\nev=", event) # dbg
270 #print("rp=", relpath) # dbg
271 #print("rp=", relpath) # dbg
271 #print('comps=', comps) # dbg
272 #print('comps=', comps) # dbg
272
273
273 lglob = glob.glob
274 lglob = glob.glob
274 isdir = os.path.isdir
275 isdir = os.path.isdir
275 relpath, tilde_expand, tilde_val = expand_user(relpath)
276 relpath, tilde_expand, tilde_val = expand_user(relpath)
276
277
277 # Find if the user has already typed the first filename, after which we
278 # Find if the user has already typed the first filename, after which we
278 # should complete on all files, since after the first one other files may
279 # should complete on all files, since after the first one other files may
279 # be arguments to the input script.
280 # be arguments to the input script.
280
281
281 if any(magic_run_re.match(c) for c in comps):
282 if any(magic_run_re.match(c) for c in comps):
282 matches = [f.replace('\\','/') + ('/' if isdir(f) else '')
283 matches = [f.replace('\\','/') + ('/' if isdir(f) else '')
283 for f in lglob(relpath+'*')]
284 for f in lglob(relpath+'*')]
284 else:
285 else:
285 dirs = [f.replace('\\','/') + "/" for f in lglob(relpath+'*') if isdir(f)]
286 dirs = [f.replace('\\','/') + "/" for f in lglob(relpath+'*') if isdir(f)]
286 pys = [f.replace('\\','/')
287 pys = [f.replace('\\','/')
287 for f in lglob(relpath+'*.py') + lglob(relpath+'*.ipy') +
288 for f in lglob(relpath+'*.py') + lglob(relpath+'*.ipy') +
288 lglob(relpath+'*.ipynb') + lglob(relpath + '*.pyw')]
289 lglob(relpath+'*.ipynb') + lglob(relpath + '*.pyw')]
289
290
290 matches = dirs + pys
291 matches = dirs + pys
291
292
292 #print('run comp:', dirs+pys) # dbg
293 #print('run comp:', dirs+pys) # dbg
293 return [compress_user(p, tilde_expand, tilde_val) for p in matches]
294 return [compress_user(p, tilde_expand, tilde_val) for p in matches]
294
295
295
296
296 def cd_completer(self, event):
297 def cd_completer(self, event):
297 """Completer function for cd, which only returns directories."""
298 """Completer function for cd, which only returns directories."""
298 ip = get_ipython()
299 ip = get_ipython()
299 relpath = event.symbol
300 relpath = event.symbol
300
301
301 #print(event) # dbg
302 #print(event) # dbg
302 if event.line.endswith('-b') or ' -b ' in event.line:
303 if event.line.endswith('-b') or ' -b ' in event.line:
303 # return only bookmark completions
304 # return only bookmark completions
304 bkms = self.db.get('bookmarks', None)
305 bkms = self.db.get('bookmarks', None)
305 if bkms:
306 if bkms:
306 return bkms.keys()
307 return bkms.keys()
307 else:
308 else:
308 return []
309 return []
309
310
310 if event.symbol == '-':
311 if event.symbol == '-':
311 width_dh = str(len(str(len(ip.user_ns['_dh']) + 1)))
312 width_dh = str(len(str(len(ip.user_ns['_dh']) + 1)))
312 # jump in directory history by number
313 # jump in directory history by number
313 fmt = '-%0' + width_dh +'d [%s]'
314 fmt = '-%0' + width_dh +'d [%s]'
314 ents = [ fmt % (i,s) for i,s in enumerate(ip.user_ns['_dh'])]
315 ents = [ fmt % (i,s) for i,s in enumerate(ip.user_ns['_dh'])]
315 if len(ents) > 1:
316 if len(ents) > 1:
316 return ents
317 return ents
317 return []
318 return []
318
319
319 if event.symbol.startswith('--'):
320 if event.symbol.startswith('--'):
320 return ["--" + os.path.basename(d) for d in ip.user_ns['_dh']]
321 return ["--" + os.path.basename(d) for d in ip.user_ns['_dh']]
321
322
322 # Expand ~ in path and normalize directory separators.
323 # Expand ~ in path and normalize directory separators.
323 relpath, tilde_expand, tilde_val = expand_user(relpath)
324 relpath, tilde_expand, tilde_val = expand_user(relpath)
324 relpath = relpath.replace('\\','/')
325 relpath = relpath.replace('\\','/')
325
326
326 found = []
327 found = []
327 for d in [f.replace('\\','/') + '/' for f in glob.glob(relpath+'*')
328 for d in [f.replace('\\','/') + '/' for f in glob.glob(relpath+'*')
328 if os.path.isdir(f)]:
329 if os.path.isdir(f)]:
329 if ' ' in d:
330 if ' ' in d:
330 # we don't want to deal with any of that, complex code
331 # we don't want to deal with any of that, complex code
331 # for this is elsewhere
332 # for this is elsewhere
332 raise TryNext
333 raise TryNext
333
334
334 found.append(d)
335 found.append(d)
335
336
336 if not found:
337 if not found:
337 if os.path.isdir(relpath):
338 if os.path.isdir(relpath):
338 return [compress_user(relpath, tilde_expand, tilde_val)]
339 return [compress_user(relpath, tilde_expand, tilde_val)]
339
340
340 # if no completions so far, try bookmarks
341 # if no completions so far, try bookmarks
341 bks = self.db.get('bookmarks',{})
342 bks = self.db.get('bookmarks',{})
342 bkmatches = [s for s in bks if s.startswith(event.symbol)]
343 bkmatches = [s for s in bks if s.startswith(event.symbol)]
343 if bkmatches:
344 if bkmatches:
344 return bkmatches
345 return bkmatches
345
346
346 raise TryNext
347 raise TryNext
347
348
348 return [compress_user(p, tilde_expand, tilde_val) for p in found]
349 return [compress_user(p, tilde_expand, tilde_val) for p in found]
349
350
350 def reset_completer(self, event):
351 def reset_completer(self, event):
351 "A completer for %reset magic"
352 "A completer for %reset magic"
352 return '-f -s in out array dhist'.split()
353 return '-f -s in out array dhist'.split()
@@ -1,680 +1,701 b''
1 # encoding: utf-8
1 # encoding: utf-8
2 """Tests for the IPython tab-completion machinery."""
2 """Tests for the IPython tab-completion machinery."""
3
3
4 # Copyright (c) IPython Development Team.
4 # Copyright (c) IPython Development Team.
5 # Distributed under the terms of the Modified BSD License.
5 # Distributed under the terms of the Modified BSD License.
6
6
7 import os
7 import os
8 import sys
8 import sys
9 import unittest
9 import unittest
10
10
11 from contextlib import contextmanager
11 from contextlib import contextmanager
12
12
13 import nose.tools as nt
13 import nose.tools as nt
14
14
15 from IPython.config.loader import Config
15 from IPython.config.loader import Config
16 from IPython.core import completer
16 from IPython.core import completer
17 from IPython.external.decorators import knownfailureif
17 from IPython.external.decorators import knownfailureif
18 from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
18 from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
19 from IPython.utils.generics import complete_object
19 from IPython.utils.generics import complete_object
20 from IPython.utils import py3compat
20 from IPython.utils import py3compat
21 from IPython.utils.py3compat import string_types, unicode_type
21 from IPython.utils.py3compat import string_types, unicode_type
22 from IPython.testing import decorators as dec
22 from IPython.testing import decorators as dec
23
23
24 #-----------------------------------------------------------------------------
24 #-----------------------------------------------------------------------------
25 # Test functions
25 # Test functions
26 #-----------------------------------------------------------------------------
26 #-----------------------------------------------------------------------------
27
27
28 @contextmanager
28 @contextmanager
29 def greedy_completion():
29 def greedy_completion():
30 ip = get_ipython()
30 ip = get_ipython()
31 greedy_original = ip.Completer.greedy
31 greedy_original = ip.Completer.greedy
32 try:
32 try:
33 ip.Completer.greedy = True
33 ip.Completer.greedy = True
34 yield
34 yield
35 finally:
35 finally:
36 ip.Completer.greedy = greedy_original
36 ip.Completer.greedy = greedy_original
37
37
38 def test_protect_filename():
38 def test_protect_filename():
39 pairs = [ ('abc','abc'),
39 pairs = [ ('abc','abc'),
40 (' abc',r'\ abc'),
40 (' abc',r'\ abc'),
41 ('a bc',r'a\ bc'),
41 ('a bc',r'a\ bc'),
42 ('a bc',r'a\ \ bc'),
42 ('a bc',r'a\ \ bc'),
43 (' bc',r'\ \ bc'),
43 (' bc',r'\ \ bc'),
44 ]
44 ]
45 # On posix, we also protect parens and other special characters
45 # On posix, we also protect parens and other special characters
46 if sys.platform != 'win32':
46 if sys.platform != 'win32':
47 pairs.extend( [('a(bc',r'a\(bc'),
47 pairs.extend( [('a(bc',r'a\(bc'),
48 ('a)bc',r'a\)bc'),
48 ('a)bc',r'a\)bc'),
49 ('a( )bc',r'a\(\ \)bc'),
49 ('a( )bc',r'a\(\ \)bc'),
50 ('a[1]bc', r'a\[1\]bc'),
50 ('a[1]bc', r'a\[1\]bc'),
51 ('a{1}bc', r'a\{1\}bc'),
51 ('a{1}bc', r'a\{1\}bc'),
52 ('a#bc', r'a\#bc'),
52 ('a#bc', r'a\#bc'),
53 ('a?bc', r'a\?bc'),
53 ('a?bc', r'a\?bc'),
54 ('a=bc', r'a\=bc'),
54 ('a=bc', r'a\=bc'),
55 ('a\\bc', r'a\\bc'),
55 ('a\\bc', r'a\\bc'),
56 ('a|bc', r'a\|bc'),
56 ('a|bc', r'a\|bc'),
57 ('a;bc', r'a\;bc'),
57 ('a;bc', r'a\;bc'),
58 ('a:bc', r'a\:bc'),
58 ('a:bc', r'a\:bc'),
59 ("a'bc", r"a\'bc"),
59 ("a'bc", r"a\'bc"),
60 ('a*bc', r'a\*bc'),
60 ('a*bc', r'a\*bc'),
61 ('a"bc', r'a\"bc'),
61 ('a"bc', r'a\"bc'),
62 ('a^bc', r'a\^bc'),
62 ('a^bc', r'a\^bc'),
63 ('a&bc', r'a\&bc'),
63 ('a&bc', r'a\&bc'),
64 ] )
64 ] )
65 # run the actual tests
65 # run the actual tests
66 for s1, s2 in pairs:
66 for s1, s2 in pairs:
67 s1p = completer.protect_filename(s1)
67 s1p = completer.protect_filename(s1)
68 nt.assert_equal(s1p, s2)
68 nt.assert_equal(s1p, s2)
69
69
70
70
71 def check_line_split(splitter, test_specs):
71 def check_line_split(splitter, test_specs):
72 for part1, part2, split in test_specs:
72 for part1, part2, split in test_specs:
73 cursor_pos = len(part1)
73 cursor_pos = len(part1)
74 line = part1+part2
74 line = part1+part2
75 out = splitter.split_line(line, cursor_pos)
75 out = splitter.split_line(line, cursor_pos)
76 nt.assert_equal(out, split)
76 nt.assert_equal(out, split)
77
77
78
78
79 def test_line_split():
79 def test_line_split():
80 """Basic line splitter test with default specs."""
80 """Basic line splitter test with default specs."""
81 sp = completer.CompletionSplitter()
81 sp = completer.CompletionSplitter()
82 # The format of the test specs is: part1, part2, expected answer. Parts 1
82 # The format of the test specs is: part1, part2, expected answer. Parts 1
83 # and 2 are joined into the 'line' sent to the splitter, as if the cursor
83 # and 2 are joined into the 'line' sent to the splitter, as if the cursor
84 # was at the end of part1. So an empty part2 represents someone hitting
84 # was at the end of part1. So an empty part2 represents someone hitting
85 # tab at the end of the line, the most common case.
85 # tab at the end of the line, the most common case.
86 t = [('run some/scrip', '', 'some/scrip'),
86 t = [('run some/scrip', '', 'some/scrip'),
87 ('run scripts/er', 'ror.py foo', 'scripts/er'),
87 ('run scripts/er', 'ror.py foo', 'scripts/er'),
88 ('echo $HOM', '', 'HOM'),
88 ('echo $HOM', '', 'HOM'),
89 ('print sys.pa', '', 'sys.pa'),
89 ('print sys.pa', '', 'sys.pa'),
90 ('print(sys.pa', '', 'sys.pa'),
90 ('print(sys.pa', '', 'sys.pa'),
91 ("execfile('scripts/er", '', 'scripts/er'),
91 ("execfile('scripts/er", '', 'scripts/er'),
92 ('a[x.', '', 'x.'),
92 ('a[x.', '', 'x.'),
93 ('a[x.', 'y', 'x.'),
93 ('a[x.', 'y', 'x.'),
94 ('cd "some_file/', '', 'some_file/'),
94 ('cd "some_file/', '', 'some_file/'),
95 ]
95 ]
96 check_line_split(sp, t)
96 check_line_split(sp, t)
97 # Ensure splitting works OK with unicode by re-running the tests with
97 # Ensure splitting works OK with unicode by re-running the tests with
98 # all inputs turned into unicode
98 # all inputs turned into unicode
99 check_line_split(sp, [ map(unicode_type, p) for p in t] )
99 check_line_split(sp, [ map(unicode_type, p) for p in t] )
100
100
101
101
102 def test_custom_completion_error():
102 def test_custom_completion_error():
103 """Test that errors from custom attribute completers are silenced."""
103 """Test that errors from custom attribute completers are silenced."""
104 ip = get_ipython()
104 ip = get_ipython()
105 class A(object): pass
105 class A(object): pass
106 ip.user_ns['a'] = A()
106 ip.user_ns['a'] = A()
107
107
108 @complete_object.when_type(A)
108 @complete_object.when_type(A)
109 def complete_A(a, existing_completions):
109 def complete_A(a, existing_completions):
110 raise TypeError("this should be silenced")
110 raise TypeError("this should be silenced")
111
111
112 ip.complete("a.")
112 ip.complete("a.")
113
113
114
114
115 def test_unicode_completions():
115 def test_unicode_completions():
116 ip = get_ipython()
116 ip = get_ipython()
117 # Some strings that trigger different types of completion. Check them both
117 # Some strings that trigger different types of completion. Check them both
118 # in str and unicode forms
118 # in str and unicode forms
119 s = ['ru', '%ru', 'cd /', 'floa', 'float(x)/']
119 s = ['ru', '%ru', 'cd /', 'floa', 'float(x)/']
120 for t in s + list(map(unicode_type, s)):
120 for t in s + list(map(unicode_type, s)):
121 # We don't need to check exact completion values (they may change
121 # We don't need to check exact completion values (they may change
122 # depending on the state of the namespace, but at least no exceptions
122 # depending on the state of the namespace, but at least no exceptions
123 # should be thrown and the return value should be a pair of text, list
123 # should be thrown and the return value should be a pair of text, list
124 # values.
124 # values.
125 text, matches = ip.complete(t)
125 text, matches = ip.complete(t)
126 nt.assert_true(isinstance(text, string_types))
126 nt.assert_true(isinstance(text, string_types))
127 nt.assert_true(isinstance(matches, list))
127 nt.assert_true(isinstance(matches, list))
128
128
129 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
130 def test_latex_completions():
131 from IPython.core.latex_symbols import latex_symbols
132 import random
133 ip = get_ipython()
134 # Test some random unicode symbols
135 keys = random.sample(latex_symbols.keys(), 10)
136 for k in keys:
137 text, matches = ip.complete(k)
138 nt.assert_equal(len(matches),1)
139 nt.assert_equal(text, k)
140 nt.assert_equal(matches[0], latex_symbols[k])
141 # Test a more complex line
142 text, matches = ip.complete(u'print(\\alpha')
143 nt.assert_equals(text, u'\\alpha')
144 nt.assert_equals(matches[0], latex_symbols['\\alpha'])
145 # Test multiple matching latex symbols
146 text, matches = ip.complete(u'\\al')
147 nt.assert_in('\\alpha', matches)
148 nt.assert_in('\\aleph', matches)
149
129
150
130 class CompletionSplitterTestCase(unittest.TestCase):
151 class CompletionSplitterTestCase(unittest.TestCase):
131 def setUp(self):
152 def setUp(self):
132 self.sp = completer.CompletionSplitter()
153 self.sp = completer.CompletionSplitter()
133
154
134 def test_delim_setting(self):
155 def test_delim_setting(self):
135 self.sp.delims = ' '
156 self.sp.delims = ' '
136 nt.assert_equal(self.sp.delims, ' ')
157 nt.assert_equal(self.sp.delims, ' ')
137 nt.assert_equal(self.sp._delim_expr, '[\ ]')
158 nt.assert_equal(self.sp._delim_expr, '[\ ]')
138
159
139 def test_spaces(self):
160 def test_spaces(self):
140 """Test with only spaces as split chars."""
161 """Test with only spaces as split chars."""
141 self.sp.delims = ' '
162 self.sp.delims = ' '
142 t = [('foo', '', 'foo'),
163 t = [('foo', '', 'foo'),
143 ('run foo', '', 'foo'),
164 ('run foo', '', 'foo'),
144 ('run foo', 'bar', 'foo'),
165 ('run foo', 'bar', 'foo'),
145 ]
166 ]
146 check_line_split(self.sp, t)
167 check_line_split(self.sp, t)
147
168
148
169
149 def test_has_open_quotes1():
170 def test_has_open_quotes1():
150 for s in ["'", "'''", "'hi' '"]:
171 for s in ["'", "'''", "'hi' '"]:
151 nt.assert_equal(completer.has_open_quotes(s), "'")
172 nt.assert_equal(completer.has_open_quotes(s), "'")
152
173
153
174
154 def test_has_open_quotes2():
175 def test_has_open_quotes2():
155 for s in ['"', '"""', '"hi" "']:
176 for s in ['"', '"""', '"hi" "']:
156 nt.assert_equal(completer.has_open_quotes(s), '"')
177 nt.assert_equal(completer.has_open_quotes(s), '"')
157
178
158
179
159 def test_has_open_quotes3():
180 def test_has_open_quotes3():
160 for s in ["''", "''' '''", "'hi' 'ipython'"]:
181 for s in ["''", "''' '''", "'hi' 'ipython'"]:
161 nt.assert_false(completer.has_open_quotes(s))
182 nt.assert_false(completer.has_open_quotes(s))
162
183
163
184
164 def test_has_open_quotes4():
185 def test_has_open_quotes4():
165 for s in ['""', '""" """', '"hi" "ipython"']:
186 for s in ['""', '""" """', '"hi" "ipython"']:
166 nt.assert_false(completer.has_open_quotes(s))
187 nt.assert_false(completer.has_open_quotes(s))
167
188
168
189
169 @knownfailureif(sys.platform == 'win32', "abspath completions fail on Windows")
190 @knownfailureif(sys.platform == 'win32', "abspath completions fail on Windows")
170 def test_abspath_file_completions():
191 def test_abspath_file_completions():
171 ip = get_ipython()
192 ip = get_ipython()
172 with TemporaryDirectory() as tmpdir:
193 with TemporaryDirectory() as tmpdir:
173 prefix = os.path.join(tmpdir, 'foo')
194 prefix = os.path.join(tmpdir, 'foo')
174 suffixes = ['1', '2']
195 suffixes = ['1', '2']
175 names = [prefix+s for s in suffixes]
196 names = [prefix+s for s in suffixes]
176 for n in names:
197 for n in names:
177 open(n, 'w').close()
198 open(n, 'w').close()
178
199
179 # Check simple completion
200 # Check simple completion
180 c = ip.complete(prefix)[1]
201 c = ip.complete(prefix)[1]
181 nt.assert_equal(c, names)
202 nt.assert_equal(c, names)
182
203
183 # Now check with a function call
204 # Now check with a function call
184 cmd = 'a = f("%s' % prefix
205 cmd = 'a = f("%s' % prefix
185 c = ip.complete(prefix, cmd)[1]
206 c = ip.complete(prefix, cmd)[1]
186 comp = [prefix+s for s in suffixes]
207 comp = [prefix+s for s in suffixes]
187 nt.assert_equal(c, comp)
208 nt.assert_equal(c, comp)
188
209
189
210
190 def test_local_file_completions():
211 def test_local_file_completions():
191 ip = get_ipython()
212 ip = get_ipython()
192 with TemporaryWorkingDirectory():
213 with TemporaryWorkingDirectory():
193 prefix = './foo'
214 prefix = './foo'
194 suffixes = ['1', '2']
215 suffixes = ['1', '2']
195 names = [prefix+s for s in suffixes]
216 names = [prefix+s for s in suffixes]
196 for n in names:
217 for n in names:
197 open(n, 'w').close()
218 open(n, 'w').close()
198
219
199 # Check simple completion
220 # Check simple completion
200 c = ip.complete(prefix)[1]
221 c = ip.complete(prefix)[1]
201 nt.assert_equal(c, names)
222 nt.assert_equal(c, names)
202
223
203 # Now check with a function call
224 # Now check with a function call
204 cmd = 'a = f("%s' % prefix
225 cmd = 'a = f("%s' % prefix
205 c = ip.complete(prefix, cmd)[1]
226 c = ip.complete(prefix, cmd)[1]
206 comp = [prefix+s for s in suffixes]
227 comp = [prefix+s for s in suffixes]
207 nt.assert_equal(c, comp)
228 nt.assert_equal(c, comp)
208
229
209
230
210 def test_greedy_completions():
231 def test_greedy_completions():
211 ip = get_ipython()
232 ip = get_ipython()
212 ip.ex('a=list(range(5))')
233 ip.ex('a=list(range(5))')
213 _,c = ip.complete('.',line='a[0].')
234 _,c = ip.complete('.',line='a[0].')
214 nt.assert_false('a[0].real' in c,
235 nt.assert_false('a[0].real' in c,
215 "Shouldn't have completed on a[0]: %s"%c)
236 "Shouldn't have completed on a[0]: %s"%c)
216 with greedy_completion():
237 with greedy_completion():
217 _,c = ip.complete('.',line='a[0].')
238 _,c = ip.complete('.',line='a[0].')
218 nt.assert_true('a[0].real' in c, "Should have completed on a[0]: %s"%c)
239 nt.assert_true('a[0].real' in c, "Should have completed on a[0]: %s"%c)
219
240
220
241
221 def test_omit__names():
242 def test_omit__names():
222 # also happens to test IPCompleter as a configurable
243 # also happens to test IPCompleter as a configurable
223 ip = get_ipython()
244 ip = get_ipython()
224 ip._hidden_attr = 1
245 ip._hidden_attr = 1
225 ip._x = {}
246 ip._x = {}
226 c = ip.Completer
247 c = ip.Completer
227 ip.ex('ip=get_ipython()')
248 ip.ex('ip=get_ipython()')
228 cfg = Config()
249 cfg = Config()
229 cfg.IPCompleter.omit__names = 0
250 cfg.IPCompleter.omit__names = 0
230 c.update_config(cfg)
251 c.update_config(cfg)
231 s,matches = c.complete('ip.')
252 s,matches = c.complete('ip.')
232 nt.assert_in('ip.__str__', matches)
253 nt.assert_in('ip.__str__', matches)
233 nt.assert_in('ip._hidden_attr', matches)
254 nt.assert_in('ip._hidden_attr', matches)
234 cfg.IPCompleter.omit__names = 1
255 cfg.IPCompleter.omit__names = 1
235 c.update_config(cfg)
256 c.update_config(cfg)
236 s,matches = c.complete('ip.')
257 s,matches = c.complete('ip.')
237 nt.assert_not_in('ip.__str__', matches)
258 nt.assert_not_in('ip.__str__', matches)
238 nt.assert_in('ip._hidden_attr', matches)
259 nt.assert_in('ip._hidden_attr', matches)
239 cfg.IPCompleter.omit__names = 2
260 cfg.IPCompleter.omit__names = 2
240 c.update_config(cfg)
261 c.update_config(cfg)
241 s,matches = c.complete('ip.')
262 s,matches = c.complete('ip.')
242 nt.assert_not_in('ip.__str__', matches)
263 nt.assert_not_in('ip.__str__', matches)
243 nt.assert_not_in('ip._hidden_attr', matches)
264 nt.assert_not_in('ip._hidden_attr', matches)
244 s,matches = c.complete('ip._x.')
265 s,matches = c.complete('ip._x.')
245 nt.assert_in('ip._x.keys', matches)
266 nt.assert_in('ip._x.keys', matches)
246 del ip._hidden_attr
267 del ip._hidden_attr
247
268
248
269
249 def test_limit_to__all__False_ok():
270 def test_limit_to__all__False_ok():
250 ip = get_ipython()
271 ip = get_ipython()
251 c = ip.Completer
272 c = ip.Completer
252 ip.ex('class D: x=24')
273 ip.ex('class D: x=24')
253 ip.ex('d=D()')
274 ip.ex('d=D()')
254 cfg = Config()
275 cfg = Config()
255 cfg.IPCompleter.limit_to__all__ = False
276 cfg.IPCompleter.limit_to__all__ = False
256 c.update_config(cfg)
277 c.update_config(cfg)
257 s, matches = c.complete('d.')
278 s, matches = c.complete('d.')
258 nt.assert_in('d.x', matches)
279 nt.assert_in('d.x', matches)
259
280
260
281
261 def test_limit_to__all__True_ok():
282 def test_limit_to__all__True_ok():
262 ip = get_ipython()
283 ip = get_ipython()
263 c = ip.Completer
284 c = ip.Completer
264 ip.ex('class D: x=24')
285 ip.ex('class D: x=24')
265 ip.ex('d=D()')
286 ip.ex('d=D()')
266 ip.ex("d.__all__=['z']")
287 ip.ex("d.__all__=['z']")
267 cfg = Config()
288 cfg = Config()
268 cfg.IPCompleter.limit_to__all__ = True
289 cfg.IPCompleter.limit_to__all__ = True
269 c.update_config(cfg)
290 c.update_config(cfg)
270 s, matches = c.complete('d.')
291 s, matches = c.complete('d.')
271 nt.assert_in('d.z', matches)
292 nt.assert_in('d.z', matches)
272 nt.assert_not_in('d.x', matches)
293 nt.assert_not_in('d.x', matches)
273
294
274
295
275 def test_get__all__entries_ok():
296 def test_get__all__entries_ok():
276 class A(object):
297 class A(object):
277 __all__ = ['x', 1]
298 __all__ = ['x', 1]
278 words = completer.get__all__entries(A())
299 words = completer.get__all__entries(A())
279 nt.assert_equal(words, ['x'])
300 nt.assert_equal(words, ['x'])
280
301
281
302
282 def test_get__all__entries_no__all__ok():
303 def test_get__all__entries_no__all__ok():
283 class A(object):
304 class A(object):
284 pass
305 pass
285 words = completer.get__all__entries(A())
306 words = completer.get__all__entries(A())
286 nt.assert_equal(words, [])
307 nt.assert_equal(words, [])
287
308
288
309
289 def test_func_kw_completions():
310 def test_func_kw_completions():
290 ip = get_ipython()
311 ip = get_ipython()
291 c = ip.Completer
312 c = ip.Completer
292 ip.ex('def myfunc(a=1,b=2): return a+b')
313 ip.ex('def myfunc(a=1,b=2): return a+b')
293 s, matches = c.complete(None, 'myfunc(1,b')
314 s, matches = c.complete(None, 'myfunc(1,b')
294 nt.assert_in('b=', matches)
315 nt.assert_in('b=', matches)
295 # Simulate completing with cursor right after b (pos==10):
316 # Simulate completing with cursor right after b (pos==10):
296 s, matches = c.complete(None, 'myfunc(1,b)', 10)
317 s, matches = c.complete(None, 'myfunc(1,b)', 10)
297 nt.assert_in('b=', matches)
318 nt.assert_in('b=', matches)
298 s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
319 s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
299 nt.assert_in('b=', matches)
320 nt.assert_in('b=', matches)
300 #builtin function
321 #builtin function
301 s, matches = c.complete(None, 'min(k, k')
322 s, matches = c.complete(None, 'min(k, k')
302 nt.assert_in('key=', matches)
323 nt.assert_in('key=', matches)
303
324
304
325
305 def test_default_arguments_from_docstring():
326 def test_default_arguments_from_docstring():
306 doc = min.__doc__
327 doc = min.__doc__
307 ip = get_ipython()
328 ip = get_ipython()
308 c = ip.Completer
329 c = ip.Completer
309 kwd = c._default_arguments_from_docstring(
330 kwd = c._default_arguments_from_docstring(
310 'min(iterable[, key=func]) -> value')
331 'min(iterable[, key=func]) -> value')
311 nt.assert_equal(kwd, ['key'])
332 nt.assert_equal(kwd, ['key'])
312 #with cython type etc
333 #with cython type etc
313 kwd = c._default_arguments_from_docstring(
334 kwd = c._default_arguments_from_docstring(
314 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
335 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
315 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
336 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
316 #white spaces
337 #white spaces
317 kwd = c._default_arguments_from_docstring(
338 kwd = c._default_arguments_from_docstring(
318 '\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
339 '\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
319 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
340 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
320
341
321 def test_line_magics():
342 def test_line_magics():
322 ip = get_ipython()
343 ip = get_ipython()
323 c = ip.Completer
344 c = ip.Completer
324 s, matches = c.complete(None, 'lsmag')
345 s, matches = c.complete(None, 'lsmag')
325 nt.assert_in('%lsmagic', matches)
346 nt.assert_in('%lsmagic', matches)
326 s, matches = c.complete(None, '%lsmag')
347 s, matches = c.complete(None, '%lsmag')
327 nt.assert_in('%lsmagic', matches)
348 nt.assert_in('%lsmagic', matches)
328
349
329
350
330 def test_cell_magics():
351 def test_cell_magics():
331 from IPython.core.magic import register_cell_magic
352 from IPython.core.magic import register_cell_magic
332
353
333 @register_cell_magic
354 @register_cell_magic
334 def _foo_cellm(line, cell):
355 def _foo_cellm(line, cell):
335 pass
356 pass
336
357
337 ip = get_ipython()
358 ip = get_ipython()
338 c = ip.Completer
359 c = ip.Completer
339
360
340 s, matches = c.complete(None, '_foo_ce')
361 s, matches = c.complete(None, '_foo_ce')
341 nt.assert_in('%%_foo_cellm', matches)
362 nt.assert_in('%%_foo_cellm', matches)
342 s, matches = c.complete(None, '%%_foo_ce')
363 s, matches = c.complete(None, '%%_foo_ce')
343 nt.assert_in('%%_foo_cellm', matches)
364 nt.assert_in('%%_foo_cellm', matches)
344
365
345
366
346 def test_line_cell_magics():
367 def test_line_cell_magics():
347 from IPython.core.magic import register_line_cell_magic
368 from IPython.core.magic import register_line_cell_magic
348
369
349 @register_line_cell_magic
370 @register_line_cell_magic
350 def _bar_cellm(line, cell):
371 def _bar_cellm(line, cell):
351 pass
372 pass
352
373
353 ip = get_ipython()
374 ip = get_ipython()
354 c = ip.Completer
375 c = ip.Completer
355
376
356 # The policy here is trickier, see comments in completion code. The
377 # The policy here is trickier, see comments in completion code. The
357 # returned values depend on whether the user passes %% or not explicitly,
378 # returned values depend on whether the user passes %% or not explicitly,
358 # and this will show a difference if the same name is both a line and cell
379 # and this will show a difference if the same name is both a line and cell
359 # magic.
380 # magic.
360 s, matches = c.complete(None, '_bar_ce')
381 s, matches = c.complete(None, '_bar_ce')
361 nt.assert_in('%_bar_cellm', matches)
382 nt.assert_in('%_bar_cellm', matches)
362 nt.assert_in('%%_bar_cellm', matches)
383 nt.assert_in('%%_bar_cellm', matches)
363 s, matches = c.complete(None, '%_bar_ce')
384 s, matches = c.complete(None, '%_bar_ce')
364 nt.assert_in('%_bar_cellm', matches)
385 nt.assert_in('%_bar_cellm', matches)
365 nt.assert_in('%%_bar_cellm', matches)
386 nt.assert_in('%%_bar_cellm', matches)
366 s, matches = c.complete(None, '%%_bar_ce')
387 s, matches = c.complete(None, '%%_bar_ce')
367 nt.assert_not_in('%_bar_cellm', matches)
388 nt.assert_not_in('%_bar_cellm', matches)
368 nt.assert_in('%%_bar_cellm', matches)
389 nt.assert_in('%%_bar_cellm', matches)
369
390
370
391
371 def test_magic_completion_order():
392 def test_magic_completion_order():
372
393
373 ip = get_ipython()
394 ip = get_ipython()
374 c = ip.Completer
395 c = ip.Completer
375
396
376 # Test ordering of magics and non-magics with the same name
397 # Test ordering of magics and non-magics with the same name
377 # We want the non-magic first
398 # We want the non-magic first
378
399
379 # Before importing matplotlib, there should only be one option:
400 # Before importing matplotlib, there should only be one option:
380
401
381 text, matches = c.complete('mat')
402 text, matches = c.complete('mat')
382 nt.assert_equal(matches, ["%matplotlib"])
403 nt.assert_equal(matches, ["%matplotlib"])
383
404
384
405
385 ip.run_cell("matplotlib = 1") # introduce name into namespace
406 ip.run_cell("matplotlib = 1") # introduce name into namespace
386
407
387 # After the import, there should be two options, ordered like this:
408 # After the import, there should be two options, ordered like this:
388 text, matches = c.complete('mat')
409 text, matches = c.complete('mat')
389 nt.assert_equal(matches, ["matplotlib", "%matplotlib"])
410 nt.assert_equal(matches, ["matplotlib", "%matplotlib"])
390
411
391
412
392 ip.run_cell("timeit = 1") # define a user variable called 'timeit'
413 ip.run_cell("timeit = 1") # define a user variable called 'timeit'
393
414
394 # Order of user variable and line and cell magics with same name:
415 # Order of user variable and line and cell magics with same name:
395 text, matches = c.complete('timeit')
416 text, matches = c.complete('timeit')
396 nt.assert_equal(matches, ["timeit", "%timeit","%%timeit"])
417 nt.assert_equal(matches, ["timeit", "%timeit","%%timeit"])
397
418
398
419
399 def test_dict_key_completion_string():
420 def test_dict_key_completion_string():
400 """Test dictionary key completion for string keys"""
421 """Test dictionary key completion for string keys"""
401 ip = get_ipython()
422 ip = get_ipython()
402 complete = ip.Completer.complete
423 complete = ip.Completer.complete
403
424
404 ip.user_ns['d'] = {'abc': None}
425 ip.user_ns['d'] = {'abc': None}
405
426
406 # check completion at different stages
427 # check completion at different stages
407 _, matches = complete(line_buffer="d[")
428 _, matches = complete(line_buffer="d[")
408 nt.assert_in("'abc'", matches)
429 nt.assert_in("'abc'", matches)
409 nt.assert_not_in("'abc']", matches)
430 nt.assert_not_in("'abc']", matches)
410
431
411 _, matches = complete(line_buffer="d['")
432 _, matches = complete(line_buffer="d['")
412 nt.assert_in("abc", matches)
433 nt.assert_in("abc", matches)
413 nt.assert_not_in("abc']", matches)
434 nt.assert_not_in("abc']", matches)
414
435
415 _, matches = complete(line_buffer="d['a")
436 _, matches = complete(line_buffer="d['a")
416 nt.assert_in("abc", matches)
437 nt.assert_in("abc", matches)
417 nt.assert_not_in("abc']", matches)
438 nt.assert_not_in("abc']", matches)
418
439
419 # check use of different quoting
440 # check use of different quoting
420 _, matches = complete(line_buffer="d[\"")
441 _, matches = complete(line_buffer="d[\"")
421 nt.assert_in("abc", matches)
442 nt.assert_in("abc", matches)
422 nt.assert_not_in('abc\"]', matches)
443 nt.assert_not_in('abc\"]', matches)
423
444
424 _, matches = complete(line_buffer="d[\"a")
445 _, matches = complete(line_buffer="d[\"a")
425 nt.assert_in("abc", matches)
446 nt.assert_in("abc", matches)
426 nt.assert_not_in('abc\"]', matches)
447 nt.assert_not_in('abc\"]', matches)
427
448
428 # check sensitivity to following context
449 # check sensitivity to following context
429 _, matches = complete(line_buffer="d[]", cursor_pos=2)
450 _, matches = complete(line_buffer="d[]", cursor_pos=2)
430 nt.assert_in("'abc'", matches)
451 nt.assert_in("'abc'", matches)
431
452
432 _, matches = complete(line_buffer="d['']", cursor_pos=3)
453 _, matches = complete(line_buffer="d['']", cursor_pos=3)
433 nt.assert_in("abc", matches)
454 nt.assert_in("abc", matches)
434 nt.assert_not_in("abc'", matches)
455 nt.assert_not_in("abc'", matches)
435 nt.assert_not_in("abc']", matches)
456 nt.assert_not_in("abc']", matches)
436
457
437 # check multiple solutions are correctly returned and that noise is not
458 # check multiple solutions are correctly returned and that noise is not
438 ip.user_ns['d'] = {'abc': None, 'abd': None, 'bad': None, object(): None,
459 ip.user_ns['d'] = {'abc': None, 'abd': None, 'bad': None, object(): None,
439 5: None}
460 5: None}
440
461
441 _, matches = complete(line_buffer="d['a")
462 _, matches = complete(line_buffer="d['a")
442 nt.assert_in("abc", matches)
463 nt.assert_in("abc", matches)
443 nt.assert_in("abd", matches)
464 nt.assert_in("abd", matches)
444 nt.assert_not_in("bad", matches)
465 nt.assert_not_in("bad", matches)
445 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
466 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
446
467
447 # check escaping and whitespace
468 # check escaping and whitespace
448 ip.user_ns['d'] = {'a\nb': None, 'a\'b': None, 'a"b': None, 'a word': None}
469 ip.user_ns['d'] = {'a\nb': None, 'a\'b': None, 'a"b': None, 'a word': None}
449 _, matches = complete(line_buffer="d['a")
470 _, matches = complete(line_buffer="d['a")
450 nt.assert_in("a\\nb", matches)
471 nt.assert_in("a\\nb", matches)
451 nt.assert_in("a\\'b", matches)
472 nt.assert_in("a\\'b", matches)
452 nt.assert_in("a\"b", matches)
473 nt.assert_in("a\"b", matches)
453 nt.assert_in("a word", matches)
474 nt.assert_in("a word", matches)
454 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
475 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
455
476
456 # - can complete on non-initial word of the string
477 # - can complete on non-initial word of the string
457 _, matches = complete(line_buffer="d['a w")
478 _, matches = complete(line_buffer="d['a w")
458 nt.assert_in("word", matches)
479 nt.assert_in("word", matches)
459
480
460 # - understands quote escaping
481 # - understands quote escaping
461 _, matches = complete(line_buffer="d['a\\'")
482 _, matches = complete(line_buffer="d['a\\'")
462 nt.assert_in("b", matches)
483 nt.assert_in("b", matches)
463
484
464 # - default quoting should work like repr
485 # - default quoting should work like repr
465 _, matches = complete(line_buffer="d[")
486 _, matches = complete(line_buffer="d[")
466 nt.assert_in("\"a'b\"", matches)
487 nt.assert_in("\"a'b\"", matches)
467
488
468 # - when opening quote with ", possible to match with unescaped apostrophe
489 # - when opening quote with ", possible to match with unescaped apostrophe
469 _, matches = complete(line_buffer="d[\"a'")
490 _, matches = complete(line_buffer="d[\"a'")
470 nt.assert_in("b", matches)
491 nt.assert_in("b", matches)
471
492
472
493
def test_dict_key_completion_contexts():
    """Test expression contexts in which dict key completion occurs"""
    ip = get_ipython()
    complete = ip.Completer.complete
    d = {'abc': None}
    ip.user_ns['d'] = d

    class C:
        data = d
    ip.user_ns['C'] = C
    ip.user_ns['get'] = lambda: d

    def assert_no_completion(**kwargs):
        # No form of the key (bare, trailing-quoted, or fully quoted,
        # with or without the closing bracket) may be offered.
        _, matches = complete(**kwargs)
        nt.assert_not_in('abc', matches)
        nt.assert_not_in('abc\'', matches)
        nt.assert_not_in('abc\']', matches)
        nt.assert_not_in('\'abc\'', matches)
        nt.assert_not_in('\'abc\']', matches)

    def assert_completion(**kwargs):
        _, matches = complete(**kwargs)
        nt.assert_in("'abc'", matches)
        nt.assert_not_in("'abc']", matches)

    # no completion after string closed, even if reopened
    assert_no_completion(line_buffer="d['a'")
    assert_no_completion(line_buffer="d[\"a\"")
    assert_no_completion(line_buffer="d['a' + ")
    assert_no_completion(line_buffer="d['a' + '")

    # completion in non-trivial expressions
    assert_completion(line_buffer="+ d[")
    assert_completion(line_buffer="(d[")
    assert_completion(line_buffer="C.data[")

    # greedy flag: matches include the whole access expression.
    # (This helper was previously a second ``assert_completion`` definition
    # that shadowed the one above with different semantics; renamed for
    # clarity.)
    def assert_greedy_completion(**kwargs):
        _, matches = complete(**kwargs)
        nt.assert_in("get()['abc']", matches)

    assert_no_completion(line_buffer="get()[")
    with greedy_completion():
        assert_greedy_completion(line_buffer="get()[")
        assert_greedy_completion(line_buffer="get()['")
        assert_greedy_completion(line_buffer="get()['a")
        assert_greedy_completion(line_buffer="get()['ab")
        assert_greedy_completion(line_buffer="get()['abc")
521
542
522
543
523
544
@dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
def test_dict_key_completion_bytes():
    """Test handling of bytes in dict key completion"""
    shell = get_ipython()
    complete = shell.Completer.complete

    # One str key and one bytes key; both should be completable.
    shell.user_ns['d'] = {'abc': None, b'abd': None}

    _, completions = complete(line_buffer="d[")
    nt.assert_in("'abc'", completions)
    nt.assert_in("b'abd'", completions)

    if False:  # not currently implemented
        # A ``b``/``B`` prefix should narrow the matches to bytes keys only,
        # and a plain quote should exclude them.
        _, completions = complete(line_buffer="d[b")
        nt.assert_in("b'abd'", completions)
        nt.assert_not_in("b'abc'", completions)

        _, completions = complete(line_buffer="d[b'")
        nt.assert_in("abd", completions)
        nt.assert_not_in("abc", completions)

        _, completions = complete(line_buffer="d[B'")
        nt.assert_in("abd", completions)
        nt.assert_not_in("abc", completions)

        _, completions = complete(line_buffer="d['")
        nt.assert_in("abc", completions)
        nt.assert_not_in("abd", completions)
552
573
553
574
@dec.onlyif(sys.version_info[0] < 3, 'This test only applies in Py<3')
def test_dict_key_completion_unicode_py2():
    """Test handling of unicode in dict key completion"""
    shell = get_ipython()
    complete = shell.Completer.complete

    shell.user_ns['d'] = {u'abc': None,
                          u'a\u05d0b': None}

    def matches_for(buf):
        # Only the match list matters for these checks.
        _, results = complete(line_buffer=buf)
        return results

    results = matches_for("d[")
    nt.assert_in("u'abc'", results)
    nt.assert_in("u'a\\u05d0b'", results)

    # A plain quote restricts matches to ASCII-representable keys.
    results = matches_for("d['a")
    nt.assert_in("abc", results)
    nt.assert_not_in("a\\u05d0b", results)

    # A ``u``/``U`` prefix admits the unicode key as well.
    results = matches_for("d[u'a")
    nt.assert_in("abc", results)
    nt.assert_in("a\\u05d0b", results)

    results = matches_for("d[U'a")
    nt.assert_in("abc", results)
    nt.assert_in("a\\u05d0b", results)

    # query using escape
    results = matches_for(u"d[u'a\\u05d0")
    nt.assert_in("u05d0b", results)  # tokenized after \\

    # query using character
    results = matches_for(u"d[u'a\u05d0")
    nt.assert_in(u"a\u05d0b", results)

    with greedy_completion():
        # In greedy mode the whole access expression is returned.
        results = matches_for("d[")
        nt.assert_in("d[u'abc']", results)
        nt.assert_in("d[u'a\\u05d0b']", results)

        results = matches_for("d['a")
        nt.assert_in("d['abc']", results)
        nt.assert_not_in("d[u'a\\u05d0b']", results)

        results = matches_for("d[u'a")
        nt.assert_in("d[u'abc']", results)
        nt.assert_in("d[u'a\\u05d0b']", results)

        results = matches_for("d[U'a")
        nt.assert_in("d[U'abc']", results)
        nt.assert_in("d[U'a\\u05d0b']", results)

        # query using escape
        results = matches_for(u"d[u'a\\u05d0")
        nt.assert_in("d[u'a\\u05d0b']", results)  # tokenized after \\

        # query using character
        results = matches_for(u"d[u'a\u05d0")
        nt.assert_in(u"d[u'a\u05d0b']", results)
611
632
612
633
@dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
def test_dict_key_completion_unicode_py3():
    """Test handling of unicode in dict key completion"""
    shell = get_ipython()
    complete = shell.Completer.complete

    shell.user_ns['d'] = {u'a\u05d0': None}

    # query using escape
    _, results = complete(line_buffer="d['a\\u05d0")
    nt.assert_in("u05d0", results)  # tokenized after \\

    # query using character
    _, results = complete(line_buffer="d['a\u05d0")
    nt.assert_in(u"a\u05d0", results)

    with greedy_completion():
        # In greedy mode the whole access expression is returned.

        # query using escape
        _, results = complete(line_buffer="d['a\\u05d0")
        nt.assert_in("d['a\\u05d0']", results)  # tokenized after \\

        # query using character
        _, results = complete(line_buffer="d['a\u05d0")
        nt.assert_in(u"d['a\u05d0']", results)
637
658
638
659
639
660
@dec.skip_without('numpy')
def test_struct_array_key_completion():
    """Test dict key completion applies to numpy struct arrays"""
    import numpy
    shell = get_ipython()
    complete = shell.Completer.complete
    # A structured array exposes its field names much like dict keys.
    shell.user_ns['d'] = numpy.array([], dtype=[('hello', 'f'), ('world', 'f')])
    _, field_matches = complete(line_buffer="d['")
    for field in ("hello", "world"):
        nt.assert_in(field, field_matches)
650
671
651
672
@dec.skip_without('pandas')
def test_dataframe_key_completion():
    """Test dict key completion applies to pandas DataFrames"""
    import pandas
    shell = get_ipython()
    complete = shell.Completer.complete
    # DataFrame column labels should be offered like dict keys.
    shell.user_ns['d'] = pandas.DataFrame({'hello': [1], 'world': [2]})
    _, column_matches = complete(line_buffer="d['")
    for column in ("hello", "world"):
        nt.assert_in(column, column_matches)
662
683
663
684
def test_dict_key_completion_invalids():
    """Smoke test cases dict key completion can't handle"""
    shell = get_ipython()
    complete = shell.Completer.complete

    # Objects that cannot yield usable keys, plus one valid dict.
    shell.user_ns['no_getitem'] = None
    shell.user_ns['no_keys'] = []
    shell.user_ns['cant_call_keys'] = dict
    shell.user_ns['empty'] = {}
    shell.user_ns['d'] = {'abc': 5}

    # None of these may raise: the completer must degrade gracefully.
    for buf in ("no_getitem['",
                "no_keys['",
                "cant_call_keys['",
                "empty['",
                "name_error['",
                "d['\\"):  # last entry is an incomplete escape
        complete(line_buffer=buf)
@@ -1,22 +1,27 b''
// IPython mode is just a slightly altered Python Mode with `?` being an extra
// single operator. Here we define `ipython` mode in the require `python`
// callback to auto-load python mode, which is most likely not the best thing
// to do, but at least the simple one for now.

CodeMirror.requireMode('python', function(){
    "use strict";

    CodeMirror.defineMode("ipython", function(conf, parserConf) {
        // Copy the caller's config so overrides don't mutate the object
        // we were handed.
        var pythonConf = {};
        for (var prop in parserConf) {
            if (parserConf.hasOwnProperty(prop)) {
                pythonConf[prop] = parserConf[prop];
            }
        }
        pythonConf.name = 'python';
        // Treat `?` as a single-character operator (IPython help syntax).
        pythonConf.singleOperators = new RegExp("^[\\+\\-\\*/%&|\\^~<>!\\?]");
        if (pythonConf.version === 3) {
            // Python 3 identifiers may contain non-ASCII letters.
            pythonConf.identifiers = new RegExp("^[_A-Za-z\u00A1-\uFFFF][_A-Za-z0-9\u00A1-\uFFFF]*");
        } else if (pythonConf.version === 2) {
            pythonConf.identifiers = new RegExp("^[_A-Za-z][_A-Za-z0-9]*");
        }
        return CodeMirror.getMode(conf, pythonConf);
    }, 'python');

    CodeMirror.defineMIME("text/x-ipython", "ipython");
});  // was missing the terminating semicolon (ASI hazard)
General Comments 0
You need to be logged in to leave comments. Login now